Merge branch 'devel' into feature/iam_policy_present_state_includes_policy_changes
Conflicts: cloud/amazon/iam_policy.py
commit 2ad0be9425
79 changed files with 2238 additions and 1172 deletions
VERSION
@@ -1 +1 @@
2.0.0-0.4.beta2
2.0.0-0.5.beta3
@@ -66,7 +66,7 @@ options:
default: paravirtual
choices: ["paravirtual", "hvm"]

author: Lorin Hochstein
author: "Ansible Core Team (deprecated)"
'''

EXAMPLES = '''

@@ -225,7 +225,7 @@ options:
description:
- whether instance is using optimized EBS volumes, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html)
required: false
default: false
default: 'false'
exact_count:
version_added: "1.5"
description:

@@ -247,6 +253,12 @@ options:
required: false
default: null
aliases: ['network_interface']
spot_launch_group:
version_added: "2.1"
description:
- Launch group for spot request, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/how-spot-instances-work.html#spot-launch-group)
required: false
default: null

author:
- "Tim Gerla (@tgerla)"

@@ -303,6 +309,22 @@ EXAMPLES = '''
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes

# Single instance with ssd gp2 root volume
- ec2:
key_name: mykey
group: webserver
instance_type: c3.medium
image: ami-123456
wait: yes
wait_timeout: 500
volumes:
- device_name: /dev/xvda
volume_type: gp2
volume_size: 8
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
exact_count: 1

# Multiple groups example
- ec2:
key_name: mykey

@@ -358,6 +380,7 @@ EXAMPLES = '''
wait: yes
vpc_subnet_id: subnet-29e63245
assign_public_ip: yes
spot_launch_group: report_generators

# Examples using pre-existing network interfaces
- ec2:

@@ -858,6 +881,7 @@ def create_instances(module, ec2, vpc, override_count=None):
source_dest_check = module.boolean(module.params.get('source_dest_check'))
termination_protection = module.boolean(module.params.get('termination_protection'))
network_interfaces = module.params.get('network_interfaces')
spot_launch_group = module.params.get('spot_launch_group')

# group_id and group_name are exclusive of each other
if group_id and group_name:

@@ -881,6 +905,9 @@ def create_instances(module, ec2, vpc, override_count=None):
grp_details = ec2.get_all_security_groups()
if isinstance(group_name, basestring):
group_name = [group_name]
unmatched = set(group_name).difference(str(grp.name) for grp in grp_details)
if len(unmatched) > 0:
module.fail_json(msg="The following group names are not valid: %s" % ', '.join(unmatched))
group_id = [ str(grp.id) for grp in grp_details if str(grp.name) in group_name ]
# Now we try to lookup the group id testing if group exists.
elif group_id:

@@ -1040,6 +1067,9 @@ def create_instances(module, ec2, vpc, override_count=None):
module.fail_json(
msg="placement_group parameter requires Boto version 2.3.0 or higher.")

if spot_launch_group and isinstance(spot_launch_group, basestring):
params['launch_group'] = spot_launch_group

params.update(dict(
count = count_remaining,
type = spot_type,

@@ -1308,6 +1338,7 @@ def main():
instance_type = dict(aliases=['type']),
spot_price = dict(),
spot_type = dict(default='one-time', choices=["one-time", "persistent"]),
spot_launch_group = dict(),
image = dict(),
kernel = dict(),
count = dict(type='int', default='1'),
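The new spot_launch_group option is handed straight to boto as the launch_group argument of a spot request, which asks EC2 to launch and terminate the grouped instances together. A minimal sketch of the underlying call; the region, bid price and AMI id below are illustrative placeholders, not values from this commit:

    import boto.ec2

    # Assumes AWS credentials are available in the environment.
    conn = boto.ec2.connect_to_region('us-east-1')

    # launch_group is the field the module now fills in from spot_launch_group.
    requests = conn.request_spot_instances(
        price='0.05',                     # placeholder bid
        image_id='ami-123456',            # placeholder AMI
        count=2,
        type='one-time',
        launch_group='report_generators'  # instances in this group live and die together
    )
    print([r.id for r in requests])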
@@ -81,7 +81,12 @@ options:
required: false
default: null
version_added: "2.0"

launch_permissions:
description:
- Users and groups that should be able to launch the ami. Expects dictionary with a key of user_ids and/or group_names. user_ids should be a list of account ids. group_name should be a list of groups, "all" is the only acceptable value currently.
required: false
default: null
version_added: "2.0"
author: "Evan Duffield (@scicoin-project) <eduffield@iacquire.com>"
extends_documentation_fragment:
- aws

@@ -163,6 +168,25 @@ EXAMPLES = '''
delete_snapshot: False
state: absent

# Update AMI Launch Permissions, making it public
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: "{{ instance.image_id }}"
state: present
launch_permissions:
group_names: ['all']

# Allow AMI to be launched by another account
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: "{{ instance.image_id }}"
state: present
launch_permissions:
user_ids: ['123456789012']
'''

import sys

@@ -193,6 +217,7 @@ def create_image(module, ec2):
no_reboot = module.params.get('no_reboot')
device_mapping = module.params.get('device_mapping')
tags = module.params.get('tags')
launch_permissions = module.params.get('launch_permissions')

try:
params = {'instance_id': instance_id,

@@ -253,6 +278,12 @@ def create_image(module, ec2):
ec2.create_tags(image_id, tags)
except boto.exception.EC2ResponseError, e:
module.fail_json(msg = "Image tagging failed => %s: %s" % (e.error_code, e.error_message))
if launch_permissions:
try:
img = ec2.get_image(image_id)
img.set_launch_permissions(**launch_permissions)
except boto.exception.BotoServerError, e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message), image_id=image_id)

module.exit_json(msg="AMI creation operation complete", image_id=image_id, state=img.state, changed=True)

@@ -293,6 +324,36 @@ def deregister_image(module, ec2):
sys.exit(0)

def update_image(module, ec2):
"""
Updates AMI
"""

image_id = module.params.get('image_id')
launch_permissions = module.params.get('launch_permissions')
if 'user_ids' in launch_permissions:
launch_permissions['user_ids'] = [str(user_id) for user_id in launch_permissions['user_ids']]

img = ec2.get_image(image_id)
if img == None:
module.fail_json(msg = "Image %s does not exist" % image_id, changed=False)

try:
set_permissions = img.get_launch_permissions()
if set_permissions != launch_permissions:
if ('user_ids' in launch_permissions and launch_permissions['user_ids']) or ('group_names' in launch_permissions and launch_permissions['group_names']):
res = img.set_launch_permissions(**launch_permissions)
elif ('user_ids' in set_permissions and set_permissions['user_ids']) or ('group_names' in set_permissions and set_permissions['group_names']):
res = img.remove_launch_permissions(**set_permissions)
else:
module.exit_json(msg="AMI not updated", launch_permissions=set_permissions, changed=False)
module.exit_json(msg="AMI launch permissions updated", launch_permissions=launch_permissions, set_perms=set_permissions, changed=True)
else:
module.exit_json(msg="AMI not updated", launch_permissions=set_permissions, changed=False)

except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))

def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(

@@ -306,7 +367,8 @@ def main():
no_reboot = dict(default=False, type="bool"),
state = dict(default='present'),
device_mapping = dict(type='list'),
tags = dict(type='dict')
tags = dict(type='dict'),
launch_permissions = dict(type='dict')
)
)
module = AnsibleModule(argument_spec=argument_spec)

@@ -326,6 +388,10 @@ def main():
deregister_image(module, ec2)

elif module.params.get('state') == 'present':
if module.params.get('image_id') and module.params.get('launch_permissions'):
# Update image's launch permissions
update_image(module, ec2)

# Changed is always set to true when provisioning new AMI
if not module.params.get('instance_id'):
module.fail_json(msg='instance_id parameter is required for new image')
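For reference, the launch-permission round trip that update_image performs maps onto three boto Image calls. A condensed sketch, with an assumed region and a placeholder AMI id:

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')
    img = conn.get_image('ami-123456')                # placeholder AMI id

    current = img.get_launch_permissions()            # e.g. {'user_ids': ['123456789012']}
    desired = {'group_names': ['all']}                # make the AMI public

    if current != desired:
        if desired.get('user_ids') or desired.get('group_names'):
            img.set_launch_permissions(**desired)     # grant the requested permissions
        elif current.get('user_ids') or current.get('group_names'):
            img.remove_launch_permissions(**current)  # an empty spec clears existing grants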
@@ -46,15 +46,15 @@ options:
required: true
min_size:
description:
- Minimum number of instances in group
- Minimum number of instances in group, if unspecified then the current group value will be used.
required: false
max_size:
description:
- Maximum number of instances in group
- Maximum number of instances in group, if unspecified then the current group value will be used.
required: false
desired_capacity:
description:
- Desired number of instances in group
- Desired number of instances in group, if unspecified then the current group value will be used.
required: false
replace_all_instances:
description:

@@ -152,9 +152,9 @@ EXAMPLES = '''

# Rolling ASG Updates

Below is an example of how to assign a new launch config to an ASG and terminate old instances.

All instances in "myasg" that do not have the launch configuration named "my_new_lc" will be terminated in
a rolling fashion with instances using the current launch configuration, "my_new_lc".

This could also be considered a rolling deploy of a pre-baked AMI.

@@ -281,7 +281,6 @@ def get_properties(autoscaling_group):
if getattr(autoscaling_group, "tags", None):
properties['tags'] = dict((t.key, t.value) for t in autoscaling_group.tags)

return properties

def elb_dreg(asg_connection, module, group_name, instance_id):

@@ -298,7 +297,6 @@ def elb_dreg(asg_connection, module, group_name, instance_id):
else:
return

exists = True
for lb in as_group.load_balancers:
elb_connection.deregister_instances(lb, instance_id)
log.debug("De-registering {0} from ELB {1}".format(instance_id, lb))

@@ -315,10 +313,8 @@ def elb_dreg(asg_connection, module, group_name, instance_id):
time.sleep(10)

if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for instance to deregister. {0}".format(time.asctime()))

def elb_healthy(asg_connection, elb_connection, module, group_name):

@@ -337,7 +333,7 @@ def elb_healthy(asg_connection, elb_connection, module, group_name):
# but has not yet show up in the ELB
try:
lb_instances = elb_connection.describe_instance_health(lb, instances=instances)
except boto.exception.InvalidInstance, e:
except boto.exception.InvalidInstance:
pass
for i in lb_instances:
if i.state == "InService":

@@ -346,7 +342,6 @@ def elb_healthy(asg_connection, elb_connection, module, group_name):
return len(healthy_instances)

def wait_for_elb(asg_connection, module, group_name):
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
wait_timeout = module.params.get('wait_timeout')

@@ -370,7 +365,7 @@ def wait_for_elb(asg_connection, module, group_name):
log.debug("ELB thinks {0} instances are healthy.".format(healthy_instances))
time.sleep(10)
if wait_timeout <= time.time():
# waiting took too long
module.fail_json(msg = "Waited too long for ELB instances to be healthy. %s" % time.asctime())
log.debug("Waiting complete. ELB thinks {0} instances are healthy.".format(healthy_instances))

@@ -396,7 +391,7 @@ def create_autoscaling_group(connection, module):
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
try:
ec2_connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
elif vpc_zone_identifier:
vpc_zone_identifier = ','.join(vpc_zone_identifier)

@@ -433,7 +428,7 @@ def create_autoscaling_group(connection, module):

try:
connection.create_auto_scaling_group(ag)
if wait_for_instances == True:
if wait_for_instances:
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
wait_for_elb(connection, module, group_name)
as_group = connection.get_all_groups(names=[group_name])[0]

@@ -475,7 +470,7 @@ def create_autoscaling_group(connection, module):
dead_tags = []
for tag in as_group.tags:
have_tags[tag.key] = [tag.value, tag.propagate_at_launch]
if not tag.key in want_tags:
if tag.key not in want_tags:
changed = True
dead_tags.append(tag)

@@ -492,14 +487,13 @@ def create_autoscaling_group(connection, module):
changed = True
as_group.load_balancers = module.params.get('load_balancers')

if changed:
try:
as_group.update()
except BotoServerError, e:
module.fail_json(msg=str(e))

if wait_for_instances == True:
if wait_for_instances:
wait_for_new_inst(module, connection, group_name, wait_timeout, desired_capacity, 'viable_instances')
wait_for_elb(connection, module, group_name)
try:

@@ -525,7 +519,7 @@ def delete_autoscaling_group(connection, module):
if tmp_groups:
tmp_group = tmp_groups[0]
if not tmp_group.instances:
instances = False
time.sleep(10)

group.delete()

@@ -580,15 +574,22 @@ def replace(connection, module):
changed = True
return(changed, props)

# we don't want to spin up extra instances if not necessary
if num_new_inst_needed < batch_size:
log.debug("Overriding batch size to {0}".format(num_new_inst_needed))
batch_size = num_new_inst_needed

if not old_instances:
changed = False
return(changed, props)

#check if min_size/max_size/desired capacity have been specified and if not use ASG values
if min_size is None:
min_size = as_group.min_size
if max_size is None:
max_size = as_group.max_size
if desired_capacity is None:
desired_capacity = as_group.desired_capacity
# set temporary settings and wait for them to be reached
# This should get overriden if the number of instances left is less than the batch size.

@@ -630,7 +631,7 @@ def get_instances_by_lc(props, lc_check, initial_instances):
new_instances.append(i)
else:
old_instances.append(i)

else:
log.debug("Comparing initial instances with current: {0}".format(initial_instances))
for i in props['instances']:

@@ -652,10 +653,10 @@ def list_purgeable_instances(props, lc_check, replace_instances, initial_instanc
# and they have a non-current launch config
if lc_check:
for i in instances:
if props['instance_facts'][i]['launch_config_name'] != props['launch_config_name']:
instances_to_terminate.append(i)
else:
for i in instances:
if i in initial_instances:
instances_to_terminate.append(i)
return instances_to_terminate

@@ -669,7 +670,7 @@ def terminate_batch(connection, module, replace_instances, initial_instances, le
lc_check = module.params.get('lc_check')
decrement_capacity = False
break_loop = False

as_group = connection.get_all_groups(names=[group_name])[0]
props = get_properties(as_group)
desired_size = as_group.min_size

@@ -713,7 +714,7 @@ def terminate_batch(connection, module, replace_instances, initial_instances, le
elb_dreg(connection, module, group_name, instance_id)
log.debug("terminating instance: {0}".format(instance_id))
connection.terminate_instance(instance_id, decrement_capacity=decrement_capacity)

# we wait to make sure the machines we marked as Unhealthy are
# no longer in the list

@@ -749,7 +750,7 @@ def wait_for_term_inst(connection, module, term_instances):
# waiting took too long
module.fail_json(msg = "Waited too long for old instances to terminate. %s" % time.asctime())

def wait_for_new_inst(module, connection, group_name, wait_timeout, desired_size, prop):

# make sure we have the latest stats after that last loop.

@@ -795,9 +796,9 @@ def main():
termination_policies=dict(type='list', default='Default')
),
)

module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive = [['replace_all_instances', 'replace_instances']]
)

@@ -819,13 +820,13 @@ def main():
if state == 'present':
create_changed, asg_properties=create_autoscaling_group(connection, module)
elif state == 'absent':
changed = delete_autoscaling_group(connection, module)
module.exit_json( changed = changed )
if replace_all_instances or replace_instances:
replace_changed, asg_properties=replace(connection, module)
if create_changed or replace_changed:
changed = True
module.exit_json( changed = changed, **asg_properties )

main()
if __name__ == '__main__':
main()
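The replace path above now falls back to the ASG's current values when min_size, max_size or desired_capacity are omitted from the task. A small sketch of that resolution against a live group; the region and group name are assumptions for illustration:

    import boto.ec2.autoscale

    conn = boto.ec2.autoscale.connect_to_region('us-east-1')
    as_group = conn.get_all_groups(names=['myasg'])[0]

    min_size, max_size, desired_capacity = None, None, None  # i.e. not set in the task

    # Unspecified values inherit whatever the ASG is currently configured with.
    if min_size is None:
        min_size = as_group.min_size
    if max_size is None:
        max_size = as_group.max_size
    if desired_capacity is None:
        desired_capacity = as_group.desired_capacity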
@@ -61,7 +61,6 @@ options:
extends_documentation_fragment:
- aws
- ec2
author: "Lorin Hochstein (@lorin) <lorin@nimbisservices.com>"
author: "Rick Mendes (@rickmendes) <rmendes@illumina.com>"
notes:
- This module will return C(public_ip) on success, which will contain the
@@ -50,10 +50,10 @@ options:
choices: [ "yes", "no" ]
wait:
description:
- Wait for instance registration or deregistration to complete successfully before returning.
required: false
default: yes
choices: [ "yes", "no" ]
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.

@@ -87,7 +87,7 @@ roles:
- myrole
post_tasks:
- name: Instance Register
local_action:
module: ec2_elb
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"

@@ -256,12 +256,23 @@ class ElbManager:
ec2_elbs = self._get_auto_scaling_group_lbs()

try:
elb = connect_to_aws(boto.ec2.elb, self.region,
**self.aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
elb = connect_to_aws(boto.ec2.elb, self.region, **self.aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
self.module.fail_json(msg=str(e))

elbs = elb.get_all_load_balancers()
elbs = []
marker = None
while True:
try:
newelbs = elb.get_all_load_balancers(marker=marker)
marker = newelbs.next_marker
elbs.extend(newelbs)
if not marker:
break
except TypeError:
# Older version of boto do not allow for params
elbs = elb.get_all_load_balancers()
break

if ec2_elbs:
lbs = sorted(lb for lb in elbs if lb.name in ec2_elbs)

@@ -279,7 +290,7 @@ class ElbManager:

try:
asg = connect_to_aws(boto.ec2.autoscale, self.region, **self.aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
self.module.fail_json(msg=str(e))

asg_instances = asg.get_all_autoscaling_instances([self.instance_id])

@@ -302,9 +313,8 @@ class ElbManager:
def _get_instance(self):
"""Returns a boto.ec2.InstanceObject for self.instance_id"""
try:
ec2 = connect_to_aws(boto.ec2, self.region,
**self.aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params)
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
self.module.fail_json(msg=str(e))
return ec2.get_only_instances(instance_ids=[self.instance_id])[0]

@@ -330,7 +340,7 @@ def main():

region, ec2_url, aws_connect_params = get_aws_connection_info(module)

if not region:
module.fail_json(msg="Region must be specified as a parameter, in EC2_REGION or AWS_REGION environment variables or in boto configuration file")

ec2_elbs = module.params['ec2_elbs']

@@ -342,8 +352,7 @@ def main():
module.fail_json(msg="ELBs are required for registration")

instance_id = module.params['instance_id']
elb_man = ElbManager(module, instance_id, ec2_elbs,
region=region, **aws_connect_params)
elb_man = ElbManager(module, instance_id, ec2_elbs, region=region, **aws_connect_params)

if ec2_elbs is not None:
for elb in ec2_elbs:

@@ -365,4 +374,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

main()
if __name__ == '__main__':
main()
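The load-balancer lookup above now pages through results with a marker and falls back gracefully on boto releases that predate pagination. The same pattern in isolation, with an assumed region:

    import boto.ec2.elb

    conn = boto.ec2.elb.connect_to_region('us-east-1')

    elbs = []
    marker = None
    while True:
        try:
            page = conn.get_all_load_balancers(marker=marker)  # newer boto paginates
            marker = page.next_marker
            elbs.extend(page)
            if not marker:
                break
        except TypeError:
            # Older boto releases take no marker argument and return everything at once.
            elbs = conn.get_all_load_balancers()
            break
    print([lb.name for lb in elbs])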
@@ -107,8 +107,14 @@ options:
description:
- Wait a specified timeout allowing connections to drain before terminating an instance
required: false
default: "None"
aliases: []
version_added: "1.8"
idle_timeout:
description:
- ELB connections from clients and to servers are timed out after this amount of time
required: false
version_added: "2.0"
cross_az_load_balancing:
description:
- Distribute load across all configured Availability Zones

@@ -243,13 +249,14 @@ EXAMPLES = """
load_balancer_port: 80
instance_port: 80

# Create an ELB with connection draining and cross availability
# Create an ELB with connection draining, increased idle timeout and cross availability
# zone load balancing
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
connection_draining_timeout: 60
idle_timeout: 300
cross_az_load_balancing: "yes"
region: us-east-1
zones:

@@ -316,6 +323,7 @@ class ElbManager(object):
zones=None, purge_zones=None, security_group_ids=None,
health_check=None, subnets=None, purge_subnets=None,
scheme="internet-facing", connection_draining_timeout=None,
idle_timeout=None,
cross_az_load_balancing=None, access_logs=None,
stickiness=None, region=None, **aws_connect_params):

@@ -331,6 +339,7 @@ class ElbManager(object):
self.purge_subnets = purge_subnets
self.scheme = scheme
self.connection_draining_timeout = connection_draining_timeout
self.idle_timeout = idle_timeout
self.cross_az_load_balancing = cross_az_load_balancing
self.access_logs = access_logs
self.stickiness = stickiness

@@ -359,6 +368,8 @@ class ElbManager(object):
# set them to avoid errors
if self._check_attribute_support('connection_draining'):
self._set_connection_draining_timeout()
if self._check_attribute_support('connecting_settings'):
self._set_idle_timeout()
if self._check_attribute_support('cross_zone_load_balancing'):
self._set_cross_az_load_balancing()
if self._check_attribute_support('access_log'):

@@ -456,6 +467,9 @@ class ElbManager(object):
if self._check_attribute_support('connection_draining'):
info['connection_draining_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout

if self._check_attribute_support('connecting_settings'):
info['idle_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectingSettings').idle_timeout

if self._check_attribute_support('cross_zone_load_balancing'):
is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing')
if is_cross_az_lb_enabled:

@@ -478,7 +492,7 @@ class ElbManager(object):
try:
return connect_to_aws(boto.ec2.elb, self.region,
**self.aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
self.module.fail_json(msg=str(e))

def _delete_elb(self):

@@ -745,6 +759,12 @@ class ElbManager(object):
attributes.connection_draining.enabled = False
self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)

def _set_idle_timeout(self):
attributes = self.elb.get_attributes()
if self.idle_timeout is not None:
attributes.connecting_settings.idle_timeout = self.idle_timeout
self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings)

def _policy_name(self, policy_type):
return __file__.split('/')[-1].replace('_', '-') + '-' + policy_type

@@ -792,21 +812,25 @@ class ElbManager(object):
if self.stickiness['type'] == 'loadbalancer':
policy = []
policy_type = 'LBCookieStickinessPolicyType'
if self.stickiness['enabled'] == True:

if self.module.boolean(self.stickiness['enabled']) == True:

if 'expiration' not in self.stickiness:
self.module.fail_json(msg='expiration must be set when type is loadbalancer')

expiration = self.stickiness['expiration'] if self.stickiness['expiration'] is not 0 else None

policy_attrs = {
'type': policy_type,
'attr': 'lb_cookie_stickiness_policies',
'method': 'create_lb_cookie_stickiness_policy',
'dict_key': 'cookie_expiration_period',
'param_value': self.stickiness['expiration']
'param_value': expiration
}
policy.append(self._policy_name(policy_attrs['type']))

self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
elif self.stickiness['enabled'] == False:
elif self.module.boolean(self.stickiness['enabled']) == False:
if len(elb_info.policies.lb_cookie_stickiness_policies):
if elb_info.policies.lb_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
self.changed = True

@@ -818,7 +842,7 @@ class ElbManager(object):
elif self.stickiness['type'] == 'application':
policy = []
policy_type = 'AppCookieStickinessPolicyType'
if self.stickiness['enabled'] == True:
if self.module.boolean(self.stickiness['enabled']) == True:

if 'cookie' not in self.stickiness:
self.module.fail_json(msg='cookie must be set when type is application')

@@ -832,7 +856,7 @@ class ElbManager(object):
}
policy.append(self._policy_name(policy_attrs['type']))
self._set_stickiness_policy(elb_info, listeners_dict, policy, **policy_attrs)
elif self.stickiness['enabled'] == False:
elif self.module.boolean(self.stickiness['enabled']) == False:
if len(elb_info.policies.app_cookie_stickiness_policies):
if elb_info.policies.app_cookie_stickiness_policies[0].policy_name == self._policy_name(policy_type):
self.changed = True

@@ -869,6 +893,7 @@ def main():
purge_subnets={'default': False, 'required': False, 'type': 'bool'},
scheme={'default': 'internet-facing', 'required': False},
connection_draining_timeout={'default': None, 'required': False},
idle_timeout={'default': None, 'required': False},
cross_az_load_balancing={'default': None, 'required': False},
stickiness={'default': None, 'required': False, 'type': 'dict'},
access_logs={'default': None, 'required': False, 'type': 'dict'}

@@ -901,6 +926,7 @@ def main():
purge_subnets = module.params['purge_subnets']
scheme = module.params['scheme']
connection_draining_timeout = module.params['connection_draining_timeout']
idle_timeout = module.params['idle_timeout']
cross_az_load_balancing = module.params['cross_az_load_balancing']
stickiness = module.params['stickiness']

@@ -928,7 +954,8 @@ def main():
elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
purge_zones, security_group_ids, health_check,
subnets, purge_subnets, scheme,
connection_draining_timeout, cross_az_load_balancing,
connection_draining_timeout, idle_timeout,
cross_az_load_balancing,
access_logs, stickiness,
region=region, **aws_connect_params)

@@ -939,6 +966,9 @@ def main():
if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'):
module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute")

if idle_timeout and not elb_man._check_attribute_support('connecting_settings'):
module.fail_json(msg="You must install boto >= 2.33.0 to use the idle_timeout attribute")

if state == 'present':
elb_man.ensure_ok()
elif state == 'absent':

@@ -955,4 +985,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

main()
if __name__ == '__main__':
main()
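Under the hood the new idle_timeout option is a read-modify-write of the ELB's ConnectingSettings attribute, which requires boto >= 2.33.0 as the check above enforces. A sketch with an assumed region and ELB name:

    import boto.ec2.elb

    conn = boto.ec2.elb.connect_to_region('us-east-1')
    lb = conn.get_all_load_balancers(load_balancer_names=['New ELB'])[0]

    attributes = lb.get_attributes()
    # Seconds a connection may sit idle before the ELB closes it.
    attributes.connecting_settings.idle_timeout = 300
    conn.modify_lb_attribute(lb.name, 'ConnectingSettings', attributes.connecting_settings)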
@@ -311,7 +311,7 @@ def main():

try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))

state = module.params.get('state')
@@ -115,8 +115,6 @@ EXAMPLES = '''

'''

import sys

try:
import boto.ec2.cloudwatch
from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm

@@ -270,11 +268,11 @@ def main():
state = module.params.get('state')

region, ec2_url, aws_connect_params = get_aws_connection_info(module)

if region:
try:
connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")

@@ -288,4 +286,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

main()
if __name__ == '__main__':
main()
@@ -178,7 +178,7 @@ def main():

try:
connection = connect_to_aws(boto.ec2.autoscale, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg = str(e))

if state == 'present':

@@ -187,4 +187,5 @@ def main():
delete_scaling_policy(connection, module)

main()
if __name__ == '__main__':
main()
@@ -74,7 +74,7 @@ options:
- If the volume's most recent snapshot has started less than `last_snapshot_min_age' minutes ago, a new snapshot will not be created.
required: false
default: 0
version_added: "1.9"
version_added: "2.0"

author: "Will Thames (@willthames)"
extends_documentation_fragment:
@@ -47,7 +47,7 @@ options:
volume_type:
description:
- Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS). "Standard" is the old EBS default
and continues to remain the Ansible default for backwards compatibility.
required: false
default: standard
version_added: "1.9"

@@ -69,7 +69,7 @@ options:
default: null
zone:
description:
- zone in which to create the volume, if unset uses the zone the instance is in (if set)
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']

@@ -87,7 +87,7 @@ options:
choices: ["yes", "no"]
version_added: "1.5"
state:
description:
- whether to ensure the volume is present or absent, or to list existing volumes (The C(list) option was added in version 1.8).
required: false
default: present

@@ -101,15 +101,15 @@ extends_documentation_fragment:

EXAMPLES = '''
# Simple attachment action
- ec2_vol:
instance: XXXXXX
volume_size: 5
device_name: sdd

# Example using custom iops params
- ec2_vol:
instance: XXXXXX
volume_size: 5
iops: 100
device_name: sdd

@@ -118,15 +118,15 @@ EXAMPLES = '''
instance: XXXXXX
snapshot: "{{ snapshot }}"

# Playbook example combined with instance launch
- ec2:
keypair: "{{ keypair }}"
image: "{{ image }}"
wait: yes
count: 3
register: ec2
- ec2_vol:
instance: "{{ item.id }} "
volume_size: 5
with_items: ec2.instances
register: ec2_vol

@@ -223,7 +223,7 @@ def get_volume(module, ec2):
return vols[0]

def get_volumes(module, ec2):

instance = module.params.get('instance')

try:

@@ -254,12 +254,10 @@ def boto_supports_volume_encryption():
"""
return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')

def create_volume(module, ec2, zone):
changed = False
name = module.params.get('name')
id = module.params.get('id')
instance = module.params.get('instance')
iops = module.params.get('iops')
encrypted = module.params.get('encrypted')
volume_size = module.params.get('volume_size')

@@ -292,16 +290,16 @@ def create_volume(module, ec2, zone):

def attach_volume(module, ec2, volume, instance):

device_name = module.params.get('device_name')
changed = False

# If device_name isn't set, make a choice based on best practices here:
# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html

# In future this needs to be more dynamic but combining block device mapping best practices
# (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;)

# Use password data attribute to tell whether the instance is Windows or Linux
if device_name is None:
try:

@@ -311,7 +309,7 @@ def attach_volume(module, ec2, volume, instance):
device_name = '/dev/xvdf'
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))

if volume.attachment_state() is not None:
adata = volume.attach_data
if adata.instance_id != instance.id:

@@ -330,9 +328,9 @@ def attach_volume(module, ec2, volume, instance):
return volume, changed

def detach_volume(module, ec2, volume):

changed = False

if volume.attachment_state() is not None:
adata = volume.attach_data
volume.detach()

@@ -340,15 +338,15 @@ def detach_volume(module, ec2, volume):
time.sleep(3)
volume.update()
changed = True

return volume, changed

def get_volume_info(volume, state):

# If we're just listing volumes then do nothing, else get the latest update for the volume
if state != 'list':
volume.update()

volume_info = {}
attachment = volume.attach_data

@@ -369,7 +367,7 @@ def get_volume_info(volume, state):
},
'tags': volume.tags
}

return volume_info

def main():

@@ -381,7 +379,7 @@ def main():
volume_size = dict(),
volume_type = dict(choices=['standard', 'gp2', 'io1'], default='standard'),
iops = dict(),
encrypted = dict(),
encrypted = dict(type='bool', default=False),
device_name = dict(),
zone = dict(aliases=['availability_zone', 'aws_zone', 'ec2_zone']),
snapshot = dict(),

@@ -397,34 +395,32 @@ def main():
name = module.params.get('name')
instance = module.params.get('instance')
volume_size = module.params.get('volume_size')
volume_type = module.params.get('volume_type')
iops = module.params.get('iops')
encrypted = module.params.get('encrypted')
device_name = module.params.get('device_name')
zone = module.params.get('zone')
snapshot = module.params.get('snapshot')
state = module.params.get('state')

# Ensure we have the zone or can get the zone
if instance is None and zone is None and state == 'present':
module.fail_json(msg="You must specify either instance or zone")

# Set volume detach flag
if instance == 'None' or instance == '':
instance = None
detach_vol_flag = True
else:
detach_vol_flag = False

# Set changed flag
changed = False

region, ec2_url, aws_connect_params = get_aws_connection_info(module)

if region:
try:
ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")

@@ -471,11 +467,11 @@ def main():

if volume_size and (id or snapshot):
module.fail_json(msg="Cannot specify volume_size together with id or snapshot")

if state == 'present':
volume, changed = create_volume(module, ec2, zone)
if detach_vol_flag:
volume, changed = detach_volume(module, ec2, volume)
elif inst is not None:
volume, changed = attach_volume(module, ec2, volume, inst)

@@ -489,4 +485,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

main()
if __name__ == '__main__':
main()
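With encrypted now a real boolean, the creation path can pass it straight through to boto's create_volume, which boto_supports_volume_encryption() gates on boto >= 2.29.0. A sketch; the region, zone and size are placeholders:

    import boto.ec2

    conn = boto.ec2.connect_to_region('us-east-1')

    volume = conn.create_volume(
        size=5,                  # GiB, placeholder
        zone='us-east-1a',       # placeholder availability zone
        volume_type='gp2',
        encrypted=True           # honoured only by boto >= 2.29.0
    )
    print(volume.id)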
@@ -49,19 +49,15 @@ options:
- 'A dictionary array of subnets to add of the form: { cidr: ..., az: ... , resource_tags: ... }. Where az is the desired availability zone of the subnet, but it is not required. Tags (i.e.: resource_tags) is also optional and use dictionary form: { "Environment":"Dev", "Tier":"Web", ...}. All VPC subnets not in this list will be removed. As of 1.8, if the subnets parameter is not specified, no existing subnets will be modified.'
required: false
default: null
aliases: []
vpc_id:
description:
- A VPC id to terminate when state=absent
required: false
default: null
aliases: []
resource_tags:
description:
- 'A dictionary array of resource tags of the form: { tag1: value1, tag2: value2 }. Tags in this list are used in conjunction with CIDR block to uniquely identify a VPC in lieu of vpc_id. Therefore, if CIDR/Tag combination does not exist, a new VPC will be created. VPC tags not on this list will be ignored. Prior to 1.7, specifying a resource tag was optional.'
required: true
default: null
aliases: []
version_added: "1.6"
internet_gateway:
description:

@@ -69,31 +65,26 @@ options:
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
route_tables:
description:
- 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},], resource_tags: ... }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids in addition igw. resource_tags is optional and uses dictionary form: { "Name": "public", ... }. This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes will be modified.'
- 'A dictionary array of route tables to add of the form: { subnets: [172.22.2.0/24, 172.22.3.0/24,], routes: [{ dest: 0.0.0.0/0, gw: igw},], resource_tags: ... }. Where the subnets list is those subnets the route table should be associated with, and the routes list is a list of routes to be in the table. The special keyword for the gw of igw specifies that you should the route should go through the internet gateway attached to the VPC. gw also accepts instance-ids, interface-ids, and vpc-peering-connection-ids in addition igw. resource_tags is optional and uses dictionary form: { "Name": "public", ... }. This module is currently unable to affect the "main" route table due to some limitations in boto, so you must explicitly define the associated subnets or they will be attached to the main table implicitly. As of 1.8, if the route_tables parameter is not specified, no existing routes will be modified.'
required: false
default: null
aliases: []
wait:
description:
- wait for the VPC to be in state 'available' before returning
required: false
default: "no"
choices: [ "yes", "no" ]
aliases: []
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
aliases: []
state:
description:
- Create or terminate the VPC
required: true
default: present
aliases: []
choices: [ "present", "absent" ]
author: "Carson Gee (@carsongee)"
extends_documentation_fragment:
- aws

@@ -234,25 +225,29 @@ def routes_match(rt_list=None, rt=None, igw=None):

Returns:
True when there provided routes and remote routes are the same.
False when provided routes and remote routes are diffrent.
False when provided routes and remote routes are different.
"""

local_routes = []
remote_routes = []
for route in rt_list:
route_kwargs = {}
route_kwargs = {
'gateway_id': None,
'instance_id': None,
'interface_id': None,
'vpc_peering_connection_id': None,
'state': 'active'
}
if route['gw'] == 'igw':
route_kwargs['gateway_id'] = igw.id
route_kwargs['instance_id'] = None
route_kwargs['state'] = 'active'
elif route['gw'].startswith('i-'):
route_kwargs['instance_id'] = route['gw']
route_kwargs['gateway_id'] = None
route_kwargs['state'] = 'active'
elif route['gw'].startswith('eni-'):
route_kwargs['interface_id'] = route['gw']
elif route['gw'].startswith('pcx-'):
route_kwargs['vpc_peering_connection_id'] = route['gw']
else:
route_kwargs['gateway_id'] = route['gw']
route_kwargs['instance_id'] = None
route_kwargs['state'] = 'active'
route_kwargs['destination_cidr_block'] = route['dest']
local_routes.append(route_kwargs)
for j in rt.routes:

@@ -280,7 +275,7 @@ def rtb_changed(route_tables=None, vpc_conn=None, module=None, vpc=None, igw=Non
igw : The internet gateway object for this vpc

Returns:
True when there is diffrence beween the provided routes and remote routes and if subnet assosications are diffrent.
True when there is difference between the provided routes and remote routes and if subnet associations are different.
False when both routes and subnet associations matched.

"""

@@ -509,6 +504,10 @@ def create_vpc(module, vpc_conn):
route_kwargs['gateway_id'] = igw.id
elif route['gw'].startswith('i-'):
route_kwargs['instance_id'] = route['gw']
elif route['gw'].startswith('eni-'):
route_kwargs['interface_id'] = route['gw']
elif route['gw'].startswith('pcx-'):
route_kwargs['vpc_peering_connection_id'] = route['gw']
else:
route_kwargs['gateway_id'] = route['gw']
vpc_conn.create_route(new_rt.id, route['dest'], **route_kwargs)

@@ -652,6 +651,7 @@ def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
msg='Unable to delete VPC {0}, error: {1}'.format(vpc.id, e)
)
changed = True
vpc_dict['state'] = "terminated"

return (changed, vpc_dict, terminated_vpc_id)
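The gateway dispatch added here recognises four shapes of gw value. Restated as a standalone helper for clarity; igw_id stands in for the VPC's attached internet gateway id:

    def route_kwargs_for(gw, igw_id):
        """Map a route's 'gw' value onto the matching boto create_route keyword."""
        if gw == 'igw':
            return {'gateway_id': igw_id}
        elif gw.startswith('i-'):
            return {'instance_id': gw}
        elif gw.startswith('eni-'):
            return {'interface_id': gw}
        elif gw.startswith('pcx-'):
            return {'vpc_peering_connection_id': gw}
        return {'gateway_id': gw}

    print(route_kwargs_for('pcx-12345678', 'igw-0abc1234'))
    # {'vpc_peering_connection_id': 'pcx-12345678'}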
@@ -93,9 +93,6 @@ EXAMPLES = '''

'''

import time
import sys

try:
import boto
import boto.ec2

@@ -136,15 +133,15 @@ def vpc_exists(module, vpc, name, cidr_block, multi):
module.fail_json(msg='Currently there are %d VPCs that have the same name and '
'CIDR block you specified. If you would like to create '
'the VPC anyway please pass True to the multi_ok param.' % len(matching_vpcs))

return matched_vpc

def update_vpc_tags(vpc, module, vpc_obj, tags, name):

if tags is None:
tags = dict()

tags.update({'Name': name})
try:
current_tags = dict((t.name, t.value) for t in vpc.get_all_tags(filters={'resource-id': vpc_obj.id}))

@@ -156,10 +153,10 @@ def update_vpc_tags(vpc, module, vpc_obj, tags, name):
except Exception, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)

def update_dhcp_opts(connection, module, vpc_obj, dhcp_id):

if vpc_obj.dhcp_options_id != dhcp_id:
connection.associate_dhcp_options(dhcp_id, vpc_obj.id)
return True

@@ -211,48 +208,47 @@ def main():
tags=module.params.get('tags')
state=module.params.get('state')
multi=module.params.get('multi_ok')

changed=False

region, ec2_url, aws_connect_params = get_aws_connection_info(module)

if region:
try:
connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")

if dns_hostnames and not dns_support:
module.fail_json('In order to enable DNS Hostnames you must also enable DNS support')

if state == 'present':

# Check if VPC exists
vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)

if vpc_obj is None:
try:
vpc_obj = connection.create_vpc(cidr_block, instance_tenancy=tenancy)
changed = True
except BotoServerError, e:
module.fail_json(msg=e)

if dhcp_id is not None:
try:
if update_dhcp_opts(connection, module, vpc_obj, dhcp_id):
changed = True
except BotoServerError, e:
module.fail_json(msg=e)

if tags is not None or name is not None:
try:
if update_vpc_tags(connection, module, vpc_obj, tags, name):
changed = True
except BotoServerError, e:
module.fail_json(msg=e)

# Note: Boto currently doesn't currently provide an interface to ec2-describe-vpc-attribute
# which is needed in order to detect the current status of DNS options. For now we just update

@@ -263,21 +259,21 @@ def main():
except BotoServerError, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)

# get the vpc obj again in case it has changed
try:
vpc_obj = connection.get_all_vpcs(vpc_obj.id)[0]
except BotoServerError, e:
e_msg=boto_exception(e)
module.fail_json(msg=e_msg)

module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))

elif state == 'absent':

# Check if VPC exists
vpc_obj = vpc_exists(module, connection, name, cidr_block, multi)

if vpc_obj is not None:
try:
connection.delete_vpc(vpc_obj.id)

@@ -287,11 +283,12 @@ def main():
e_msg = boto_exception(e)
module.fail_json(msg="%s. You may want to use the ec2_vpc_subnet, ec2_vpc_igw, "
"and/or ec2_vpc_route_table modules to ensure the other components are absent." % e_msg)

module.exit_json(changed=changed, vpc=get_vpc_values(vpc_obj))

# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

main()
if __name__ == '__main__':
main()
@@ -50,19 +50,20 @@ options:
default: cache.m1.small
num_nodes:
description:
- The initial number of cache nodes that the cache cluster will have
- The initial number of cache nodes that the cache cluster will have. Required when state=present.
required: false
cache_port:
description:
- The port number on which each of the cache nodes will accept connections
required: false
default: none
parameter_group:
cache_parameter_group:
description:
- Specify non-default parameter group names to be associated with cache cluster
- The name of the cache parameter group to associate with this cache cluster. If this argument is omitted, the default cache parameter group for the specified engine will be used.
required: false
default: None
version_added: "2.0"
aliases: [ 'parameter_group' ]
cache_subnet_group:
description:
- The subnet group name to associate with. Only use if inside a vpc. Required if inside a vpc

@@ -150,7 +151,7 @@ class ElastiCacheManager(object):
EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying']

def __init__(self, module, name, engine, cache_engine_version, node_type,
num_nodes, cache_port, parameter_group, cache_subnet_group,
num_nodes, cache_port, cache_parameter_group, cache_subnet_group,
cache_security_groups, security_group_ids, zone, wait,
hard_modify, region, **aws_connect_kwargs):
self.module = module

@@ -160,7 +161,7 @@ class ElastiCacheManager(object):
self.node_type = node_type
self.num_nodes = num_nodes
self.cache_port = cache_port
self.parameter_group = parameter_group
self.cache_parameter_group = cache_parameter_group
self.cache_subnet_group = cache_subnet_group
self.cache_security_groups = cache_security_groups
self.security_group_ids = security_group_ids

@@ -219,7 +220,7 @@ class ElastiCacheManager(object):
engine_version=self.cache_engine_version,
cache_security_group_names=self.cache_security_groups,
security_group_ids=self.security_group_ids,
cache_parameter_group_name=self.parameter_group,
cache_parameter_group_name=self.cache_parameter_group,
cache_subnet_group_name=self.cache_subnet_group,
preferred_availability_zone=self.zone,
port=self.cache_port)

@@ -295,7 +296,7 @@ class ElastiCacheManager(object):
num_cache_nodes=self.num_nodes,
cache_node_ids_to_remove=nodes_to_remove,
cache_security_group_names=self.cache_security_groups,
cache_parameter_group_name=self.parameter_group,
cache_parameter_group_name=self.cache_parameter_group,
security_group_ids=self.security_group_ids,
apply_immediately=True,
engine_version=self.cache_engine_version)

@@ -486,7 +487,8 @@ def main():
cache_engine_version={'required': False},
node_type={'required': False, 'default': 'cache.m1.small'},
num_nodes={'required': False, 'default': None, 'type': 'int'},
parameter_group={'required': False, 'default': None},
# alias for compat with the original PR 1950
cache_parameter_group={'required': False, 'default': None, 'aliases': ['parameter_group']},
cache_port={'required': False, 'type': 'int'},
cache_subnet_group={'required': False, 'default': None},
cache_security_groups={'required': False, 'default': [default],

@@ -521,7 +523,7 @@ def main():
zone = module.params['zone']
wait = module.params['wait']
hard_modify = module.params['hard_modify']
parameter_group = module.params['parameter_group']
cache_parameter_group = module.params['cache_parameter_group']

if cache_subnet_group and cache_security_groups == [default]:
cache_security_groups = []

@@ -540,7 +542,7 @@ def main():
elasticache_manager = ElastiCacheManager(module, name, engine,
cache_engine_version, node_type,
num_nodes, cache_port,
parameter_group,
cache_parameter_group,
cache_subnet_group,
cache_security_groups,
security_group_ids, zone, wait,
@ -192,14 +192,24 @@ def create_user(module, iam, name, pwd, path, key_state, key_count):
|
|||
|
||||
|
||||
def delete_user(module, iam, name):
|
||||
del_meta = ''
|
||||
try:
|
||||
current_keys = [ck['access_key_id'] for ck in
|
||||
iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata]
|
||||
for key in current_keys:
|
||||
iam.delete_access_key(key, name)
|
||||
del_meta = iam.delete_user(name).delete_user_response
|
||||
except boto.exception.BotoServerError, err:
|
||||
error_msg = boto_exception(err)
|
||||
try:
|
||||
login_profile = iam.get_login_profiles(name).get_login_profile_response
|
||||
except boto.exception.BotoServerError, err:
|
||||
error_msg = boto_exception(err)
|
||||
if ('Cannot find Login Profile') in error_msg:
|
||||
|
||||
del_meta = iam.delete_user(name).delete_user_response
|
||||
else:
|
||||
iam.delete_login_profile(name)
|
||||
del_meta = iam.delete_user(name).delete_user_response
|
||||
except Exception as ex:
|
||||
module.fail_json(changed=False, msg="delete failed %s" %ex)
|
||||
if ('must detach all policies first') in error_msg:
|
||||
for policy in iam.get_all_user_policies(name).list_user_policies_result.policy_names:
|
||||
iam.delete_user_policy(name, policy)
|
||||
|
@ -213,7 +223,7 @@ def delete_user(module, iam, name):
|
|||
"currently supported by boto. Please detach the polices "
|
||||
"through the console and try again." % name)
|
||||
else:
|
||||
module.fail_json(changed=changed, msg=str(err))
|
||||
module.fail_json(changed=changed, msg=str(error_msg))
|
||||
else:
|
||||
changed = True
|
||||
return del_meta, name, changed
|
||||
|
@ -650,15 +660,20 @@ def main():
|
|||
else:
|
||||
module.exit_json(
|
||||
changed=changed, groups=user_groups, user_name=name, keys=key_list)
|
||||
|
||||
elif state == 'update' and not user_exists:
|
||||
module.fail_json(
|
||||
msg="The user %s does not exit. No update made." % name)
|
||||
|
||||
elif state == 'absent':
|
||||
if name in orig_user_list:
|
||||
set_users_groups(module, iam, name, '')
|
||||
del_meta, name, changed = delete_user(module, iam, name)
|
||||
module.exit_json(
|
||||
deletion_meta=del_meta, deleted_user=name, changed=changed)
|
||||
if user_exists:
|
||||
try:
|
||||
set_users_groups(module, iam, name, '')
|
||||
del_meta, name, changed = delete_user(module, iam, name)
|
||||
module.exit_json(deleted_user=name, changed=changed)
|
||||
|
||||
except Exception as ex:
|
||||
module.fail_json(changed=changed, msg=str(ex))
|
||||
else:
|
||||
module.exit_json(
|
||||
changed=False, msg="User %s is already absent from your AWS IAM users" % name)
|
||||
|
@ -690,9 +705,11 @@ def main():
|
|||
if not new_path and not new_name:
|
||||
module.exit_json(
|
||||
changed=changed, group_name=name, group_path=cur_path)
|
||||
|
||||
elif state == 'update' and not group_exists:
|
||||
module.fail_json(
|
||||
changed=changed, msg="Update Failed. Group %s doesn't seem to exit!" % name)
|
||||
|
||||
elif state == 'absent':
|
||||
if name in orig_group_list:
|
||||
removed_group, changed = delete_group(iam=iam, name=name)
|
||||
|
|
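As a usage sketch of the deletion path reworked above (which now removes access keys and any login profile before deleting the user), a hypothetical task:

- iam:
    iam_type: user
    name: jdoe
    state: absent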
@@ -64,9 +64,9 @@ extends_documentation_fragment:
'''

EXAMPLES = '''
# Create and policy with the name of 'Admin' to the group 'administrators'
# Create a policy with the name of 'Admin' to the group 'administrators'
tasks:
- name: Create two new IAM users with API keys
- name: Assign a policy called Admin to the administrators group
iam_policy:
iam_type: group
iam_name: administrators

@@ -87,7 +87,7 @@ task:
- Luigi
register: new_groups

- name:
- name: Apply READ-ONLY policy to new groups that have been recently created
iam_policy:
iam_type: group
iam_name: "{{ item.created_group.group_name }}"

@@ -146,9 +146,7 @@ def user_action(module, iam, name, policy_name, skip, pdoc, state):
if urllib.unquote(iam.get_user_policy(name, pol).
get_user_policy_result.policy_document) == pdoc:
policy_match = True
if policy_match:
msg=("The policy document you specified already exists "
"under the name %s." % pol)

if state == 'present':
# If policy document does not already exist (either it's changed
# or the policy is not present) or if we're not skipping dupes then

@@ -183,13 +181,19 @@ def role_action(module, iam, name, policy_name, skip, pdoc, state):
current_policies = [cp for cp in iam.list_role_policies(name).
list_role_policies_result.
policy_names]
except boto.exception.BotoServerError as e:
if e.error_code == "NoSuchEntity":
# Role doesn't exist so it's safe to assume the policy doesn't either
module.exit_json(changed=False)
else:
module.fail_json(msg=e.message)

try:
for pol in current_policies:
if urllib.unquote(iam.get_role_policy(name, pol).
get_role_policy_result.policy_document) == pdoc:
policy_match = True
if policy_match:
msg=("The policy document you specified already exists "
"under the name %s." % pol)

if state == 'present':
# If policy document does not already exist (either it's changed
# or the policy is not present) or if we're not skipping dupes then

@@ -297,10 +301,12 @@ def main():
pdoc = json.dumps(json.load(json_data))
json_data.close()
elif module.params.get('policy_json') != None:
try:
pdoc = json.dumps(module.params.get('policy_json'))
except Exception as e:
module.fail_json(msg=str(e) + '\n' + module.params.get('policy_json'))
# if its a string, assume it is already JSON
if not isinstance(pdoc, basestring):
try:
pdoc = json.dumps(module.params.get('policy_json'))
except Exception as e:
module.fail_json(msg='Failed to convert the policy into valid JSON: %s' % str(e))
else:
pdoc=None
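A hedged sketch of the policy_json handling shown above, which now accepts either a pre-rendered JSON string or an inline dict; the file name is a placeholder:

- iam_policy:
    iam_type: user
    iam_name: jdoe
    policy_name: ReadOnly
    state: present
    policy_json: "{{ lookup('file', 'readonly_policy.json') }}"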
@@ -271,6 +271,33 @@ EXAMPLES = '''
command: reboot
instance_name: database
wait: yes

# Restore a Postgres db instance from a snapshot, wait for it to become available again, and
# then modify it to add your security group. Also, display the new endpoint.
# Note that the "publicly_accessible" option is allowed here just as it is in the AWS CLI
- local_action:
module: rds
command: restore
snapshot: mypostgres-snapshot
instance_name: MyNewInstanceName
region: us-west-2
zone: us-west-2b
subnet: default-vpc-xx441xxx
publicly_accessible: yes
wait: yes
wait_timeout: 600
tags:
Name: pg1_test_name_tag
register: rds

- local_action:
module: rds
command: modify
instance_name: MyNewInstanceName
region: us-west-2
vpc_security_groups: sg-xxx945xx

- debug: msg="The new db endpoint is {{ rds.instance.endpoint }}"

'''

@@ -802,13 +829,17 @@ def promote_db_instance(module, conn):
instance_name = module.params.get('instance_name')

result = conn.get_db_instance(instance_name)
if not result:
module.fail_json(msg="DB Instance %s does not exist" % instance_name)

if result.get_data().get('replication_source'):
changed = False
else:
try:
result = conn.promote_read_replica(instance_name, **params)
changed = True
except RDSException, e:
module.fail_json(msg=e.message)
else:
changed = False

if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
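For completeness, a hypothetical task exercising the promote path guarded above (only instances that report a replication_source are promoted):

- rds:
    command: promote
    instance_name: myreadreplica
    region: us-west-2
    wait: yes
    wait_timeout: 300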
@@ -112,7 +112,7 @@ except ImportError:

# returns a tuple: (whether or not a parameter was changed, the remaining parameters that weren't found in this parameter group)

class NotModifiableError(StandardError):
class NotModifiableError(Exception):
def __init__(self, error_message, *args):
super(NotModifiableError, self).__init__(error_message, *args)
self.error_message = error_message

@@ -175,7 +175,7 @@ def modify_group(group, params, immediate=False):
new_params = dict(params)

for key in new_params.keys():
if group.has_key(key):
if key in group:
param = group[key]
new_value = new_params[key]

@@ -281,7 +281,6 @@ def main():
else:
break


except BotoServerError, e:
module.fail_json(msg = e.error_message)

@@ -297,4 +296,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

main()
if __name__ == '__main__':
main()
@@ -295,7 +295,7 @@ def main():
overwrite = dict(required=False, type='bool'),
retry_interval = dict(required=False, default=500),
private_zone = dict(required=False, type='bool', default=False),
identifier = dict(required=False),
identifier = dict(required=False, default=None),
weight = dict(required=False, type='int'),
region = dict(required=False),
health_check = dict(required=False),

@@ -388,8 +388,10 @@ def main():
# tripping of things like * and @.
decoded_name = rset.name.replace(r'\052', '*')
decoded_name = decoded_name.replace(r'\100', '@')
# Need to save these changes in rset, because rset.to_xml() is compared with wanted_rset.to_xml() in the next block
rset.name = decoded_name

if rset.type == type_in and decoded_name.lower() == record_in.lower() and rset.identifier == identifier_in:
if rset.type == type_in and decoded_name.lower() == record_in.lower() and str(rset.identifier) == str(identifier_in):
found_record = True
record['zone'] = zone_in
record['type'] = rset.type
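A hypothetical weighted-record task that depends on the identifier comparison fixed above; all values are placeholders:

- route53:
    command: create
    zone: example.com
    record: www.example.com
    type: A
    value: 203.0.113.10
    identifier: host-1
    weight: 100
    overwrite: yes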
@@ -146,7 +146,6 @@ options:
requirements: [ "boto" ]
author:
- "Lester Wade (@lwade)"
- "Ralph Tice (@ralph-tice)"
extends_documentation_fragment: aws
'''
@@ -37,6 +37,7 @@ options:
api_token:
description:
- DigitalOcean api token.
version_added: "1.9.5"
id:
description:
- Numeric, the droplet id you want to operate on.

@@ -100,8 +101,9 @@ options:

notes:
- Two environment variables can be used, DO_API_KEY and DO_API_TOKEN. They both refer to the v2 token.
- As of Ansible 2.0, Version 2 of the DigitalOcean API is used.
- As of Ansible 2.0, the above parameters were changed significantly. If you are running 1.9.x or earlier, please use C(ansible-doc digital_ocean) to view the correct parameters for your version. Dedicated web docs will be available in the near future for the stable branch.
- As of Ansible 1.9.5 and 2.0, Version 2 of the DigitalOcean API is used, this removes C(client_id) and C(api_key) options in favor of C(api_token).
- If you are running Ansible 1.9.4 or earlier you might not be able to use the included version of this module as the API version used has been retired.
Upgrade Ansible or, if unable to, try downloading the latest version of this module from github and putting it into a 'library' directory.
requirements:
- "python >= 2.6"
- dopy
@@ -29,12 +29,10 @@ options:
- Indicate desired state of the target.
default: present
choices: ['present', 'absent']
client_id:
description:
- DigitalOcean manager id.
api_key:
api_token:
description:
- DigitalOcean api key.
- DigitalOcean api token.
version_added: "1.9.5"
id:
description:
- Numeric, the droplet id you want to operate on.

@@ -46,8 +44,9 @@ options:
- The IP address to point a domain at.

notes:
- Two environment variables can be used, DO_CLIENT_ID and DO_API_KEY.
- Version 1 of DigitalOcean API is used.
- Two environment variables can be used, DO_API_KEY and DO_API_TOKEN. They both refer to the v2 token.
- As of Ansible 1.9.5 and 2.0, Version 2 of the DigitalOcean API is used, this removes C(client_id) and C(api_key) options in favor of C(api_token).
- If you are running Ansible 1.9.4 or earlier you might not be able to use the included version of this module as the API version used has been retired.

requirements:
- "python >= 2.6"

@@ -68,9 +67,9 @@ EXAMPLES = '''
- digital_ocean: >
state=present
name=test_droplet
size_id=1
region_id=2
image_id=3
size_id=1gb
region_id=sgp1
image_id=ubuntu-14-04-x64
register: test_droplet

- digital_ocean_domain: >

@@ -135,8 +134,8 @@ class Domain(JsonfyMixIn):
return cls(json)

@classmethod
def setup(cls, client_id, api_key):
cls.manager = DoManager(client_id, api_key)
def setup(cls, api_token):
cls.manager = DoManager(None, api_token, api_version=2)
DomainRecord.manager = cls.manager

@classmethod

@@ -171,16 +170,14 @@ def core(module):
return v

try:
# params['client_id'] will be None even if client_id is not passed in
client_id = module.params['client_id'] or os.environ['DO_CLIENT_ID']
api_key = module.params['api_key'] or os.environ['DO_API_KEY']
api_token = module.params['api_token'] or os.environ['DO_API_TOKEN'] or os.environ['DO_API_KEY']
except KeyError, e:
module.fail_json(msg='Unable to load %s' % e.message)

changed = True
state = module.params['state']

Domain.setup(client_id, api_key)
Domain.setup(api_token)
if state in ('present'):
domain = Domain.find(id=module.params["id"])

@@ -223,8 +220,7 @@ def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(choices=['present', 'absent'], default='present'),
client_id = dict(aliases=['CLIENT_ID'], no_log=True),
api_key = dict(aliases=['API_KEY'], no_log=True),
api_token = dict(aliases=['API_TOKEN'], no_log=True),
name = dict(type='str'),
id = dict(aliases=['droplet_id'], type='int'),
ip = dict(type='str'),
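A sketch of the v2-style invocation after this change; the token value is a placeholder:

- digital_ocean_domain:
    state: present
    name: example.com
    ip: 203.0.113.10
    api_token: "{{ do_api_token }}"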
@@ -46,6 +46,14 @@ options:
default: missing
choices: [ "missing", "always" ]
version_added: "1.9"
entrypoint:
description:
- Corresponds to ``--entrypoint`` option of ``docker run`` command and
``ENTRYPOINT`` directive of Dockerfile.
Used to match and launch containers.
default: null
required: false
version_added: "2.1"
command:
description:
- Command used to match and launch containers.

@@ -79,8 +87,11 @@ options:
version_added: "1.5"
volumes:
description:
- List of volumes to mount within the container using docker CLI-style
- 'syntax: C(/host:/container[:mode]) where "mode" may be "rw" or "ro".'
- List of volumes to mount within the container
- 'Use docker CLI-style syntax: C(/host:/container[:mode])'
- You can specify a read mode for the mount with either C(ro) or C(rw).
Starting at version 2.1, SELinux hosts can additionally use C(z) or C(Z)
mount options to use a shared or private label for the volume.
default: null
volumes_from:
description:

@@ -92,6 +103,12 @@ options:
- 'alias. Use docker CLI-style syntax: C(redis:myredis).'
default: null
version_added: "1.5"
devices:
description:
- List of host devices to expose to container
default: null
required: false
version_added: "2.1"
log_driver:
description:
- You can specify a different logging driver for the container than for the daemon.

@@ -326,11 +343,25 @@ options:
default: false
aliases: []
version_added: "2.0"
labels:
description:
- Set container labels. Requires docker >= 1.6 and docker-py >= 1.2.0.
required: false
default: null
version_added: "2.1"
stop_timeout:
description:
- How many seconds to wait for the container to stop before killing it.
required: false
default: 10
version_added: "2.0"
author:
- "Cove Schneider (@cove)"
- "Joshua Conner (@joshuaconner)"
- "Pavel Antonov (@softzilla)"
- "Ash Wilson (@smashwilson)"
- "Thomas Steinbach (@ThomasSteinbach)"
- "Philippe Jandot (@zfil)"
requirements:
- "python >= 2.6"
- "docker-py >= 0.3.0"

@@ -375,6 +406,8 @@ EXAMPLES = '''
# stopped and removed, and a new one will be launched in its place.
# - link this container to the existing redis container launched above with
# an alias.
# - grant the container read write permissions for the host's /dev/sda device
# through a node named /dev/xvda
# - bind TCP port 9000 within the container to port 8080 on all interfaces
# on the host.
# - bind UDP port 9001 within the container to port 8081 on the host, only

@@ -389,6 +422,8 @@ EXAMPLES = '''
pull: always
links:
- "myredis:aliasedredis"
devices:
- "/dev/sda:/dev/xvda:rwm"
ports:
- "8080:9000"
- "127.0.0.1:8081:9001/udp"
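A hedged illustration combining the volume modes and device passthrough documented above; paths and names are made up:

- docker:
    image: myapp:latest
    name: myapp
    state: started
    volumes:
    - /srv/data:/data:rw,Z
    devices:
    - "/dev/fuse:/dev/fuse:rwm"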
@@ -591,6 +626,7 @@ class DockerManager(object):
# docker-py version is a tuple of ints because we have to compare them
# server APIVersion is passed to a docker-py function that takes strings
_cap_ver_req = {
'devices': ((0, 7, 0), '1.2'),
'dns': ((0, 3, 0), '1.10'),
'volumes_from': ((0, 3, 0), '1.10'),
'restart_policy': ((0, 5, 0), '1.14'),

@@ -603,6 +639,8 @@ class DockerManager(object):
'cap_add': ((0, 5, 0), '1.14'),
'cap_drop': ((0, 5, 0), '1.14'),
'read_only': ((1, 0, 0), '1.17'),
'labels': ((1, 2, 0), '1.18'),
'stop_timeout': ((0, 5, 0), '1.0'),
# Clientside only
'insecure_registry': ((0, 5, 0), '0.0')
}

@@ -624,14 +662,14 @@ class DockerManager(object):
# host mount (e.g. /mnt:/tmp, bind mounts host's /tmp to /mnt in the container)
elif 2 <= len(parts) <= 3:
# default to read-write
ro = False
mode = 'rw'
# with supplied bind mode
if len(parts) == 3:
if parts[2] not in ['ro', 'rw']:
self.module.fail_json(msg='bind mode needs to either be "ro" or "rw"')
if parts[2] not in ["rw", "rw,Z", "rw,z", "z,rw", "Z,rw", "Z", "z", "ro", "ro,Z", "ro,z", "z,ro", "Z,ro"]:
self.module.fail_json(msg='invalid bind mode ' + parts[2])
else:
ro = parts[2] == 'ro'
self.binds[parts[0]] = {'bind': parts[1], 'ro': ro }
mode = parts[2]
self.binds[parts[0]] = {'bind': parts[1], 'mode': mode }
else:
self.module.fail_json(msg='volumes support 1 to 3 arguments')

@@ -827,11 +865,15 @@ class DockerManager(object):
}

optionals = {}
for optional_param in ('dns', 'volumes_from', 'restart_policy',
'restart_policy_retry', 'pid', 'extra_hosts', 'log_driver',
'cap_add', 'cap_drop', 'read_only', 'log_opt'):
for optional_param in ('devices', 'dns', 'volumes_from',
'restart_policy', 'restart_policy_retry', 'pid', 'extra_hosts',
'log_driver', 'cap_add', 'cap_drop', 'read_only', 'log_opt'):
optionals[optional_param] = self.module.params.get(optional_param)

if optionals['devices'] is not None:
self.ensure_capability('devices')
params['devices'] = optionals['devices']

if optionals['dns'] is not None:
self.ensure_capability('dns')
params['dns'] = optionals['dns']

@@ -1049,6 +1091,21 @@ class DockerManager(object):
differing.append(container)
continue

# ENTRYPOINT

expected_entrypoint = self.module.params.get('entrypoint')
if expected_entrypoint:
expected_entrypoint = shlex.split(expected_entrypoint)
actual_entrypoint = container["Config"]["Entrypoint"]

if actual_entrypoint != expected_entrypoint:
self.reload_reasons.append(
'entrypoint ({0} => {1})'
.format(actual_entrypoint, expected_entrypoint)
)
differing.append(container)
continue

# COMMAND

expected_command = self.module.params.get('command')

@@ -1094,7 +1151,7 @@ class DockerManager(object):
self.module.fail_json(msg=str(e))

#For v1.19 API and above use HostConfig, otherwise use Config
if api_version >= 1.19:
if docker.utils.compare_version('1.19', api_version) >= 0:
actual_mem = container['HostConfig']['Memory']
else:
actual_mem = container['Config']['Memory']

@@ -1129,6 +1186,22 @@ class DockerManager(object):
differing.append(container)
continue

# LABELS

expected_labels = {}
for name, value in self.module.params.get('labels').iteritems():
expected_labels[name] = str(value)

actual_labels = {}
for container_label in container['Config']['Labels'] or []:
name, value = container_label.split('=', 1)
actual_labels[name] = value

if actual_labels != expected_labels:
self.reload_reasons.append('labels {0} => {1}'.format(actual_labels, expected_labels))
differing.append(container)
continue

# HOSTNAME

expected_hostname = self.module.params.get('hostname')

@@ -1195,10 +1268,7 @@ class DockerManager(object):
for host_path, config in self.binds.iteritems():
if isinstance(config, dict):
container_path = config['bind']
if config['ro']:
mode = 'ro'
else:
mode = 'rw'
mode = config['mode']
else:
container_path = config
mode = 'rw'

@@ -1274,6 +1344,24 @@ class DockerManager(object):
differing.append(container)
continue

# DEVICES

expected_devices = set()
for device in (self.module.params.get('devices') or []):
if len(device.split(':')) == 2:
expected_devices.add(device + ":rwm")
else:
expected_devices.add(device)

actual_devices = set()
for device in (container['HostConfig']['Devices'] or []):
actual_devices.add("{PathOnHost}:{PathInContainer}:{CgroupPermissions}".format(**device))

if actual_devices != expected_devices:
self.reload_reasons.append('devices ({0} => {1})'.format(actual_devices, expected_devices))
differing.append(container)
continue

# DNS

expected_dns = set(self.module.params.get('dns') or [])

@@ -1319,9 +1407,12 @@ class DockerManager(object):
Return any matching containers that are already present.
"""

entrypoint = self.module.params.get('entrypoint')
if entrypoint is not None:
entrypoint = shlex.split(entrypoint)
command = self.module.params.get('command')
if command:
command = command.strip()
if command is not None:
command = shlex.split(command)
name = self.module.params.get('name')
if name and not name.startswith('/'):
name = '/' + name

@@ -1348,15 +1439,16 @@ class DockerManager(object):
details = _docker_id_quirk(details)

running_image = normalize_image(details['Config']['Image'])
running_command = container['Command'].strip()

image_matches = running_image in repo_tags

# if a container has an entrypoint, `command` will actually equal
# '{} {}'.format(entrypoint, command)
command_matches = (not command or running_command.endswith(command))
command_matches = command == details['Config']['Cmd']
entrypoint_matches = (
entrypoint == details['Config']['Entrypoint']
)

matches = image_matches and command_matches
matches = (image_matches and command_matches and
entrypoint_matches)

if matches:
if not details:

@@ -1421,10 +1513,12 @@ class DockerManager(object):
api_version = self.client.version()['ApiVersion']

params = {'image': self.module.params.get('image'),
'entrypoint': self.module.params.get('entrypoint'),
'command': self.module.params.get('command'),
'ports': self.exposed_ports,
'volumes': self.volumes,
'environment': self.env,
'labels': self.module.params.get('labels'),
'hostname': self.module.params.get('hostname'),
'domainname': self.module.params.get('domainname'),
'detach': self.module.params.get('detach'),

@@ -1438,7 +1532,7 @@ class DockerManager(object):
params['host_config'] = self.create_host_config()

#For v1.19 API and above use HostConfig, otherwise use Config
if api_version < 1.19:
if docker.utils.compare_version('1.19', api_version) < 0:
params['mem_limit'] = mem_limit
else:
params['host_config']['Memory'] = mem_limit

@@ -1483,7 +1577,7 @@ class DockerManager(object):

def stop_containers(self, containers):
for i in containers:
self.client.stop(i['Id'])
self.client.stop(i['Id'], self.module.params.get('stop_timeout'))
self.increment_counter('stopped')

return [self.client.wait(i['Id']) for i in containers]

@@ -1536,7 +1630,8 @@ def present(manager, containers, count, name):
delta = count - len(containers.deployed)

if delta > 0:
containers.notice_changed(manager.create_containers(delta))
created = manager.create_containers(delta)
containers.notice_changed(manager.get_inspect_containers(created))

if delta < 0:
# If both running and stopped containers exist, remove

@@ -1551,8 +1646,8 @@ def present(manager, containers, count, name):
to_remove.append(c)

manager.stop_containers(to_stop)
containers.notice_changed(manager.get_inspect_containers(to_remove))
manager.remove_containers(to_remove)
containers.notice_changed(to_remove)

def started(manager, containers, count, name):
'''Ensure that exactly `count` matching containers exist and are running.'''

@@ -1568,13 +1663,13 @@ def started(manager, containers, count, name):

created = manager.create_containers(delta)
manager.start_containers(created)
containers.notice_changed(created)
containers.notice_changed(manager.get_inspect_containers(created))

if delta < 0:
excess = containers.running[0:-delta]
containers.notice_changed(manager.get_inspect_containers(excess))
manager.stop_containers(excess)
manager.remove_containers(excess)
containers.notice_changed(excess)

def reloaded(manager, containers, count, name):
'''

@@ -1599,6 +1694,10 @@ def restarted(manager, containers, count, name):

containers.refresh()

for container in manager.get_differing_containers():
manager.stop_containers([container])
manager.remove_containers([container])

manager.restart_containers(containers.running)
started(manager, containers, count, name)

@@ -1608,7 +1707,7 @@ def stopped(manager, containers, count, name):
containers.refresh()

manager.stop_containers(containers.running)
containers.notice_changed(containers.running)
containers.notice_changed(manager.get_inspect_containers(containers.running))

def killed(manager, containers, count, name):
'''Kill any matching containers that are running.'''

@@ -1616,7 +1715,7 @@ def killed(manager, containers, count, name):
containers.refresh()

manager.kill_containers(containers.running)
containers.notice_changed(containers.running)
containers.notice_changed(manager.get_inspect_containers(containers.running))

def absent(manager, containers, count, name):
'''Stop and remove any matching containers.'''

@@ -1624,8 +1723,8 @@ def absent(manager, containers, count, name):
containers.refresh()

manager.stop_containers(containers.running)
containers.notice_changed(manager.get_inspect_containers(containers.deployed))
manager.remove_containers(containers.deployed)
containers.notice_changed(containers.deployed)

def main():
module = AnsibleModule(

@@ -1633,6 +1732,7 @@ def main():
count = dict(default=1),
image = dict(required=True),
pull = dict(required=False, default='missing', choices=['missing', 'always']),
entrypoint = dict(required=False, default=None, type='str'),
command = dict(required=False, default=None),
expose = dict(required=False, default=None, type='list'),
ports = dict(required=False, default=None, type='list'),

@@ -1640,6 +1740,7 @@ def main():
volumes = dict(default=None, type='list'),
volumes_from = dict(default=None),
links = dict(default=None, type='list'),
devices = dict(default=None, type='list'),
memory_limit = dict(default=0),
memory_swap = dict(default=0),
docker_url = dict(),

@@ -1679,6 +1780,8 @@ def main():
cap_add = dict(default=None, type='list'),
cap_drop = dict(default=None, type='list'),
read_only = dict(default=None, type='bool'),
labels = dict(default={}, type='dict'),
stop_timeout = dict(default=10, type='int'),
),
required_together = (
['tls_client_cert', 'tls_client_key'],

@@ -1738,9 +1841,8 @@ def main():
module.exit_json(changed=manager.has_changed(),
msg=manager.get_summary_message(),
summary=manager.counters,
containers=containers.changed,
reload_reasons=manager.get_reload_reason_message(),
ansible_facts=_ansible_facts(manager.get_inspect_containers(containers.changed)))
ansible_facts=_ansible_facts(containers.changed))

except DockerAPIError as e:
module.fail_json(changed=manager.has_changed(), msg="Docker API Error: %s" % e.explanation)
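A hypothetical task combining several of the options added in this change (entrypoint, labels, and stop_timeout); names and values are made up:

- docker:
    image: busybox:latest
    name: worker
    entrypoint: /bin/sh
    command: -c "sleep 3600"
    labels:
      tier: batch
    stop_timeout: 30
    state: started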
@@ -63,8 +63,43 @@ options:
description:
- URL of docker host to issue commands to
required: false
default: unix://var/run/docker.sock
default: ${DOCKER_HOST} or unix://var/run/docker.sock
aliases: []
use_tls:
description:
- Whether to use tls to connect to the docker server. "no" means not to
use tls (and ignore any other tls related parameters). "encrypt" means
to use tls to encrypt the connection to the server. "verify" means to
also verify that the server's certificate is valid for the server
(this both verifies the certificate against the CA and that the
certificate was issued for that host). If this is unspecified, tls will
only be used if one of the other tls options requires it.
choices: [ "no", "encrypt", "verify" ]
version_added: "2.0"
tls_client_cert:
description:
- Path to the PEM-encoded certificate used to authenticate docker client.
If specified tls_client_key must be valid
default: ${DOCKER_CERT_PATH}/cert.pem
version_added: "2.0"
tls_client_key:
description:
- Path to the PEM-encoded key used to authenticate docker client. If
specified tls_client_cert must be valid
default: ${DOCKER_CERT_PATH}/key.pem
version_added: "2.0"
tls_ca_cert:
description:
- Path to a PEM-encoded certificate authority to secure the Docker connection.
This has no effect if use_tls is encrypt.
default: ${DOCKER_CERT_PATH}/ca.pem
version_added: "2.0"
tls_hostname:
description:
- A hostname to check matches what's supplied in the docker server's
certificate. If unspecified, the hostname is taken from the docker_url.
default: Taken from docker_url
version_added: "2.0"
docker_api_version:
description:
- Remote API version to use. This defaults to the current default as

@@ -118,6 +153,7 @@ Remove image from local docker storage:
'''

import re
import os
from urlparse import urlparse

try:

@@ -161,11 +197,90 @@ class DockerImageManager:
self.name = self.module.params.get('name')
self.tag = self.module.params.get('tag')
self.nocache = self.module.params.get('nocache')
docker_url = urlparse(module.params.get('docker_url'))

# Connect to the docker server using any configured host and TLS settings.

env_host = os.getenv('DOCKER_HOST')
env_docker_verify = os.getenv('DOCKER_TLS_VERIFY')
env_cert_path = os.getenv('DOCKER_CERT_PATH')
env_docker_hostname = os.getenv('DOCKER_TLS_HOSTNAME')

docker_url = module.params.get('docker_url')
if not docker_url:
if env_host:
docker_url = env_host
else:
docker_url = 'unix://var/run/docker.sock'

docker_api_version = module.params.get('docker_api_version')

tls_client_cert = module.params.get('tls_client_cert', None)
if not tls_client_cert and env_cert_path:
tls_client_cert = os.path.join(env_cert_path, 'cert.pem')

tls_client_key = module.params.get('tls_client_key', None)
if not tls_client_key and env_cert_path:
tls_client_key = os.path.join(env_cert_path, 'key.pem')

tls_ca_cert = module.params.get('tls_ca_cert')
if not tls_ca_cert and env_cert_path:
tls_ca_cert = os.path.join(env_cert_path, 'ca.pem')

tls_hostname = module.params.get('tls_hostname')
if tls_hostname is None:
if env_docker_hostname:
tls_hostname = env_docker_hostname
else:
parsed_url = urlparse(docker_url)
if ':' in parsed_url.netloc:
tls_hostname = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
else:
tls_hostname = parsed_url
if not tls_hostname:
tls_hostname = True

# use_tls can be one of four values:
# no: Do not use tls
# encrypt: Use tls. We may do client auth. We will not verify the server
# verify: Use tls. We may do client auth. We will verify the server
# None: Only use tls if the parameters for client auth were specified
# or tls_ca_cert (which requests verifying the server with
# a specific ca certificate)
use_tls = module.params.get('use_tls')
if use_tls is None and env_docker_verify is not None:
use_tls = 'verify'

tls_config = None
if use_tls != 'no':
params = {}

# Setup client auth
if tls_client_cert and tls_client_key:
params['client_cert'] = (tls_client_cert, tls_client_key)

# We're allowed to verify the connection to the server
if use_tls == 'verify' or (use_tls is None and tls_ca_cert):
if tls_ca_cert:
params['ca_cert'] = tls_ca_cert
params['verify'] = True
params['assert_hostname'] = tls_hostname
else:
params['verify'] = True
params['assert_hostname'] = tls_hostname
elif use_tls == 'encrypt':
params['verify'] = False

if params:
# See https://github.com/docker/docker-py/blob/d39da11/docker/utils/utils.py#L279-L296
docker_url = docker_url.replace('tcp://', 'https://')
tls_config = docker.tls.TLSConfig(**params)

self.client = docker.Client(
base_url=docker_url.geturl(),
base_url=docker_url,
version=module.params.get('docker_api_version'),
timeout=module.params.get('timeout'))
timeout=module.params.get('timeout'),
tls=tls_config)

self.changed = False
self.log = []
self.error_msg = None

@@ -244,7 +359,12 @@ def main():
tag = dict(required=False, default="latest"),
nocache = dict(default=False, type='bool'),
state = dict(default='present', choices=['absent', 'present', 'build']),
docker_url = dict(default='unix://var/run/docker.sock'),
use_tls = dict(default=None, choices=['no', 'encrypt', 'verify']),
tls_client_cert = dict(required=False, default=None, type='str'),
tls_client_key = dict(required=False, default=None, type='str'),
tls_ca_cert = dict(required=False, default=None, type='str'),
tls_hostname = dict(required=False, type='str', default=None),
docker_url = dict(),
docker_api_version = dict(required=False,
default=DEFAULT_DOCKER_API_VERSION,
type='str'),

@@ -286,6 +406,45 @@ def main():

module.exit_json(failed=failed, changed=manager.has_changed(), msg=msg, image_id=image_id)

except SSLError as e:
if get_platform() == "Darwin":
# Ensure that the environment variables have been set
if "DOCKER_HOST" not in os.environ:
environment_error = '''
It looks like you have not set your docker environment
variables. Please ensure that you have set the requested
variables as instructed when running boot2docker up. If
they are set in .bash_profile you will need to symlink
it to .bashrc.
'''
module.exit_json(failed=True, changed=manager.has_changed(), msg="SSLError: " + str(e) + environment_error)
# If the above is true it's likely the hostname does not match
else:
environment_error = '''
You may need to ignore hostname mismatches by setting
tls_hostname=boot2docker in your role. If this does not
resolve the issue please open an issue at
ansible/ansible-modules-core and ping michaeljs1990
'''
module.exit_json(failed=True, changed=manager.has_changed(), msg="SSLError: " + str(e) + environment_error)
# General error for non darwin users
else:
module.exit_json(failed=True, changed=manager.has_changed(), msg="SSLError: " + str(e))

except ConnectionError as e:
if get_platform() == "Darwin" and "DOCKER_HOST" not in os.environ:
# Ensure that the environment variables have been set
environment_error = '''
It looks like you have not set your docker environment
variables. Please ensure that you have set the requested
variables as instructed when running boot2docker up. If
they are set in .bash_profile you will need to symlink
it to .bashrc.
'''
module.exit_json(failed=True, changed=manager.has_changed(), msg="ConnectionError: " + str(e) + environment_error)

module.exit_json(failed=True, changed=manager.has_changed(), msg="ConnectionError: " + str(e))

except DockerAPIError as e:
module.exit_json(failed=True, changed=manager.has_changed(), msg="Docker API error: " + e.explanation)
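A sketch of connecting over TLS with the options added above; the host, image, and hostname values are placeholders:

- docker_image:
    name: myorg/myimage
    state: present
    docker_url: tcp://192.168.59.103:2376
    use_tls: verify
    tls_hostname: boot2docker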
@@ -44,7 +44,8 @@ options:
default: "n1-standard-1"
metadata:
description:
- a hash/dictionary of custom data for the instance; '{"key":"value",...}'
- a hash/dictionary of custom data for the instance;
'{"key":"value", ...}'
required: false
default: null
service_account_email:

@@ -56,10 +57,17 @@ options:
service_account_permissions:
version_added: "2.0"
description:
- service account permissions (see U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create), --scopes section for detailed information)
- service account permissions (see
U(https://cloud.google.com/sdk/gcloud/reference/compute/instances/create),
--scopes section for detailed information)
required: false
default: null
choices: ["bigquery", "cloud-platform", "compute-ro", "compute-rw", "computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write", "monitoring", "sql", "sql-admin", "storage-full", "storage-ro", "storage-rw", "taskqueue", "userinfo-email"]
choices: [
"bigquery", "cloud-platform", "compute-ro", "compute-rw",
"computeaccounts-ro", "computeaccounts-rw", "datastore", "logging-write",
"monitoring", "sql", "sql-admin", "storage-full", "storage-ro",
"storage-rw", "taskqueue", "userinfo-email"
]
pem_file:
version_added: "1.5.1"
description:

@@ -88,7 +96,10 @@ options:
default: "false"
disks:
description:
- a list of persistent disks to attach to the instance; a string value gives the name of the disk; alternatively, a dictionary value can define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry will be the boot disk (which must be READ_WRITE).
- a list of persistent disks to attach to the instance; a string value
gives the name of the disk; alternatively, a dictionary value can
define 'name' and 'mode' ('READ_ONLY' or 'READ_WRITE'). The first entry
will be the boot disk (which must be READ_WRITE).
required: false
default: null
version_added: "1.7"

@@ -111,7 +122,8 @@ options:
ip_forward:
version_added: "1.9"
description:
- set to true if the instance can forward ip packets (useful for gateways)
- set to true if the instance can forward ip packets (useful for
gateways)
required: false
default: "false"
external_ip:

@@ -167,7 +179,8 @@ EXAMPLES = '''
tasks:
- name: Launch instances
local_action: gce instance_names={{names}} machine_type={{machine_type}}
image={{image}} zone={{zone}} service_account_email={{ service_account_email }}
image={{image}} zone={{zone}}
service_account_email={{ service_account_email }}
pem_file={{ pem_file }} project_id={{ project_id }}
register: gce
- name: Wait for SSH to come up

@@ -195,10 +208,11 @@ EXAMPLES = '''
'''

try:
import libcloud
from libcloud.compute.types import Provider
from libcloud.compute.providers import get_driver
from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
ResourceExistsError, ResourceInUseError, ResourceNotFoundError
ResourceExistsError, ResourceInUseError, ResourceNotFoundError
_ = Provider.GCE
HAS_LIBCLOUD = True
except ImportError:

@@ -239,7 +253,7 @@ def get_instance_info(inst):
public_ip = inst.public_ips[0]

return({
'image': not inst.image is None and inst.image.split('/')[-1] or None,
'image': inst.image is not None and inst.image.split('/')[-1] or None,
'disks': disk_names,
'machine_type': inst.size,
'metadata': metadata,

@@ -250,7 +264,8 @@ def get_instance_info(inst):
'status': ('status' in inst.extra) and inst.extra['status'] or None,
'tags': ('tags' in inst.extra) and inst.extra['tags'] or [],
'zone': ('zone' in inst.extra) and inst.extra['zone'].name or None,
})
})


def create_instances(module, gce, instance_names):
"""Creates new instances. Attributes other than instance_names are picked

@@ -308,25 +323,31 @@ def create_instances(module, gce, instance_names):
# with:
# [ {'key': key1, 'value': value1}, {'key': key2, 'value': value2}, ...]
if metadata:
try:
md = literal_eval(str(metadata))
if not isinstance(md, dict):
raise ValueError('metadata must be a dict')
except ValueError, e:
module.fail_json(msg='bad metadata: %s' % str(e))
except SyntaxError, e:
module.fail_json(msg='bad metadata syntax')
if isinstance(metadata, dict):
md = metadata
else:
try:
md = literal_eval(str(metadata))
if not isinstance(md, dict):
raise ValueError('metadata must be a dict')
except ValueError as e:
module.fail_json(msg='bad metadata: %s' % str(e))
except SyntaxError as e:
module.fail_json(msg='bad metadata syntax')

if hasattr(libcloud, '__version__') and libcloud.__version__ < '0.15':
items = []
for k,v in md.items():
items.append({"key": k,"value": v})
for k, v in md.items():
items.append({"key": k, "value": v})
metadata = {'items': items}
else:
metadata = md

ex_sa_perms = []
bad_perms = []
if service_account_permissions:
for perm in service_account_permissions:
if not perm in gce.SA_SCOPES_MAP.keys():
if perm not in gce.SA_SCOPES_MAP.keys():
bad_perms.append(perm)
if len(bad_perms) > 0:
module.fail_json(msg='bad permissions: %s' % str(bad_perms))

@@ -339,7 +360,7 @@ def create_instances(module, gce, instance_names):
# These variables all have default values but check just in case
if not lc_image or not lc_network or not lc_machine_type or not lc_zone:
module.fail_json(msg='Missing required create instance variable',
changed=False)
changed=False)

for name in instance_names:
pd = None

@@ -352,16 +373,19 @@ def create_instances(module, gce, instance_names):
pd = gce.ex_get_volume("%s" % name, lc_zone)
inst = None
try:
inst = gce.create_node(name, lc_machine_type, lc_image,
location=lc_zone, ex_network=network, ex_tags=tags,
ex_metadata=metadata, ex_boot_disk=pd, ex_can_ip_forward=ip_forward,
external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete, ex_service_accounts=ex_sa_perms)
inst = gce.create_node(
name, lc_machine_type, lc_image, location=lc_zone,
ex_network=network, ex_tags=tags, ex_metadata=metadata,
ex_boot_disk=pd, ex_can_ip_forward=ip_forward,
external_ip=external_ip, ex_disk_auto_delete=disk_auto_delete,
ex_service_accounts=ex_sa_perms
)
changed = True
except ResourceExistsError:
inst = gce.ex_get_node(name, lc_zone)
except GoogleBaseError, e:
module.fail_json(msg='Unexpected error attempting to create ' + \
'instance %s, error: %s' % (name, e.value))
except GoogleBaseError as e:
module.fail_json(msg='Unexpected error attempting to create ' +
'instance %s, error: %s' % (name, e.value))

for i, lc_disk in enumerate(lc_disks):
# Check whether the disk is already attached

@@ -417,7 +441,7 @@ def terminate_instances(module, gce, instance_names, zone_name):
inst = gce.ex_get_node(name, zone_name)
except ResourceNotFoundError:
pass
except Exception, e:
except Exception as e:
module.fail_json(msg=unexpected_error_msg(e), changed=False)
if inst:
gce.destroy_node(inst)

@@ -429,27 +453,27 @@ def terminate_instances(module, gce, instance_names, zone_name):

def main():
module = AnsibleModule(
argument_spec = dict(
image = dict(default='debian-7'),
instance_names = dict(),
machine_type = dict(default='n1-standard-1'),
metadata = dict(),
name = dict(),
network = dict(default='default'),
persistent_boot_disk = dict(type='bool', default=False),
disks = dict(type='list'),
state = dict(choices=['active', 'present', 'absent', 'deleted'],
default='present'),
tags = dict(type='list'),
zone = dict(default='us-central1-a'),
service_account_email = dict(),
service_account_permissions = dict(type='list'),
pem_file = dict(),
project_id = dict(),
ip_forward = dict(type='bool', default=False),
external_ip = dict(choices=['ephemeral', 'none'],
default='ephemeral'),
disk_auto_delete = dict(type='bool', default=True),
argument_spec=dict(
image=dict(default='debian-7'),
instance_names=dict(),
machine_type=dict(default='n1-standard-1'),
metadata=dict(),
name=dict(),
network=dict(default='default'),
persistent_boot_disk=dict(type='bool', default=False),
disks=dict(type='list'),
state=dict(choices=['active', 'present', 'absent', 'deleted'],
default='present'),
tags=dict(type='list'),
zone=dict(default='us-central1-a'),
service_account_email=dict(),
service_account_permissions=dict(type='list'),
pem_file=dict(),
project_id=dict(),
ip_forward=dict(type='bool', default=False),
external_ip=dict(choices=['ephemeral', 'none'],
default='ephemeral'),
disk_auto_delete=dict(type='bool', default=True),
)
)


@@ -482,15 +506,15 @@ def main():
inames.append(name)
if not inames:
module.fail_json(msg='Must specify a "name" or "instance_names"',
changed=False)
changed=False)
if not zone:
module.fail_json(msg='Must specify a "zone"', changed=False)

json_output = {'zone': zone}
if state in ['absent', 'deleted']:
json_output['state'] = 'absent'
(changed, terminated_instance_names) = terminate_instances(module,
gce, inames, zone)
(changed, terminated_instance_names) = terminate_instances(
module, gce, inames, zone)

# based on what user specified, return the same variable, although
# value could be different if an instance could not be destroyed

@@ -501,15 +525,14 @@ def main():

elif state in ['active', 'present']:
json_output['state'] = 'present'
(changed, instance_data,instance_name_list) = create_instances(
module, gce, inames)
(changed, instance_data, instance_name_list) = create_instances(
module, gce, inames)
json_output['instance_data'] = instance_data
if instance_names:
json_output['instance_names'] = instance_name_list
elif name:
json_output['name'] = name


json_output['changed'] = changed
module.exit_json(**json_output)
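A hypothetical invocation exercising the dict-friendly metadata handling and the scope validation shown above; values are placeholders:

- gce:
    instance_names: test-node
    zone: us-central1-a
    machine_type: n1-standard-1
    image: debian-7
    metadata:
      env: staging
    service_account_permissions:
    - storage-ro
    - logging-write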
@@ -40,6 +40,7 @@ options:
ipv4_range:
description:
- the IPv4 address range in CIDR notation for the network.
This parameter is not mandatory when you specify an existing network in the name parameter, but it is mandatory when creating a new network.
required: false
aliases: ['cidr']
fwname:

@@ -212,7 +213,7 @@ def main():
# user wants to create a new network that doesn't yet exist
if name and not network:
if not ipv4_range:
module.fail_json(msg="Missing required 'ipv4_range' parameter",
module.fail_json(msg="Network '" + name + "' was not found. To create a network, the 'ipv4_range' parameter is required",
changed=False)

try:
@@ -90,7 +90,7 @@ options:
requirements:
- "python >= 2.6"
- python-keystoneclient
author: "Lorin Hochstein (@lorin)"
author: "Ansible Core Team (deprecated)"
'''

EXAMPLES = '''
@@ -164,18 +164,17 @@ def _get_neutron_client(module, kwargs):
def _set_tenant_id(module):
global _os_tenant_id
if not module.params['tenant_name']:
tenant_name = module.params['login_tenant_name']
_os_tenant_id = _os_keystone.tenant_id
else:
tenant_name = module.params['tenant_name']

for tenant in _os_keystone.tenants.list():
if tenant.name == tenant_name:
_os_tenant_id = tenant.id
break
for tenant in _os_keystone.tenants.list():
if tenant.name == tenant_name:
_os_tenant_id = tenant.id
break
if not _os_tenant_id:
module.fail_json(msg = "The tenant id cannot be found, please check the parameters")


def _get_net_id(neutron, module):
kwargs = {
'tenant_id': _os_tenant_id,
@@ -136,17 +136,16 @@ def _get_neutron_client(module, kwargs):
def _set_tenant_id(module):
global _os_tenant_id
if not module.params['tenant_name']:
login_tenant_name = module.params['login_tenant_name']
_os_tenant_id = _os_keystone.tenant_id
else:
login_tenant_name = module.params['tenant_name']
tenant_name = module.params['tenant_name']

for tenant in _os_keystone.tenants.list():
if tenant.name == login_tenant_name:
_os_tenant_id = tenant.id
break
for tenant in _os_keystone.tenants.list():
if tenant.name == tenant_name:
_os_tenant_id = tenant.id
break
if not _os_tenant_id:
module.fail_json(msg = "The tenant id cannot be found, please check the parameters")

module.fail_json(msg = "The tenant id cannot be found, please check the parameters")

def _get_router_id(module, neutron):
kwargs = {
@@ -138,18 +138,17 @@ def _get_neutron_client(module, kwargs):
def _set_tenant_id(module):
global _os_tenant_id
if not module.params['tenant_name']:
login_tenant_name = module.params['login_tenant_name']
_os_tenant_id = _os_keystone.tenant_id
else:
login_tenant_name = module.params['tenant_name']
tenant_name = module.params['tenant_name']

for tenant in _os_keystone.tenants.list():
if tenant.name == login_tenant_name:
_os_tenant_id = tenant.id
break
for tenant in _os_keystone.tenants.list():
if tenant.name == tenant_name:
_os_tenant_id = tenant.id
break
if not _os_tenant_id:
module.fail_json(msg = "The tenant id cannot be found, please check the parameters")


def _get_router_id(module, neutron):
kwargs = {
'name': module.params['router_name'],
@@ -170,16 +170,16 @@ def _get_neutron_client(module, kwargs):
def _set_tenant_id(module):
global _os_tenant_id
if not module.params['tenant_name']:
tenant_name = module.params['login_tenant_name']
_os_tenant_id = _os_keystone.tenant_id
else:
tenant_name = module.params['tenant_name']

for tenant in _os_keystone.tenants.list():
if tenant.name == tenant_name:
_os_tenant_id = tenant.id
break
for tenant in _os_keystone.tenants.list():
if tenant.name == tenant_name:
_os_tenant_id = tenant.id
break
if not _os_tenant_id:
module.fail_json(msg = "The tenant id cannot be found, please check the parameters")
module.fail_json(msg = "The tenant id cannot be found, please check the parameters")

def _get_net_id(neutron, module):
kwargs = {
@@ -122,10 +122,10 @@ def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True),
state=dict(default='present', choices=['absent', 'present']),
network=dict(required=False),
floating_ip_address=dict(required=False),
network=dict(required=False, default=None),
floating_ip_address=dict(required=False, default=None),
reuse=dict(required=False, type='bool', default=False),
fixed_address=dict(required=False),
fixed_address=dict(required=False, default=None),
wait=dict(required=False, type='bool', default=False),
timeout=dict(required=False, type='int', default=60),
)

@@ -154,23 +154,12 @@ def main():
msg="server {0} not found".format(server_name_or_id))

if state == 'present':
if floating_ip_address is None:
if reuse:
f_ip = cloud.available_floating_ip(network=network)
else:
f_ip = cloud.create_floating_ip(network=network)
else:
f_ip = _get_floating_ip(cloud, floating_ip_address)
if f_ip is None:
module.fail_json(
msg="floating IP {0} not found".format(
floating_ip_address))

cloud.attach_ip_to_server(
server_id=server['id'], floating_ip_id=f_ip['id'],
server = cloud.add_ips_to_server(
server=server, ips=floating_ip_address, reuse=reuse,
fixed_address=fixed_address, wait=wait, timeout=timeout)
fip_address = cloud.get_server_public_ip(server)
# Update the floating IP status
f_ip = cloud.get_floating_ip(id=f_ip['id'])
f_ip = _get_floating_ip(cloud, fip_address)
module.exit_json(changed=True, floating_ip=f_ip)

elif state == 'absent':
|
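Not part of the diff: a minimal sketch of the consolidated state=present flow the hunk above switches to, assuming a configured os-client-config cloud named mycloud and a server named vm1 (both names are illustrative). shade's add_ips_to_server() folds the allocate/reuse/attach sequence the old code performed by hand into one call.

    import shade

    cloud = shade.openstack_cloud(cloud='mycloud')   # assumed cloud name
    server = cloud.get_server('vm1')                 # assumed server name
    server = cloud.add_ips_to_server(
        server=server, ips=None, reuse=True,
        fixed_address=None, wait=True, timeout=60)
    # the module then re-reads the floating IP record for the result
    print(cloud.get_server_public_ip(server))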
@@ -56,12 +56,12 @@ options:
     default: None
   min_disk:
     description:
        - The minimum disk space required to deploy this image
        - The minimum disk space (in GB) required to boot this image
     required: false
     default: None
   min_ram:
     description:
        - The minimum ram required to deploy this image
        - The minimum ram (in MB) required to boot this image
     required: false
     default: None
   is_public:
@@ -125,8 +125,8 @@ def main():
        disk_format = dict(default='qcow2', choices=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2', 'vdi', 'iso']),
        container_format = dict(default='bare', choices=['ami', 'aki', 'ari', 'bare', 'ovf', 'ova']),
        owner = dict(default=None),
        min_disk = dict(default=None),
        min_ram = dict(default=None),
        min_disk = dict(type='int', default=0),
        min_ram = dict(type='int', default=0),
        is_public = dict(default=False),
        filename = dict(default=None),
        ramdisk = dict(default=None),
@@ -156,6 +156,8 @@ def main():
                wait=module.params['wait'],
                timeout=module.params['timeout'],
                is_public=module.params['is_public'],
                min_disk=module.params['min_disk'],
                min_ram=module.params['min_ram']
            )
            changed = True
            if not module.params['wait']:
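Not part of the diff: a hedged sketch of the call the hunk above extends, assuming shade passes extra keyword arguments such as min_disk and min_ram through to the image service. The cloud name and image details are illustrative; with the argument-spec change above, min_disk (GB) and min_ram (MB) arrive as plain ints.

    import shade

    cloud = shade.openstack_cloud(cloud='mycloud')
    cloud.create_image(
        name='cirros', filename='/tmp/cirros.qcow2',
        disk_format='qcow2', container_format='bare',
        wait=True, timeout=600,
        is_public=False, min_disk=8, min_ram=512)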
@@ -183,17 +183,11 @@ def _choose_id_value(module):
    return None


def _is_value_true(value):
    true_values = [True, 'yes', 'Yes', 'True', 'true']
    if value in true_values:
        return True
    return False


def _choose_if_password_only(module, patch):
    if len(patch) is 1:
        if 'password' in patch[0]['path'] and _is_value_true(
                module.params['skip_update_of_masked_password']):
        if 'password' in patch[0]['path'] and module.params['skip_update_of_masked_password']:
            # Return false to abort update as the password appears
            # to be the only element in the patch.
            return False
@@ -219,7 +213,7 @@ def main():
            properties=dict(type='dict', default={}),
            ironic_url=dict(required=False),
            chassis_uuid=dict(required=False),
            skip_update_of_masked_password=dict(required=False, choices=BOOLEANS),
            skip_update_of_masked_password=dict(required=False, type='bool'),
            state=dict(required=False, default='present')
    )
    module_kwargs = openstack_module_kwargs()
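Not part of the diff: an illustration of why type='bool' makes the removed _is_value_true() helper redundant. AnsibleModule normalizes the usual yes/no spellings to real booleans before the module body runs; the helper below only mimics that coercion for demonstration.

    def to_bool(value):
        # approximates Ansible's truthy spellings ('1', 'true', 'yes', 'on')
        return str(value).lower() in ('1', 'true', 'yes', 'on')

    for raw in (True, 'yes', 'True', 'no', 'off'):
        print(repr(raw), '->', to_bool(raw))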
@@ -61,8 +61,7 @@ options:
   security_groups:
     description:
        - Security group(s) ID(s) or name(s) associated with the port (comma
          separated for multiple security groups - no spaces between comma(s)
          or YAML list).
          separated string or YAML list)
     required: false
     default: None
   no_security_groups:
@@ -220,7 +219,7 @@ def _needs_update(module, port, cloud):
                      'device_id']
    compare_dict = ['allowed_address_pairs',
                    'extra_dhcp_opt']
    compare_comma_separated_list = ['security_groups']
    compare_list = ['security_groups']

    for key in compare_simple:
        if module.params[key] is not None and module.params[key] != port[key]:
@@ -229,7 +228,7 @@ def _needs_update(module, port, cloud):
        if module.params[key] is not None and cmp(module.params[key],
                                                  port[key]) != 0:
            return True
    for key in compare_comma_separated_list:
    for key in compare_list:
        if module.params[key] is not None and (set(module.params[key]) !=
                                               set(port[key])):
            return True
@@ -309,7 +308,7 @@ def main():
        fixed_ips=dict(default=None),
        admin_state_up=dict(default=None),
        mac_address=dict(default=None),
        security_groups=dict(default=None),
        security_groups=dict(default=None, type='list'),
        no_security_groups=dict(default=False, type='bool'),
        allowed_address_pairs=dict(default=None),
        extra_dhcp_opt=dict(default=None),
@@ -336,13 +335,11 @@ def main():
    try:
        cloud = shade.openstack_cloud(**module.params)
        if module.params['security_groups']:
            if type(module.params['security_groups']) == str:
                module.params['security_groups'] = module.params[
                    'security_groups'].split(',')
            # translate security_groups to UUID's if names where provided
            module.params['security_groups'] = map(
                lambda v: get_security_group_id(module, cloud, v),
                module.params['security_groups'])
            module.params['security_groups'] = [
                get_security_group_id(module, cloud, v)
                for v in module.params['security_groups']
            ]

        port = None
        network_id = None
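Not part of the diff: a minimal, self-contained sketch of the list-based name-to-UUID translation above. The lookup table stands in for get_security_group_id(), and the names and UUIDs are invented; with type='list' in the argument spec, a comma separated string is already split before the module sees it.

    KNOWN = {'web': 'uuid-web-1234', 'ssh': 'uuid-ssh-5678'}

    def get_security_group_id(name):
        # placeholder for the module helper of the same name
        return KNOWN[name]

    security_groups = ['web', 'ssh']   # already a list thanks to type='list'
    security_groups = [get_security_group_id(v) for v in security_groups]
    print(security_groups)             # ['uuid-web-1234', 'uuid-ssh-5678']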
@@ -335,7 +335,9 @@ def main():

            changed = True

        module.exit_json(changed=changed, router=router)
        module.exit_json(changed=changed,
                         router=router,
                         id=router['id'])

    elif state == 'absent':
        if not router:
@@ -77,7 +77,14 @@ options:
   security_groups:
     description:
        - Names of the security groups to which the instance should be
          added. This may be a YAML list or a common separated string.
          added. This may be a YAML list or a comma separated string.
     required: false
     default: None
   network:
     description:
        - Name or ID of a network to attach this instance to. A simpler
          version of the nics parameter, only one of network or nics should
          be supplied.
     required: false
     default: None
   nics:
@@ -86,7 +93,8 @@ options:
          be attached. Networks may be referenced by net-id/net-name/port-id
          or port-name.
        - 'Also this accepts a string containing a list of (net/port)-(id/name)
          Eg: nics: "net-id=uuid-1,port-name=myport"'
          Eg: nics: "net-id=uuid-1,port-name=myport"
          Only one of network or nics should be supplied.'
     required: false
     default: None
   auto_ip:
@@ -133,15 +141,32 @@ options:
        - Opaque blob of data which is made available to the instance
     required: false
     default: None
   root_volume:
   boot_from_volume:
     description:
        - Boot instance from a volume
        - Should the instance boot from a persistent volume created based on
          the image given. Mutually exclusive with boot_volume.
     required: false
     default: false
   volume_size:
     description:
        - The size of the volume to create in GB if booting from volume based
          on an image.
   boot_volume:
     description:
        - Volume name or id to use as the volume to boot from. Implies
          boot_from_volume. Mutually exclusive with image and boot_from_volume.
     required: false
     default: None
     aliases: ['root_volume']
   terminate_volume:
     description:
        - If true, delete volume when deleting instance (if booted from volume)
     default: false
   volumes:
     description:
        - A list of preexisting volumes names or ids to attach to the instance
     required: false
     default: []
   state:
     description:
        - Should the resource be present or absent.
@@ -280,6 +305,52 @@ EXAMPLES = '''
      - net-id: 34605f38-e52a-25d2-b6ec-754a13ffb723
      - net-name: another_network
    meta: "hostname=test1,group=uge_master"

# Creates a new instance and attaches to a specific network
- os_server:
    state: present
    auth:
      auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
      username: admin
      password: admin
      project_name: admin
    name: vm1
    image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
    key_name: ansible_key
    timeout: 200
    flavor: 4
    network: another_network

# Creates a new instance with 4G of RAM on a 75G Ubuntu Trusty volume
- name: launch a compute instance
  hosts: localhost
  tasks:
    - name: launch an instance
      os_server:
        name: vm1
        state: present
        cloud: mordred
        region_name: ams01
        image: Ubuntu Server 14.04
        flavor_ram: 4096
        boot_from_volume: True
        volume_size: 75

# Creates a new instance with 2 volumes attached
- name: launch a compute instance
  hosts: localhost
  tasks:
    - name: launch an instance
      os_server:
        name: vm1
        state: present
        cloud: mordred
        region_name: ams01
        image: Ubuntu Server 14.04
        flavor_ram: 4096
        volumes:
          - photos
          - music
'''
@@ -301,7 +372,14 @@ def _network_args(module, cloud):
    args = []
    nics = module.params['nics']

    if type(nics) != list:
        module.fail_json(msg='The \'nics\' parameter must be a list.')

    for net in _parse_nics(nics):
        if type(net) != dict:
            module.fail_json(
                msg='Each entry in the \'nics\' parameter must be a dict.')

        if net.get('net-id'):
            args.append(net)
        elif net.get('net-name'):
@@ -339,7 +417,7 @@ def _create_server(module, cloud):
    flavor_include = module.params['flavor_include']

    image_id = None
    if not module.params['root_volume']:
    if not module.params['boot_volume']:
        image_id = cloud.get_image_id(
            module.params['image'], module.params['image_exclude'])
@@ -371,7 +449,9 @@ def _create_server(module, cloud):
        userdata=module.params['userdata'],
        config_drive=module.params['config_drive'],
    )
    for optional_param in ('region_name', 'key_name', 'availability_zone'):
    for optional_param in (
            'region_name', 'key_name', 'availability_zone', 'network',
            'volume_size', 'volumes'):
        if module.params[optional_param]:
            bootkwargs[optional_param] = module.params[optional_param]

@@ -379,7 +459,8 @@ def _create_server(module, cloud):
        ip_pool=module.params['floating_ip_pools'],
        ips=module.params['floating_ips'],
        auto_ip=module.params['auto_ip'],
        root_volume=module.params['root_volume'],
        boot_volume=module.params['boot_volume'],
        boot_from_volume=module.params['boot_from_volume'],
        terminate_volume=module.params['terminate_volume'],
        wait=module.params['wait'], timeout=module.params['timeout'],
        **bootkwargs
@@ -461,6 +542,7 @@ def main():
        flavor_include = dict(default=None),
        key_name = dict(default=None),
        security_groups = dict(default=['default'], type='list'),
        network = dict(default=None),
        nics = dict(default=[], type='list'),
        meta = dict(default=None),
        userdata = dict(default=None),
@@ -468,8 +550,11 @@ def main():
        auto_ip = dict(default=True, type='bool', aliases=['auto_floating_ip', 'public_ip']),
        floating_ips = dict(default=None),
        floating_ip_pools = dict(default=None),
        root_volume = dict(default=None),
        volume_size = dict(default=False, type='int'),
        boot_from_volume = dict(default=False, type='bool'),
        boot_volume = dict(default=None, aliases=['root_volume']),
        terminate_volume = dict(default=False, type='bool'),
        volumes = dict(default=[], type='list'),
        state = dict(default='present', choices=['absent', 'present']),
    )
    module_kwargs = openstack_module_kwargs(
@@ -478,7 +563,12 @@ def main():
        ['auto_ip', 'floating_ip_pools'],
        ['floating_ips', 'floating_ip_pools'],
        ['flavor', 'flavor_ram'],
        ['image', 'root_volume'],
        ['image', 'boot_volume'],
        ['boot_from_volume', 'boot_volume'],
        ['nics', 'network'],
        ],
        required_if=[
            ('boot_from_volume', True, ['volume_size', 'image']),
        ],
    )
    module = AnsibleModule(argument_spec, **module_kwargs)
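Not part of the diff: an illustration of the required_if rule added above. If boot_from_volume is True, then volume_size and image must both be set; the helper below mirrors (rather than reuses) AnsibleModule's own check, and the parameter values are invented.

    def check_required_if(rules, params):
        # returns the list of parameters that the rules say are missing
        missing = []
        for key, value, requirements in rules:
            if params.get(key) == value:
                missing.extend(r for r in requirements if not params.get(r))
        return missing

    rules = [('boot_from_volume', True, ['volume_size', 'image'])]
    print(check_required_if(rules, {'boot_from_volume': True, 'image': 'trusty'}))
    # -> ['volume_size']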
@@ -488,14 +578,14 @@ def main():

    state = module.params['state']
    image = module.params['image']
    root_volume = module.params['root_volume']
    boot_volume = module.params['boot_volume']
    flavor = module.params['flavor']
    flavor_ram = module.params['flavor_ram']

    if state == 'present':
        if not (image or root_volume):
        if not (image or boot_volume):
            module.fail_json(
                msg="Parameter 'image' or 'root_volume' is required "
                msg="Parameter 'image' or 'boot_volume' is required "
                    "if state == 'present'"
            )
        if not flavor and not flavor_ram:
@@ -15,6 +15,8 @@
# You should have received a copy of the GNU General Public License
# along with this software.  If not, see <http://www.gnu.org/licenses/>.

import fnmatch

try:
    import shade
    from shade import meta
@@ -25,36 +27,47 @@ except ImportError:
DOCUMENTATION = '''
---
module: os_server_facts
short_description: Retrieve facts about a compute instance
short_description: Retrieve facts about one or more compute instances
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
    - Retrieve facts about a server instance from OpenStack.
    - Retrieve facts about server instances from OpenStack.
notes:
    - Facts are placed in the C(openstack) variable.
    - This module creates a new top-level C(openstack_servers) fact, which
      contains a list of servers.
requirements:
    - "python >= 2.6"
    - "shade"
options:
   server:
     description:
       - Name or ID of the instance
     required: true
       - restrict results to servers with names matching
         this glob expression (e.g., C<web*>).
     required: false
     default: None
   detailed:
     description:
       - when true, return additional detail about servers at the expense
         of additional API calls.
     required: false
     default: false
extends_documentation_fragment: openstack
'''

EXAMPLES = '''
# Gather facts about a previously created server named vm1
# Gather facts about all servers named C<web*>:
- os_server_facts:
    cloud: rax-dfw
    server: vm1
- debug: var=openstack
    server: web*
- debug:
    var: openstack_servers
'''


def main():

    argument_spec = openstack_full_argument_spec(
        server=dict(required=True),
        server=dict(required=False),
        detailed=dict(required=False, type='bool'),
    )
    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec, **module_kwargs)
@@ -64,10 +77,16 @@ def main():

    try:
        cloud = shade.openstack_cloud(**module.params)
        server = cloud.get_server(module.params['server'])
        hostvars = dict(openstack=meta.get_hostvars_from_server(
            cloud, server))
        module.exit_json(changed=False, ansible_facts=hostvars)
        openstack_servers = cloud.list_servers(
            detailed=module.params['detailed'])

        if module.params['server']:
            # filter servers by name
            pattern = module.params['server']
            openstack_servers = [server for server in openstack_servers
                                 if fnmatch.fnmatch(server['name'], pattern)]
        module.exit_json(changed=False, ansible_facts=dict(
            openstack_servers=openstack_servers))

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=e.message)
@@ -77,4 +96,3 @@ from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
    main()

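Not part of the diff: a minimal illustration of the fnmatch-based name filter introduced above; the server names are invented.

    import fnmatch

    servers = [{'name': 'web1'}, {'name': 'web2'}, {'name': 'db1'}]
    pattern = 'web*'
    print([s['name'] for s in servers if fnmatch.fnmatch(s['name'], pattern)])
    # -> ['web1', 'web2']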
@@ -17,7 +17,6 @@

try:
    import shade
    from shade import meta
    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False

@@ -28,6 +27,7 @@ module: os_user_group
short_description: Associate OpenStack Identity users and groups
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
    - Add and remove users from groups
options:
@@ -51,57 +51,66 @@ requirements:

EXAMPLES = '''
# Add the demo user to the demo group
- os_user_group: user=demo group=demo
- os_user_group:
    cloud: mycloud
    user: demo
    group: demo
'''


def main():
def _system_state_change(state, in_group):
    if state == 'present' and not in_group:
        return True
    if state == 'absent' and in_group:
        return True
    return False

def main():
    argument_spec = openstack_full_argument_spec(
    argument_spec = dict(
        user=dict(required=True),
        group=dict(required=True),
        state=dict(default='present', choices=['absent', 'present']),
    ))
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(argument_spec, **module_kwargs)
    module = AnsibleModule(argument_spec,
                           supports_check_mode=True,
                           **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    user = module.params.pop('user')
    group = module.params.pop('group')
    state = module.params.pop('state')
    user = module.params['user']
    group = module.params['group']
    state = module.params['state']

    try:
        cloud = shade.openstack_cloud(**module.params)
        cloud = shade.operator_cloud(**module.params)

        in_group = cloud.is_user_in_group(user, group)

        if state == 'present':
        if module.check_mode:
            module.exit_json(changed=_system_state_change(state, in_group))

            if in_group:
                changed = False
            else:
                cloud.add_user_to_group(
                    user_name_or_id=user, group_name_or_id=group)
        changed = False
        if state == 'present':
            if not in_group:
                cloud.add_user_to_group(user, group)
                changed = True

        elif state == 'absent':
            if in_group:
                cloud.remove_user_from_group(
                    user_name_or_id=user, group_name_or_id=group)
                cloud.remove_user_from_group(user, group)
                changed=True
            else:
                changed=False

        module.exit_json(changed=changed)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=e.message, extra_data=e.extra_data)


from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *


if __name__ == '__main__':
    main()
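Not part of the diff: the full truth table for the _system_state_change() helper added above. Check mode reports a change exactly when the requested state differs from the observed group membership.

    def _system_state_change(state, in_group):
        if state == 'present' and not in_group:
            return True
        if state == 'absent' and in_group:
            return True
        return False

    for state in ('present', 'absent'):
        for in_group in (True, False):
            print(state, in_group, '->', _system_state_change(state, in_group))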
@@ -1,4 +1,4 @@
#!/usr/bin/python
#!/usr/bin/env python2

# -*- coding: utf-8 -*-
# This file is part of Ansible
@@ -170,11 +170,16 @@ EXAMPLES = '''
        vcpu.hotadd: yes
        mem.hotadd:  yes
        notes: This is a test VM
      folder: MyFolder
    vm_disk:
      disk1:
        size_gb: 10
        type: thin
        datastore: storage001
    # VMs can be put into folders. The value given here is either the full path
    # to the folder (e.g. production/customerA/lamp) or just the last component
    # of the path (e.g. lamp):
    folder: production/customerA/lamp
    vm_nic:
      nic1:
        type: vmxnet3
@@ -241,6 +246,8 @@ EXAMPLES = '''
    template_src: centosTemplate
    cluster: MainCluster
    resource_pool: "/Resources"
    vm_extra_config:
      folder: MyFolder

# Task to gather facts from a vSphere cluster only if the system is a VMWare guest
@@ -597,7 +604,7 @@ def vmdisk_id(vm, current_datastore_name):
    return id_list


def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name, snapshot_to_clone, power_on_after_clone):
def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, module, cluster_name, snapshot_to_clone, power_on_after_clone, vm_extra_config):
    vmTemplate = vsphere_client.get_vm_by_name(template_src)
    vmTarget = None

@@ -653,7 +660,7 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo
    elif resource_pool:
        try:
            cluster = [k for k,
                       v in vsphere_client.get_clusters().items() if v == cluster_name][0]
                       v in vsphere_client.get_clusters().items() if v == cluster_name][0] if cluster_name else None
        except IndexError, e:
            vsphere_client.disconnect()
            module.fail_json(msg="Cannot find Cluster named: %s" %
@@ -689,6 +696,10 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo
            cloneArgs["linked"] = True
            cloneArgs["snapshot"] = snapshot_to_clone

        if vm_extra_config.get("folder") is not None:
            # if a folder is specified, clone the VM into it
            cloneArgs["folder"] = vm_extra_config.get("folder")

        vmTemplate.clone(guest, **cloneArgs)
        changed = True
    else:
@@ -701,12 +712,77 @@ def deploy_template(vsphere_client, guest, resource_pool, template_src, esxi, mo
            msg="Could not clone selected machine: %s" % e
        )

# example from https://github.com/kalazzerx/pysphere/blob/master/examples/pysphere_create_disk_and_add_to_vm.py
# was used.
def update_disks(vsphere_client, vm, module, vm_disk, changes):
    request = VI.ReconfigVM_TaskRequestMsg()
    changed = False

    for cnf_disk in vm_disk:
        disk_id = re.sub("disk", "", cnf_disk)
        found = False
        for dev_key in vm._devices:
            if vm._devices[dev_key]['type'] == 'VirtualDisk':
                hdd_id = vm._devices[dev_key]['label'].split()[2]
                if disk_id == hdd_id:
                    found = True
                    continue
        if not found:
            it = VI.ReconfigVM_TaskRequestMsg()
            _this = request.new__this(vm._mor)
            _this.set_attribute_type(vm._mor.get_attribute_type())
            request.set_element__this(_this)

            spec = request.new_spec()

            dc = spec.new_deviceChange()
            dc.Operation = "add"
            dc.FileOperation = "create"

            hd = VI.ns0.VirtualDisk_Def("hd").pyclass()
            hd.Key = -100
            hd.UnitNumber = int(disk_id)
            hd.CapacityInKB = int(vm_disk[cnf_disk]['size_gb']) * 1024 * 1024
            hd.ControllerKey = 1000

            # module.fail_json(msg="peos : %s" % vm_disk[cnf_disk])
            backing = VI.ns0.VirtualDiskFlatVer2BackingInfo_Def("backing").pyclass()
            backing.FileName = "[%s]" % vm_disk[cnf_disk]['datastore']
            backing.DiskMode = "persistent"
            backing.Split = False
            backing.WriteThrough = False
            backing.ThinProvisioned = False
            backing.EagerlyScrub = False
            hd.Backing = backing

            dc.Device = hd

            spec.DeviceChange = [dc]
            request.set_element_spec(spec)

            ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval

            # Wait for the task to finish
            task = VITask(ret, vsphere_client)
            status = task.wait_for_state([task.STATE_SUCCESS,
                                          task.STATE_ERROR])

            if status == task.STATE_SUCCESS:
                changed = True
                changes[cnf_disk] = vm_disk[cnf_disk]
            elif status == task.STATE_ERROR:
                module.fail_json(
                    msg="Error reconfiguring vm: %s, [%s]" % (
                        task.get_error_message(),
                        vm_disk[cnf_disk]))
    return changed, changes


def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, state, force):
    spec = None
    changed = False
    changes = {}
    request = VI.ReconfigVM_TaskRequestMsg()
    request = None
    shutdown = False
    poweron = vm.is_powered_on()
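Not part of the diff: the size arithmetic used by update_disks() above. vSphere expects the disk capacity in KB, so a size_gb value is scaled twice by 1024; the value here is illustrative.

    size_gb = 10
    capacity_in_kb = int(size_gb) * 1024 * 1024
    print(capacity_in_kb)  # 10485760 KB == 10 GB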
@@ -714,6 +790,10 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name
    cpuHotAddEnabled = bool(vm.properties.config.cpuHotAddEnabled)
    cpuHotRemoveEnabled = bool(vm.properties.config.cpuHotRemoveEnabled)

    changed, changes = update_disks(vsphere_client, vm,
                                    module, vm_disk, changes)
    request = VI.ReconfigVM_TaskRequestMsg()

    # Change Memory
    if 'memory_mb' in vm_hardware:

@@ -743,6 +823,9 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name
            # set the new RAM size
            spec.set_element_memoryMB(int(vm_hardware['memory_mb']))
            changes['memory'] = vm_hardware['memory_mb']
    # ===( Reconfigure Network )====#
    if vm_nic:
        changed = reconfigure_net(vsphere_client, vm, module, esxi, resource_pool, guest, vm_nic, cluster_name)

    # ====( Config Memory )====#
    if 'num_cpus' in vm_hardware:
@@ -814,6 +897,146 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name
    module.exit_json(changed=False)


def reconfigure_net(vsphere_client, vm, module, esxi, resource_pool, guest, vm_nic, cluster_name=None):
    s = vsphere_client
    nics = {}
    request = VI.ReconfigVM_TaskRequestMsg()
    _this = request.new__this(vm._mor)
    _this.set_attribute_type(vm._mor.get_attribute_type())
    request.set_element__this(_this)
    nic_changes = []
    datacenter = esxi['datacenter']
    # Datacenter managed object reference
    dclist = [k for k,
              v in vsphere_client.get_datacenters().items() if v == datacenter]
    if dclist:
        dcmor=dclist[0]
    else:
        vsphere_client.disconnect()
        module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)
    dcprops = VIProperty(vsphere_client, dcmor)
    nfmor = dcprops.networkFolder._obj
    for k,v in vm_nic.iteritems():
        nicNum = k[len(k) -1]
        if vm_nic[k]['network_type'] == 'dvs':
            portgroupKey = find_portgroup_key(module, s, nfmor, vm_nic[k]['network'])
            todvs = True
        elif vm_nic[k]['network_type'] == 'standard':
            todvs = False
        # Detect cards that need to be changed and network type (and act accordingly)
        for dev in vm.properties.config.hardware.device:
            if dev._type in ["VirtualE1000", "VirtualE1000e",
                             "VirtualPCNet32", "VirtualVmxnet",
                             "VirtualNmxnet2", "VirtualVmxnet3"]:
                devNum = dev.deviceInfo.label[len(dev.deviceInfo.label) - 1]
                if devNum == nicNum:
                    fromdvs = dev.deviceInfo.summary.split(':')[0] == 'DVSwitch'
                    if todvs and fromdvs:
                        if dev.backing.port._obj.get_element_portgroupKey() != portgroupKey:
                            nics[k] = (dev, portgroupKey, 1)
                    elif fromdvs and not todvs:
                        nics[k] = (dev, '', 2)
                    elif not fromdvs and todvs:
                        nics[k] = (dev, portgroupKey, 3)
                    elif not fromdvs and not todvs:
                        if dev.backing._obj.get_element_deviceName() != vm_nic[k]['network']:
                            nics[k] = (dev, '', 2)
                        else:
                            pass
                    else:
                        module.exit_json()

    if len(nics) > 0:
        for nic, obj in nics.iteritems():
            """
            1,2 and 3 are used to mark which action should be taken
            1 = from a distributed switch to a distributed switch
            2 = to a standard switch
            3 = to a distributed switch
            """
            dev = obj[0]
            pgKey = obj[1]
            dvsKey = obj[2]
            if dvsKey == 1:
                dev.backing.port._obj.set_element_portgroupKey(pgKey)
                dev.backing.port._obj.set_element_portKey('')
            if dvsKey == 3:
                dvswitch_uuid = find_dvswitch_uuid(module, s, nfmor, pgKey)
                nic_backing_port = VI.ns0.DistributedVirtualSwitchPortConnection_Def(
                    "nic_backing_port").pyclass()
                nic_backing_port.set_element_switchUuid(dvswitch_uuid)
                nic_backing_port.set_element_portgroupKey(pgKey)
                nic_backing_port.set_element_portKey('')
                nic_backing = VI.ns0.VirtualEthernetCardDistributedVirtualPortBackingInfo_Def(
                    "nic_backing").pyclass()
                nic_backing.set_element_port(nic_backing_port)
                dev._obj.set_element_backing(nic_backing)
            if dvsKey == 2:
                nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def(
                    "nic_backing").pyclass()
                nic_backing.set_element_deviceName(vm_nic[nic]['network'])
                dev._obj.set_element_backing(nic_backing)
        for nic, obj in nics.iteritems():
            dev = obj[0]
            spec = request.new_spec()
            nic_change = spec.new_deviceChange()
            nic_change.set_element_device(dev._obj)
            nic_change.set_element_operation("edit")
            nic_changes.append(nic_change)
        spec.set_element_deviceChange(nic_changes)
        request.set_element_spec(spec)
        ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval
        task = VITask(ret, vsphere_client)
        status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
        if status == task.STATE_SUCCESS:
            return(True)
        elif status == task.STATE_ERROR:
            module.fail_json(msg="Could not change network %s" % task.get_error_message())
    elif len(nics) == 0:
        return(False)


def _build_folder_tree(nodes, parent):
    tree = {}

    for node in nodes:
        if node['parent'] == parent:
            tree[node['name']] = dict.copy(node)
            tree[node['name']]['subfolders'] = _build_folder_tree(nodes, node['id'])
            del tree[node['name']]['parent']

    return tree


def _find_path_in_tree(tree, path):
    for name, o in tree.iteritems():
        if name == path[0]:
            if len(path) == 1:
                return o
            else:
                return _find_path_in_tree(o['subfolders'], path[1:])

    return None


def _get_folderid_for_path(vsphere_client, datacenter, path):
    content = vsphere_client._retrieve_properties_traversal(property_names=['name', 'parent'], obj_type=MORTypes.Folder)
    if not content: return {}

    node_list = [
        {
            'id': o.Obj,
            'name': o.PropSet[0].Val,
            'parent': (o.PropSet[1].Val if len(o.PropSet) > 1 else None)
        } for o in content
    ]

    tree = _build_folder_tree(node_list, datacenter)
    tree = _find_path_in_tree(tree, ['vm'])['subfolders']
    folder = _find_path_in_tree(tree, path.split('/'))
    return folder['id'] if folder else None

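Not part of the diff: a standalone run of the _build_folder_tree()/_find_path_in_tree() helpers from the hunk above, against invented folder records of the same shape the module builds from the vSphere property collector (dict items are used instead of Python 2's iteritems so the sketch runs anywhere).

    def _build_folder_tree(nodes, parent):
        tree = {}
        for node in nodes:
            if node['parent'] == parent:
                tree[node['name']] = dict.copy(node)
                tree[node['name']]['subfolders'] = _build_folder_tree(nodes, node['id'])
                del tree[node['name']]['parent']
        return tree

    def _find_path_in_tree(tree, path):
        for name, o in tree.items():
            if name == path[0]:
                if len(path) == 1:
                    return o
                return _find_path_in_tree(o['subfolders'], path[1:])
        return None

    nodes = [
        {'id': 'f1', 'name': 'production', 'parent': 'dc1'},
        {'id': 'f2', 'name': 'customerA', 'parent': 'f1'},
        {'id': 'f3', 'name': 'lamp', 'parent': 'f2'},
    ]
    tree = _build_folder_tree(nodes, 'dc1')
    print(_find_path_in_tree(tree, 'production/customerA/lamp'.split('/'))['id'])
    # -> f3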
def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, vm_hw_version, state):

    datacenter = esxi['datacenter']
@@ -834,13 +1057,19 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest,

    # virtualmachineFolder managed object reference
    if vm_extra_config.get('folder'):
        if vm_extra_config['folder'] not in vsphere_client._get_managed_objects(MORTypes.Folder).values():
        # try to find the folder by its full path, e.g. 'production/customerA/lamp'
        vmfmor = _get_folderid_for_path(vsphere_client, dcmor, vm_extra_config.get('folder'))

        # try the legacy behaviour of just matching the folder name, so 'lamp' alone matches 'production/customerA/lamp'
        if vmfmor is None:
            for mor, name in vsphere_client._get_managed_objects(MORTypes.Folder).iteritems():
                if name == vm_extra_config['folder']:
                    vmfmor = mor

        # if neither of strategies worked, bail out
        if vmfmor is None:
            vsphere_client.disconnect()
            module.fail_json(msg="Cannot find folder named: %s" % vm_extra_config['folder'])

        for mor, name in vsphere_client._get_managed_objects(MORTypes.Folder).iteritems():
            if name == vm_extra_config['folder']:
                vmfmor = mor
    else:
        vmfmor = dcprops.vmFolder._obj
@@ -882,7 +1111,7 @@ def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest,
    if resource_pool:
        try:
            cluster = [k for k,
                       v in vsphere_client.get_clusters().items() if v == cluster_name][0]
                       v in vsphere_client.get_clusters().items() if v == cluster_name][0] if cluster_name else None
        except IndexError, e:
            vsphere_client.disconnect()
            module.fail_json(msg="Cannot find Cluster named: %s" %
@@ -1306,7 +1535,7 @@ def main():
        argument_spec=dict(
            vcenter_hostname=dict(required=True, type='str'),
            username=dict(required=True, type='str'),
            password=dict(required=True, type='str'),
            password=dict(required=True, type='str', no_log=True),
            state=dict(
                required=False,
                choices=[
@@ -1455,7 +1684,8 @@ def main():
                module=module,
                cluster_name=cluster,
                snapshot_to_clone=snapshot_to_clone,
                power_on_after_clone=power_on_after_clone
                power_on_after_clone=power_on_after_clone,
                vm_extra_config=vm_extra_config
            )

        if state in ['restarted', 'reconfigured']:
@@ -47,12 +47,12 @@ options:
    default: null
  creates:
    description:
      - a filename or glob pattern, when it already exists, this step will B(not) be run.
      - a filename or (since 2.0) glob pattern, when it already exists, this step will B(not) be run.
    required: no
    default: null
  removes:
    description:
      - a filename or glob pattern, when it does not exist, this step will B(not) be run.
      - a filename or (since 2.0) glob pattern, when it does not exist, this step will B(not) be run.
    version_added: "0.8"
    required: no
    default: null
@@ -140,9 +140,9 @@ def check_command(commandline):
                 'rmdir': 'state=absent', 'rm': 'state=absent', 'touch': 'state=touch' }
    commands  = { 'git': 'git', 'hg': 'hg', 'curl': 'get_url', 'wget': 'get_url',
                'svn': 'subversion', 'service': 'service',
                'mount': 'mount', 'rpm': 'yum', 'yum': 'yum', 'apt-get': 'apt-get',
                'mount': 'mount', 'rpm': 'yum, dnf or zypper', 'yum': 'yum', 'apt-get': 'apt-get',
                'tar': 'unarchive', 'unzip': 'unarchive', 'sed': 'template or lineinfile',
                'rsync': 'synchronize' }
                'rsync': 'synchronize', 'dnf': 'dnf', 'zypper': 'zypper' }
    become   = [ 'sudo', 'su', 'pbrun', 'pfexec', 'runas' ]
    warnings = list()
    command = os.path.basename(commandline.split()[0])
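Not part of the diff: a minimal sketch of the lookup check_command() performs with the expanded table above. The command line is illustrative, and the warning phrasing mirrors the module's existing message.

    import os

    commands = {'git': 'git', 'rpm': 'yum, dnf or zypper',
                'dnf': 'dnf', 'zypper': 'zypper', 'rsync': 'synchronize'}

    commandline = 'rpm -ivh package.rpm'
    command = os.path.basename(commandline.split()[0])
    if command in commands:
        print("Consider using %s module rather than running %s" %
              (commands[command], command))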
@@ -30,36 +30,11 @@ options:
  name:
    description:
      - name of the database to add or remove
      - name=all May only be provided if I(state) is C(dump) or C(import).
      - name=all May only be provided if I(state) is C(dump) or C(import).
      - if name=all Works like --all-databases option for mysqldump (Added in 2.0)
    required: true
    default: null
    aliases: [ db ]
  login_user:
    description:
      - The username used to authenticate with
    required: false
    default: null
  login_password:
    description:
      - The password used to authenticate with
    required: false
    default: null
  login_host:
    description:
      - Host running the database
    required: false
    default: localhost
  login_port:
    description:
      - Port of the MySQL server. Requires login_host be defined as other then localhost if login_port is used
    required: false
    default: 3306
  login_unix_socket:
    description:
      - The path to a Unix domain socket for local connections
    required: false
    default: null
  state:
    description:
      - The database state
@@ -68,7 +43,7 @@ options:
    choices: [ "present", "absent", "dump", "import" ]
  collation:
    description:
      - Collation mode
      - Collation mode (sorting). This only applies to new table/databases and does not update existing ones, this is a limitation of MySQL.
    required: false
    default: null
  encoding:
@@ -79,18 +54,10 @@ options:
  target:
    description:
      - Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL
        files (C(.sql)) as well as bzip2 (C(.bz2)), gzip (C(.gz)) and xz compressed files are supported.
        files (C(.sql)) as well as bzip2 (C(.bz2)), gzip (C(.gz)) and xz (Added in 2.0) compressed files are supported.
    required: false
notes:
   - Requires the MySQLdb Python package on the remote host. For Ubuntu, this
     is as easy as apt-get install python-mysqldb. (See M(apt).) For CentOS/Fedora, this
     is as easy as yum install MySQL-python. (See M(yum).)
   - Both I(login_password) and I(login_user) are required when you are
     passing credentials. If none are present, the module will attempt to read
     the credentials from C(~/.my.cnf), and finally fall back to using the MySQL
     default login of C(root) with no password.
requirements: [ ConfigParser ]
author: "Mark Theunissen (@marktheunissen)"
author: "Ansible Core Team"
extends_documentation_fragment: mysql
'''

EXAMPLES = '''
@@ -108,11 +75,11 @@ EXAMPLES = '''
- mysql_db: state=import name=all target=/tmp/{{ inventory_hostname }}.sql
'''

import ConfigParser
import os
import pipes
import stat
import subprocess

try:
    import MySQLdb
except ImportError:
@@ -133,9 +100,22 @@ def db_delete(cursor, db):
    cursor.execute(query)
    return True

def db_dump(module, host, user, password, db_name, target, all_databases, port, socket=None):
def db_dump(module, host, user, password, db_name, target, all_databases, port, config_file, socket=None, ssl_cert=None, ssl_key=None, ssl_ca=None):
    cmd = module.get_bin_path('mysqldump', True)
    cmd += " --quick --user=%s --password=%s" % (pipes.quote(user), pipes.quote(password))
    # If defined, mysqldump demands --defaults-extra-file be the first option
    if config_file:
        cmd += " --defaults-extra-file=%s" % pipes.quote(config_file)
    cmd += " --quick"
    if user is not None:
        cmd += " --user=%s" % pipes.quote(user)
    if password is not None:
        cmd += " --password=%s" % pipes.quote(password)
    if ssl_cert is not None:
        cmd += " --ssl-cert=%s" % pipes.quote(ssl_cert)
    if ssl_key is not None:
        cmd += " --ssl-key=%s" % pipes.quote(ssl_key)
    if ssl_ca is not None:
        cmd += " --ssl-ca=%s" % pipes.quote(ssl_ca)
    if socket is not None:
        cmd += " --socket=%s" % pipes.quote(socket)
    else:
@@ -161,17 +141,26 @@ def db_dump(module, host, user, password, db_name, target, all_databases, port,
    rc, stdout, stderr = module.run_command(cmd, use_unsafe_shell=True)
    return rc, stdout, stderr

def db_import(module, host, user, password, db_name, target, all_databases, port, socket=None):
def db_import(module, host, user, password, db_name, target, all_databases, port, config_file, socket=None, ssl_cert=None, ssl_key=None, ssl_ca=None):
    if not os.path.exists(target):
        return module.fail_json(msg="target %s does not exist on the host" % target)

    cmd = [module.get_bin_path('mysql', True)]
    # --defaults-file must go first, or errors out
    if config_file:
        cmd.append("--defaults-extra-file=%s" % pipes.quote(config_file))
    if user:
        cmd.append("--user=%s" % pipes.quote(user))
    if password:
        cmd.append("--password=%s" % pipes.quote(password))
    if ssl_cert is not None:
        cmd.append("--ssl-cert=%s" % pipes.quote(ssl_cert))
    if ssl_key is not None:
        cmd.append("--ssl-key=%s" % pipes.quote(ssl_key))
    if ssl_ca is not None:
        cmd.append("--ssl-ca=%s" % pipes.quote(ssl_ca))
    if socket is not None:
        cmd.append("--socket=%s" % pipes.quote(socket))
    else:
        cmd.append("--host=%s" % pipes.quote(host))
        cmd.append("--port=%i" % port)
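Not part of the diff: a hedged sketch of the ordering constraint the hunk above works around. mysqldump only honours --defaults-extra-file when it is the very first option, so the new code appends it before anything else; the paths and credentials below are illustrative. (Note the two ssl_ca branches above originally re-tested ssl_cert; they are shown here guarding on ssl_ca, which is what the appended option actually uses.)

    import pipes

    def build_dump_cmd(config_file=None, user=None, password=None):
        cmd = 'mysqldump'
        # must be the first option, per mysqldump's option-file handling
        if config_file:
            cmd += " --defaults-extra-file=%s" % pipes.quote(config_file)
        cmd += " --quick"
        if user is not None:
            cmd += " --user=%s" % pipes.quote(user)
        if password is not None:
            cmd += " --password=%s" % pipes.quote(password)
        return cmd

    print(build_dump_cmd(config_file='~/.my.cnf', user='root', password='secret'))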
@@ -215,61 +204,6 @@ def db_create(cursor, db, encoding, collation):
    res = cursor.execute(query, query_params)
    return True

def strip_quotes(s):
    """ Remove surrounding single or double quotes

    >>> print strip_quotes('hello')
    hello
    >>> print strip_quotes('"hello"')
    hello
    >>> print strip_quotes("'hello'")
    hello
    >>> print strip_quotes("'hello")
    'hello

    """
    single_quote = "'"
    double_quote = '"'

    if s.startswith(single_quote) and s.endswith(single_quote):
        s = s.strip(single_quote)
    elif s.startswith(double_quote) and s.endswith(double_quote):
        s = s.strip(double_quote)
    return s


def config_get(config, section, option):
    """ Calls ConfigParser.get and strips quotes

    See: http://dev.mysql.com/doc/refman/5.0/en/option-files.html
    """
    return strip_quotes(config.get(section, option))


def load_mycnf():
    config = ConfigParser.RawConfigParser()
    mycnf = os.path.expanduser('~/.my.cnf')
    if not os.path.exists(mycnf):
        return False
    try:
        config.readfp(open(mycnf))
    except (IOError):
        return False
    # We support two forms of passwords in .my.cnf, both pass= and password=,
    # as these are both supported by MySQL.
    try:
        passwd = config_get(config, 'client', 'password')
    except (ConfigParser.NoOptionError):
        try:
            passwd = config_get(config, 'client', 'pass')
        except (ConfigParser.NoOptionError):
            return False
    try:
        creds = dict(user=config_get(config, 'client', 'user'),passwd=passwd)
    except (ConfigParser.NoOptionError):
        return False
    return creds

# ===========================================
# Module execution.
#
@@ -287,6 +221,10 @@ def main():
            collation=dict(default=""),
            target=dict(default=None),
            state=dict(default="present", choices=["absent", "present","dump", "import"]),
            ssl_cert=dict(default=None),
            ssl_key=dict(default=None),
            ssl_ca=dict(default=None),
            config_file=dict(default="~/.my.cnf"),
        )
    )

@@ -302,64 +240,41 @@ def main():
    login_port = module.params["login_port"]
    if login_port < 0 or login_port > 65535:
        module.fail_json(msg="login_port must be a valid unix port number (0-65535)")
    ssl_cert = module.params["ssl_cert"]
    ssl_key = module.params["ssl_key"]
    ssl_ca = module.params["ssl_ca"]
    config_file = module.params['config_file']
    config_file = os.path.expanduser(os.path.expandvars(config_file))
    login_password = module.params["login_password"]
    login_user = module.params["login_user"]
    login_host = module.params["login_host"]

    # make sure the target path is expanded for ~ and $HOME
    if target is not None:
        target = os.path.expandvars(os.path.expanduser(target))

    # Either the caller passes both a username and password with which to connect to
    # mysql, or they pass neither and allow this module to read the credentials from
    # ~/.my.cnf.
    login_password = module.params["login_password"]
    login_user = module.params["login_user"]
    if login_user is None and login_password is None:
        mycnf_creds = load_mycnf()
        if mycnf_creds is False:
            login_user = "root"
            login_password = ""
        else:
            login_user = mycnf_creds["user"]
            login_password = mycnf_creds["passwd"]
    elif login_password is None or login_user is None:
        module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided")
    login_host = module.params["login_host"]

    if state in ['dump','import']:
        if target is None:
            module.fail_json(msg="with state=%s target is required" % (state))
        if db == 'all':
            connect_to_db = 'mysql'
            db = 'mysql'
            all_databases = True
        else:
            connect_to_db = db
            all_databases = False
    else:
        if db == 'all':
            module.fail_json(msg="name is not allowed to equal 'all' unless state equals import, or dump.")
        connect_to_db = ''
    try:
        if socket:
            try:
                socketmode = os.stat(socket).st_mode
                if not stat.S_ISSOCK(socketmode):
                    module.fail_json(msg="%s, is not a socket, unable to connect" % socket)
            except OSError:
                module.fail_json(msg="%s, does not exist, unable to connect" % socket)
            db_connection = MySQLdb.connect(host=module.params["login_host"], unix_socket=socket, user=login_user, passwd=login_password, db=connect_to_db)
        elif login_port != 3306 and module.params["login_host"] == "localhost":
            module.fail_json(msg="login_host is required when login_port is defined, login_host cannot be localhost when login_port is defined")
        else:
            db_connection = MySQLdb.connect(host=module.params["login_host"], port=login_port, user=login_user, passwd=login_password, db=connect_to_db)
        cursor = db_connection.cursor()
        cursor = mysql_connect(module, login_user, login_password, config_file, ssl_cert, ssl_key, ssl_ca)
    except Exception, e:
        errno, errstr = e.args
        if "Unknown database" in str(e):
            module.fail_json(msg="ERROR: %s %s" % (errno, errstr))
        if os.path.exists(config_file):
            module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. Exception message: %s" % (config_file, e))
        else:
            module.fail_json(msg="unable to connect, check login credentials (login_user, and login_password, which can be defined in ~/.my.cnf), check that mysql socket exists and mysql server is running (ERROR: %s %s)" % (errno, errstr))
            module.fail_json(msg="unable to find %s. Exception message: %s" % (config_file, e))

    changed = False
    if not os.path.exists(config_file):
        config_file = None
    if db_exists(cursor, db):
        if state == "absent":
            try:
@@ -367,19 +282,17 @@ def main():
            except Exception, e:
                module.fail_json(msg="error deleting database: " + str(e))
        elif state == "dump":
            rc, stdout, stderr = db_dump(module, login_host, login_user,
            rc, stdout, stderr = db_dump(module, login_host, login_user,
                                        login_password, db, target, all_databases,
                                        port=login_port,
                                        socket=module.params['login_unix_socket'])
                                        login_port, config_file, socket, ssl_cert, ssl_key, ssl_ca)
            if rc != 0:
                module.fail_json(msg="%s" % stderr)
            else:
                module.exit_json(changed=True, db=db, msg=stdout)
        elif state == "import":
            rc, stdout, stderr = db_import(module, login_host, login_user,
            rc, stdout, stderr = db_import(module, login_host, login_user,
                                          login_password, db, target, all_databases,
                                          port=login_port,
                                          socket=module.params['login_unix_socket'])
                                          login_port, config_file, socket, ssl_cert, ssl_key, ssl_ca)
            if rc != 0:
                module.fail_json(msg="%s" % stderr)
            else:
@@ -396,5 +309,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.database import *
from ansible.module_utils.mysql import *
if __name__ == '__main__':
    main()
@@ -32,40 +32,30 @@ options:
    required: true
  password:
    description:
      - set the user's password
      - set the user's password. (Required when adding a user)
    required: false
    default: null
  encrypted:
    description:
      - Indicate that the 'password' field is a `mysql_native_password` hash
    required: false
    choices: [ "yes", "no" ]
    default: "no"
    version_added: "2.0"
  host:
    description:
      - the 'host' part of the MySQL username
    required: false
    default: localhost
  login_user:
  host_all:
    description:
      - The username used to authenticate with
      - override the host option, making ansible apply changes to
        all hostnames for a given user.  This option cannot be used
        when creating users
    required: false
    default: null
  login_password:
    description:
      - The password used to authenticate with
    required: false
    default: null
  login_host:
    description:
      - Host running the database
    required: false
    default: localhost
  login_port:
    description:
      - Port of the MySQL server
    required: false
    default: 3306
    version_added: '1.4'
  login_unix_socket:
    description:
      - The path to a Unix domain socket for local connections
    required: false
    default: null
    choices: [ "yes", "no" ]
    default: "no"
    version_added: "2.1"
  priv:
    description:
      - "MySQL privileges string in the format: C(db.table:priv1,priv2)"
@@ -100,46 +90,47 @@ options:
    version_added: "2.0"
    description:
      - C(always) will update passwords if they differ.  C(on_create) will only set the password for newly created users.
  config_file:
    description:
      - Specify a config file from which user and password are to be read
    required: false
    default: '~/.my.cnf'
    version_added: "2.0"
notes:
   - Requires the MySQLdb Python package on the remote host. For Ubuntu, this
     is as easy as apt-get install python-mysqldb.
   - Both C(login_password) and C(login_user) are required when you are
     passing credentials. If none are present, the module will attempt to read
     the credentials from C(~/.my.cnf), and finally fall back to using the MySQL
     default login of 'root' with no password.
   - "MySQL server installs with default login_user of 'root' and no password. To secure this user
     as part of an idempotent playbook, you must create at least two tasks: the first must change the root user's password,
     without providing any login_user/login_password details. The second must drop a ~/.my.cnf file containing
     the new root credentials. Subsequent runs of the playbook will then succeed by reading the new credentials from
     the file."
   - Currently, there is only support for the `mysql_native_password` encrypted password hash module.

requirements: [ "MySQLdb" ]
author: "Mark Theunissen (@marktheunissen)"
author: "Jonathan Mainguy (@Jmainguy)"
extends_documentation_fragment: mysql
'''

EXAMPLES = """
# Removes anonymous user account for localhost
- mysql_user: name='' host=localhost state=absent

# Removes all anonymous user accounts
- mysql_user: name='' host_all=yes state=absent

# Create database user with name 'bob' and password '12345' with all database privileges
- mysql_user: name=bob password=12345 priv=*.*:ALL state=present

# Create database user with name 'bob' and previously hashed mysql native password '*EE0D72C1085C46C5278932678FBE2C6A782821B4' with all database privileges
- mysql_user: name=bob password='*EE0D72C1085C46C5278932678FBE2C6A782821B4' encrypted=yes priv=*.*:ALL state=present

# Creates database user 'bob' and password '12345' with all database privileges and 'WITH GRANT OPTION'
- mysql_user: name=bob password=12345 priv=*.*:ALL,GRANT state=present

# Modify user Bob to require SSL connections. Note that REQUIRESSL is a special privilege that should only apply to *.* by itself.
- mysql_user: name=bob append_privs=true priv=*.*:REQUIRESSL state=present

# Ensure no user named 'sally' exists, also passing in the auth credentials.
# Ensure no user named 'sally'@'localhost' exists, also passing in the auth credentials.
- mysql_user: login_user=root login_password=123456 name=sally state=absent

# Ensure no user named 'sally' exists at all
- mysql_user: name=sally host_all=yes state=absent

# Specify grants composed of more than one word
- mysql_user: name=replication password=12345 priv=*.*:"REPLICATION CLIENT" state=present

# Revoke all privileges for user 'bob' and password '12345'
- mysql_user: name=bob password=12345 priv=*.*:USAGE state=present

# Example privileges string format
@@ -158,6 +149,7 @@ password=n<_665{vS43y
import getpass
import tempfile
import re
import string
try:
    import MySQLdb
except ImportError:
@@ -182,95 +174,155 @@ class InvalidPrivsError(Exception):
# MySQL module specific support methods.
#

def connect(module, login_user=None, login_password=None, config_file=''):
    config = {
        'host': module.params['login_host'],
        'db': 'mysql'
    }

    if module.params['login_unix_socket']:
        config['unix_socket'] = module.params['login_unix_socket']
    else:
        config['port'] = module.params['login_port']

    if os.path.exists(config_file):
        config['read_default_file'] = config_file

    # If login_user or login_password are given, they should override the
    # config file
    if login_user is not None:
        config['user'] = login_user
    if login_password is not None:
        config['passwd'] = login_password

    db_connection = MySQLdb.connect(**config)
    return db_connection.cursor()

def user_exists(cursor, user, host):
    cursor.execute("SELECT count(*) FROM user WHERE user = %s AND host = %s", (user,host))

# User Authentication Management was changed in MySQL 5.7
# This is a generic check for if the server version is less than version 5.7
def server_version_check(cursor):
    cursor.execute("SELECT VERSION()");
    result = cursor.fetchone()
    version_str = result[0]
    version = version_str.split('.')

    # Currently we have no facility to handle new-style password update on
    # mariadb and the old-style update continues to work
    if 'mariadb' in version_str.lower():
        return True
    if (int(version[0]) <= 5 and int(version[1]) < 7):
        return True
    else:
        return False

def user_exists(cursor, user, host, host_all):
    if host_all:
        cursor.execute("SELECT count(*) FROM user WHERE user = %s", user)
    else:
        cursor.execute("SELECT count(*) FROM user WHERE user = %s AND host = %s", (user,host))

    count = cursor.fetchone()
    return count[0] > 0

def user_add(cursor, user, host, password, new_priv):
    cursor.execute("CREATE USER %s@%s IDENTIFIED BY %s", (user,host,password))
def user_add(cursor, user, host, host_all, password, encrypted, new_priv):
    # we cannot create users without a proper hostname
    if host_all:
        return False

    if password and encrypted:
        cursor.execute("CREATE USER %s@%s IDENTIFIED BY PASSWORD %s", (user,host,password))
    elif password and not encrypted:
        cursor.execute("CREATE USER %s@%s IDENTIFIED BY %s", (user,host,password))

    if new_priv is not None:
        for db_table, priv in new_priv.iteritems():
            privileges_grant(cursor, user,host,db_table,priv)
    return True

def user_mod(cursor, user, host, password, new_priv, append_privs):
def is_hash(password):
    ishash = False
    if len(password) == 41 and password[0] == '*':
        if frozenset(password[1:]).issubset(string.hexdigits):
            ishash = True
    return ishash

def user_mod(cursor, user, host, host_all, password, encrypted, new_priv, append_privs):
    changed = False
    grant_option = False

    # Handle passwords
    if password is not None:
        cursor.execute("SELECT password FROM user WHERE user = %s AND host = %s", (user,host))
        current_pass_hash = cursor.fetchone()
        cursor.execute("SELECT PASSWORD(%s)", (password,))
        new_pass_hash = cursor.fetchone()
        if current_pass_hash[0] != new_pass_hash[0]:
            cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user,host,password))
            changed = True
    if host_all:
        hostnames = user_get_hostnames(cursor, user)
    else:
        hostnames = [host]

    # Handle privileges
    if new_priv is not None:
        curr_priv = privileges_get(cursor, user,host)
    for host in hostnames:
        # Handle clear text and hashed passwords.
        if bool(password):
            # Determine what user management method server uses
            old_user_mgmt = server_version_check(cursor)

            if old_user_mgmt:
                cursor.execute("SELECT password FROM user WHERE user = %s AND host = %s", (user,host))
            else:
                cursor.execute("SELECT authentication_string FROM user WHERE user = %s AND host = %s", (user,host))
            current_pass_hash = cursor.fetchone()

            if encrypted:
                encrypted_string = (password)
                if is_hash(password):
                    if current_pass_hash[0] != encrypted_string:
                        if old_user_mgmt:
                            cursor.execute("SET PASSWORD FOR %s@%s = %s", (user, host, password))
                        else:
                            cursor.execute("ALTER USER %s@%s IDENTIFIED WITH mysql_native_password AS %s", (user, host, password))
                        changed = True
                else:
                    module.fail_json(msg="encrypted was specified however it does not appear to be a valid hash expecting: *SHA1(SHA1(your_password))")
            else:
                if old_user_mgmt:
                    cursor.execute("SELECT PASSWORD(%s)", (password,))
                else:
                    cursor.execute("SELECT CONCAT('*', UCASE(SHA1(UNHEX(SHA1(%s)))))", (password,))
                new_pass_hash = cursor.fetchone()
                if current_pass_hash[0] != new_pass_hash[0]:
                    if old_user_mgmt:
                        cursor.execute("SET PASSWORD FOR %s@%s = PASSWORD(%s)", (user, host, password))
                    else:
                        cursor.execute("ALTER USER %s@%s IDENTIFIED BY %s", (user, host, password))
                    changed = True

        # Handle privileges
        if new_priv is not None:
            curr_priv = privileges_get(cursor, user,host)

    # If the user has privileges on a db.table that doesn't appear at all in
    # the new specification, then revoke all privileges on it.
    for db_table, priv in curr_priv.iteritems():
        # If the user has the GRANT OPTION on a db.table, revoke it first.
        if "GRANT" in priv:
            grant_option = True
        if db_table not in new_priv:
            if user != "root" and "PROXY" not in priv and not append_privs:
                privileges_revoke(cursor, user,host,db_table,priv,grant_option)
            # If the user has privileges on a db.table that doesn't appear at all in
            # the new specification, then revoke all privileges on it.
            for db_table, priv in curr_priv.iteritems():
                # If the user has the GRANT OPTION on a db.table, revoke it first.
                if "GRANT" in priv:
                    grant_option = True
                if db_table not in new_priv:
                    if user != "root" and "PROXY" not in priv and not append_privs:
                        privileges_revoke(cursor, user,host,db_table,priv,grant_option)
                        changed = True

            # If the user doesn't currently have any privileges on a db.table, then
            # we can perform a straight grant operation.
for db_table, priv in new_priv.iteritems():
|
||||
if db_table not in curr_priv:
|
||||
privileges_grant(cursor, user,host,db_table,priv)
|
||||
changed = True
|
||||
|
||||
# If the user doesn't currently have any privileges on a db.table, then
|
||||
# we can perform a straight grant operation.
|
||||
for db_table, priv in new_priv.iteritems():
|
||||
if db_table not in curr_priv:
|
||||
privileges_grant(cursor, user,host,db_table,priv)
|
||||
changed = True
|
||||
|
||||
# If the db.table specification exists in both the user's current privileges
|
||||
# and in the new privileges, then we need to see if there's a difference.
|
||||
db_table_intersect = set(new_priv.keys()) & set(curr_priv.keys())
|
||||
for db_table in db_table_intersect:
|
||||
priv_diff = set(new_priv[db_table]) ^ set(curr_priv[db_table])
|
||||
if (len(priv_diff) > 0):
|
||||
if not append_privs:
|
||||
privileges_revoke(cursor, user,host,db_table,curr_priv[db_table],grant_option)
|
||||
privileges_grant(cursor, user,host,db_table,new_priv[db_table])
|
||||
changed = True
|
||||
# If the db.table specification exists in both the user's current privileges
|
||||
# and in the new privileges, then we need to see if there's a difference.
|
||||
db_table_intersect = set(new_priv.keys()) & set(curr_priv.keys())
|
||||
for db_table in db_table_intersect:
|
||||
priv_diff = set(new_priv[db_table]) ^ set(curr_priv[db_table])
|
||||
if (len(priv_diff) > 0):
|
||||
if not append_privs:
|
||||
privileges_revoke(cursor, user,host,db_table,curr_priv[db_table],grant_option)
|
||||
privileges_grant(cursor, user,host,db_table,new_priv[db_table])
|
||||
changed = True
|
||||
|
||||
return changed
|
||||
|
||||
def user_delete(cursor, user, host):
|
||||
cursor.execute("DROP USER %s@%s", (user, host))
|
||||
def user_delete(cursor, user, host, host_all):
|
||||
if host_all:
|
||||
hostnames = user_get_hostnames(cursor, user)
|
||||
|
||||
for hostname in hostnames:
|
||||
cursor.execute("DROP USER %s@%s", (user, hostname))
|
||||
else:
|
||||
cursor.execute("DROP USER %s@%s", (user, host))
|
||||
|
||||
return True
|
||||
|
||||
def user_get_hostnames(cursor, user):
|
||||
cursor.execute("SELECT Host FROM mysql.user WHERE user = %s", user)
|
||||
hostnames_raw = cursor.fetchall()
|
||||
hostnames = []
|
||||
|
||||
for hostname_raw in hostnames_raw:
|
||||
hostnames.append(hostname_raw[0])
|
||||
|
||||
return hostnames
|
||||
|
||||
def privileges_get(cursor, user,host):
|
||||
""" MySQL doesn't have a better method of getting privileges aside from the
|
||||
SHOW GRANTS query syntax, which requires us to then parse the returned string.
|
||||
|
@ -388,26 +440,37 @@ def main():
|
|||
login_unix_socket=dict(default=None),
|
||||
user=dict(required=True, aliases=['name']),
|
||||
password=dict(default=None, no_log=True),
|
||||
encrypted=dict(default=False, type='bool'),
|
||||
host=dict(default="localhost"),
|
||||
host_all=dict(type="bool", default="no"),
|
||||
state=dict(default="present", choices=["absent", "present"]),
|
||||
priv=dict(default=None),
|
||||
append_privs=dict(default=False, type='bool'),
|
||||
check_implicit_admin=dict(default=False, type='bool'),
|
||||
update_password=dict(default="always", choices=["always", "on_create"]),
|
||||
config_file=dict(default="~/.my.cnf"),
|
||||
ssl_cert=dict(default=None),
|
||||
ssl_key=dict(default=None),
|
||||
ssl_ca=dict(default=None),
|
||||
)
|
||||
)
|
||||
login_user = module.params["login_user"]
|
||||
login_password = module.params["login_password"]
|
||||
user = module.params["user"]
|
||||
password = module.params["password"]
|
||||
encrypted = module.boolean(module.params["encrypted"])
|
||||
host = module.params["host"].lower()
|
||||
host_all = module.params["host_all"]
|
||||
state = module.params["state"]
|
||||
priv = module.params["priv"]
|
||||
check_implicit_admin = module.params['check_implicit_admin']
|
||||
config_file = module.params['config_file']
|
||||
append_privs = module.boolean(module.params["append_privs"])
|
||||
update_password = module.params['update_password']
|
||||
ssl_cert = module.params["ssl_cert"]
|
||||
ssl_key = module.params["ssl_key"]
|
||||
ssl_ca = module.params["ssl_ca"]
|
||||
db = 'mysql'
|
||||
|
||||
config_file = os.path.expanduser(os.path.expandvars(config_file))
|
||||
if not mysqldb_found:
|
||||
|
@ -423,35 +486,37 @@ def main():
|
|||
try:
|
||||
if check_implicit_admin:
|
||||
try:
|
||||
cursor = connect(module, 'root', '', config_file)
|
||||
cursor = mysql_connect(module, 'root', '', config_file, ssl_cert, ssl_key, ssl_ca, db)
|
||||
except:
|
||||
pass
|
||||
|
||||
if not cursor:
|
||||
cursor = connect(module, login_user, login_password, config_file)
|
||||
cursor = mysql_connect(module, login_user, login_password, config_file, ssl_cert, ssl_key, ssl_ca, db)
|
||||
except Exception, e:
|
||||
module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials. Exception message: %s" % e)
|
||||
module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. Exception message: %s" % (config_file, e))
|
||||
|
||||
if state == "present":
|
||||
if user_exists(cursor, user, host):
|
||||
if user_exists(cursor, user, host, host_all):
|
||||
try:
|
||||
if update_password == 'always':
|
||||
changed = user_mod(cursor, user, host, password, priv, append_privs)
|
||||
changed = user_mod(cursor, user, host, host_all, password, encrypted, priv, append_privs)
|
||||
else:
|
||||
changed = user_mod(cursor, user, host, None, priv, append_privs)
|
||||
changed = user_mod(cursor, user, host, host_all, None, encrypted, priv, append_privs)
|
||||
|
||||
except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e:
|
||||
module.fail_json(msg=str(e))
|
||||
else:
|
||||
if password is None:
|
||||
module.fail_json(msg="password parameter required when adding a user")
|
||||
if host_all:
|
||||
module.fail_json(msg="host_all parameter cannot be used when adding a user")
|
||||
try:
|
||||
changed = user_add(cursor, user, host, password, priv)
|
||||
changed = user_add(cursor, user, host, host_all, password, encrypted, priv)
|
||||
except (SQLParseError, InvalidPrivsError, MySQLdb.Error), e:
|
||||
module.fail_json(msg=str(e))
|
||||
elif state == "absent":
|
||||
if user_exists(cursor, user, host):
|
||||
changed = user_delete(cursor, user, host)
|
||||
if user_exists(cursor, user, host, host_all):
|
||||
changed = user_delete(cursor, user, host, host_all)
|
||||
else:
|
||||
changed = False
|
||||
module.exit_json(changed=changed, user=user)
|
||||
|
@ -459,5 +524,6 @@ def main():
|
|||
# import module snippets
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.database import *
|
||||
from ansible.module_utils.mysql import *
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
|
|
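The password branch above hinges on a single probe of the server version. As a minimal standalone sketch of the idea (the function name and the bare DB-API cursor are illustrative, not the module's API):

# Hedged sketch: pick between old-style SET PASSWORD and 5.7+ ALTER USER.
def uses_old_user_mgmt(cursor):
    cursor.execute("SELECT VERSION()")
    version_str = cursor.fetchone()[0]
    # MariaDB still accepts the old-style password update
    if 'mariadb' in version_str.lower():
        return True
    major, minor = [int(part) for part in version_str.split('.')[:2]]
    # MySQL < 5.7 stores hashes in user.password; 5.7+ in authentication_string
    return (major, minor) < (5, 7)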
@@ -40,26 +40,7 @@ options:

    description:
      - If set, then sets variable value to this
    required: False
  login_user:
    description:
      - username to connect to the mysql host; if defined, login_password is also needed.
    required: False
  login_password:
    description:
      - password to connect to the mysql host; if defined, login_user is also needed.
    required: False
  login_host:
    description:
      - mysql host to connect to
    required: False
  login_port:
    version_added: "2.0"
    description:
      - mysql port to connect to
    required: False
  login_unix_socket:
    description:
      - unix socket to connect to the mysql server
extends_documentation_fragment: mysql
'''
EXAMPLES = '''
# Check for sync_binlog setting

@@ -70,7 +51,6 @@ EXAMPLES = '''

'''


import ConfigParser
import os
import warnings
from re import match

@@ -134,66 +114,6 @@ def setvariable(cursor, mysqlvar, value):

        result = str(e)
    return result


def strip_quotes(s):
    """ Remove surrounding single or double quotes

    >>> print strip_quotes('hello')
    hello
    >>> print strip_quotes('"hello"')
    hello
    >>> print strip_quotes("'hello'")
    hello
    >>> print strip_quotes("'hello")
    'hello

    """
    single_quote = "'"
    double_quote = '"'

    if s.startswith(single_quote) and s.endswith(single_quote):
        s = s.strip(single_quote)
    elif s.startswith(double_quote) and s.endswith(double_quote):
        s = s.strip(double_quote)
    return s


def config_get(config, section, option):
    """ Calls ConfigParser.get and strips quotes

    See: http://dev.mysql.com/doc/refman/5.0/en/option-files.html
    """
    return strip_quotes(config.get(section, option))


def load_mycnf():
    config = ConfigParser.RawConfigParser()
    mycnf = os.path.expanduser('~/.my.cnf')
    if not os.path.exists(mycnf):
        return False
    try:
        config.readfp(open(mycnf))
    except (IOError):
        return False
    # We support two forms of passwords in .my.cnf, both pass= and password=,
    # as these are both supported by MySQL.
    try:
        passwd = config_get(config, 'client', 'password')
    except (ConfigParser.NoOptionError):
        try:
            passwd = config_get(config, 'client', 'pass')
        except (ConfigParser.NoOptionError):
            return False

    # If .my.cnf doesn't specify a user, default to user login name
    try:
        user = config_get(config, 'client', 'user')
    except (ConfigParser.NoOptionError):
        user = getpass.getuser()
    creds = dict(user=user, passwd=passwd)
    return creds


def main():
    module = AnsibleModule(
        argument_spec = dict(

@@ -203,14 +123,24 @@ def main():

            login_port=dict(default="3306", type='int'),
            login_unix_socket=dict(default=None),
            variable=dict(default=None),
            value=dict(default=None)
            value=dict(default=None),
            ssl_cert=dict(default=None),
            ssl_key=dict(default=None),
            ssl_ca=dict(default=None),
            config_file=dict(default="~/.my.cnf")
        )
    )
    user = module.params["login_user"]
    password = module.params["login_password"]
    host = module.params["login_host"]
    port = module.params["login_port"]
    ssl_cert = module.params["ssl_cert"]
    ssl_key = module.params["ssl_key"]
    ssl_ca = module.params["ssl_ca"]
    config_file = module.params['config_file']
    config_file = os.path.expanduser(os.path.expandvars(config_file))
    db = 'mysql'

    mysqlvar = module.params["variable"]
    value = module.params["value"]
    if mysqlvar is None:

@@ -222,29 +152,14 @@ def main():

    else:
        warnings.filterwarnings('error', category=MySQLdb.Warning)

    # Either the caller passes both a username and password with which to connect to
    # mysql, or they pass neither and allow this module to read the credentials from
    # ~/.my.cnf.
    login_password = module.params["login_password"]
    login_user = module.params["login_user"]
    if login_user is None and login_password is None:
        mycnf_creds = load_mycnf()
        if mycnf_creds is False:
            login_user = "root"
            login_password = ""
        else:
            login_user = mycnf_creds["user"]
            login_password = mycnf_creds["passwd"]
    elif login_password is None or login_user is None:
        module.fail_json(msg="when supplying login arguments, both login_user and login_password must be provided")
    try:
        if module.params["login_unix_socket"]:
            db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], unix_socket=module.params["login_unix_socket"], user=login_user, passwd=login_password, db="mysql")
        else:
            db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password, db="mysql")
        cursor = db_connection.cursor()
        cursor = mysql_connect(module, user, password, config_file, ssl_cert, ssl_key, ssl_ca, db)
    except Exception, e:
        module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials")
        if os.path.exists(config_file):
            module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or %s has the credentials. Exception message: %s" % (config_file, e))
        else:
            module.fail_json(msg="unable to find %s. Exception message: %s" % (config_file, e))

    mysqlvar_val = getvariable(cursor, mysqlvar)
    if mysqlvar_val is None:
        module.fail_json(msg="Variable not available \"%s\"" % mysqlvar, changed=False)

@@ -268,4 +183,5 @@ def main():

# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.database import *
from ansible.module_utils.mysql import *
main()
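The removed load_mycnf helper shows a reusable trick: MySQL option files are plain INI, so ConfigParser can read [client] credentials directly. A rough standalone sketch under that assumption (not the module's code, which now delegates to module_utils.mysql):

import ConfigParser
import getpass
import os

def read_client_creds(path='~/.my.cnf'):
    config = ConfigParser.RawConfigParser()
    if not config.read(os.path.expanduser(path)):
        return None
    if not config.has_section('client'):
        return None
    # MySQL accepts both password= and pass= in option files
    for key in ('password', 'pass'):
        if config.has_option('client', key):
            passwd = config.get('client', key)
            break
    else:
        return None
    # Fall back to the login name when no user= is given
    if config.has_option('client', 'user'):
        user = config.get('client', 'user')
    else:
        user = getpass.getuser()
    return dict(user=user, passwd=passwd)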
@@ -95,7 +95,7 @@ notes:

- This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on
  the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module.
requirements: [ psycopg2 ]
author: "Lorin Hochstein (@lorin)"
author: "Ansible Core Team"
'''

EXAMPLES = '''
|
|||
to all users. You may not specify password or role_attr_flags when the
|
||||
PUBLIC user is specified.
|
||||
requirements: [ psycopg2 ]
|
||||
author: "Lorin Hochstein (@lorin)"
|
||||
author: "Ansible Core Team"
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
|
|
25 files/acl.py
@@ -127,10 +127,17 @@ def split_entry(entry):

    ''' splits entry and ensures normalized return'''

    a = entry.split(':')

    d = None
    if entry.lower().startswith("d"):
        d = True
        a.pop(0)

    if len(a) == 2:
        a.append(None)

    t, e, p = a
    t = t.lower()

    if t.startswith("u"):
        t = "user"

@@ -143,7 +150,7 @@ def split_entry(entry):

    else:
        t = None

    return [t, e, p]
    return [d, t, e, p]


def build_entry(etype, entity, permissions=None):

@@ -176,9 +183,9 @@ def build_command(module, mode, path, follow, default, recursive, entry=''):

    if default:
        if(mode == 'rm'):
            cmd.append('-k')
            cmd.insert(1, '-k')
        else:  # mode == 'set' or mode == 'get'
            cmd.append('-d')
            cmd.insert(1, '-d')

    cmd.append(path)
    return cmd

@@ -269,16 +276,18 @@ def main():

        if etype or entity or permissions:
            module.fail_json(msg="'entry' MUST NOT be set when 'entity', 'etype' or 'permissions' are set.")

        if state == 'present' and entry.count(":") != 2:
            module.fail_json(msg="'entry' MUST have 3 sections divided by ':' when 'state=present'.")
        if state == 'present' and not entry.count(":") in [2, 3]:
            module.fail_json(msg="'entry' MUST have 3 or 4 sections divided by ':' when 'state=present'.")

        if state == 'absent' and entry.count(":") != 1:
            module.fail_json(msg="'entry' MUST have 2 sections divided by ':' when 'state=absent'.")
        if state == 'absent' and not entry.count(":") in [1, 2]:
            module.fail_json(msg="'entry' MUST have 2 or 3 sections divided by ':' when 'state=absent'.")

        if state == 'query':
            module.fail_json(msg="'entry' MUST NOT be set when 'state=query'.")

        etype, entity, permissions = split_entry(entry)
        default_flag, etype, entity, permissions = split_entry(entry)
        if default_flag != None:
            default = default_flag

    changed = False
    msg = ""
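split_entry now peels an optional leading default-ACL marker off entries like d:user:joe:rw. A quick illustration of the accepted shapes (hypothetical values, mirroring the parsing above):

# Hedged sketch of the entry shapes the updated split_entry accepts.
def demo_split(entry):
    a = entry.split(':')
    d = None
    if entry.lower().startswith("d"):   # optional default-ACL marker
        d = True
        a.pop(0)
    if len(a) == 2:                     # state=absent entries omit permissions
        a.append(None)
    t, e, p = a
    return [d, t, e, p]

print demo_split('user:joe:rw')      # [None, 'user', 'joe', 'rw']
print demo_split('d:user:joe:rw')    # [True, 'user', 'joe', 'rw']
print demo_split('d:group:admins')   # [True, 'group', 'admins', None]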
@@ -19,7 +19,7 @@

# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

import os
import time
import tempfile

DOCUMENTATION = '''
---

@@ -27,7 +27,7 @@ module: copy

version_added: "historical"
short_description: Copies files to remote locations.
description:
    - The M(copy) module copies a file on the local box to remote locations. Use the M(fetch) module to copy files from remote locations to the local box.
    - The M(copy) module copies a file on the local box to remote locations. Use the M(fetch) module to copy files from remote locations to the local box. If you need variable interpolation in copied files, use the M(template) module.
options:
  src:
    description:

@@ -214,7 +214,8 @@ def main():

            backup = dict(default=False, type='bool'),
            force = dict(default=True, aliases=['thirsty'], type='bool'),
            validate = dict(required=False, type='str'),
            directory_mode = dict(required=False)
            directory_mode = dict(required=False),
            remote_src = dict(required=False, type='bool'),
        ),
        add_file_common_args=True,
        supports_check_mode=True,

@@ -228,6 +229,7 @@ def main():

    validate = module.params.get('validate',None)
    follow = module.params['follow']
    mode = module.params['mode']
    remote_src = module.params['remote_src']

    if not os.path.exists(src):
        module.fail_json(msg="Source %s failed to transfer" % (src))

@@ -307,7 +309,12 @@ def main():

                (rc,out,err) = module.run_command(validate % src)
                if rc != 0:
                    module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc,err))
            module.atomic_move(src, dest)
            if remote_src:
                _, tmpdest = tempfile.mkstemp(dir=os.path.dirname(dest))
                shutil.copy2(src, tmpdest)
                module.atomic_move(tmpdest, dest)
            else:
                module.atomic_move(src, dest)
        except IOError:
            module.fail_json(msg="failed to copy: %s to %s" % (src, dest))
        changed = True
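The remote_src branch stages the copy through a temporary file in the destination's own directory, so the final rename never crosses filesystems. A standalone sketch of that pattern (plain Python, outside the module):

import os
import shutil
import tempfile

def atomic_local_copy(src, dest):
    # Stage in dest's directory so os.rename never crosses filesystems
    fd, tmpdest = tempfile.mkstemp(dir=os.path.dirname(dest))
    os.close(fd)
    try:
        shutil.copy2(src, tmpdest)   # preserves mode and timestamps
        os.rename(tmpdest, dest)     # atomic on POSIX within one filesystem
    except Exception:
        os.unlink(tmpdest)
        raise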
@@ -157,8 +157,8 @@ def main():

            original_basename = dict(required=False), # Internal use only, for recursive ops
            recurse = dict(default=False, type='bool'),
            force = dict(required=False, default=False, type='bool'),
            diff_peek = dict(default=None),
            validate = dict(required=False, default=None),
            diff_peek = dict(default=None), # Internal use only, for internal checks in the action plugins
            validate = dict(required=False, default=None), # Internal use only, for template and copy
            src = dict(required=False, default=None),
        ),
        add_file_common_args=True,

@@ -190,6 +190,7 @@ def main():

    prev_state = get_state(path)


    # state should default to file, but since that creates many conflicts,
    # default to 'current' when it exists.
    if state is None:

@@ -226,10 +227,23 @@ def main():

        module.fail_json(path=path, msg="recurse option requires state to be 'directory'")

    file_args = module.load_file_common_arguments(params)

    changed = False
    diff = {'before':
                {'path': path},
            'after':
                {'path': path},
           }

    state_change = False
    if prev_state != state:
        diff['before']['state'] = prev_state
        diff['after']['state'] = state
        state_change = True

    if state == 'absent':
        if state != prev_state:
        if state_change:
            if not module.check_mode:
                if prev_state == 'directory':
                    try:

@@ -241,13 +255,13 @@ def main():

                    os.unlink(path)
                except Exception, e:
                    module.fail_json(path=path, msg="unlinking failed: %s " % str(e))
            module.exit_json(path=path, changed=True)
            module.exit_json(path=path, changed=True, diff=diff)
        else:
            module.exit_json(path=path, changed=False)

    elif state == 'file':

        if state != prev_state:
        if state_change:
            if follow and prev_state == 'link':
                # follow symlink and operate on original
                path = os.path.realpath(path)

@@ -258,8 +272,8 @@ def main():

            # file is not absent and any other state is a conflict
            module.fail_json(path=path, msg='file (%s) is %s, cannot continue' % (path, prev_state))

        changed = module.set_fs_attributes_if_different(file_args, changed)
        module.exit_json(path=path, changed=changed)
        changed = module.set_fs_attributes_if_different(file_args, changed, diff)
        module.exit_json(path=path, changed=changed, diff=diff)

    elif state == 'directory':
        if follow and prev_state == 'link':

@@ -268,7 +282,7 @@ def main():

        if prev_state == 'absent':
            if module.check_mode:
                module.exit_json(changed=True)
                module.exit_json(changed=True, diff=diff)
            changed = True
            curpath = ''

@@ -288,11 +302,11 @@ def main():

                    except OSError, ex:
                        # Possibly something else created the dir since the os.path.exists
                        # check above. As long as it's a dir, we don't need to error out.
                        if not (ex.errno == errno.EEXISTS and os.isdir(curpath)):
                        if not (ex.errno == errno.EEXIST and os.isdir(curpath)):
                            raise
                    tmp_file_args = file_args.copy()
                    tmp_file_args['path']=curpath
                    changed = module.set_fs_attributes_if_different(tmp_file_args, changed)
                    changed = module.set_fs_attributes_if_different(tmp_file_args, changed, diff)
            except Exception, e:
                module.fail_json(path=path, msg='There was an issue creating %s as requested: %s' % (curpath, str(e)))

@@ -300,12 +314,12 @@ def main():

        elif prev_state != 'directory':
            module.fail_json(path=path, msg='%s already exists as a %s' % (path, prev_state))

        changed = module.set_fs_attributes_if_different(file_args, changed)
        changed = module.set_fs_attributes_if_different(file_args, changed, diff)

        if recurse:
            changed |= recursive_set_attributes(module, file_args['path'], follow, file_args)

        module.exit_json(path=path, changed=changed)
        module.exit_json(path=path, changed=changed, diff=diff)

    elif state in ['link','hard']:

@@ -374,10 +388,10 @@ def main():

            module.fail_json(path=path, msg='Error while linking: %s' % str(e))

        if module.check_mode and not os.path.exists(path):
            module.exit_json(dest=path, src=src, changed=changed)
            module.exit_json(dest=path, src=src, changed=changed, diff=diff)

        changed = module.set_fs_attributes_if_different(file_args, changed)
        module.exit_json(dest=path, src=src, changed=changed)
        changed = module.set_fs_attributes_if_different(file_args, changed, diff)
        module.exit_json(dest=path, src=src, changed=changed, diff=diff)

    elif state == 'touch':
        if not module.check_mode:

@@ -395,7 +409,7 @@ def main():

        else:
            module.fail_json(msg='Cannot touch other than files, directories, and hardlinks (%s is %s)' % (path, prev_state))
        try:
            module.set_fs_attributes_if_different(file_args, True)
            module.set_fs_attributes_if_different(file_args, True, diff)
        except SystemExit, e:
            if e.code:
                # We take this to mean that fail_json() was called from

@@ -405,7 +419,7 @@ def main():

            os.remove(path)
            raise e

        module.exit_json(dest=path, changed=True)
        module.exit_json(dest=path, changed=True, diff=diff)

    module.fail_json(path=path, msg='unexpected position reached')
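Threading a diff dict through every exit point is what lets --diff render before/after state. A small sketch of the structure these changes build up (values are illustrative):

# Hedged sketch: the before/after structure the file module now returns.
diff = {'before': {'path': '/tmp/demo'},
        'after':  {'path': '/tmp/demo'}}

prev_state, state = 'absent', 'directory'   # example transition
if prev_state != state:
    diff['before']['state'] = prev_state
    diff['after']['state'] = state

# Ansible's callback renders this as a before/after comparison
print diff
# {'before': {'path': '/tmp/demo', 'state': 'absent'},
#  'after': {'path': '/tmp/demo', 'state': 'directory'}}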
@@ -267,6 +267,7 @@ def main():

            get_checksum = dict(default="False", type='bool'),
            use_regex = dict(default="False", type='bool'),
        ),
        supports_check_mode=True,
    )

    params = module.params
@@ -2,6 +2,7 @@

# -*- coding: utf-8 -*-

# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
# (c) 2015, Ales Nosek <anosek.nosek () gmail.com>
#
# This file is part of Ansible
#

@@ -28,8 +29,7 @@ description:

  - Manage (add, remove, change) individual settings in an INI-style file without having
    to manage the file as a whole with, say, M(template) or M(assemble). Adds missing
    sections if they don't exist.
  - Comments are discarded when the source file is read, and therefore will not
    show up in the destination file.
  - Before version 2.0, comments are discarded when the source file is read, and therefore will not show up in the destination file.
version_added: "0.9"
options:
  dest:

@@ -65,6 +65,12 @@ options:

    description:
      - all arguments accepted by the M(file) module also work here
    required: false
  state:
    description:
      - If set to C(absent) the option or section will be removed if present instead of created.
    required: false
    default: "present"
    choices: [ "present", "absent" ]
notes:
  - While it is possible to add an I(option) without specifying a I(value), this makes
    no sense.

@@ -73,7 +79,9 @@ notes:

    Either use M(template) to create a base INI file with a C([default]) section, or use
    M(lineinfile) to add the missing line.
requirements: [ ConfigParser ]
author: "Jan-Piet Mens (@jpmens)"
author:
    - "Jan-Piet Mens (@jpmens)"
    - "Ales Nosek (@noseka1)"
'''

EXAMPLES = '''

@@ -89,92 +97,109 @@ EXAMPLES = '''

import ConfigParser
import sys
import os

# ==============================================================
# match_opt

def match_opt(option, line):
    option = re.escape(option)
    return re.match('%s *=' % option, line) \
        or re.match('# *%s *=' % option, line) \
        or re.match('; *%s *=' % option, line)

# ==============================================================
# match_active_opt

def match_active_opt(option, line):
    option = re.escape(option)
    return re.match('%s *=' % option, line)

# ==============================================================
# do_ini

def do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False):

    changed = False
    if (sys.version_info[0] == 2 and sys.version_info[1] >= 7) or sys.version_info[0] >= 3:
        cp = ConfigParser.ConfigParser(allow_no_value=True)
    else:
        cp = ConfigParser.ConfigParser()
    cp.optionxform = identity

    if not os.path.exists(filename):
        try:
            open(filename,'w').close()
        except:
            module.fail_json(msg="Destination file %s not writable" % filename)
    ini_file = open(filename, 'r')
    try:
        f = open(filename)
        cp.readfp(f)
    except IOError:
        pass
        ini_lines = ini_file.readlines()
        # append a fake section line to simplify the logic
        ini_lines.append('[')
    finally:
        ini_file.close()

    within_section = not section
    section_start = 0
    changed = False

    if state == 'absent':
        if option is None and value is None:
            if cp.has_section(section):
                cp.remove_section(section)
                changed = True
    for index, line in enumerate(ini_lines):
        if line.startswith('[%s]' % section):
            within_section = True
            section_start = index
        elif line.startswith('['):
            if within_section:
                if state == 'present':
                    # insert missing option line at the end of the section
                    ini_lines.insert(index, '%s = %s\n' % (option, value))
                    changed = True
                elif state == 'absent' and not option:
                    # remove the entire section
                    del ini_lines[section_start:index]
                    changed = True
                break
        else:
            if option is not None:
                try:
                    if cp.get(section, option):
                        cp.remove_option(section, option)
            if within_section and option:
                if state == 'present':
                    # change the existing option line
                    if match_opt(option, line):
                        newline = '%s = %s\n' % (option, value)
                        changed = ini_lines[index] != newline
                        ini_lines[index] = newline
                        if changed:
                            # remove all possible option occurrences from the rest of the section
                            index = index + 1
                            while index < len(ini_lines):
                                line = ini_lines[index]
                                if line.startswith('['):
                                    break
                                if match_active_opt(option, line):
                                    del ini_lines[index]
                                else:
                                    index = index + 1
                        break
                else:
                    # comment out the existing option line
                    if match_active_opt(option, line):
                        ini_lines[index] = '#%s' % ini_lines[index]
                        changed = True
                except ConfigParser.InterpolationError:
                    cp.remove_option(section, option)
                    changed = True
                except:
                    pass
                        break

    if state == 'present':
        # remove the fake section line
        del ini_lines[-1:]

        # DEFAULT section is always there by default, so never try to add it.
        if not cp.has_section(section) and section.upper() != 'DEFAULT':
        if not within_section and option and state == 'present':
            ini_lines.append('[%s]\n' % section)
            ini_lines.append('%s = %s\n' % (option, value))
            changed = True

            cp.add_section(section)
            changed = True

        if option is not None and value is not None:
            try:
                oldvalue = cp.get(section, option)
                if str(value) != str(oldvalue):
                    cp.set(section, option, value)
                    changed = True
            except ConfigParser.NoSectionError:
                cp.set(section, option, value)
                changed = True
            except ConfigParser.NoOptionError:
                cp.set(section, option, value)
                changed = True
            except ConfigParser.InterpolationError:
                cp.set(section, option, value)
                changed = True

    if changed and not module.check_mode:
        if backup:
            module.backup_local(filename)

        ini_file = open(filename, 'w')
        try:
            f = open(filename, 'w')
            cp.write(f)
        except:
            module.fail_json(msg="Can't create %s" % filename)
            ini_file.writelines(ini_lines)
        finally:
            ini_file.close()

    return changed

# ==============================================================
# identity

def identity(arg):
    """
    This function simply returns its argument. It serves as a
    replacement for ConfigParser.optionxform, which by default
    changes arguments to lower case. The identity function is a
    better choice than str() or unicode(), because it is
    encoding-agnostic.
    """
    return arg

# ==============================================================
# main

@@ -212,4 +237,5 @@ def main():

# import module snippets
from ansible.module_utils.basic import *
main()
if __name__ == '__main__':
    main()
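The rewrite edits the file line by line, so the two regex helpers decide which lines count as "the option". A quick illustration of what they match (illustrative lines only):

import re

def match_opt(option, line):
    # matches active lines and ones commented out with '#' or ';'
    option = re.escape(option)
    return re.match('%s *=' % option, line) \
        or re.match('# *%s *=' % option, line) \
        or re.match('; *%s *=' % option, line)

print bool(match_opt('port', 'port = 3306\n'))    # True: active option
print bool(match_opt('port', '# port = 3306\n'))  # True: commented-out option
print bool(match_opt('port', 'sport = 22\n'))     # False: different option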
@@ -42,11 +42,28 @@ options:

    aliases: []
  get_checksum:
    description:
      - Whether to return a checksum of the file (currently sha1)
      - Whether to return a checksum of the file (default sha1)
    required: false
    default: yes
    aliases: []
    version_added: "1.8"
  checksum_algorithm:
    description:
      - Algorithm to determine the checksum of the file. Will throw an error if the host is unable to use the specified algorithm.
    required: false
    choices: [ 'sha1', 'sha224', 'sha256', 'sha384', 'sha512' ]
    default: sha1
    aliases: [ 'checksum_algo' ]
    version_added: "2.0"
  mime:
    description:
      - Use file magic and return data about the nature of the file. This uses the 'file' utility found on most Linux/Unix systems.
      - This will add both C(mime_type) and C(charset) fields to the return, if possible.
    required: false
    choices: [ Yes, No ]
    default: No
    version_added: "2.1"
    aliases: [ 'mime_type', 'mime-type' ]
author: "Bruce Pennypacker (@bpennypacker)"
'''

@@ -84,6 +101,9 @@ EXAMPLES = '''

# Don't do md5 checksum
- stat: path=/path/to/myhugefile get_md5=no

# Use sha256 to calculate checksum
- stat: path=/path/to/something checksum_algorithm=sha256
'''

RETURN = '''

@@ -100,7 +120,7 @@ stat:

    path:
        description: The full path of the file/object to get the facts of
        returned: success and if path exists
        type: boolean
        type: string
        sample: '/path/to/file'
    mode:
        description: Unix permissions of the file in octal

@@ -254,7 +274,7 @@ stat:

        sample: f88fa92d8cf2eeecf4c0a50ccc96d0c0
    checksum:
        description: hash of the path
        returned: success, path exists and user can read stats and path supports hashing
        returned: success, path exists, user can read stats, path supports hashing and supplied checksum algorithm is available
        type: string
        sample: 50ba294cdf28c0d5bcde25708df53346825a429f
    pw_name:

@@ -267,6 +287,16 @@ stat:

        returned: success, path exists and user can read stats and installed python supports it
        type: string
        sample: www-data
    mime_type:
        description: file magic data or mime-type
        returned: success, path exists, user can read stats, installed python supports it and the C(mime) option was true; returns 'unknown' on error
        type: string
        sample: PDF document, version 1.2
    charset:
        description: file character set or encoding
        returned: success, path exists, user can read stats, installed python supports it and the C(mime) option was true; returns 'unknown' on error
        type: string
        sample: us-ascii
'''

import os

@@ -281,7 +311,9 @@ def main():

            path = dict(required=True),
            follow = dict(default='no', type='bool'),
            get_md5 = dict(default='yes', type='bool'),
            get_checksum = dict(default='yes', type='bool')
            get_checksum = dict(default='yes', type='bool'),
            checksum_algorithm = dict(default='sha1', type='str', choices=['sha1', 'sha224', 'sha256', 'sha384', 'sha512'], aliases=['checksum_algo']),
            mime = dict(default=False, type='bool', aliases=['mime_type', 'mime-type']),
        ),
        supports_check_mode = True
    )

@@ -291,6 +323,7 @@ def main():

    follow = module.params.get('follow')
    get_md5 = module.params.get('get_md5')
    get_checksum = module.params.get('get_checksum')
    checksum_algorithm = module.params.get('checksum_algorithm')

    try:
        if follow:

@@ -351,8 +384,7 @@ def main():

        d['md5'] = None

    if S_ISREG(mode) and get_checksum and os.access(path,os.R_OK):
        d['checksum'] = module.sha1(path)
        d['checksum'] = module.digest_from_file(path, checksum_algorithm)

    try:
        pw = pwd.getpwuid(st.st_uid)

@@ -364,6 +396,19 @@ def main():

    except:
        pass

    if module.params.get('mime'):
        d['mime_type'] = 'unknown'
        d['charset'] = 'unknown'

        filecmd = [module.get_bin_path('file', True), '-i', path]
        try:
            rc, out, err = module.run_command(filecmd)
            if rc == 0:
                mtype, chset = out.split(':')[1].split(';')
                d['mime_type'] = mtype.strip()
                d['charset'] = chset.split('=')[1].strip()
        except:
            pass

    module.exit_json(changed=False, stat=d)
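The mime branch shells out to file -i and parses output of the shape "/etc/hosts: text/plain; charset=us-ascii". A standalone sketch of that parsing (assuming Python 2.7's subprocess.check_output in place of the module's run_command; like the module, it assumes no ':' in the path itself):

import subprocess

def mime_of(path):
    # 'file -i' prints e.g. "/etc/hosts: text/plain; charset=us-ascii"
    out = subprocess.check_output(['file', '-i', path])
    mtype, chset = out.split(':')[1].split(';')
    return mtype.strip(), chset.split('=')[1].strip()

print mime_of('/etc/hosts')   # ('text/plain', 'us-ascii')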
@@ -250,7 +250,7 @@ def pick_handler(src, dest, module):

        obj = handler(src, dest, module)
        if obj.can_handle_archive():
            return obj
    module.fail_json(msg='Failed to find handler to unarchive. Make sure the required command to extract the file is installed.')
    module.fail_json(msg='Failed to find handler for "%s". Make sure the required command to extract the file is installed.' % src)


def main():
@@ -55,6 +55,14 @@ options:

      If C(dest) is a directory, the file will always be
      downloaded (regardless of the force option), but replaced only if the contents changed.
    required: true
  tmp_dest:
    description:
      - Absolute path of where the temporary file is downloaded to.
      - Defaults to the TMPDIR, TEMP or TMP env variables or a platform specific value.
      - https://docs.python.org/2/library/tempfile.html#tempfile.tempdir
    required: false
    default: ''
    version_added: '2.1'
  force:
    description:
      - If C(yes) and C(dest) is not a directory, will download the file every

@@ -175,7 +183,7 @@ def url_filename(url):

        return 'index.html'
    return fn

def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, headers=None):
def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, headers=None, tmp_dest=''):
    """
    Download data from the url and store in a temporary file.

@@ -191,7 +199,19 @@ def url_get(module, url, dest, use_proxy, last_mod_time, force, timeout=10, head

    if info['status'] != 200:
        module.fail_json(msg="Request failed", status_code=info['status'], response=info['msg'], url=url, dest=dest)

    fd, tempname = tempfile.mkstemp()
    if tmp_dest != '':
        # tmp_dest should be an existing dir
        tmp_dest_is_dir = os.path.isdir(tmp_dest)
        if not tmp_dest_is_dir:
            if os.path.exists(tmp_dest):
                module.fail_json(msg="%s is a file but should be a directory." % tmp_dest)
            else:
                module.fail_json(msg="%s directory does not exist." % tmp_dest)

        fd, tempname = tempfile.mkstemp(dir=tmp_dest)
    else:
        fd, tempname = tempfile.mkstemp()

    f = os.fdopen(fd, 'wb')
    try:
        shutil.copyfileobj(rsp, f)

@@ -235,6 +255,7 @@ def main():

        checksum = dict(default=''),
        timeout = dict(required=False, type='int', default=10),
        headers = dict(required=False, default=None),
        tmp_dest = dict(required=False, default=''),
    )

    module = AnsibleModule(

@@ -250,7 +271,8 @@ def main():

    checksum = module.params['checksum']
    use_proxy = module.params['use_proxy']
    timeout = module.params['timeout']

    tmp_dest = os.path.expanduser(module.params['tmp_dest'])

    # Parse headers to dict
    if module.params['headers']:
        try:

@@ -303,7 +325,7 @@ def main():

        last_mod_time = datetime.datetime.utcfromtimestamp(mtime)

    # download to tmpsrc
    tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers)
    tmpsrc, info = url_get(module, url, dest, use_proxy, last_mod_time, force, timeout, headers, tmp_dest)

    # Now the request has completed, we can finally generate the final
    # destination file name from the info dict.
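tmp_dest matters when the default temp directory sits on a different (or smaller) filesystem than dest. The underlying mechanism is just tempfile.mkstemp's dir argument; a minimal sketch:

import os
import tempfile

def download_tempfile(tmp_dest=''):
    # With dir=None, mkstemp falls back to TMPDIR/TEMP/TMP or the platform default
    fd, tempname = tempfile.mkstemp(dir=tmp_dest or None)
    os.close(fd)
    return tempname

print download_tempfile()            # e.g. /tmp/tmpXXXXXX
print download_tempfile('/var/tmp')  # staged next to large destinations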
@@ -25,6 +25,8 @@ import shutil

import tempfile
import base64
import datetime
from distutils.version import LooseVersion

try:
    import json
except ImportError:

@@ -143,7 +145,8 @@ options:

    version_added: '1.9.2'

# informational: requirements for nodes
requirements: [ urlparse, httplib2 ]
requirements:
  - httplib2 >= 0.7.0
author: "Romeo Theriault (@romeotheriault)"
'''

@@ -156,7 +159,7 @@ EXAMPLES = '''

  register: webpage

- action: fail
  when: "'illustrative' not in webpage.content"
  when: "'AWESOME' not in webpage.content"


# Create a JIRA issue

@@ -198,11 +201,15 @@ EXAMPLES = '''

'''

HAS_HTTPLIB2 = True
HAS_HTTPLIB2 = False

try:
    import httplib2
except ImportError:
    HAS_HTTPLIB2 = False
    if LooseVersion(httplib2.__version__) >= LooseVersion('0.7'):
        HAS_HTTPLIB2 = True
except (ImportError, AttributeError):
    # AttributeError if __version__ is not present
    pass

HAS_URLPARSE = True

@@ -382,7 +389,7 @@ def main():

    )

    if not HAS_HTTPLIB2:
        module.fail_json(msg="httplib2 is not installed")
        module.fail_json(msg="httplib2 >= 0.7 is not installed")
    if not HAS_URLPARSE:
        module.fail_json(msg="urlparse is not installed")
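LooseVersion does the heavy lifting in that import guard: it compares dotted version strings numerically rather than lexically. A quick illustration:

from distutils.version import LooseVersion

print LooseVersion('0.10.3') >= LooseVersion('0.7')   # True (plain string compare would say False)
print LooseVersion('0.6.9') >= LooseVersion('0.7')    # False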
48 packaging/language/pip.py (Normal file → Executable file)
@@ -20,6 +20,7 @@

#

import tempfile
import re
import os

DOCUMENTATION = '''

@@ -43,7 +44,8 @@ options:

    default: null
  requirements:
    description:
      - The path to a pip requirements file
      - The path to a pip requirements file, which should be local to the remote system.
        File can be specified as a relative path if using the chdir option.
    required: false
    default: null
  virtualenv:

@@ -90,6 +92,12 @@ options:

    required: false
    default: null
    version_added: "1.0"
  editable:
    description:
      - Pass the editable flag for versioning URLs.
    required: false
    default: yes
    version_added: "2.0"
  chdir:
    description:
      - cd into this directory before running the command

@@ -121,6 +129,9 @@ EXAMPLES = '''

# Install (MyApp) using one of the remote protocols (bzr+,hg+,git+,svn+). You do not have to supply the '-e' option in extra_args.
- pip: name='svn+http://myrepo/svn/MyApp#egg=MyApp'

# Install MyApp using one of the remote protocols (bzr+,hg+,git+) in a non-editable way.
- pip: name='git+http://myrepo/app/MyApp' editable=false

# Install (MyApp) from local tarball
- pip: name='file:///path/to/MyApp.tar.gz'

@@ -239,6 +250,7 @@ def main():

        virtualenv_python=dict(default=None, required=False, type='str'),
        use_mirrors=dict(default='yes', type='bool'),
        extra_args=dict(default=None, required=False),
        editable=dict(default='yes', type='bool', required=False),
        chdir=dict(default=None, required=False, type='path'),
        executable=dict(default=None, required=False),
    ),

@@ -311,16 +323,15 @@ def main():

    # Automatically apply -e option to extra_args when source is a VCS url. VCS
    # includes those beginning with svn+, git+, hg+ or bzr+
    if name:
        if name.startswith('svn+') or name.startswith('git+') or \
                name.startswith('hg+') or name.startswith('bzr+'):
            args_list = []  # used if extra_args is not used at all
            if extra_args:
                args_list = extra_args.split(' ')
            if '-e' not in args_list:
                args_list.append('-e')
                # Ok, we will reconstruct the option string
                extra_args = ' '.join(args_list)
    has_vcs = bool(name and re.match(r'(svn|git|hg|bzr)\+', name))
    if has_vcs and module.params['editable']:
        args_list = []  # used if extra_args is not used at all
        if extra_args:
            args_list = extra_args.split(' ')
        if '-e' not in args_list:
            args_list.append('-e')
            # Ok, we will reconstruct the option string
            extra_args = ' '.join(args_list)

    if extra_args:
        cmd += ' %s' % extra_args

@@ -333,8 +344,7 @@ def main():

    if module.check_mode:
        if extra_args or requirements or state == 'latest' or not name:
            module.exit_json(changed=True)
        elif name.startswith('svn+') or name.startswith('git+') or \
                name.startswith('hg+') or name.startswith('bzr+'):
        elif has_vcs:
            module.exit_json(changed=True)

        freeze_cmd = '%s freeze' % pip

@@ -352,6 +362,12 @@ def main():

        changed = (state == 'present' and not is_present) or (state == 'absent' and is_present)
        module.exit_json(changed=changed, cmd=freeze_cmd, stdout=out, stderr=err)

    if requirements or has_vcs:
        freeze_cmd = '%s freeze' % pip
        out_freeze_before = module.run_command(freeze_cmd, cwd=chdir)[1]
    else:
        out_freeze_before = None

    rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=chdir)
    out += out_pip
    err += err_pip

@@ -364,7 +380,11 @@ def main():

    if state == 'absent':
        changed = 'Successfully uninstalled' in out_pip
    else:
        changed = 'Successfully installed' in out_pip
        if out_freeze_before is None:
            changed = 'Successfully installed' in out_pip
        else:
            out_freeze_after = module.run_command(freeze_cmd, cwd=chdir)[1]
            changed = out_freeze_before != out_freeze_after

    module.exit_json(changed=changed, cmd=cmd, name=name, version=version,
                     state=state, requirements=requirements, virtualenv=env,
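Two small techniques carry this hunk: a single regex replaces the chain of startswith checks, and comparing pip freeze output before and after the run replaces brittle stdout matching. A sketch of both:

import re

def is_vcs_url(name):
    # matches svn+..., git+..., hg+..., bzr+... at the start of the string
    return bool(name and re.match(r'(svn|git|hg|bzr)\+', name))

print is_vcs_url('git+http://myrepo/app/MyApp')  # True
print is_vcs_url('MyApp==1.0')                   # False

# Change detection: the install changed something iff the environment's
# 'pip freeze' snapshot differs afterwards.
# changed = freeze_before != freeze_after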
@@ -32,6 +32,7 @@ options:

      - A package name, like C(foo), or package specifier with version, like C(foo=1.0). Name wildcards (fnmatch) like C(apt*) and version wildcards like C(foo=1.0*) are also supported. Note that the apt-get commandline supports implicit regex matches here but we do not because it can let typos through easier (If you typo C(foo) as C(fo) apt-get would install packages that have "fo" in their name with a warning and a prompt for the user. Since we don't have warnings and prompts before installing we disallow this. Use an explicit fnmatch pattern if you want wildcarding)
    required: false
    default: null
    aliases: [ 'pkg', 'package' ]
  state:
    description:
      - Indicates the desired package state. C(latest) ensures that the latest version is installed. C(build-dep) ensures the package build dependencies are installed.

@@ -62,9 +63,9 @@ options:

    default: null
  install_recommends:
    description:
      - Corresponds to the C(--no-install-recommends) option for I(apt). Default behavior (C(yes)) replicates apt's default behavior; C(no) does not install recommended packages. Suggested packages are never installed.
      - Corresponds to the C(--no-install-recommends) option for I(apt). C(yes) installs recommended packages. C(no) does not install recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed.
    required: false
    default: yes
    default: null
    choices: [ "yes", "no" ]
  force:
    description:

@@ -231,7 +232,7 @@ def package_status(m, pkgname, version, cache, state):

        provided_packages = cache.get_providing_packages(pkgname)
        if provided_packages:
            is_installed = False
            # when a virtual package provides only one package, look up the status of the target package
            if cache.is_virtual_package(pkgname) and len(provided_packages) == 1:
                package = provided_packages[0]
                installed, upgradable, has_files = package_status(m, package.name, version, cache, state='install')

@@ -339,7 +340,7 @@ def expand_pkgspec_from_fnmatches(m, pkgspec, cache):

    return new_pkgspec

def install(m, pkgspec, cache, upgrade=False, default_release=None,
            install_recommends=True, force=False,
            install_recommends=None, force=False,
            dpkg_options=expand_dpkg_options(DPKG_OPTIONS),
            build_dep=False):
    pkg_list = []

@@ -385,8 +386,12 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None,

        if default_release:
            cmd += " -t '%s'" % (default_release,)
        if not install_recommends:
            cmd += " --no-install-recommends"

        if install_recommends is False:
            cmd += " -o APT::Install-Recommends=no"
        elif install_recommends is True:
            cmd += " -o APT::Install-Recommends=yes"
        # install_recommends is None uses the OS default

        rc, out, err = m.run_command(cmd)
        if rc:

@@ -438,6 +443,9 @@ def install_deb(m, debs, cache, force, install_recommends, dpkg_options):

    if force:
        options += " --force-all"

    for (k,v) in APT_ENV_VARS.iteritems():
        os.environ[k] = v

    cmd = "dpkg %s -i %s" % (options, " ".join(pkgs_to_install))
    rc, out, err = m.run_command(cmd)
    if "stdout" in retvals:

@@ -547,7 +555,7 @@ def main():

        package = dict(default=None, aliases=['pkg', 'name'], type='list'),
        deb = dict(default=None),
        default_release = dict(default=None, aliases=['default-release']),
        install_recommends = dict(default='yes', aliases=['install-recommends'], type='bool'),
        install_recommends = dict(default=None, aliases=['install-recommends'], type='bool'),
        force = dict(default='no', type='bool'),
        upgrade = dict(choices=['no', 'yes', 'safe', 'full', 'dist']),
        dpkg_options = dict(default=DPKG_OPTIONS)

@@ -559,7 +567,7 @@ def main():

    if not HAS_PYTHON_APT:
        try:
            module.run_command('apt-get update && apt-get install python-apt -y -q', use_unsafe_shell=True, check_rc=True)
            module.run_command('apt-get update && apt-get install python-apt -y -q --force-yes', use_unsafe_shell=True, check_rc=True)
            global apt, apt_pkg
            import apt
            import apt.debfile
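install_recommends is now a tri-state: True and False force apt's behavior either way with -o APT::Install-Recommends=..., and None leaves the OS default alone. A condensed sketch of that mapping:

def recommends_flags(install_recommends):
    # None -> defer to the OS default (no flag at all)
    if install_recommends is False:
        return " -o APT::Install-Recommends=no"
    elif install_recommends is True:
        return " -o APT::Install-Recommends=yes"
    return ""

print recommends_flags(None)   # ''
print recommends_flags(False)  # ' -o APT::Install-Recommends=no'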
@@ -388,7 +388,7 @@ def main():

    argument_spec = dict(
        state = dict(default='present', choices=['present', 'absent']),
        username = dict(default=None, required=False),
        password = dict(default=None, required=False),
        password = dict(default=None, required=False, no_log=True),
        server_hostname = dict(default=rhn.config.get_option('server.hostname'), required=False),
        server_insecure = dict(default=rhn.config.get_option('server.insecure'), required=False),
        rhsm_baseurl = dict(default=rhn.config.get_option('rhsm.baseurl'), required=False),
@ -21,8 +21,6 @@
|
|||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
|
||||
|
||||
import traceback
|
||||
import os
|
||||
import yum
|
||||
import rpm
|
||||
|
@ -51,7 +49,7 @@ options:
|
|||
- "Package name, or package specifier with version, like C(name-1.0). When using state=latest, this can be '*' which means run: yum -y update. You can also pass a url or a local path to a rpm file. To operate on several packages this can accept a comma separated list of packages or (as of 2.0) a list of packages."
|
||||
required: true
|
||||
default: null
|
||||
aliases: []
|
||||
aliases: [ 'pkg' ]
|
||||
exclude:
|
||||
description:
|
||||
- "Package name(s) to exclude when state=present, or latest"
|
||||
|
@ -65,9 +63,9 @@ options:
|
|||
default: null
|
||||
state:
|
||||
description:
|
||||
- Whether to install (C(present), C(latest)), or remove (C(absent)) a package.
|
||||
- Whether to install (C(present) or C(installed), C(latest)), or remove (C(absent) or C(removed)) a package.
|
||||
required: false
|
||||
choices: [ "present", "latest", "absent" ]
|
||||
choices: [ "present", "installed", "latest", "absent", "removed" ]
|
||||
default: "present"
|
||||
enablerepo:
|
||||
description:
|
||||
|
@ -117,6 +115,16 @@ options:
|
|||
choices: ["yes", "no"]
|
||||
aliases: []
|
||||
|
||||
validate_certs:
|
||||
description:
|
||||
- This only applies if using a https url as the source of the rpm. e.g. for localinstall. If set to C(no), the SSL certificates will not be validated.
|
||||
- This should only set to C(no) used on personally controlled sites using self-signed certificates as it avoids verifying the source site.
|
||||
- Prior to 2.1 the code worked as if this was set to C(yes).
|
||||
required: false
|
||||
default: "yes"
|
||||
choices: ["yes", "no"]
|
||||
version_added: "2.1"
|
||||
|
||||
notes:
|
||||
- When used with a loop of package names in a playbook, ansible optimizes
|
||||
the call to the yum module. Instead of calling the module with a single
|
||||
|
@ -130,6 +138,15 @@ notes:
that the other packages come from (such as epel-release) then that package
needs to be installed in a separate task. This mimics yum's command line
behaviour.
- 'Yum itself has two types of groups. "Package groups" are specified in the
rpm itself while "environment groups" are specified in a separate file
(usually by the distribution). Unfortunately, this division becomes
apparent to ansible users because ansible needs to operate on the group
of packages in a single transaction and yum requires groups to be specified
in different ways when used in that way. Package groups are specified as
"@development-tools" and environment groups are "@^gnome-desktop-environment".
Use the "yum group list" command to see which category of group the group
you want to install falls into.'
# informational: requirements for nodes
requirements: [ yum ]
author:

@ -161,18 +178,23 @@ EXAMPLES = '''

- name: install the 'Development tools' package group
yum: name="@Development tools" state=present

- name: install the 'Gnome desktop' environment group
yum: name="@^gnome-desktop-environment" state=present
'''

# 64k. Number of bytes to read at a time when manually downloading pkgs via a url
BUFSIZE = 65536

def_qf = "%{name}-%{version}-%{release}.%{arch}"
rpmbin = None

def yum_base(conf_file=None):

my = yum.YumBase()
my.preconf.debuglevel=0
my.preconf.errorlevel=0
my.preconf.plugins = True
if conf_file and os.path.exists(conf_file):
my.preconf.fn = conf_file
if os.geteuid() != 0:
@ -209,8 +231,8 @@ def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, di
en_repos = []
if dis_repos is None:
dis_repos = []
if not repoq:

if not repoq:
pkgs = []
try:
my = yum_base(conf_file)

@ -218,10 +240,10 @@ def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, di
my.repos.disableRepo(rid)
for rid in en_repos:
my.repos.enableRepo(rid)

e, m, u = my.rpmdb.matchPackageNames([pkgspec])
pkgs = e + m
if not pkgs:
if not pkgs and not is_pkg:
pkgs.extend(my.returnInstalledPackagesByDep(pkgspec))
except Exception, e:
module.fail_json(msg="Failure talking to yum: %s" % e)

@ -229,21 +251,31 @@ def is_installed(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, di
return [ po_to_nevra(p) for p in pkgs ]

else:
global rpmbin
if not rpmbin:
rpmbin = module.get_bin_path('rpm', required=True)

cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, pkgspec]
cmd = [rpmbin, '-q', '--qf', qf, pkgspec]
rc, out, err = module.run_command(cmd)
if not is_pkg:
cmd = repoq + ["--disablerepo=*", "--pkgnarrow=installed", "--qf", qf, "--whatprovides", pkgspec]
if rc != 0 and 'is not installed' not in out:
module.fail_json(msg='Error from rpm: %s: %s' % (cmd, err))
if 'is not installed' in out:
out = ''

pkgs = [p for p in out.replace('(none)', '0').split('\n') if p.strip()]
if not pkgs and not is_pkg:
cmd = [rpmbin, '-q', '--qf', qf, '--whatprovides', pkgspec]
rc2, out2, err2 = module.run_command(cmd)
else:
rc2, out2, err2 = (0, '', '')

if rc == 0 and rc2 == 0:
out += out2
return [p for p in out.split('\n') if p.strip()]
else:
module.fail_json(msg='Error from repoquery: %s: %s' % (cmd, err + err2))

if rc2 != 0 and 'no package provides' not in out2:
module.fail_json(msg='Error from rpm: %s: %s' % (cmd, err + err2))
if 'no package provides' in out2:
out2 = ''
pkgs += [p for p in out2.replace('(none)', '0').split('\n') if p.strip()]
return pkgs

return []

def is_available(module, repoq, pkgspec, conf_file, qf=def_qf, en_repos=None, dis_repos=None):
@ -483,20 +515,22 @@ def repolist(module, repoq, qf="%{repoid}"):
def list_stuff(module, repoquerybin, conf_file, stuff):

qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|%{repoid}"
# is_installed goes through rpm instead of repoquery so it needs a slightly different format
is_installed_qf = "%{name}|%{epoch}|%{version}|%{release}|%{arch}|installed\n"
repoq = [repoquerybin, '--show-duplicates', '--plugins', '--quiet']
if conf_file and os.path.exists(conf_file):
repoq += ['-c', conf_file]

if stuff == 'installed':
return [ pkg_to_dict(p) for p in is_installed(module, repoq, '-a', conf_file, qf=qf) if p.strip() ]
return [ pkg_to_dict(p) for p in sorted(is_installed(module, repoq, '-a', conf_file, qf=is_installed_qf)) if p.strip() ]
elif stuff == 'updates':
return [ pkg_to_dict(p) for p in is_update(module, repoq, '-a', conf_file, qf=qf) if p.strip() ]
return [ pkg_to_dict(p) for p in sorted(is_update(module, repoq, '-a', conf_file, qf=qf)) if p.strip() ]
elif stuff == 'available':
return [ pkg_to_dict(p) for p in is_available(module, repoq, '-a', conf_file, qf=qf) if p.strip() ]
return [ pkg_to_dict(p) for p in sorted(is_available(module, repoq, '-a', conf_file, qf=qf)) if p.strip() ]
elif stuff == 'repos':
return [ dict(repoid=name, state='enabled') for name in repolist(module, repoq) if name.strip() ]
return [ dict(repoid=name, state='enabled') for name in sorted(repolist(module, repoq)) if name.strip() ]
else:
return [ pkg_to_dict(p) for p in is_installed(module, repoq, stuff, conf_file, qf=qf) + is_available(module, repoq, stuff, conf_file, qf=qf) if p.strip() ]
return [ pkg_to_dict(p) for p in sorted(is_installed(module, repoq, stuff, conf_file, qf=is_installed_qf) + is_available(module, repoq, stuff, conf_file, qf=qf)) if p.strip() ]

def install(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):

@ -928,6 +962,7 @@ def ensure(module, state, pkgs, conf_file, enablerepo, disablerepo,

return res

def main():

# state=installed name=pkgspec

@ -953,6 +988,7 @@ def main():
conf_file=dict(default=None),
disable_gpg_check=dict(required=False, default="no", type='bool'),
update_cache=dict(required=False, default="no", type='bool'),
validate_certs=dict(required=False, default="yes", type='bool'),
# this should not be needed, but exists as a failsafe
install_repoquery=dict(required=False, default="yes", type='bool'),
),

@ -998,7 +1034,8 @@ def main():
results = ensure(module, state, pkg, params['conf_file'], enablerepo,
disablerepo, disable_gpg_check, exclude, repoquery)
if repoquery:
results['msg'] = '%s %s' % (results.get('msg',''), 'Warning: Due to potential bad behaviour with rhnplugin and certificates, used slower repoquery calls instead of Yum API.')
results['msg'] = '%s %s' % (results.get('msg',''),
'Warning: Due to potential bad behaviour with rhnplugin and certificates, used slower repoquery calls instead of Yum API.')

module.exit_json(**results)
@ -33,7 +33,7 @@ options:
required: true
aliases: [ name ]
description:
- git, SSH, or HTTP protocol address of the git repository.
- git, SSH, or HTTP(S) protocol address of the git repository.
dest:
required: true
description:

@ -55,7 +55,7 @@ options:
version_added: "1.5"
description:
- if C(yes), adds the hostkey for the repo url if not already
added. If ssh_args contains "-o StrictHostKeyChecking=no",
added. If ssh_opts contains "-o StrictHostKeyChecking=no",
this parameter is ignored.
ssh_opts:
required: false
@ -80,9 +80,18 @@ options:
choices: [ "yes", "no" ]
default: "no"
version_added: "1.9"
validate_certs:
description:
- This only applies if using a https url as the source of the keys. If set to C(no), the SSL certificates will not be validated.
- This should only be set to C(no) on personally controlled sites using self-signed certificates, as it avoids verifying the source site.
- Prior to 2.1 the code worked as if this was set to C(yes).
required: false
default: "yes"
choices: ["yes", "no"]
version_added: "2.1"
description:
- "Adds or removes authorized keys for particular user accounts"
author: "Brad Olson (@bradobro)"
author: "Ansible Core Team"
'''

EXAMPLES = '''
@ -93,27 +102,30 @@ EXAMPLES = '''
- authorized_key: user=charlie key=https://github.com/charlie.keys

# Using alternate directory locations:
- authorized_key: user=charlie
key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}"
path='/etc/ssh/authorized_keys/charlie'
manage_dir=no
- authorized_key:
user: charlie
key: "{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}"
path: '/etc/ssh/authorized_keys/charlie'
manage_dir: no

# Using with_file
- name: Set up authorized_keys for the deploy user
authorized_key: user=deploy
key="{{ item }}"
authorized_key: user=deploy key="{{ item }}"
with_file:
- public_keys/doe-jane
- public_keys/doe-john

# Using key_options:
- authorized_key: user=charlie
key="{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}"
key_options='no-port-forwarding,from="10.0.1.1"'
- authorized_key:
user: charlie
key: "{{ lookup('file', '/home/charlie/.ssh/id_rsa.pub') }}"
key_options: 'no-port-forwarding,from="10.0.1.1"'

# Using validate_certs:
- authorized_key: user=charlie key=https://github.com/user.keys validate_certs=no

# Set up authorized_keys exclusively with one key
- authorized_key: user=root key="{{ item }}" state=present
exclusive=yes
- authorized_key: user=root key="{{ item }}" state=present exclusive=yes
with_file:
- public_keys/doe-jane
'''

@ -358,6 +370,7 @@ def enforce_state(module, params):
state = params.get("state", "present")
key_options = params.get("key_options", None)
exclusive = params.get("exclusive", False)
validate_certs = params.get("validate_certs", True)
error_msg = "Error getting key from: %s"

# if the key is a url, request it and use it as key source

@ -460,6 +473,7 @@ def main():
key_options = dict(required=False, type='str'),
unique = dict(default=False, type='bool'),
exclusive = dict(default=False, type='bool'),
validate_certs = dict(default=True, type='bool'),
),
supports_check_mode=True
)
@ -42,6 +42,7 @@ EXAMPLES = '''
- hostname: name=web01
'''

import socket
from distutils.version import LooseVersion

# import module snippets

@ -259,8 +260,8 @@ class SystemdStrategy(GenericStrategy):
(rc, out, err))

def get_permanent_hostname(self):
cmd = 'hostnamectl --static status'
rc, out, err = self.module.run_command(cmd, use_unsafe_shell=True)
cmd = ['hostnamectl', '--static', 'status']
rc, out, err = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg="Command failed rc=%d, out=%s, err=%s" %
(rc, out, err))
@ -398,6 +399,57 @@ class SolarisStrategy(GenericStrategy):

# ===========================================

class FreeBSDStrategy(GenericStrategy):
"""
This is a FreeBSD hostname manipulation strategy class - it edits
the /etc/rc.conf.d/hostname file.
"""

HOSTNAME_FILE = '/etc/rc.conf.d/hostname'

def get_permanent_hostname(self):

if not os.path.isfile(self.HOSTNAME_FILE):
try:
open(self.HOSTNAME_FILE, "a").write("hostname=temporarystub\n")
except IOError, err:
self.module.fail_json(msg="failed to write file: %s" %
str(err))
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
for line in f:
line = line.strip()
if line.startswith('hostname='):
return line[10:].strip('"')
except Exception, err:
self.module.fail_json(msg="failed to read hostname: %s" % str(err))
finally:
f.close()

return None

def set_permanent_hostname(self, name):
try:
try:
f = open(self.HOSTNAME_FILE, 'r')
lines = [x.strip() for x in f]

for i, line in enumerate(lines):
if line.startswith('hostname='):
lines[i] = 'hostname="%s"' % name
break
f.close()

f = open(self.HOSTNAME_FILE, 'w')
f.write('\n'.join(lines) + '\n')
except Exception, err:
self.module.fail_json(msg="failed to update hostname: %s" % str(err))
finally:
f.close()

# ===========================================

class FedoraHostname(Hostname):
platform = 'Linux'
distribution = 'Fedora'
@ -540,6 +592,12 @@ class SolarisHostname(Hostname):
distribution = None
strategy_class = SolarisStrategy

class FreeBSDHostname(Hostname):
platform = 'FreeBSD'
distribution = None
strategy_class = FreeBSDStrategy

# ===========================================

def main():

@ -563,6 +621,10 @@ def main():
hostname.set_permanent_hostname(name)
changed = True

module.exit_json(changed=changed, name=name, ansible_facts=dict(ansible_hostname=name))
module.exit_json(changed=changed, name=name,
ansible_facts=dict(ansible_hostname=name.split('.')[0],
ansible_nodename=name,
ansible_fqdn=socket.getfqdn(),
ansible_domain='.'.join(socket.getfqdn().split('.')[1:])))

main()
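A sketch of what the richer return above means in practice (host name invented): a task such as the one below now reports ansible_hostname=web01 and ansible_nodename=web01.example.com, with ansible_fqdn and ansible_domain derived from socket.getfqdn() on the target.

- hostname: name=web01.example.com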
@ -23,7 +23,7 @@ DOCUMENTATION = '''
---
module: ping
version_added: historical
short_description: Try to connect to host, veryify a usable python and return C(pong) on success.
short_description: Try to connect to host, verify a usable python and return C(pong) on success.
description:
- A trivial test module, this module always returns C(pong) on successful
contact. It does not make sense in playbooks, but it is useful from
@ -395,7 +395,7 @@ class LinuxService(Service):
location = dict()

for binary in binaries:
location[binary] = self.module.get_bin_path(binary)
location[binary] = self.module.get_bin_path(binary, opt_dirs=paths)

for initdir in initpaths:
initscript = "%s/%s" % (initdir,self.name)

@ -403,10 +403,31 @@ class LinuxService(Service):
self.svc_initscript = initscript

def check_systemd():
return os.path.exists("/run/systemd/system/") or os.path.exists("/dev/.run/systemd/") or os.path.exists("/dev/.systemd/")

# tools must be installed
if location.get('systemctl',False):

# this should show if systemd is the boot init system
# these mirror systemd's own sd_boot test http://www.freedesktop.org/software/systemd/man/sd_booted.html
for canary in ["/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"]:
if os.path.exists(canary):
return True

# If all else fails, check if init is the systemd command, using comm as cmdline could be symlink
try:
f = open('/proc/1/comm', 'r')
except IOError:
# If comm doesn't exist, old kernel, no systemd
return False

for line in f:
if 'systemd' in line:
return True

return False

# Locate a tool to enable/disable a service
if location.get('systemctl',False) and check_systemd():
if check_systemd():
# service is managed by systemd
self.__systemd_unit = self.name
self.svc_cmd = location['systemctl']

@ -450,8 +471,7 @@ class LinuxService(Service):
self.enable_cmd = location['chkconfig']

if self.enable_cmd is None:
# exiting without change on non-existent service
self.module.exit_json(changed=False, exists=False)
self.module.fail_json(msg="no service or tool found for: %s" % self.name)

# If no service control tool selected yet, try to see if 'service' is available
if self.svc_cmd is None and location.get('service', False):

@ -459,7 +479,7 @@ class LinuxService(Service):

# couldn't find anything yet
if self.svc_cmd is None and not self.svc_initscript:
self.module.exit_json(changed=False, exists=False)
self.module.fail_json(msg='cannot find \'service\' binary or init script for service, possible typo in service name?, aborting')

if location.get('initctl', False):
self.svc_initctl = location['initctl']

@ -684,7 +704,8 @@ class LinuxService(Service):
(rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name))
if not self.name in out:
self.module.fail_json(msg="service %s does not support chkconfig" % self.name)
state = out.split()[-1]
#TODO: look back on why this is here
#state = out.split()[-1]

# Check if we're already in the correct state
if "3:%s" % action in out and "5:%s" % action in out:

@ -946,7 +967,6 @@ class FreeBsdService(Service):
self.rcconf_file = rcfile

rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments))
cmd = "%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments)
try:
rcvars = shlex.split(stdout, comments=True)
except:
|
|||
- Optionally when used with the -u option, this option allows to
|
||||
change the user ID to a non-unique value.
|
||||
version_added: "1.1"
|
||||
seuser:
|
||||
required: false
|
||||
description:
|
||||
- Optionally sets the seuser type (user_u) on selinux enabled systems.
|
||||
version_added: "2.1"
|
||||
group:
|
||||
required: false
|
||||
description:
|
||||
|
@ -253,6 +258,7 @@ class User(object):
self.name = module.params['name']
self.uid = module.params['uid']
self.non_unique = module.params['non_unique']
self.seuser = module.params['seuser']
self.group = module.params['group']
self.groups = module.params['groups']
self.comment = module.params['comment']

@ -313,6 +319,9 @@ class User(object):
if self.non_unique:
cmd.append('-o')

if self.seuser is not None:
cmd.append('-Z')
cmd.append(self.seuser)
if self.group is not None:
if not self.group_exists(self.group):
self.module.fail_json(msg="Group %s does not exist" % self.group)

@ -1674,9 +1683,10 @@ class DarwinUser(User):
self._update_system_user()
# here we don't care about change status since it is a creation,
# thus changed is always true.
(rc, _out, _err, changed) = self._modify_group()
out += _out
err += _err
if self.groups:
(rc, _out, _err, changed) = self._modify_group()
out += _out
err += _err
return (rc, err, out)

def modify_user(self):

@ -1684,7 +1694,8 @@ class DarwinUser(User):
out = ''
err = ''

self._make_group_numerical()
if self.group:
self._make_group_numerical()

for field in self.fields:
if self.__dict__.has_key(field[0]) and self.__dict__[field[0]]:

@ -1707,12 +1718,13 @@ class DarwinUser(User):
err += _err
changed = rc

(rc, _out, _err, _changed) = self._modify_group()
out += _out
err += _err
if self.groups:
(rc, _out, _err, _changed) = self._modify_group()
out += _out
err += _err

if _changed is True:
changed = rc
if _changed is True:
changed = rc

rc = self._update_system_user()
if rc == 0:

@ -2047,6 +2059,8 @@ def main():
shell=dict(default=None, type='str'),
password=dict(default=None, type='str', no_log=True),
login_class=dict(default=None, type='str'),
# following options are specific to selinux
seuser=dict(default=None, type='str'),
# following options are specific to userdel
force=dict(default='no', type='bool'),
remove=dict(default='no', type='bool'),
@ -27,15 +27,20 @@ import shlex
import os
import subprocess
import sys
import datetime
import traceback
import signal
import time
import syslog

syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:]))

def notice(msg):
syslog.syslog(syslog.LOG_NOTICE, msg)

def daemonize_self():
# daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
# logger.info("cobblerd started")
try:
pid = os.fork()
if pid > 0:
@ -65,50 +70,21 @@ def daemonize_self():
os.dup2(dev_null.fileno(), sys.stdout.fileno())
os.dup2(dev_null.fileno(), sys.stderr.fileno())

if len(sys.argv) < 3:
print json.dumps({
"failed" : True,
"msg" : "usage: async_wrapper <jid> <time_limit> <modulescript> <argsfile>. Humans, do not call directly!"
})
sys.exit(1)

jid = "%s.%d" % (sys.argv[1], os.getpid())
time_limit = sys.argv[2]
wrapped_module = sys.argv[3]
argsfile = sys.argv[4]
cmd = "%s %s" % (wrapped_module, argsfile)
def _run_module(wrapped_cmd, jid, job_path):

syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:]))

# setup logging directory
logdir = os.path.expanduser("~/.ansible_async")
log_path = os.path.join(logdir, jid)

if not os.path.exists(logdir):
try:
os.makedirs(logdir)
except:
print json.dumps({
"failed" : 1,
"msg" : "could not create: %s" % logdir
})

def _run_command(wrapped_cmd, jid, log_path):

logfile = open(log_path, "w")
logfile.write(json.dumps({ "started" : 1, "ansible_job_id" : jid }))
logfile.close()
logfile = open(log_path, "w")
jobfile = open(job_path, "w")
jobfile.write(json.dumps({ "started" : 1, "ansible_job_id" : jid }))
jobfile.close()
jobfile = open(job_path, "w")
result = {}

outdata = ''
try:
cmd = shlex.split(wrapped_cmd)
script = subprocess.Popen(cmd, shell=False,
stdin=None, stdout=logfile, stderr=logfile)
script = subprocess.Popen(cmd, shell=False, stdin=None, stdout=jobfile, stderr=jobfile)
script.communicate()
outdata = file(log_path).read()
outdata = file(job_path).read()
result = json.loads(outdata)

except (OSError, IOError), e:
@ -118,83 +94,109 @@ def _run_command(wrapped_cmd, jid, log_path):
"msg": str(e),
}
result['ansible_job_id'] = jid
logfile.write(json.dumps(result))
jobfile.write(json.dumps(result))
except:
result = {
"failed" : 1,
"cmd" : wrapped_cmd,
"data" : outdata, # temporary debug only
"data" : outdata, # temporary notice only
"msg" : traceback.format_exc()
}
result['ansible_job_id'] = jid
logfile.write(json.dumps(result))
logfile.close()
jobfile.write(json.dumps(result))
jobfile.close()

# immediately exit this process, leaving an orphaned process
# running which immediately forks a supervisory timing process

#import logging
#import logging.handlers
####################
## main ##
####################
if __name__ == '__main__':

#logger = logging.getLogger("ansible_async")
#logger.setLevel(logging.WARNING)
#logger.addHandler( logging.handlers.SysLogHandler("/dev/log") )
def debug(msg):
#logger.warning(msg)
pass
if len(sys.argv) < 3:
print json.dumps({
"failed" : True,
"msg" : "usage: async_wrapper <jid> <time_limit> <modulescript> <argsfile>. Humans, do not call directly!"
})
sys.exit(1)

try:
pid = os.fork()
if pid:
# Notify the overlord that the async process started
jid = "%s.%d" % (sys.argv[1], os.getpid())
time_limit = sys.argv[2]
wrapped_module = sys.argv[3]
argsfile = sys.argv[4]
cmd = "%s %s" % (wrapped_module, argsfile)
step = 5

# we need to not return immediately such that the launched command has an attempt
# to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile)
# this probably could be done with some IPC later. Modules should always read
# the argsfile at the very first start of their execution anyway
time.sleep(1)
debug("Return async_wrapper task started.")
print json.dumps({ "started" : 1, "ansible_job_id" : jid, "results_file" : log_path })
sys.stdout.flush()
sys.exit(0)
else:
# The actual wrapper process
# setup job output directory
jobdir = os.path.expanduser("~/.ansible_async")
job_path = os.path.join(jobdir, jid)

# Daemonize, so we keep on running
daemonize_self()
if not os.path.exists(jobdir):
try:
os.makedirs(jobdir)
except:
print json.dumps({
"failed" : 1,
"msg" : "could not create: %s" % jobdir
})
# immediately exit this process, leaving an orphaned process
# running which immediately forks a supervisory timing process

# we are now daemonized, create a supervisory process
debug("Starting module and watcher")
try:
pid = os.fork()
if pid:
# Notify the overlord that the async process started

sub_pid = os.fork()
if sub_pid:
# the parent stops the process after the time limit
remaining = int(time_limit)

# set the child process group id to kill all children
os.setpgid(sub_pid, sub_pid)

debug("Start watching %s (%s)"%(sub_pid, remaining))
time.sleep(5)
while os.waitpid(sub_pid, os.WNOHANG) == (0, 0):
debug("%s still running (%s)"%(sub_pid, remaining))
time.sleep(5)
remaining = remaining - 5
if remaining <= 0:
debug("Now killing %s"%(sub_pid))
os.killpg(sub_pid, signal.SIGKILL)
debug("Sent kill to group %s"%sub_pid)
time.sleep(1)
sys.exit(0)
debug("Done in kid B.")
os._exit(0)
else:
# the child process runs the actual module
debug("Start module (%s)"%os.getpid())
_run_command(cmd, jid, log_path)
debug("Module complete (%s)"%os.getpid())
# we need to not return immediately such that the launched command has an attempt
# to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile)
# this probably could be done with some IPC later. Modules should always read
# the argsfile at the very first start of their execution anyway
notice("Return async_wrapper task started.")
print json.dumps({ "started" : 1, "ansible_job_id" : jid, "results_file" : job_path })
sys.stdout.flush()
time.sleep(1)
sys.exit(0)
else:
# The actual wrapper process

except Exception, err:
debug("error: %s"%(err))
raise err
# Daemonize, so we keep on running
daemonize_self()

# we are now daemonized, create a supervisory process
notice("Starting module and watcher")

sub_pid = os.fork()
if sub_pid:
# the parent stops the process after the time limit
remaining = int(time_limit)

# set the child process group id to kill all children
os.setpgid(sub_pid, sub_pid)

notice("Start watching %s (%s)"%(sub_pid, remaining))
time.sleep(step)
while os.waitpid(sub_pid, os.WNOHANG) == (0, 0):
notice("%s still running (%s)"%(sub_pid, remaining))
time.sleep(step)
remaining = remaining - step
if remaining <= 0:
notice("Now killing %s"%(sub_pid))
os.killpg(sub_pid, signal.SIGKILL)
notice("Sent kill to group %s"%sub_pid)
time.sleep(1)
sys.exit(0)
notice("Done in kid B.")
sys.exit(0)
else:
# the child process runs the actual module
notice("Start module (%s)"%os.getpid())
_run_module(cmd, jid, job_path)
notice("Module complete (%s)"%os.getpid())
sys.exit(0)

except Exception, err:
notice("error: %s"%(err))
print json.dumps({
"failed" : True,
"msg" : "FATAL ERROR: %s" % str(err)
})
sys.exit(1)
@ -14,7 +14,7 @@ author: "Benno Joy (@bennojoy)"
module: include_vars
short_description: Load variables from files, dynamically within a task.
description:
- Loads variables from a YAML file dynamically during task runtime. It can work with conditionals, or use host specific variables to determine the path name to load from.
- Loads variables from a YAML/JSON file dynamically during task runtime. It can work with conditionals, or use host specific variables to determine the path name to load from.
options:
free-form:
description:
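A small sketch of the host-variable-driven loading described above; the file layout is hypothetical:

- include_vars: "{{ ansible_os_family }}.yml"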
@ -24,9 +24,8 @@ author: "Dag Wieers (@dagwieers)"
module: set_fact
short_description: Set host facts from a task
description:
- This module allows setting new variables. Variables are set on a host-by-host basis
just like facts discovered by the setup module.
- These variables will survive between plays.
- This module allows setting new variables. Variables are set on a host-by-host basis just like facts discovered by the setup module.
- These variables will survive between plays during an Ansible run, but will not be saved across executions even if you use a fact cache.
options:
key_value:
description:
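For instance, a fact set in an early play stays visible to later plays of the same run, but is gone on the next run; the variable name here is invented for illustration:

- set_fact: deploy_color=blue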
@ -18,12 +18,14 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

import socket
import datetime
import time
import sys
import re
import binascii
import datetime
import math
import re
import select
import socket
import sys
import time

HAS_PSUTIL = False
try:

@ -101,7 +103,7 @@ options:
notes:
- The ability to use search_regex with a port connection was added in 1.7.
requirements: []
author:
author:
- "Jeroen Hoekx (@jhoekx)"
- "John Jarvis (@jarv)"
- "Andrii Radyk (@AnderEnder)"
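An illustrative task combining port with search_regex as noted above; the port and pattern are examples, not from this commit:

- wait_for: port=22 search_regex=OpenSSH delay=10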
@ -125,7 +127,7 @@ EXAMPLES = '''
- wait_for: path=/tmp/foo search_regex=completed

# wait until the lock file is removed
- wait_for: path=/var/lock/file.lock state=absent

# wait until the process is finished and pid was destroyed
- wait_for: path=/proc/3466/status state=absent

@ -320,6 +322,11 @@ def _create_connection( (host, port), connect_timeout):
connect_socket = socket.create_connection( (host, port), connect_timeout)
return connect_socket

def _timedelta_total_seconds(timedelta):
return (
timedelta.microseconds + 0.0 +
(timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6) / 10 ** 6

def main():

module = AnsibleModule(

@ -349,6 +356,10 @@ def main():
state = params['state']
path = params['path']
search_regex = params['search_regex']
if search_regex is not None:
compiled_search_re = re.compile(search_regex, re.MULTILINE)
else:
compiled_search_re = None

if port and path:
module.fail_json(msg="port and path parameter can not both be passed to wait_for")
@ -404,55 +415,72 @@ def main():
if path:
try:
os.stat(path)
if search_regex:
try:
f = open(path)
try:
if re.search(search_regex, f.read(), re.MULTILINE):
break
else:
time.sleep(1)
finally:
f.close()
except IOError:
time.sleep(1)
pass
else:
break
except OSError, e:
# File not present
if e.errno == 2:
time.sleep(1)
else:
# If anything except file not present, throw an error
if e.errno != 2:
elapsed = datetime.datetime.now() - start
module.fail_json(msg="Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds)
# file doesn't exist yet, so continue
else:
# File exists. Are there additional things to check?
if not compiled_search_re:
# nope, succeed!
break
try:
f = open(path)
try:
if re.search(compiled_search_re, f.read()):
# String found, success!
break
finally:
f.close()
except IOError:
pass
elif port:
alt_connect_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.now()))
try:
s = _create_connection( (host, port), connect_timeout)
if search_regex:
s = _create_connection((host, port), min(connect_timeout, alt_connect_timeout))
except:
# Failed to connect by connect_timeout. wait and try again
pass
else:
# Connected -- are there additional conditions?
if compiled_search_re:
data = ''
matched = False
while 1:
data += s.recv(1024)
if not data:
while datetime.datetime.now() < end:
max_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.now()))
(readable, w, e) = select.select([s], [], [], max_timeout)
if not readable:
# No new data. Probably means our timeout
# expired
continue
response = s.recv(1024)
if not response:
# Server shutdown
break
elif re.search(search_regex, data, re.MULTILINE):
data += response
if re.search(compiled_search_re, data):
matched = True
break

# Shutdown the client socket
s.shutdown(socket.SHUT_RDWR)
s.close()
if matched:
s.shutdown(socket.SHUT_RDWR)
s.close()
# Found our string, success!
break
else:
# Connection established, success!
s.shutdown(socket.SHUT_RDWR)
s.close()
break
except:
time.sleep(1)
pass
else:
time.sleep(1)
else:

# Conditions not yet met, wait and try again
time.sleep(1)

else: # while-else
# Timeout expired
elapsed = datetime.datetime.now() - start
if port:
if search_regex:

@ -485,4 +513,5 @@ def main():

# import module snippets
from ansible.module_utils.basic import *
main()
if __name__ == '__main__':
main()
@ -69,7 +69,7 @@ notes:
- "On Debian, Ubuntu, or Fedora: install I(python-passlib)."
- "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)."
requires: [ passlib>=1.6 ]
author: "Lorin Hochstein (@lorin)"
author: "Ansible Core Team"
"""

EXAMPLES = """

@ -97,6 +97,7 @@ else:

apache_hashes = ["apr_md5_crypt", "des_crypt", "ldap_sha1", "plaintext"]

def create_missing_directories(dest):
destpath = os.path.dirname(dest)
if not os.path.exists(destpath):

@ -155,9 +156,6 @@ def absent(dest, username, check_mode):
""" Ensures user is absent

Returns (msg, changed) """
if not os.path.exists(dest):
raise ValueError("%s does not exists" % dest)

if StrictVersion(passlib.__version__) >= StrictVersion('1.6'):
ht = HtpasswdFile(dest, new=False)
else:

@ -244,6 +242,9 @@ def main():
if state == 'present':
(msg, changed) = present(path, username, password, crypt_scheme, create, check_mode)
elif state == 'absent':
if not os.path.exists(path):
module.exit_json(msg="%s not present" % username,
warnings="%s does not exist" % path, changed=False)
(msg, changed) = absent(path, username, check_mode)
else:
module.fail_json(msg="Invalid state: %s" % state)
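With the guard above, removing an entry from a password file that was never created now exits cleanly instead of raising, so a task along these lines (path and user invented) is safe on first run:

- htpasswd: path=/etc/nginx/passwdfile name=bob state=absent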
@ -26,11 +26,9 @@ $result = New-Object psobject @{
};

$win32_os = Get-CimInstance Win32_OperatingSystem
$win32_cs = Get-CimInstance Win32_ComputerSystem
$osversion = [Environment]::OSVersion
$memory = @()
$memory += Get-WmiObject win32_Physicalmemory
$capacity = 0
$memory | foreach {$capacity += $_.Capacity}
$capacity = $win32_cs.TotalPhysicalMemory # Win32_PhysicalMemory is empty on some virtual platforms
$netcfg = Get-WmiObject win32_NetworkAdapterConfiguration

$ActiveNetcfg = @(); $ActiveNetcfg+= $netcfg | where {$_.ipaddress -ne $null}

@ -70,6 +68,7 @@ Set-Attr $date "year" (Get-Date -format yyyy)
Set-Attr $date "month" (Get-Date -format MM)
Set-Attr $date "day" (Get-Date -format dd)
Set-Attr $date "hour" (Get-Date -format HH)
Set-Attr $date "minute" (Get-Date -format mm)
Set-Attr $date "iso8601" (Get-Date -format s)
Set-Attr $result.ansible_facts "ansible_date_time" $date
windows/win_copy.py (Normal file → Executable file)
@ -44,16 +44,6 @@ options:
required: true
default: null
author: "Jon Hawkesworth (@jhawkesworth)"
notes:
- The "win_copy" module is best used for small files only.
This module should **not** be used for files bigger than 3Mb as
this will result in a 500 response from the winrm host
and it will not be possible to connect via winrm again until the
windows remote management service has been restarted on the
windows host.
Files larger than 1Mb will take minutes to transfer.
The recommended way to transfer large files is using win_get_url
or collecting from a windows file share folder.
'''

EXAMPLES = '''
@ -17,6 +17,8 @@
# WANT_JSON
# POWERSHELL_COMMON

$ErrorActionPreference = "Stop"

$params = Parse-Args $args

# path
@ -44,6 +44,10 @@ $skip_certificate_validation = Get-Attr $params "skip_certificate_validation" $f
$username = Get-Attr $params "username"
$password = Get-Attr $params "password"

$proxy_url = Get-Attr $params "proxy_url"
$proxy_username = Get-Attr $params "proxy_username"
$proxy_password = Get-Attr $params "proxy_password"

if($skip_certificate_validation){
[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true}
}

@ -52,6 +56,14 @@ $force = Get-Attr -obj $params -name "force" "yes" | ConvertTo-Bool

If ($force -or -not (Test-Path $dest)) {
$client = New-Object System.Net.WebClient
if($proxy_url) {
$proxy_server = New-Object System.Net.WebProxy($proxy_url, $true)
if($proxy_username -and $proxy_password){
$proxy_credential = New-Object System.Net.NetworkCredential($proxy_username, $proxy_password)
$proxy_server.Credentials = $proxy_credential
}
$client.Proxy = $proxy_server
}

if($username -and $password){
$client.Credentials = New-Object System.Net.NetworkCredential($username, $password)
@ -65,6 +65,24 @@ options:
- Skip SSL certificate validation if true
required: false
default: false
proxy_url:
description:
- The full URL of the proxy server to download through.
version_added: "2.0"
required: false
proxy_username:
description:
- Proxy authentication username
version_added: "2.0"
required: false
proxy_password:
description:
- Proxy authentication password
version_added: "2.0"
required: false
author:
- "Paul Durivage (@angstwad)"
- "Takeshi Kuramochi (tksarah)"
'''

EXAMPLES = '''

@ -83,4 +101,12 @@ $ ansible -i hosts -c winrm -m win_get_url -a "url=http://www.example.com/earthr
url: 'http://www.example.com/earthrise.jpg'
dest: 'C:\Users\RandomUser\earthrise.jpg'
force: no

- name: Download earthrise.jpg to 'C:\Users\RandomUser\earthrise.jpg' through a proxy server.
win_get_url:
url: 'http://www.example.com/earthrise.jpg'
dest: 'C:\Users\RandomUser\earthrise.jpg'
proxy_url: 'http://10.0.0.1:8080'
proxy_username: 'username'
proxy_password: 'password'
'''
@ -34,6 +34,10 @@ options:
description:
- File system path to the MSI file to install
required: true
extra_args:
description:
- Additional arguments to pass to the msiexec.exe command
required: false
state:
description:
- Whether the MSI file should be installed or uninstalled
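A task using the new extra_args option might pass msiexec properties like the following; the path and property are illustrative only:

- win_msi: path='C:\temp\app.msi' extra_args='INSTALLDIR=C:\Apps\app'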
@ -15,7 +15,7 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
DOCUMENTATION = r'''
---
module: win_template
version_added: "1.9.2"

@ -47,8 +47,8 @@ notes:
- "templates are loaded with C(trim_blocks=True)."
- By default, windows line endings are not created in the generated file.
- "In order to ensure windows line endings are in the generated file, add the following header
as the first line of your template: #jinja2: newline_sequence:'\r\n' and ensure each line
of the template ends with \r\n"
as the first line of your template: #jinja2: newline_sequence:'\\\\r\\\\n' and ensure each line
of the template ends with \\\\r\\\\n"
- Beware fetching files from windows machines when creating templates
because certain tools, such as Powershell ISE, and regedit's export facility
add a Byte Order Mark as the first character of the file, which can cause tracebacks.
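To make the header note above concrete, a template intended to produce CRLF output would begin with the jinja2 directive on its first line; the template body here is invented:

#jinja2: newline_sequence:'\r\n'
ServerName={{ inventory_hostname }}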