Merge branch 'devel' into feature/add_ec2_elb_lb_idle_timeout
* devel: (84 commits)
  Document and return an error if httplib2 >= 0.7 is not present.
  Since find doesn't make changes, support check mode and gather data for other tasks in check mode
  Correct typo in yum module docs
  Update doc to reflect password is required if adding a new user
  Update error message to be more explicit
  Simplify logic to handle options set to empty string
  Fix to issue 12912. Supply 'force' to install of python-apt.
  Note the difference between yum package groups and environment groups.
  rearranged systemd check, removed redundant systemctl check
  fixed unused cmd and state var assignements
  added earlier paths to systemd
  make os_router return a top level 'id' key
  Version bump for new beta 2.0.0-0.4.beta2
  allow os_port to accept a list of security groups
  allow os_server to accept a list of security groups
  Add capability for stat module to use more hash algorithms
  allow empty description attribute for os_security_group
  Update hostname.py
  simpler way to check if systemd is the init system
  make os_keypair return a top level 'id' key
  make os_flavor return a top-level 'id' key
  ...
commit d4319555a0
52 changed files with 1902 additions and 457 deletions
VERSION
@@ -1 +1 @@
2.0.0-0.3.beta1
2.0.0-0.4.beta2
@@ -1226,8 +1226,12 @@ def startstop_instances(module, ec2, instance_ids, state, instance_tags):

    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    source_dest_check = module.params.get('source_dest_check')
    termination_protection = module.params.get('termination_protection')
    changed = False
    instance_dict_array = []
    source_dest_check = module.params.get('source_dest_check')
    termination_protection = module.params.get('termination_protection')

    if not isinstance(instance_ids, list) or len(instance_ids) < 1:
        # Fail unless the user defined instance tags
@@ -163,14 +163,133 @@ EXAMPLES = '''
    wait: yes
'''

RETURN = '''
ami_id:
    description: id of found amazon image
    returned: when AMI found
    type: string
    sample: "ami-e9095e8c"
architecture:
    description: architecture of image
    returned: when AMI found
    type: string
    sample: "x86_64"
block_device_mapping:
    description: block device mapping associated with image
    returned: when AMI found
    type: dictionary of block devices
    sample: "{
        '/dev/xvda': {
            'delete_on_termination': true,
            'encrypted': false,
            'size': 8,
            'snapshot_id': 'snap-ca0330b8',
            'volume_type': 'gp2'
        }
    }"
creationDate:
    description: creation date of image
    returned: when AMI found
    type: string
    sample: "2015-10-15T22:43:44.000Z"
description:
    description: description of image
    returned: when AMI found
    type: string
    sample: "test-server01"
hypervisor:
    description: type of hypervisor
    returned: when AMI found
    type: string
    sample: "xen"
is_public:
    description: whether image is public
    returned: when AMI found
    type: bool
    sample: false
location:
    description: location of image
    returned: when AMI found
    type: string
    sample: "435210894375/test-server01-20151015-234343"
name:
    description: ami name of image
    returned: when AMI found
    type: string
    sample: "test-server01-20151015-234343"
owner_id:
    description: owner of image
    returned: when AMI found
    type: string
    sample: "435210894375"
platform:
    description: platform of image
    returned: when AMI found
    type: string
    sample: null
root_device_name:
    description: root device name of image
    returned: when AMI found
    type: string
    sample: "/dev/xvda"
root_device_type:
    description: root device type of image
    returned: when AMI found
    type: string
    sample: "ebs"
state:
    description: state of image
    returned: when AMI found
    type: string
    sample: "available"
tags:
    description: tags assigned to image
    returned: when AMI found
    type: dictionary of tags
    sample: "{
        'Environment': 'devel',
        'Name': 'test-server01',
        'Role': 'web'
    }"
virtualization_type:
    description: image virtualization type
    returned: when AMI found
    type: string
    sample: "hvm"
'''

try:
    import boto.ec2
    from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
    HAS_BOTO=True
except ImportError:
    HAS_BOTO=False

import json

def get_block_device_mapping(image):
    """
    Retrieves block device mapping from AMI
    """

    bdm_dict = dict()
    bdm = getattr(image,'block_device_mapping')
    for device_name in bdm.keys():
        bdm_dict[device_name] = {
            'size': bdm[device_name].size,
            'snapshot_id': bdm[device_name].snapshot_id,
            'volume_type': bdm[device_name].volume_type,
            'encrypted': bdm[device_name].encrypted,
            'delete_on_termination': bdm[device_name].delete_on_termination
        }

    return bdm_dict


def main():
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(

@@ -255,8 +374,12 @@ def main():
        data = {
            'ami_id': image.id,
            'architecture': image.architecture,
            'block_device_mapping': get_block_device_mapping(image),
            'creationDate': image.creationDate,
            'description': image.description,
            'hypervisor': image.hypervisor,
            'is_public': image.is_public,
            'location': image.location,
            'name': image.name,
            'owner_id': image.owner_id,
            'platform': image.platform,

@@ -299,4 +422,3 @@ from ansible.module_utils.ec2 import *

if __name__ == '__main__':
    main()
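get_block_device_mapping above flattens boto's BlockDeviceMapping (device names mapped to BlockDeviceType objects) into the plain dicts described by the RETURN spec. A minimal standalone sketch of that flattening, with a stub class standing in for the boto type:

class BlockDeviceType(object):
    # stub for boto.ec2.blockdevicemapping.BlockDeviceType
    def __init__(self, size, snapshot_id, volume_type, encrypted, delete_on_termination):
        self.size = size
        self.snapshot_id = snapshot_id
        self.volume_type = volume_type
        self.encrypted = encrypted
        self.delete_on_termination = delete_on_termination

def flatten_block_device_mapping(bdm):
    # same shape as get_block_device_mapping() produces above
    fields = ('size', 'snapshot_id', 'volume_type', 'encrypted', 'delete_on_termination')
    return dict((name, dict((f, getattr(dev, f)) for f in fields))
                for name, dev in bdm.items())

bdm = {'/dev/xvda': BlockDeviceType(8, 'snap-ca0330b8', 'gp2', False, True)}
print(flatten_block_device_mapping(bdm))
# {'/dev/xvda': {'size': 8, 'snapshot_id': 'snap-ca0330b8', 'volume_type': 'gp2',
#                'encrypted': False, 'delete_on_termination': True}}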
@@ -255,9 +255,10 @@ def get_properties(autoscaling_group):
    properties['viable_instances'] = 0
    properties['terminating_instances'] = 0

    instance_facts = {}

    if autoscaling_group.instances:
        properties['instances'] = [i.instance_id for i in autoscaling_group.instances]
        instance_facts = {}
        for i in autoscaling_group.instances:
            instance_facts[i.instance_id] = {'health_status': i.health_status,
                                             'lifecycle_state': i.lifecycle_state,

@@ -274,7 +275,7 @@ def get_properties(autoscaling_group):
                properties['terminating_instances'] += 1
            if i.lifecycle_state == 'Pending':
                properties['pending_instances'] += 1
        properties['instance_facts'] = instance_facts
    properties['instance_facts'] = instance_facts
    properties['load_balancers'] = autoscaling_group.load_balancers

    if getattr(autoscaling_group, "tags", None):
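The two moves above guarantee instance_facts is defined and published even for an empty autoscaling group. A minimal sketch of the failure mode and the fix, with simplified data standing in for boto instance objects:

# Before the fix: instance_facts was only created inside the branch, so a
# group with no instances never got an 'instance_facts' key (and hoisting the
# assignment alone would have raised NameError).
def get_properties_old(instances):
    properties = {}
    if instances:
        instance_facts = {}                       # only defined here
        for i in instances:
            instance_facts[i] = {'health_status': 'Healthy'}
        properties['instance_facts'] = instance_facts
    return properties                             # key missing for empty groups

# After the fix: initialize first, assign unconditionally.
def get_properties_new(instances):
    properties = {}
    instance_facts = {}
    for i in instances or []:
        instance_facts[i] = {'health_status': 'Healthy'}
    properties['instance_facts'] = instance_facts  # always present, possibly {}
    return properties

print(get_properties_old([]))   # {}
print(get_properties_new([]))   # {'instance_facts': {}}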
@@ -82,7 +82,7 @@ pre_tasks:
      local_action:
        module: ec2_elb
        instance_id: "{{ ansible_ec2_instance_id }}"
        state: 'absent'
        state: absent
  roles:
    - myrole
  post_tasks:

@@ -91,7 +91,7 @@ post_tasks:
        module: ec2_elb
        instance_id: "{{ ansible_ec2_instance_id }}"
        ec2_elbs: "{{ item }}"
        state: 'present'
        state: present
      with_items: ec2_elbs
"""
@@ -29,6 +29,7 @@ options:
  state:
    description:
      - Create or destroy the ELB
    choices: ["present", "absent"]
    required: true
  name:
    description:

@@ -69,6 +70,12 @@ options:
      - An associative array of health check configuration settings (see example)
    required: false
    default: None
  access_logs:
    description:
      - An associative array of access logs configuration settings (see example)
    required: false
    default: None
    version_added: "2.0"
  subnets:
    description:
      - A list of VPC subnets to use when creating ELB. Zones should be empty if using this.

@@ -165,7 +172,7 @@ EXAMPLES = """
    load_balancer_port: 80
    instance_port: 80

# Configure a health check
# Configure a health check and the access logs
- local_action:
    module: ec2_elb_lb
    name: "test-please-delete"

@@ -184,6 +191,10 @@ EXAMPLES = """
      interval: 30 # seconds
      unhealthy_threshold: 2
      healthy_threshold: 10
    access_logs:
      interval: 5 # minutes (defaults to 60)
      s3_location: "my-bucket" # This value is required if access_logs is set
      s3_prefix: "logs"

# Ensure ELB is gone
- local_action:

@@ -311,7 +322,8 @@ class ElbManager(object):
                 zones=None, purge_zones=None, security_group_ids=None,
                 health_check=None, subnets=None, purge_subnets=None,
                 scheme="internet-facing", connection_draining_timeout=None,
                 idle_timeout=None, cross_az_load_balancing=None,
                 idle_timeout=None,
                 cross_az_load_balancing=None, access_logs=None,
                 stickiness=None, region=None, **aws_connect_params):

        self.module = module

@@ -328,6 +340,7 @@ class ElbManager(object):
        self.connection_draining_timeout = connection_draining_timeout
        self.idle_timeout = idle_timeout
        self.cross_az_load_balancing = cross_az_load_balancing
        self.access_logs = access_logs
        self.stickiness = stickiness

        self.aws_connect_params = aws_connect_params

@@ -358,6 +371,8 @@ class ElbManager(object):
            self._set_idle_timeout()
        if self._check_attribute_support('cross_zone_load_balancing'):
            self._set_cross_az_load_balancing()
        if self._check_attribute_support('access_log'):
            self._set_access_log()
        # add sticky options
        self.select_stickiness_policy()

@@ -707,6 +722,32 @@ class ElbManager(object):
        self.elb_conn.modify_lb_attribute(self.name, 'CrossZoneLoadBalancing',
                                          attributes.cross_zone_load_balancing.enabled)

    def _set_access_log(self):
        attributes = self.elb.get_attributes()
        if self.access_logs:
            if 's3_location' not in self.access_logs:
                self.module.fail_json(msg='s3_location information required')

            access_logs_config = {
                "enabled": True,
                "s3_bucket_name": self.access_logs['s3_location'],
                "s3_bucket_prefix": self.access_logs.get('s3_prefix', ''),
                "emit_interval": self.access_logs.get('interval', 60),
            }

            update_access_logs_config = False
            for attr, desired_value in access_logs_config.iteritems():
                if getattr(attributes.access_log, attr) != desired_value:
                    setattr(attributes.access_log, attr, desired_value)
                    update_access_logs_config = True
            if update_access_logs_config:
                self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)
                self.changed = True
        elif attributes.access_log.enabled:
            attributes.access_log.enabled = False
            self.changed = True
            self.elb_conn.modify_lb_attribute(self.name, 'AccessLog', attributes.access_log)

    def _set_connection_draining_timeout(self):
        attributes = self.elb.get_attributes()
        if self.connection_draining_timeout is not None:

@@ -849,7 +890,8 @@ def main():
        connection_draining_timeout={'default': None, 'required': False},
        idle_timeout={'default': None, 'required': False},
        cross_az_load_balancing={'default': None, 'required': False},
        stickiness={'default': None, 'required': False, 'type': 'dict'}
        stickiness={'default': None, 'required': False, 'type': 'dict'},
        access_logs={'default': None, 'required': False, 'type': 'dict'}
        )
    )

@@ -874,6 +916,7 @@ def main():
    security_group_ids = module.params['security_group_ids']
    security_group_names = module.params['security_group_names']
    health_check = module.params['health_check']
    access_logs = module.params['access_logs']
    subnets = module.params['subnets']
    purge_subnets = module.params['purge_subnets']
    scheme = module.params['scheme']

@@ -907,7 +950,8 @@ def main():
                         purge_zones, security_group_ids, health_check,
                         subnets, purge_subnets, scheme,
                         connection_draining_timeout, idle_timeout,
                         cross_az_load_balancing, stickiness,
                         cross_az_load_balancing,
                         access_logs, stickiness,
                         region=region, **aws_connect_params)

    # check for unsupported attributes for this version of boto
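_set_access_log follows the usual boto attribute-sync pattern: build the desired configuration, diff it field by field against the live attributes, and call modify only when something actually changed. A standalone sketch of that pattern, with a plain class standing in for boto's access-log attribute object:

class AccessLogAttr(object):
    # stand-in for the boto ELB AccessLog attribute object
    def __init__(self):
        self.enabled = False
        self.s3_bucket_name = None

def sync_attrs(current, desired):
    """Apply desired values onto current; return True if anything changed."""
    changed = False
    for attr, value in desired.items():
        if getattr(current, attr) != value:
            setattr(current, attr, value)
            changed = True
    return changed

log_attr = AccessLogAttr()
if sync_attrs(log_attr, {"enabled": True, "s3_bucket_name": "my-bucket"}):
    # only here would the module call elb_conn.modify_lb_attribute(...)
    print("attributes updated; pushing to AWS")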
@@ -259,7 +259,6 @@ def main():
        insufficient_data_actions=dict(type='list'),
        ok_actions=dict(type='list'),
        state=dict(default='present', choices=['present', 'absent']),
        region=dict(aliases=['aws_region', 'ec2_region']),
        )
    )

@@ -271,10 +270,14 @@ def main():
    state = module.params.get('state')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)
    try:
        connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, StandardError), e:
        module.fail_json(msg=str(e))

    if region:
        try:
            connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, StandardError), e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    if state == 'present':
        create_metric_alarm(connection, module)
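The replacement block fails fast with an explicit message when no region is configured, rather than letting boto raise an opaque error later. A minimal sketch of the guard, with stand-ins for connect_to_aws and module.fail_json:

def get_connection(region, connect, fail, **aws_connect_params):
    # fail() is a stand-in for module.fail_json; connect for connect_to_aws
    if not region:
        fail("region must be specified")   # explicit, actionable error
        return None
    try:
        return connect(region, **aws_connect_params)
    except Exception as e:                 # NoAuthHandlerFound/StandardError in the module
        fail(str(e))

conn = get_connection('us-east-1', lambda r, **kw: 'cloudwatch:%s' % r, print)
print(conn)   # cloudwatch:us-east-1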
@@ -27,41 +27,35 @@ options:
      - instance ID if you wish to attach the volume. Since 1.9 you can set to None to detach.
    required: false
    default: null
    aliases: []
  name:
    description:
      - volume Name tag if you wish to attach an existing volume (requires instance)
    required: false
    default: null
    aliases: []
    version_added: "1.6"
  id:
    description:
      - volume id if you wish to attach an existing volume (requires instance) or remove an existing volume
    required: false
    default: null
    aliases: []
    version_added: "1.6"
  volume_size:
    description:
      - size of volume (in GB) to create.
    required: false
    default: null
    aliases: []
  volume_type:
    description:
      - Type of EBS volume; standard (magnetic), gp2 (SSD), io1 (Provisioned IOPS). "Standard" is the old EBS default
        and continues to remain the Ansible default for backwards compatibility.
    required: false
    default: standard
    aliases: []
    version_added: "1.9"
  iops:
    description:
      - the provisioned IOPs you want to associate with this volume (integer).
    required: false
    default: 100
    aliases: []
    version_added: "1.3"
  encrypted:
    description:

@@ -73,7 +67,6 @@ options:
      - device id to override device mapping. Assumes /dev/sdf for Linux/UNIX and /dev/xvdf for Windows.
    required: false
    default: null
    aliases: []
  zone:
    description:
      - zone in which to create the volume, if unset uses the zone the instance is in (if set)

@@ -92,7 +85,6 @@ options:
    required: false
    default: "yes"
    choices: ["yes", "no"]
    aliases: []
    version_added: "1.5"
  state:
    description:

@@ -118,7 +110,7 @@ EXAMPLES = '''
- ec2_vol:
    instance: XXXXXX
    volume_size: 5
    iops: 200
    iops: 100
    device_name: sdd

# Example using snapshot id

@@ -189,6 +181,7 @@ from distutils.version import LooseVersion

try:
    import boto.ec2
    from boto.exception import BotoServerError
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False

@@ -200,6 +193,11 @@ def get_volume(module, ec2):
    zone = module.params.get('zone')
    filters = {}
    volume_ids = None

    # If no name or id supplied, just try volume creation based on module parameters
    if id is None and name is None:
        return None

    if zone:
        filters['availability_zone'] = zone
    if name:

@@ -219,18 +217,20 @@ def get_volume(module, ec2):
            module.fail_json(msg=msg)
        else:
            return None

    if len(vols) > 1:
        module.fail_json(msg="Found more than one volume in zone (if specified) with name: %s" % name)
    return vols[0]

def get_volumes(module, ec2):

    instance = module.params.get('instance')

    if not instance:
        module.fail_json(msg = "Instance must be specified to get volumes")

    try:
        vols = ec2.get_all_volumes(filters={'attachment.instance-id': instance})
        if not instance:
            vols = ec2.get_all_volumes()
        else:
            vols = ec2.get_all_volumes(filters={'attachment.instance-id': instance})
    except boto.exception.BotoServerError, e:
        module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
    return vols

@@ -254,7 +254,9 @@ def boto_supports_volume_encryption():
    """
    return hasattr(boto, 'Version') and LooseVersion(boto.Version) >= LooseVersion('2.29.0')


def create_volume(module, ec2, zone):
    changed = False
    name = module.params.get('name')
    id = module.params.get('id')
    instance = module.params.get('instance')

@@ -267,30 +269,15 @@ def create_volume(module, ec2, zone):
    if iops:
        volume_type = 'io1'

    if instance == 'None' or instance == '':
        instance = None

    volume = get_volume(module, ec2)
    if volume:
        if volume.attachment_state() is not None:
            if instance is None:
                return volume
            adata = volume.attach_data
            if adata.instance_id != instance:
                module.fail_json(msg = "Volume %s is already attached to another instance: %s"
                                 % (name or id, adata.instance_id))
            else:
                module.exit_json(msg="Volume %s is already mapped on instance %s: %s" %
                                 (name or id, adata.instance_id, adata.device),
                                 volume_id=id,
                                 device=adata.device,
                                 changed=False)
    else:
    if volume is None:
        try:
            if boto_supports_volume_encryption():
                volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops, encrypted)
                changed = True
            else:
                volume = ec2.create_volume(volume_size, zone, snapshot, volume_type, iops)
                changed = True

            while volume.status != 'available':
                time.sleep(3)

@@ -301,52 +288,89 @@ def create_volume(module, ec2, zone):
        except boto.exception.BotoServerError, e:
            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))

    return volume
    return volume, changed


def attach_volume(module, ec2, volume, instance):

    device_name = module.params.get('device_name')

    if device_name and instance:
        try:
            attach = volume.attach(instance.id, device_name)
            while volume.attachment_state() != 'attached':
                time.sleep(3)
                volume.update()
        except boto.exception.BotoServerError, e:
            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))

    changed = False

    # If device_name isn't set, make a choice based on best practices here:
    # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html

    # In future this needs to be more dynamic but combining block device mapping best practices
    # (bounds for devices, as above) with instance.block_device_mapping data would be tricky. For me ;)

    # Use password data attribute to tell whether the instance is Windows or Linux
    if device_name is None and instance:
    if device_name is None:
        try:
            if not ec2.get_password_data(instance.id):
                device_name = '/dev/sdf'
                attach = volume.attach(instance.id, device_name)
                while volume.attachment_state() != 'attached':
                    time.sleep(3)
                    volume.update()
            else:
                device_name = '/dev/xvdf'
                attach = volume.attach(instance.id, device_name)
                while volume.attachment_state() != 'attached':
                    time.sleep(3)
                    volume.update()
        except boto.exception.BotoServerError, e:
            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))

    if volume.attachment_state() is not None:
        adata = volume.attach_data
        if adata.instance_id != instance.id:
            module.fail_json(msg = "Volume %s is already attached to another instance: %s"
                             % (volume.id, adata.instance_id))
    else:
        try:
            volume.attach(instance.id, device_name)
            while volume.attachment_state() != 'attached':
                time.sleep(3)
                volume.update()
            changed = True
        except boto.exception.BotoServerError, e:
            module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))

def detach_volume(module, ec2):
    vol = get_volume(module, ec2)
    if not vol or vol.attachment_state() is None:
        module.exit_json(changed=False)
    else:
        vol.detach()
        module.exit_json(changed=True)
    return volume, changed

def detach_volume(module, ec2, volume):

    changed = False

    if volume.attachment_state() is not None:
        adata = volume.attach_data
        volume.detach()
        while volume.attachment_state() is not None:
            time.sleep(3)
            volume.update()
        changed = True

    return volume, changed

def get_volume_info(volume, state):

    # If we're just listing volumes then do nothing, else get the latest update for the volume
    if state != 'list':
        volume.update()

    volume_info = {}
    attachment = volume.attach_data

    volume_info = {
        'create_time': volume.create_time,
        'id': volume.id,
        'iops': volume.iops,
        'size': volume.size,
        'snapshot_id': volume.snapshot_id,
        'status': volume.status,
        'type': volume.type,
        'zone': volume.zone,
        'attachment_set': {
            'attach_time': attachment.attach_time,
            'device': attachment.device,
            'instance_id': attachment.instance_id,
            'status': attachment.status
        },
        'tags': volume.tags
    }

    return volume_info

def main():
    argument_spec = ec2_argument_spec()

@@ -380,11 +404,30 @@ def main():
    zone = module.params.get('zone')
    snapshot = module.params.get('snapshot')
    state = module.params.get('state')

    # Ensure we have the zone or can get the zone
    if instance is None and zone is None and state == 'present':
        module.fail_json(msg="You must specify either instance or zone")

    # Set volume detach flag
    if instance == 'None' or instance == '':
        instance = None
        detach_vol_flag = True
    else:
        detach_vol_flag = False

    # Set changed flag
    changed = False

    ec2 = ec2_connect(module)
    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region:
        try:
            ec2 = connect_to_aws(boto.ec2, region, **aws_connect_params)
        except (boto.exception.NoAuthHandlerFound, StandardError), e:
            module.fail_json(msg=str(e))
    else:
        module.fail_json(msg="region must be specified")

    if state == 'list':
        returned_volumes = []

@@ -393,21 +436,7 @@ def main():
        for v in vols:
            attachment = v.attach_data

            returned_volumes.append({
                'create_time': v.create_time,
                'id': v.id,
                'iops': v.iops,
                'size': v.size,
                'snapshot_id': v.snapshot_id,
                'status': v.status,
                'type': v.type,
                'zone': v.zone,
                'attachment_set': {
                    'attach_time': attachment.attach_time,
                    'device': attachment.device,
                    'status': attachment.status
                }
            })
            returned_volumes.append(get_volume_info(v, state))

        module.exit_json(changed=False, volumes=returned_volumes)

@@ -418,8 +447,12 @@ def main():
    # instance is specified but zone isn't.
    # Useful for playbooks chaining instance launch with volume create + attach and where the
    # zone doesn't matter to the user.
    inst = None
    if instance:
        reservation = ec2.get_all_instances(instance_ids=instance)
        try:
            reservation = ec2.get_all_instances(instance_ids=instance)
        except BotoServerError as e:
            module.fail_json(msg=e.message)
        inst = reservation[0].instances[0]
        zone = inst.placement

@@ -438,17 +471,19 @@ def main():

    if volume_size and (id or snapshot):
        module.fail_json(msg="Cannot specify volume_size together with id or snapshot")

    if state == 'absent':
        delete_volume(module, ec2)


    if state == 'present':
        volume = create_volume(module, ec2, zone)
        if instance:
            attach_volume(module, ec2, volume, inst)
        else:
            detach_volume(module, ec2)
        module.exit_json(volume_id=volume.id, device=device_name, volume_type=volume.type)
        volume, changed = create_volume(module, ec2, zone)
        if detach_vol_flag:
            volume, changed = detach_volume(module, ec2, volume)
        elif inst is not None:
            volume, changed = attach_volume(module, ec2, volume, inst)

        # Add device, volume_id and volume_type parameters separately to maintain backward compatibility
        volume_info = get_volume_info(volume, state)
        module.exit_json(changed=changed, volume=volume_info, device=volume_info['attachment_set']['device'], volume_id=volume_info['id'], volume_type=volume_info['type'])
    elif state == 'absent':
        delete_volume(module, ec2)

# import module snippets
from ansible.module_utils.basic import *
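After this rewrite, create_volume, attach_volume and detach_volume all return a (volume, changed) pair instead of calling exit_json themselves, leaving main() as the single exit point. A minimal sketch of that refactor pattern, with a plain dict standing in for the boto volume object:

def ensure_attached(volume, instance_id):
    """Idempotent step: report (volume, changed) rather than exiting."""
    changed = False
    if volume.get('attached_to') != instance_id:
        volume['attached_to'] = instance_id   # stand-in for volume.attach(...)
        changed = True
    return volume, changed

volume = {'id': 'vol-123', 'attached_to': None}
volume, changed = ensure_attached(volume, 'i-abc')
print(changed)   # True on first run, False when re-run (idempotent)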
@@ -565,7 +565,10 @@ def main():
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    try:
        iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
        if region:
            iam = boto.iam.connect_to_region(region, **aws_connect_kwargs)
        else:
            iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
    except boto.exception.NoAuthHandlerFound, e:
        module.fail_json(msg=str(e))
@@ -107,6 +107,7 @@ import sys
try:
    import boto
    import boto.iam
    import boto.ec2
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False

@@ -246,7 +247,10 @@ def main():
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    try:
        iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
        if region:
            iam = boto.iam.connect_to_region(region, **aws_connect_kwargs)
        else:
            iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
    except boto.exception.NoAuthHandlerFound, e:
        module.fail_json(msg=str(e))
@@ -307,7 +307,10 @@ def main():
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)

    try:
        iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
        if region:
            iam = boto.iam.connect_to_region(region, **aws_connect_kwargs)
        else:
            iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
    except boto.exception.NoAuthHandlerFound, e:
        module.fail_json(msg=str(e))
@@ -24,7 +24,7 @@ description:
options:
  command:
    description:
      - Specifies the action to take.
      - Specifies the action to take. The 'reboot' option is available starting at version 2.0
    required: true
    choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'reboot', 'restore' ]
  instance_name:
@@ -35,7 +35,7 @@ options:
    default: null
    aliases: ['ec2_secret_key', 'secret_key']
  bucket:
    description:
      - Bucket name.
    required: true
    default: null

@@ -131,12 +131,12 @@ options:
    default: 0
    version_added: "2.0"
  s3_url:
    description:
      - S3 URL endpoint for usage with Eucalyptus, fakes3, etc. Otherwise assumes AWS
    default: null
    aliases: [ S3_URL ]
  src:
    description:
      - The source file path when performing a PUT operation.
    required: false
    default: null

@@ -416,17 +416,11 @@ def main():
    if acl not in CannedACLStrings:
        module.fail_json(msg='Unknown permission specified: %s' % str(acl))

    if overwrite not in ['always', 'never', 'different']:
        if overwrite not in ['always', 'never', 'different']:
            if module.boolean(overwrite):
                overwrite = 'always'
            else:
                overwrite='never'

    if overwrite not in ['always', 'never', 'different']:
        if module.boolean(overwrite):
            overwrite = 'always'
        else:
            overwrite = 'never'

    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
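The corrected block collapses an accidentally nested duplicate of the same condition; what survives is a normalization of legacy boolean overwrite values onto the newer three-state setting. A standalone sketch (module.boolean accepts more spellings than this simplified stand-in):

def normalize_overwrite(value):
    """Map legacy yes/no values onto the three-state overwrite setting."""
    if value in ('always', 'never', 'different'):
        return value
    # anything truthy ("yes", "true", "1", ...) means the old overwrite=yes
    return 'always' if str(value).lower() in ('yes', 'true', '1') else 'never'

assert normalize_overwrite('different') == 'different'
assert normalize_overwrite('yes') == 'always'
assert normalize_overwrite('no') == 'never'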
@@ -249,22 +249,29 @@ AZURE_ROLE_SIZES = ['ExtraSmall',
                    'Standard_G4',
                    'Standard_G5']

from distutils.version import LooseVersion

try:
    import azure as windows_azure

    from azure import WindowsAzureError, WindowsAzureMissingResourceError
    if hasattr(windows_azure, '__version__') and LooseVersion(windows_azure.__version__) <= "0.11.1":
        from azure import WindowsAzureError as AzureException
        from azure import WindowsAzureMissingResourceError as AzureMissingException
    else:
        from azure.common import AzureException as AzureException
        from azure.common import AzureMissingResourceHttpError as AzureMissingException

    from azure.servicemanagement import (ServiceManagementService, OSVirtualHardDisk, SSH, PublicKeys,
                                         PublicKey, LinuxConfigurationSet, ConfigurationSetInputEndpoints,
                                         ConfigurationSetInputEndpoint, Listener, WindowsConfigurationSet)

    HAS_AZURE = True
except ImportError:
    HAS_AZURE = False

from distutils.version import LooseVersion
from types import MethodType
import json


def _wait_for_completion(azure, promise, wait_timeout, msg):
    if not promise: return
    wait_timeout = time.time() + wait_timeout

@@ -274,7 +281,7 @@ def _wait_for_completion(azure, promise, wait_timeout, msg):
        if operation_result.status == "Succeeded":
            return

    raise WindowsAzureError('Timed out waiting for async operation ' + msg + ' "' + str(promise.request_id) + '" to complete.')
    raise AzureException('Timed out waiting for async operation ' + msg + ' "' + str(promise.request_id) + '" to complete.')

def _delete_disks_when_detached(azure, wait_timeout, disk_names):
    def _handle_timeout(signum, frame):

@@ -289,7 +296,7 @@ def _delete_disks_when_detached(azure, wait_timeout, disk_names):
                if disk.attached_to is None:
                    azure.delete_disk(disk.name, True)
                    disk_names.remove(disk_name)
        except WindowsAzureError, e:
        except AzureException, e:
            module.fail_json(msg="failed to get or delete disk, error was: %s" % (disk_name, str(e)))
    finally:
        signal.alarm(0)

@@ -347,13 +354,13 @@ def create_virtual_machine(module, azure):
            result = azure.create_hosted_service(service_name=name, label=name, location=location)
            _wait_for_completion(azure, result, wait_timeout, "create_hosted_service")
            changed = True
        except WindowsAzureError, e:
        except AzureException, e:
            module.fail_json(msg="failed to create the new service, error was: %s" % str(e))

    try:
        # check to see if a vm with this name exists; if so, do nothing
        azure.get_role(name, name, name)
    except WindowsAzureMissingResourceError:
    except AzureMissingException:
        # vm does not exist; create it

        if os_type == 'linux':

@@ -419,13 +426,13 @@ def create_virtual_machine(module, azure):
                                                             virtual_network_name=virtual_network_name)
            _wait_for_completion(azure, result, wait_timeout, "create_virtual_machine_deployment")
            changed = True
        except WindowsAzureError, e:
        except AzureException, e:
            module.fail_json(msg="failed to create the new virtual machine, error was: %s" % str(e))

    try:
        deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
        return (changed, urlparse(deployment.url).hostname, deployment)
    except WindowsAzureError, e:
    except AzureException, e:
        module.fail_json(msg="failed to lookup the deployment information for %s, error was: %s" % (name, str(e)))


@@ -453,9 +460,9 @@ def terminate_virtual_machine(module, azure):
    disk_names = []
    try:
        deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
    except WindowsAzureMissingResourceError, e:
    except AzureMissingException, e:
        pass  # no such deployment or service
    except WindowsAzureError, e:
    except AzureException, e:
        module.fail_json(msg="failed to find the deployment, error was: %s" % str(e))

    # Delete deployment

@@ -468,13 +475,13 @@ def terminate_virtual_machine(module, azure):
                role_props = azure.get_role(name, deployment.name, role.role_name)
                if role_props.os_virtual_hard_disk.disk_name not in disk_names:
                    disk_names.append(role_props.os_virtual_hard_disk.disk_name)
        except WindowsAzureError, e:
        except AzureException, e:
            module.fail_json(msg="failed to get the role %s, error was: %s" % (role.role_name, str(e)))

        try:
            result = azure.delete_deployment(name, deployment.name)
            _wait_for_completion(azure, result, wait_timeout, "delete_deployment")
        except WindowsAzureError, e:
        except AzureException, e:
            module.fail_json(msg="failed to delete the deployment %s, error was: %s" % (deployment.name, str(e)))

        # It's unclear when disks associated with terminated deployment get detached.

@@ -482,14 +489,14 @@ def terminate_virtual_machine(module, azure):
        # become detached by polling the list of remaining disks and examining the state.
        try:
            _delete_disks_when_detached(azure, wait_timeout, disk_names)
        except (WindowsAzureError, TimeoutError), e:
        except (AzureException, TimeoutError), e:
            module.fail_json(msg=str(e))

        try:
            # Now that the vm is deleted, remove the cloud service
            result = azure.delete_hosted_service(service_name=name)
            _wait_for_completion(azure, result, wait_timeout, "delete_hosted_service")
        except WindowsAzureError, e:
        except AzureException, e:
            module.fail_json(msg="failed to delete the service %s, error was: %s" % (name, str(e)))
        public_dns_name = urlparse(deployment.url).hostname

@@ -545,7 +552,8 @@ def main():
    subscription_id, management_cert_path = get_azure_creds(module)

    wait_timeout_redirects = int(module.params.get('wait_timeout_redirects'))
    if LooseVersion(windows_azure.__version__) <= "0.8.0":

    if hasattr(windows_azure, '__version__') and LooseVersion(windows_azure.__version__) <= "0.8.0":
        # wrapper for handling redirects which the sdk <= 0.8.0 is not following
        azure = Wrapper(ServiceManagementService(subscription_id, management_cert_path), wait_timeout_redirects)
    else:

@@ -597,7 +605,7 @@ class Wrapper(object):
        while wait_timeout > time.time():
            try:
                return f()
            except WindowsAzureError, e:
            except AzureException, e:
                if not str(e).lower().find("temporary redirect") == -1:
                    time.sleep(5)
                    pass
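The import shim above aliases both generations of the azure SDK's exception names to AzureException/AzureMissingException, so the rest of the module stays version-agnostic. A minimal sketch of the same technique, using a hypothetical package named somesdk (the names here are illustrative, not a real library):

from distutils.version import LooseVersion

try:
    import somesdk  # hypothetical SDK whose exception names moved between releases
    if LooseVersion(getattr(somesdk, '__version__', '0')) <= LooseVersion('0.11.1'):
        from somesdk import OldError as SDKError            # pre-0.11.1 name
    else:
        from somesdk.common import NewError as SDKError     # current name
    HAS_SDK = True
except ImportError:
    HAS_SDK = False

# Downstream code catches SDKError everywhere, regardless of installed version.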
@@ -97,9 +97,12 @@ options:
      - You can specify a different logging driver for the container than for the daemon.
        "json-file" Default logging driver for Docker. Writes JSON messages to file.
        docker logs command is available only for this logging driver.
        "none" disables any logging for the container. docker logs won't be available with this driver.
        "none" disables any logging for the container.
        "syslog" Syslog logging driver for Docker. Writes log messages to syslog.
        docker logs command is not available for this logging driver.
        "journald" Journald logging driver for Docker. Writes log messages to "journald".
        "gelf" Graylog Extended Log Format (GELF) logging driver for Docker. Writes log messages to a GELF endpoint like Graylog or Logstash.
        "fluentd" Fluentd logging driver for Docker. Writes log messages to "fluentd" (forward input).
        If not defined explicitly, the Docker daemon's default ("json-file") will apply.
        Requires docker >= 1.6.0.
    required: false

@@ -108,11 +111,14 @@ options:
      - json-file
      - none
      - syslog
      - journald
      - gelf
      - fluentd
    version_added: "2.0"
  log_opt:
    description:
      - Additional options to pass to the logging driver selected above. See Docker log-driver
        documentation for more information (https://docs.docker.com/reference/logging/overview/).
      - Additional options to pass to the logging driver selected above. See Docker `log-driver
        <https://docs.docker.com/reference/logging/overview/>` documentation for more information.
        Requires docker >= 1.7.0.
    required: false
    default: null

@@ -1056,11 +1062,11 @@ class DockerManager(object):
                continue

            # EXPOSED PORTS
            expected_exposed_ports = set((image['ContainerConfig']['ExposedPorts'] or {}).keys())
            expected_exposed_ports = set((image['ContainerConfig'].get('ExposedPorts') or {}).keys())
            for p in (self.exposed_ports or []):
                expected_exposed_ports.add("/".join(p))

            actually_exposed_ports = set((container["Config"]["ExposedPorts"] or {}).keys())
            actually_exposed_ports = set((container["Config"].get("ExposedPorts") or {}).keys())

            if actually_exposed_ports != expected_exposed_ports:
                self.reload_reasons.append('exposed_ports ({0} => {1})'.format(actually_exposed_ports, expected_exposed_ports))

@@ -1386,6 +1392,11 @@ class DockerManager(object):
        changes = list(self.client.pull(image, tag=tag, stream=True, **extra_params))
        try:
            last = changes[-1]
            # seems Docker 1.8 puts an empty dict at the end of the
            # stream; catch that and get the previous instead
            # https://github.com/ansible/ansible-modules-core/issues/2043
            if last.strip() == '{}':
                last = changes[-2]
        except IndexError:
            last = '{}'
        status = json.loads(last).get('status', '')

@@ -1662,7 +1673,7 @@ def main():
        net = dict(default=None),
        pid = dict(default=None),
        insecure_registry = dict(default=False, type='bool'),
        log_driver = dict(default=None, choices=['json-file', 'none', 'syslog']),
        log_driver = dict(default=None, choices=['json-file', 'none', 'syslog', 'journald', 'gelf', 'fluentd']),
        log_opt = dict(default=None, type='dict'),
        cpu_set = dict(default=None),
        cap_add = dict(default=None, type='list'),
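The pull-status fix guards against Docker 1.8 appending an empty JSON object to the end of the pull stream. A standalone sketch of the same defensive parse (the input strings are illustrative):

import json

def last_pull_status(changes):
    """Return the status from the last meaningful chunk of a docker pull stream."""
    try:
        last = changes[-1]
        if last.strip() == '{}':   # Docker 1.8 trailing empty dict
            last = changes[-2]
    except IndexError:
        last = '{}'                # empty stream: fall back to no status
    return json.loads(last).get('status', '')

print(last_pull_status(['{"status": "Downloaded newer image"}', '{}']))
# -> Downloaded newer image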
@@ -21,6 +21,7 @@ DOCUMENTATION = '''
---
module: keystone_user
version_added: "1.2"
deprecated: Deprecated in 2.0. Use os_user instead
short_description: Manage OpenStack Identity (keystone) users, tenants and roles
description:
  - Manage users, tenants, roles from OpenStack.
@@ -75,12 +75,12 @@ options:
    required: false
    default: None
  ramdisk:
    descrption:
    description:
      - The name of an existing ramdisk image that will be associated with this image
    required: false
    default: None
  kernel:
    descrption:
    description:
      - The name of an existing kernel image that will be associated with this image
    required: false
    default: None

@@ -154,7 +154,8 @@ def main():
            disk_format=module.params['disk_format'],
            container_format=module.params['container_format'],
            wait=module.params['wait'],
            timeout=module.params['timeout']
            timeout=module.params['timeout'],
            is_public=module.params['is_public'],
        )
        changed = True
        if not module.params['wait']:
@@ -22,7 +22,6 @@ except ImportError:
    HAS_SHADE = False

DOCUMENTATION = '''
---
module: os_image_facts
short_description: Retrieve facts about an image within OpenStack.
version_added: "2.0"

@@ -55,77 +54,79 @@ EXAMPLES = '''
'''

RETURN = '''
This module registers image details in facts named: openstack_image. When
image is not found, openstack_image will be null.

id:
    description: Unique UUID.
    returned: success
    type: string
name:
    description: Name given to the image.
    returned: success
    type: string
status:
    description: Image status.
    returned: success
    type: string
created_at:
    description: Image created at timestamp.
    returned: success
    type: string
deleted:
    description: Image deleted flag.
    returned: success
    type: boolean
container_format:
    description: Container format of the image.
    returned: success
    type: string
min_ram:
    description: Min amount of RAM required for this image.
    returned: success
    type: int
disk_format:
    description: Disk format of the image.
    returned: success
    type: string
updated_at:
    description: Image updated at timestamp.
    returned: success
    type: string
properties:
    description: Additional properties associated with the image.
    returned: success
    type: dict
min_disk:
    description: Min amount of disk space required for this image.
    returned: success
    type: int
protected:
    description: Image protected flag.
    returned: success
    type: boolean
checksum:
    description: Checksum for the image.
    returned: success
    type: string
owner:
    description: Owner for the image.
    returned: success
    type: string
is_public:
    description: Is public flag of the image.
    returned: success
    type: boolean
deleted_at:
    description: Image deleted at timestamp.
    returned: success
    type: string
size:
    description: Size of the image.
    returned: success
    type: int
openstack_image:
    description: has all the openstack facts about the image
    returned: always, but can be null
    type: complex
    contains:
        id:
            description: Unique UUID.
            returned: success
            type: string
        name:
            description: Name given to the image.
            returned: success
            type: string
        status:
            description: Image status.
            returned: success
            type: string
        created_at:
            description: Image created at timestamp.
            returned: success
            type: string
        deleted:
            description: Image deleted flag.
            returned: success
            type: boolean
        container_format:
            description: Container format of the image.
            returned: success
            type: string
        min_ram:
            description: Min amount of RAM required for this image.
            returned: success
            type: int
        disk_format:
            description: Disk format of the image.
            returned: success
            type: string
        updated_at:
            description: Image updated at timestamp.
            returned: success
            type: string
        properties:
            description: Additional properties associated with the image.
            returned: success
            type: dict
        min_disk:
            description: Min amount of disk space required for this image.
            returned: success
            type: int
        protected:
            description: Image protected flag.
            returned: success
            type: boolean
        checksum:
            description: Checksum for the image.
            returned: success
            type: string
        owner:
            description: Owner for the image.
            returned: success
            type: string
        is_public:
            description: Is public flag of the image.
            returned: success
            type: boolean
        deleted_at:
            description: Image deleted at timestamp.
            returned: success
            type: string
        size:
            description: Size of the image.
            returned: success
            type: int
'''
@@ -146,10 +146,14 @@ def main():
                " as offered. Delete key first." % name
            )
        else:
            module.exit_json(changed=False, key=keypair)
            changed = False
    else:
        keypair = cloud.create_keypair(name, public_key)
        changed = True

        new_key = cloud.create_keypair(name, public_key)
        module.exit_json(changed=True, key=new_key)
        module.exit_json(changed=changed,
                         key=keypair,
                         id=keypair['id'])

    elif state == 'absent':
        if keypair:
@@ -146,7 +146,10 @@ def main():
    if state == 'present':
        if not net:
            net = cloud.create_network(name, shared, admin_state_up, external)
        module.exit_json(changed=False, network=net, id=net['id'])
            changed = True
        else:
            changed = False
        module.exit_json(changed=changed, network=net, id=net['id'])

    elif state == 'absent':
        if not net:
@@ -82,33 +82,35 @@ EXAMPLES = '''
'''

RETURN = '''
This module registers network details in facts named: openstack_networks. If a
network name/id and/or filter does not result in a network found, an empty
list is set in openstack_networks.
id:
    description: Unique UUID.
    returned: success
    type: string
name:
    description: Name given to the network.
    returned: success
    type: string
status:
    description: Network status.
    returned: success
    type: string
subnets:
    description: Subnet(s) included in this network.
    returned: success
    type: list of strings
tenant_id:
    description: Tenant id associated with this network.
    returned: success
    type: string
shared:
    description: Network shared flag.
    returned: success
    type: boolean
openstack_networks:
    description: has all the openstack facts about the networks
    returned: always, but can be null
    type: complex
    contains:
        id:
            description: Unique UUID.
            returned: success
            type: string
        name:
            description: Name given to the network.
            returned: success
            type: string
        status:
            description: Network status.
            returned: success
            type: string
        subnets:
            description: Subnet(s) included in this network.
            returned: success
            type: list of strings
        tenant_id:
            description: Tenant id associated with this network.
            returned: success
            type: string
        shared:
            description: Network shared flag.
            returned: success
            type: boolean
'''

def main():
@@ -217,8 +217,13 @@ def main():
                rxtx_factor=module.params['rxtx_factor'],
                is_public=module.params['is_public']
            )
            module.exit_json(changed=True, flavor=flavor)
        module.exit_json(changed=False, flavor=flavor)
            changed=True
        else:
            changed=False

        module.exit_json(changed=changed,
                         flavor=flavor,
                         id=flavor['id'])

    elif state == 'absent':
        if flavor:
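os_keypair, os_network and os_nova_flavor converge on the same shape here: compute a changed flag instead of exiting from several branches, and always expose a top-level id next to the full resource. A condensed sketch of the pattern (exit_json is a stand-in for module.exit_json):

def exit_json(**kwargs):
    # stand-in for module.exit_json
    print(kwargs)

def ensure_present(existing, create):
    """Single exit point: create only when missing, always expose a top-level id."""
    if existing is None:
        resource = create()
        changed = True
    else:
        resource = existing
        changed = False
    exit_json(changed=changed, resource=resource, id=resource['id'])

ensure_present(None, lambda: {'id': 'abc-123', 'name': 'demo'})
# {'changed': True, 'resource': {'id': 'abc-123', 'name': 'demo'}, 'id': 'abc-123'}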
cloud/openstack/os_port.py (new file, 392 lines)

@@ -0,0 +1,392 @@
#!/usr/bin/python

# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.

try:
    import shade
    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False


DOCUMENTATION = '''
---
module: os_port
short_description: Add/Update/Delete ports from an OpenStack cloud.
extends_documentation_fragment: openstack
author: "Davide Agnello (@dagnello)"
version_added: "2.0"
description:
  - Add, Update or Remove ports from an OpenStack cloud. state=present
    will ensure the port is created or updated if required.
options:
  network:
    description:
      - Network ID or name this port belongs to.
    required: true
  name:
    description:
      - Name that has to be given to the port.
    required: false
    default: None
  fixed_ips:
    description:
      - Desired IP and/or subnet for this port. Subnet is referenced by
        subnet_id and IP is referenced by ip_address.
    required: false
    default: None
  admin_state_up:
    description:
      - Sets admin state.
    required: false
    default: None
  mac_address:
    description:
      - MAC address of this port.
    required: false
    default: None
  security_groups:
    description:
      - Security group(s) ID(s) or name(s) associated with the port (comma
        separated string or YAML list)
    required: false
    default: None
  no_security_groups:
    description:
      - Do not associate a security group with this port.
    required: false
    default: False
  allowed_address_pairs:
    description:
      - "Allowed address pairs list. Allowed address pairs are supported with
        dictionary structure.
        e.g. allowed_address_pairs:
               - ip_address: 10.1.0.12
                 mac_address: ab:cd:ef:12:34:56
               - ip_address: ..."
    required: false
    default: None
  extra_dhcp_opt:
    description:
      - "Extra dhcp options to be assigned to this port. Extra options are
        supported with dictionary structure.
        e.g. extra_dhcp_opt:
               - opt_name: opt name1
                 opt_value: value1
               - opt_name: ..."
    required: false
    default: None
  device_owner:
    description:
      - The ID of the entity that uses this port.
    required: false
    default: None
  device_id:
    description:
      - Device ID of device using this port.
    required: false
    default: None
  state:
    description:
      - Should the resource be present or absent.
    choices: [present, absent]
    default: present
'''

EXAMPLES = '''
# Create a port
- os_port:
    state: present
    auth:
      auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
      username: admin
      password: admin
      project_name: admin
    name: port1
    network: foo

# Create a port with a static IP
- os_port:
    state: present
    auth:
      auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
      username: admin
      password: admin
      project_name: admin
    name: port1
    network: foo
    fixed_ips:
      - ip_address: 10.1.0.21

# Create a port with No security groups
- os_port:
    state: present
    auth:
      auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
      username: admin
      password: admin
      project_name: admin
    name: port1
    network: foo
    no_security_groups: True

# Update the existing 'port1' port with multiple security groups (version 1)
- os_port:
    state: present
    auth:
      auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/d
      username: admin
      password: admin
      project_name: admin
    name: port1
    security_groups: 1496e8c7-4918-482a-9172-f4f00fc4a3a5,057d4bdf-6d4d-472...

# Update the existing 'port1' port with multiple security groups (version 2)
- os_port:
    state: present
    auth:
      auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/d
      username: admin
      password: admin
      project_name: admin
    name: port1
    security_groups:
      - 1496e8c7-4918-482a-9172-f4f00fc4a3a5
      - 057d4bdf-6d4d-472...
'''

RETURN = '''
id:
    description: Unique UUID.
    returned: success
    type: string
name:
    description: Name given to the port.
    returned: success
    type: string
network_id:
    description: Network ID this port belongs in.
    returned: success
    type: string
security_groups:
    description: Security group(s) associated with this port.
    returned: success
    type: list of strings
status:
    description: Port's status.
    returned: success
    type: string
fixed_ips:
    description: Fixed ip(s) associated with this port.
    returned: success
    type: list of dicts
tenant_id:
    description: Tenant id associated with this port.
    returned: success
    type: string
allowed_address_pairs:
    description: Allowed address pairs with this port.
    returned: success
    type: list of dicts
admin_state_up:
    description: Admin state up flag for this port.
    returned: success
    type: bool
'''


def _needs_update(module, port, cloud):
    """Check for differences in the updatable values.

    NOTE: We don't currently allow name updates.
    """
    compare_simple = ['admin_state_up',
                      'mac_address',
                      'device_owner',
                      'device_id']
    compare_dict = ['allowed_address_pairs',
                    'extra_dhcp_opt']
    compare_list = ['security_groups']

    for key in compare_simple:
        if module.params[key] is not None and module.params[key] != port[key]:
            return True
    for key in compare_dict:
        if module.params[key] is not None and cmp(module.params[key],
                                                  port[key]) != 0:
            return True
    for key in compare_list:
        if module.params[key] is not None and (set(module.params[key]) !=
                                               set(port[key])):
            return True

    # NOTE: if port was created or updated with 'no_security_groups=True',
    # subsequent updates without 'no_security_groups' flag or
    # 'no_security_groups=False' and no specified 'security_groups', will not
    # result in an update to the port where the default security group is
    # applied.
    if module.params['no_security_groups'] and port['security_groups'] != []:
        return True

    if module.params['fixed_ips'] is not None:
        for item in module.params['fixed_ips']:
            if 'ip_address' in item:
                # if ip_address in request does not match any in existing port,
                # update is required.
                if not any(match['ip_address'] == item['ip_address']
                           for match in port['fixed_ips']):
                    return True
            if 'subnet_id' in item:
                return True
        for item in port['fixed_ips']:
            # if ip_address in existing port does not match any in request,
            # update is required.
            if not any(match.get('ip_address') == item['ip_address']
                       for match in module.params['fixed_ips']):
                return True

    return False
|
||||
|
||||
def _system_state_change(module, port, cloud):
|
||||
state = module.params['state']
|
||||
if state == 'present':
|
||||
if not port:
|
||||
return True
|
||||
return _needs_update(module, port, cloud)
|
||||
if state == 'absent' and port:
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _compose_port_args(module, cloud):
|
||||
port_kwargs = {}
|
||||
optional_parameters = ['name',
|
||||
'fixed_ips',
|
||||
'admin_state_up',
|
||||
'mac_address',
|
||||
'security_groups',
|
||||
'allowed_address_pairs',
|
||||
'extra_dhcp_opt',
|
||||
'device_owner',
|
||||
'device_id']
|
||||
for optional_param in optional_parameters:
|
||||
if module.params[optional_param] is not None:
|
||||
port_kwargs[optional_param] = module.params[optional_param]
|
||||
|
||||
if module.params['no_security_groups']:
|
||||
port_kwargs['security_groups'] = []
|
||||
|
||||
return port_kwargs
|
||||
|
||||
|
||||
def get_security_group_id(module, cloud, security_group_name_or_id):
|
||||
security_group = cloud.get_security_group(security_group_name_or_id)
|
||||
if not security_group:
|
||||
module.fail_json(msg="Security group: %s, was not found"
|
||||
% security_group_name_or_id)
|
||||
return security_group['id']
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = openstack_full_argument_spec(
|
||||
network=dict(required=False),
|
||||
name=dict(required=False),
|
||||
fixed_ips=dict(default=None),
|
||||
admin_state_up=dict(default=None),
|
||||
mac_address=dict(default=None),
|
||||
security_groups=dict(default=None, type='list'),
|
||||
no_security_groups=dict(default=False, type='bool'),
|
||||
allowed_address_pairs=dict(default=None),
|
||||
extra_dhcp_opt=dict(default=None),
|
||||
device_owner=dict(default=None),
|
||||
device_id=dict(default=None),
|
||||
state=dict(default='present', choices=['absent', 'present']),
|
||||
)
|
||||
|
||||
module_kwargs = openstack_module_kwargs(
|
||||
mutually_exclusive=[
|
||||
['no_security_groups', 'security_groups'],
|
||||
]
|
||||
)
|
||||
|
||||
module = AnsibleModule(argument_spec,
|
||||
supports_check_mode=True,
|
||||
**module_kwargs)
|
||||
|
||||
if not HAS_SHADE:
|
||||
module.fail_json(msg='shade is required for this module')
|
||||
name = module.params['name']
|
||||
state = module.params['state']
|
||||
|
||||
try:
|
||||
cloud = shade.openstack_cloud(**module.params)
|
||||
if module.params['security_groups']:
|
||||
# translate security_groups to UUID's if names where provided
|
||||
module.params['security_groups'] = [
|
||||
get_security_group_id(module, cloud, v)
|
||||
for v in module.params['security_groups']
|
||||
]
|
||||
|
||||
port = None
|
||||
network_id = None
|
||||
if name:
|
||||
port = cloud.get_port(name)
|
||||
|
||||
if module.check_mode:
|
||||
module.exit_json(changed=_system_state_change(module, port, cloud))
|
||||
|
||||
changed = False
|
||||
if state == 'present':
|
||||
if not port:
|
||||
network = module.params['network']
|
||||
if not network:
|
||||
module.fail_json(
|
||||
msg="Parameter 'network' is required in Port Create"
|
||||
)
|
||||
port_kwargs = _compose_port_args(module, cloud)
|
||||
network_object = cloud.get_network(network)
|
||||
|
||||
if network_object:
|
||||
network_id = network_object['id']
|
||||
else:
|
||||
module.fail_json(
|
||||
msg="Specified network was not found."
|
||||
)
|
||||
|
||||
port = cloud.create_port(network_id, **port_kwargs)
|
||||
changed = True
|
||||
else:
|
||||
if _needs_update(module, port, cloud):
|
||||
port_kwargs = _compose_port_args(module, cloud)
|
||||
port = cloud.update_port(port['id'], **port_kwargs)
|
||||
changed = True
|
||||
module.exit_json(changed=changed, id=port['id'], port=port)
|
||||
|
||||
if state == 'absent':
|
||||
if port:
|
||||
cloud.delete_port(port['id'])
|
||||
changed = True
|
||||
module.exit_json(changed=changed)
|
||||
|
||||
except shade.OpenStackCloudException as e:
|
||||
module.fail_json(msg=e.message)
|
||||
|
||||
# this is magic, see lib/ansible/module_common.py
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.openstack import *
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
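# --- Illustrative sketch (not part of this commit): the fixed_ips check in
# _needs_update above is a two-way membership test. Values are hypothetical.
requested = [{'ip_address': '10.1.0.21'}]
existing = [{'ip_address': '10.1.0.42', 'subnet_id': 'f0fa6d82'}]

missing_from_port = any(
    not any(e['ip_address'] == r['ip_address'] for e in existing)
    for r in requested if 'ip_address' in r)
extra_on_port = any(
    not any(r.get('ip_address') == e['ip_address'] for r in requested)
    for e in existing)
print(missing_from_port or extra_on_port)  # True -> update_port would be called
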
@@ -58,12 +58,17 @@ options:
     required: true when I(interfaces) or I(enable_snat) are provided,
       false otherwise.
     default: None
   external_fixed_ips:
     description:
        - The IP address parameters for the external gateway network. Each
          is a dictionary with the subnet name or ID (subnet) and the IP
          address to assign on the subnet (ip). If no IP is specified,
          one is automatically assigned from that subnet.
     required: false
     default: None
   interfaces:
     description:
        - List of subnets to attach to the router. Each is a dictionary with
          the subnet name or ID (subnet) and the IP address to assign on that
          subnet (ip). If no IP is specified, one is automatically assigned from
          that subnet.
        - List of subnets to attach to the router internal interface.
     required: false
     default: None
requirements: ["shade"]
@@ -76,28 +81,32 @@ EXAMPLES = '''
    state: present
    name: simple_router

# Creates a router attached to ext_network1 and one subnet interface.
# An IP address from subnet1's IP range will automatically be assigned
# to that interface.
# Creates a router attached to ext_network1 on an IPv4 subnet and one
# internal subnet interface.
- os_router:
    cloud: mycloud
    state: present
    name: router1
    network: ext_network1
    external_fixed_ips:
      - subnet: public-subnet
        ip: 172.24.4.2
    interfaces:
      - subnet: subnet1
      - private-subnet

# Update existing router1 to include subnet2 (10.5.5.0/24), specifying
# the IP address within subnet2's IP range we'd like for that interface.
# Update existing router1 external gateway to include the IPv6 subnet.
# Note that since 'interfaces' is not provided, any existing internal
# interfaces on an existing router will be left intact.
- os_router:
    cloud: mycloud
    state: present
    name: router1
    network: ext_network1
    interfaces:
      - subnet: subnet1
      - subnet: subnet2
        ip: 10.5.5.1
    external_fixed_ips:
      - subnet: public-subnet
        ip: 172.24.4.2
      - subnet: ipv6-public-subnet
        ip: 2001:db8::3

# Delete router1
- os_router:
@@ -150,44 +159,57 @@ router:
'''


def _needs_update(cloud, module, router, network):
def _needs_update(cloud, module, router, network, internal_subnet_ids):
    """Decide if the given router needs an update.
    """
    if router['admin_state_up'] != module.params['admin_state_up']:
        return True
    if router['external_gateway_info']['enable_snat'] != module.params['enable_snat']:
        return True
    if router['external_gateway_info']:
        if router['external_gateway_info'].get('enable_snat', True) != module.params['enable_snat']:
            return True
    if network:
        if router['external_gateway_info']['network_id'] != network['id']:
        if not router['external_gateway_info']:
            return True
        elif router['external_gateway_info']['network_id'] != network['id']:
            return True

    # check subnet interfaces
    for new_iface in module.params['interfaces']:
        subnet = cloud.get_subnet(new_iface['subnet'])
        if not subnet:
            module.fail_json(msg='subnet %s not found' % new_iface['subnet'])
        exists = False
    # check external interfaces
    if module.params['external_fixed_ips']:
        for new_iface in module.params['external_fixed_ips']:
            subnet = cloud.get_subnet(new_iface['subnet'])
            exists = False

        # compare the requested interface with existing, looking for an existing match
        for existing_iface in router['external_gateway_info']['external_fixed_ips']:
            if existing_iface['subnet_id'] == subnet['id']:
                if 'ip' in new_iface:
                    if existing_iface['ip_address'] == new_iface['ip']:
                        # both subnet id and ip address match
            # compare the requested interface with existing, looking for an existing match
            for existing_iface in router['external_gateway_info']['external_fixed_ips']:
                if existing_iface['subnet_id'] == subnet['id']:
                    if 'ip' in new_iface:
                        if existing_iface['ip_address'] == new_iface['ip']:
                            # both subnet id and ip address match
                            exists = True
                            break
                    else:
                        # only the subnet was given, so ip doesn't matter
                        exists = True
                        break
                else:
                    # only the subnet was given, so ip doesn't matter
                    exists = True
                    break

        # this interface isn't present on the existing router
        if not exists:
            # this interface isn't present on the existing router
            if not exists:
                return True

    # check internal interfaces
    if module.params['interfaces']:
        existing_subnet_ids = []
        for port in cloud.list_router_interfaces(router, 'internal'):
            if 'fixed_ips' in port:
                for fixed_ip in port['fixed_ips']:
                    existing_subnet_ids.append(fixed_ip['subnet_id'])

        if set(internal_subnet_ids) != set(existing_subnet_ids):
            return True

    return False

def _system_state_change(cloud, module, router, network):
def _system_state_change(cloud, module, router, network, internal_ids):
    """Check if the system state would be changed."""
    state = module.params['state']
    if state == 'absent' and router:
@@ -195,7 +217,7 @@ def _system_state_change(cloud, module, router, network):
    if state == 'present':
        if not router:
            return True
        return _needs_update(cloud, module, router, network)
        return _needs_update(cloud, module, router, network, internal_ids)
    return False

def _build_kwargs(cloud, module, router, network):
@@ -213,12 +235,10 @@ def _build_kwargs(cloud, module, router, network):
            # can't send enable_snat unless we have a network
            kwargs['enable_snat'] = module.params['enable_snat']

    if module.params['interfaces']:
    if module.params['external_fixed_ips']:
        kwargs['ext_fixed_ips'] = []
        for iface in module.params['interfaces']:
        for iface in module.params['external_fixed_ips']:
            subnet = cloud.get_subnet(iface['subnet'])
            if not subnet:
                module.fail_json(msg='subnet %s not found' % iface['subnet'])
            d = {'subnet_id': subnet['id']}
            if 'ip' in iface:
                d['ip_address'] = iface['ip']
@@ -226,6 +246,25 @@ def _build_kwargs(cloud, module, router, network):

    return kwargs

def _validate_subnets(module, cloud):
    external_subnet_ids = []
    internal_subnet_ids = []
    if module.params['external_fixed_ips']:
        for iface in module.params['external_fixed_ips']:
            subnet = cloud.get_subnet(iface['subnet'])
            if not subnet:
                module.fail_json(msg='subnet %s not found' % iface['subnet'])
            external_subnet_ids.append(subnet['id'])

    if module.params['interfaces']:
        for iface in module.params['interfaces']:
            subnet = cloud.get_subnet(iface)
            if not subnet:
                module.fail_json(msg='subnet %s not found' % iface)
            internal_subnet_ids.append(subnet['id'])

    return (external_subnet_ids, internal_subnet_ids)

def main():
    argument_spec = openstack_full_argument_spec(
        state=dict(default='present', choices=['absent', 'present']),
@@ -233,7 +272,8 @@ def main():
        admin_state_up=dict(type='bool', default=True),
        enable_snat=dict(type='bool', default=True),
        network=dict(default=None),
        interfaces=dict(type='list', default=None)
        interfaces=dict(type='list', default=None),
        external_fixed_ips=dict(type='list', default=None),
    )

    module_kwargs = openstack_module_kwargs()
@@ -248,8 +288,8 @@ def main():
    name = module.params['name']
    network = module.params['network']

    if module.params['interfaces'] and not network:
        module.fail_json(msg='network is required when supplying interfaces')
    if module.params['external_fixed_ips'] and not network:
        module.fail_json(msg='network is required when supplying external_fixed_ips')

    try:
        cloud = shade.openstack_cloud(**module.params)
@@ -261,9 +301,13 @@ def main():
        if not net:
            module.fail_json(msg='network %s not found' % network)

        # Validate and cache the subnet IDs so we can avoid duplicate checks
        # and expensive API calls.
        external_ids, internal_ids = _validate_subnets(module, cloud)

        if module.check_mode:
            module.exit_json(
                changed=_system_state_change(cloud, module, router, net)
                changed=_system_state_change(cloud, module, router, net, internal_ids)
            )

        if state == 'present':
@@ -272,19 +316,38 @@ def main():
            if not router:
                kwargs = _build_kwargs(cloud, module, router, net)
                router = cloud.create_router(**kwargs)
                for internal_subnet_id in internal_ids:
                    cloud.add_router_interface(router, subnet_id=internal_subnet_id)
                changed = True
            else:
                if _needs_update(cloud, module, router, net):
                if _needs_update(cloud, module, router, net, internal_ids):
                    kwargs = _build_kwargs(cloud, module, router, net)
                    router = cloud.update_router(**kwargs)

                    # On a router update, if any internal interfaces were supplied,
                    # just detach all existing internal interfaces and attach the new.
                    if internal_ids:
                        ports = cloud.list_router_interfaces(router, 'internal')
                        for port in ports:
                            cloud.remove_router_interface(router, port_id=port['id'])
                        for internal_subnet_id in internal_ids:
                            cloud.add_router_interface(router, subnet_id=internal_subnet_id)

                    changed = True

            module.exit_json(changed=changed, router=router)
            module.exit_json(changed=changed,
                             router=router,
                             id=router['id'])

        elif state == 'absent':
            if not router:
                module.exit_json(changed=False)
            else:
                # We need to detach all internal interfaces on a router before
                # we will be allowed to delete it.
                ports = cloud.list_router_interfaces(router, 'internal')
                for port in ports:
                    cloud.remove_router_interface(router, port_id=port['id'])
                cloud.delete_router(name)
                module.exit_json(changed=True)

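# --- Illustrative sketch (not part of this commit): internal router interfaces
# above are compared as sets of subnet IDs, so ordering does not matter.
# IDs below are hypothetical.
internal_subnet_ids = ['subnet-a', 'subnet-b']
existing_subnet_ids = ['subnet-b', 'subnet-a']
print(set(internal_subnet_ids) != set(existing_subnet_ids))  # False -> no update needed
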
@@ -91,7 +91,7 @@ def _system_state_change(module, secgroup):
def main():
    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        description=dict(default=None),
        description=dict(default=''),
        state=dict(default='present', choices=['absent', 'present']),
    )

@@ -76,7 +76,8 @@ options:
     default: None
   security_groups:
     description:
       - The name of the security group to which the instance should be added
       - Names of the security groups to which the instance should be
         added. This may be a YAML list or a comma separated string.
     required: false
     default: None
   nics:
@@ -84,20 +85,16 @@ options:
       - A list of networks to which the instance's interface should
         be attached. Networks may be referenced by net-id/net-name/port-id
         or port-name.
         Also this accepts a string containing a list of net-id/port-id.
         Eg: nics: "net-id=uuid-1,net-id=uuid-2"
       - 'Also this accepts a string containing a list of (net/port)-(id/name)
         Eg: nics: "net-id=uuid-1,port-name=myport"'
     required: false
     default: None
   public_ip:
   auto_ip:
     description:
       - Ensure instance has public ip however the cloud wants to do that
     required: false
     default: 'yes'
   auto_floating_ip:
     description:
       - If the module should automatically assign a floating IP
     required: false
     default: 'yes'
     aliases: ['auto_floating_ip', 'public_ip']
   floating_ips:
     description:
       - list of valid floating IPs that pre-exist to assign to this node
@@ -110,8 +107,9 @@ options:
     default: None
   meta:
     description:
       - A list of key value pairs that should be provided as a metadata to
         the new instance.
       - 'A list of key value pairs that should be provided as a metadata to
         the new instance or a string containing a list of key-value pairs.
         Eg: meta: "key1=value1,key2=value2"'
     required: false
     default: None
   wait:
@@ -197,7 +195,7 @@ EXAMPLES = '''
    timeout: 200
    flavor: 101
    security_groups: default
    auto_floating_ip: yes
    auto_ip: yes

# Creates a new instance in named cloud mordred availability zone az2
# and assigns a pre-known floating IP
@@ -263,6 +261,25 @@ EXAMPLES = '''
    timeout: 200
    flavor: 4
    nics: "net-id=4cb08b20-62fe-11e5-9d70-feff819cdc9f,net-id=542f0430-62fe-11e5-9d70-feff819cdc9f..."

# Creates a new instance and attaches to a network and passes metadata to
# the instance
- os_server:
    state: present
    auth:
      auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
      username: admin
      password: admin
      project_name: admin
    name: vm1
    image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
    key_name: ansible_key
    timeout: 200
    flavor: 4
    nics:
      - net-id: 34605f38-e52a-25d2-b6ec-754a13ffb723
      - net-name: another_network
    meta: "hostname=test1,group=uge_master"
'''
@@ -272,35 +289,37 @@ def _exit_hostvars(module, cloud, server, changed=True):
        changed=changed, server=server, id=server.id, openstack=hostvars)


def _parse_nics(nics):
    for net in nics:
        if type(net) == str:
            for nic in net.split(','):
                yield dict((nic.split('='),))
        else:
            yield net

def _network_args(module, cloud):
    args = []
    nics = module.params['nics']
    if type(nics) == str:
        for kv_str in nics.split(","):
            nic = {}
            k, v = kv_str.split("=")
            nic[k] = v
            args.append(nic)
    else:
        for net in module.params['nics']:
            if net.get('net-id'):
                args.append(net)
            elif net.get('net-name'):
                by_name = cloud.get_network(net['net-name'])
                if not by_name:
                    module.fail_json(
                        msg='Could not find network by net-name: %s' %
                        net['net-name'])
                args.append({'net-id': by_name['id']})
            elif net.get('port-id'):
                args.append(net)
            elif net.get('port-name'):
                by_name = cloud.get_port(net['port-name'])
                if not by_name:
                    module.fail_json(
                        msg='Could not find port by port-name: %s' %
                        net['port-name'])
                args.append({'port-id': by_name['id']})

    for net in _parse_nics(nics):
        if net.get('net-id'):
            args.append(net)
        elif net.get('net-name'):
            by_name = cloud.get_network(net['net-name'])
            if not by_name:
                module.fail_json(
                    msg='Could not find network by net-name: %s' %
                    net['net-name'])
            args.append({'net-id': by_name['id']})
        elif net.get('port-id'):
            args.append(net)
        elif net.get('port-name'):
            by_name = cloud.get_port(net['port-name'])
            if not by_name:
                module.fail_json(
                    msg='Could not find port by port-name: %s' %
                    net['port-name'])
            args.append({'port-id': by_name['id']})
    return args

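# --- Illustrative sketch (not part of this commit): what _parse_nics yields for
# the mixed string form documented above. The UUID and port name are hypothetical.
def _parse_nics(nics):
    for net in nics:
        if type(net) == str:
            for nic in net.split(','):
                yield dict((nic.split('='),))
        else:
            yield net

print(list(_parse_nics(["net-id=uuid-1,port-name=myport"])))
# -> [{'net-id': 'uuid-1'}, {'port-name': 'myport'}]
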
@@ -335,13 +354,20 @@ def _create_server(module, cloud):

    nics = _network_args(module, cloud)

    if type(module.params['meta']) is str:
        metas = {}
        for kv_str in module.params['meta'].split(","):
            k, v = kv_str.split("=")
            metas[k] = v
        module.params['meta'] = metas

    bootkwargs = dict(
        name=module.params['name'],
        image=image_id,
        flavor=flavor_dict['id'],
        nics=nics,
        meta=module.params['meta'],
        security_groups=module.params['security_groups'].split(','),
        security_groups=module.params['security_groups'],
        userdata=module.params['userdata'],
        config_drive=module.params['config_drive'],
    )
@@ -352,7 +378,7 @@ def _create_server(module, cloud):
    server = cloud.create_server(
        ip_pool=module.params['floating_ip_pools'],
        ips=module.params['floating_ips'],
        auto_ip=module.params['auto_floating_ip'],
        auto_ip=module.params['auto_ip'],
        root_volume=module.params['root_volume'],
        terminate_volume=module.params['terminate_volume'],
        wait=module.params['wait'], timeout=module.params['timeout'],
@@ -371,18 +397,18 @@ def _delete_floating_ip_list(cloud, server, extra_ips):
def _check_floating_ips(module, cloud, server):
    changed = False

    auto_floating_ip = module.params['auto_floating_ip']
    auto_ip = module.params['auto_ip']
    floating_ips = module.params['floating_ips']
    floating_ip_pools = module.params['floating_ip_pools']

    if floating_ip_pools or floating_ips or auto_floating_ip:
    if floating_ip_pools or floating_ips or auto_ip:
        ips = openstack_find_nova_addresses(server.addresses, 'floating')
        if not ips:
            # If we're configured to have a floating but we don't have one,
            # let's add one
            server = cloud.add_ips_to_server(
                server,
                auto_ip=auto_floating_ip,
                auto_ip=auto_ip,
                ips=floating_ips,
                ip_pool=floating_ip_pools,
            )
@@ -434,12 +460,12 @@ def main():
        flavor_ram = dict(default=None, type='int'),
        flavor_include = dict(default=None),
        key_name = dict(default=None),
        security_groups = dict(default='default'),
        security_groups = dict(default=['default'], type='list'),
        nics = dict(default=[], type='list'),
        meta = dict(default=None),
        userdata = dict(default=None),
        config_drive = dict(default=False, type='bool'),
        auto_floating_ip = dict(default=True, type='bool'),
        auto_ip = dict(default=True, type='bool', aliases=['auto_floating_ip', 'public_ip']),
        floating_ips = dict(default=None),
        floating_ip_pools = dict(default=None),
        root_volume = dict(default=None),
@@ -448,8 +474,8 @@ def main():
    )
    module_kwargs = openstack_module_kwargs(
        mutually_exclusive=[
            ['auto_floating_ip', 'floating_ips'],
            ['auto_floating_ip', 'floating_ip_pools'],
            ['auto_ip', 'floating_ips'],
            ['auto_ip', 'floating_ip_pools'],
            ['floating_ips', 'floating_ip_pools'],
            ['flavor', 'flavor_ram'],
            ['image', 'root_volume'],

@@ -302,7 +302,9 @@ def main():
                changed = True
            else:
                changed = False
            module.exit_json(changed=changed)
            module.exit_json(changed=changed,
                             subnet=subnet,
                             id=subnet['id'])

        elif state == 'absent':
            if not subnet:

154 cloud/openstack/os_subnets_facts.py Normal file
@@ -0,0 +1,154 @@
#!/usr/bin/python

# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software.  If not, see <http://www.gnu.org/licenses/>.

try:
    import shade
    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False

DOCUMENTATION = '''
---
module: os_subnets_facts
short_description: Retrieve facts about one or more OpenStack subnets.
version_added: "2.0"
author: "Davide Agnello (@dagnello)"
description:
    - Retrieve facts about one or more subnets from OpenStack.
requirements:
    - "python >= 2.6"
    - "shade"
options:
   subnet:
     description:
        - Name or ID of the subnet
     required: false
   filters:
     description:
        - A dictionary of meta data to use for further filtering. Elements of
          this dictionary may be additional dictionaries.
     required: false
extends_documentation_fragment: openstack
'''

EXAMPLES = '''
# Gather facts about previously created subnets
- os_subnets_facts:
    auth:
      auth_url: https://your_api_url.com:9000/v2.0
      username: user
      password: password
      project_name: someproject
- debug: var=openstack_subnets

# Gather facts about a previously created subnet by name
- os_subnets_facts:
    auth:
      auth_url: https://your_api_url.com:9000/v2.0
      username: user
      password: password
      project_name: someproject
    name: subnet1
- debug: var=openstack_subnets

# Gather facts about a previously created subnet with filter (note: name and
# filters parameters are not mutually exclusive)
- os_subnets_facts:
    auth:
      auth_url: https://your_api_url.com:9000/v2.0
      username: user
      password: password
      project_name: someproject
    filters:
      tenant_id: 55e2ce24b2a245b09f181bf025724cbe
- debug: var=openstack_subnets
'''

RETURN = '''
openstack_subnets:
    description: has all the openstack facts about the subnets
    returned: always, but can be null
    type: complex
    contains:
        id:
            description: Unique UUID.
            returned: success
            type: string
        name:
            description: Name given to the subnet.
            returned: success
            type: string
        network_id:
            description: Network ID this subnet belongs in.
            returned: success
            type: string
        cidr:
            description: Subnet's CIDR.
            returned: success
            type: string
        gateway_ip:
            description: Subnet's gateway ip.
            returned: success
            type: string
        enable_dhcp:
            description: DHCP enable flag for this subnet.
            returned: success
            type: bool
        ip_version:
            description: IP version for this subnet.
            returned: success
            type: int
        tenant_id:
            description: Tenant id associated with this subnet.
            returned: success
            type: string
        dns_nameservers:
            description: DNS name servers for this subnet.
            returned: success
            type: list of strings
        allocation_pools:
            description: Allocation pools associated with this subnet.
            returned: success
            type: list of dicts
'''

def main():

    argument_spec = openstack_full_argument_spec(
        name=dict(required=False, default=None),
        filters=dict(required=False, default=None)
    )
    module = AnsibleModule(argument_spec)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    try:
        cloud = shade.openstack_cloud(**module.params)
        subnets = cloud.search_subnets(module.params['name'],
                                       module.params['filters'])
        module.exit_json(changed=False, ansible_facts=dict(
            openstack_subnets=subnets))

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=e.message)

# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
    main()

212 cloud/openstack/os_user.py Normal file
@@ -0,0 +1,212 @@
#!/usr/bin/python
# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software.  If not, see <http://www.gnu.org/licenses/>.


try:
    import shade
    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False

DOCUMENTATION = '''
---
module: os_user
short_description: Manage OpenStack Identity Users
extends_documentation_fragment: openstack
version_added: "2.0"
description:
    - Manage OpenStack Identity users. Users can be created,
      updated or deleted using this module. A user will be updated
      if I(name) matches an existing user and I(state) is present.
      The value for I(name) cannot be updated without deleting and
      re-creating the user.
options:
   name:
     description:
        - Username for the user
     required: true
   password:
     description:
        - Password for the user
     required: true when I(state) is present
     default: None
   email:
     description:
        - Email address for the user
     required: false
     default: None
   default_project:
     description:
        - Project name or ID that the user should be associated with by default
     required: false
     default: None
   domain:
     description:
        - Domain to create the user in if the cloud supports domains
     required: false
     default: None
   enabled:
     description:
        - Is the user enabled
     required: false
     default: True
   state:
     description:
       - Should the resource be present or absent.
     choices: [present, absent]
     default: present
requirements:
    - "python >= 2.6"
    - "shade"
'''

EXAMPLES = '''
# Create a user
- os_user:
    cloud: mycloud
    state: present
    name: demouser
    password: secret
    email: demo@example.com
    domain: default
    default_project: demo

# Delete a user
- os_user:
    cloud: mycloud
    state: absent
    name: demouser
'''


RETURN = '''
user:
    description: Dictionary describing the user.
    returned: On success when I(state) is 'present'
    type: dictionary
    contains:
        default_project_id:
            description: User default project ID. Only present with Keystone >= v3.
            type: string
            sample: "4427115787be45f08f0ec22a03bfc735"
        domain_id:
            description: User domain ID. Only present with Keystone >= v3.
            type: string
            sample: "default"
        email:
            description: User email address
            type: string
            sample: "demo@example.com"
        id:
            description: User ID
            type: string
            sample: "f59382db809c43139982ca4189404650"
        name:
            description: User name
            type: string
            sample: "demouser"
'''

def _needs_update(module, user):
    keys = ('email', 'default_project', 'domain', 'enabled')
    for key in keys:
        if module.params[key] is not None and module.params[key] != user.get(key):
            return True

    # We don't get password back in the user object, so assume any supplied
    # password is a change.
    if module.params['password'] is not None:
        return True

    return False

def main():

    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        password=dict(required=False, default=None),
        email=dict(required=False, default=None),
        default_project=dict(required=False, default=None),
        domain=dict(required=False, default=None),
        enabled=dict(default=True, type='bool'),
        state=dict(default='present', choices=['absent', 'present']),
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(
        argument_spec,
        required_if=[
            ('state', 'present', ['password'])
        ],
        **module_kwargs)

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    name = module.params['name']
    password = module.params['password']
    email = module.params['email']
    default_project = module.params['default_project']
    domain = module.params['domain']
    enabled = module.params['enabled']
    state = module.params['state']

    try:
        cloud = shade.openstack_cloud(**module.params)
        user = cloud.get_user(name)

        project_id = None
        if default_project:
            project = cloud.get_project(default_project)
            if not project:
                module.fail_json(msg='Default project %s is not valid' % default_project)
            project_id = project['id']

        if state == 'present':
            if user is None:
                user = cloud.create_user(
                    name=name, password=password, email=email,
                    default_project=default_project, domain_id=domain,
                    enabled=enabled)
                changed = True
            else:
                if _needs_update(module, user):
                    user = cloud.update_user(
                        user['id'], password=password, email=email,
                        default_project=project_id, domain_id=domain,
                        enabled=enabled)
                    changed = True
                else:
                    changed = False
            module.exit_json(changed=changed, user=user)

        elif state == 'absent':
            if user is None:
                changed = False
            else:
                cloud.delete_user(user['id'])
                changed = True
            module.exit_json(changed=changed)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=e.message, extra_data=e.extra_data)

from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *


if __name__ == '__main__':
    main()

@@ -32,7 +32,7 @@ options:
    required: true
  password:
    description:
      - set the user's password
      - set the user's password. (Required when adding a user)
    required: false
    default: null
  host:

@@ -244,7 +244,8 @@ def main():
            db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password, db="mysql")
        cursor = db_connection.cursor()
    except Exception, e:
        module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials")
        errno, errstr = e.args
        module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials (ERROR: %s %s)" % (errno, errstr))
    mysqlvar_val = getvariable(cursor, mysqlvar)
    if mysqlvar_val is None:
        module.fail_json(msg="Variable not available \"%s\"" % mysqlvar, changed=False)

@@ -77,6 +77,13 @@ options:
        already existed.
    required: false
    version_added: "1.5"
  remote_src:
    description:
      - If False, it will search for src on the originating/master machine; if True, it will go to the remote/target machine for the src. Default is False.
    choices: [ "True", "False" ]
    required: false
    default: "False"
    version_added: "2.0"
extends_documentation_fragment:
    - files
    - validate

@@ -25,8 +25,6 @@ import stat
import fnmatch
import time
import re
import shutil


DOCUMENTATION = '''
---
@@ -50,17 +48,18 @@ options:
    required: false
    default: '*'
    description:
        - One or more (shell type) file glob patterns, which restrict the list of files to be returned to
          those whose basenames match at least one of the patterns specified. Multiple patterns can be
          specified using a list.
        - One or more (shell or regex) patterns, whose type is controlled by the C(use_regex) option.
        - The patterns restrict the list of files to be returned to those whose basenames match at
          least one of the patterns specified. Multiple patterns can be specified using a list.
    aliases: ['pattern']
  contains:
    required: false
    default: null
    description:
        - One or more re patterns which should be matched against the file content
  paths:
    required: true
    aliases: [ "name" ]
    aliases: [ "name", "path" ]
    description:
        - List of paths to the file or directory to search. All paths must be fully qualified.
  file_type:
@@ -108,6 +107,12 @@ options:
    choices: [ True, False ]
    description:
        - Set this to true to retrieve a file's sha1 checksum
  use_regex:
    required: false
    default: "False"
    choices: [ True, False ]
    description:
        - If false, the patterns are file globs (shell); if true, they are python regexes
'''
@@ -121,8 +126,11 @@ EXAMPLES = '''
# Recursively find /var/tmp files with last access time greater than 3600 seconds
- find: paths="/var/tmp" age="3600" age_stamp=atime recurse=yes

# find /var/log files equal or greater than 10 megabytes ending with .log or .log.gz
- find: paths="/var/tmp" patterns="*.log","*.log.gz" size="10m"
# find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz
- find: paths="/var/tmp" patterns="'*.old','*.log.gz'" size="10m"

# find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz via regex
- find: paths="/var/tmp" patterns="^.*?\.(?:old|log\.gz)$" size="10m" use_regex=True
'''

RETURN = '''
@@ -152,13 +160,23 @@ examined:
    sample: 34
'''

def pfilter(f, patterns=None):
def pfilter(f, patterns=None, use_regex=False):
    '''filter using glob patterns'''

    if patterns is None:
        return True
    for p in patterns:
        if fnmatch.fnmatch(f, p):
            return True

    if use_regex:
        for p in patterns:
            r = re.compile(p)
            if r.match(f):
                return True
    else:
        for p in patterns:
            if fnmatch.fnmatch(f, p):
                return True

    return False

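# --- Illustrative sketch (not part of this commit): the same pattern list
# filters differently depending on use_regex. File names are hypothetical.
import fnmatch, re

def pfilter(f, patterns=None, use_regex=False):
    if patterns is None:
        return True
    if use_regex:
        return any(re.compile(p).match(f) for p in patterns)
    return any(fnmatch.fnmatch(f, p) for p in patterns)

print(pfilter('messages.log.gz', ['*.log.gz']))                       # True (glob)
print(pfilter('messages.log.gz', [r'^.*?\.(?:old|log\.gz)$'], True))  # True (regex)
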
@@ -236,8 +254,8 @@ def statinfo(st):
def main():
    module = AnsibleModule(
        argument_spec = dict(
            paths = dict(required=True, aliases=['name'], type='list'),
            patterns = dict(default=['*'], type='list'),
            paths = dict(required=True, aliases=['name','path'], type='list'),
            patterns = dict(default=['*'], type='list', aliases=['pattern']),
            contains = dict(default=None, type='str'),
            file_type = dict(default="file", choices=['file', 'directory'], type='str'),
            age = dict(default=None, type='str'),
@@ -247,7 +265,9 @@ def main():
            hidden = dict(default="False", type='bool'),
            follow = dict(default="False", type='bool'),
            get_checksum = dict(default="False", type='bool'),
            use_regex = dict(default="False", type='bool'),
        ),
        supports_check_mode=True,
    )

    params = module.params
@@ -292,16 +312,21 @@ def main():
                if os.path.basename(fsname).startswith('.') and not params['hidden']:
                    continue

                st = os.stat(fsname)
                try:
                    st = os.stat(fsname)
                except:
                    msg += "%s was skipped as it does not seem to be a valid file or it cannot be accessed\n" % fsname
                    continue

                r = {'path': fsname}
                if stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory':
                    if pfilter(fsobj, params['patterns']) and agefilter(st, now, age, params['age_stamp']):
                    if pfilter(fsobj, params['patterns'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):

                        r.update(statinfo(st))
                        filelist.append(r)

                elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file':
                    if pfilter(fsobj, params['patterns']) and \
                    if pfilter(fsobj, params['patterns'], params['use_regex']) and \
                       agefilter(st, now, age, params['age_stamp']) and \
                       sizefilter(st, size) and \
                       contentfilter(fsname, params['contains']):
@@ -314,7 +339,7 @@ def main():
            if not params['recurse']:
                break
        else:
            msg += "%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n"
            msg += "%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n" % npath

    matched = len(filelist)
    module.exit_json(files=filelist, changed=False, msg=msg, matched=matched, examined=looked)

@@ -65,6 +65,12 @@ options:
    description:
      - all arguments accepted by the M(file) module also work here
    required: false
  state:
    description:
      - If set to C(absent) the option or section will be removed if present instead of created.
    required: false
    default: "present"
    choices: [ "present", "absent" ]
notes:
   - While it is possible to add an I(option) without specifying a I(value), this makes
     no sense.
@@ -110,21 +116,14 @@ def do_ini(module, filename, section=None, option=None, value=None, state='present'):

    if state == 'absent':
        if option is None and value is None:
            if cp.has_section(section):
                cp.remove_section(section)
                changed = True
        if option is None:
            changed = cp.remove_section(section)
        else:
            if option is not None:
                try:
                    if cp.get(section, option):
                        cp.remove_option(section, option)
                        changed = True
                except ConfigParser.InterpolationError:
                    cp.remove_option(section, option)
                    changed = True
                except:
                    pass
            try:
                changed = cp.remove_option(section, option)
            except ConfigParser.NoSectionError:
                # Option isn't present if the section isn't either
                pass

    if state == 'present':

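# --- Illustrative sketch (not part of this commit): the refactor above leans on
# ConfigParser's own return values, which already report whether anything existed.
import ConfigParser  # Python 2, as used by this module

cp = ConfigParser.ConfigParser()
cp.add_section('drinks')
cp.set('drinks', 'whiskey', 'dry')

print(cp.remove_option('drinks', 'whiskey'))  # True  -> changed
print(cp.remove_option('drinks', 'whiskey'))  # False -> nothing to do
print(cp.remove_section('drinks'))            # True  -> changed
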
@@ -212,4 +211,5 @@ def main():

# import module snippets
from ansible.module_utils.basic import *
main()
if __name__ == '__main__':
    main()

@@ -42,11 +42,18 @@ options:
    aliases: []
  get_checksum:
    description:
      - Whether to return a checksum of the file (currently sha1)
      - Whether to return a checksum of the file (default sha1)
    required: false
    default: yes
    aliases: []
    version_added: "1.8"
  checksum_algorithm:
    description:
      - Algorithm to determine checksum of file. Will throw an error if the host is unable to use specified algorithm.
    required: false
    choices: [ 'sha1', 'sha224', 'sha256', 'sha384', 'sha512' ]
    default: sha1
    version_added: "2.0"
author: "Bruce Pennypacker (@bpennypacker)"
'''
@@ -84,6 +91,9 @@ EXAMPLES = '''

# Don't do md5 checksum
- stat: path=/path/to/myhugefile get_md5=no

# Use sha256 to calculate checksum
- stat: path=/path/to/something checksum_algorithm=sha256
'''

RETURN = '''
@@ -245,8 +255,8 @@ stat:
        lnk_source:
            description: Original path
            returned: success, path exists and user can read stats and the path is a symbolic link
            type: boolean
            sample: True
            type: string
            sample: /home/foobar/21102015-1445431274-908472971
        md5:
            description: md5 hash of the path
            returned: success, path exists and user can read stats and path supports hashing and md5 is supported
@@ -254,7 +264,7 @@ stat:
            sample: f88fa92d8cf2eeecf4c0a50ccc96d0c0
        checksum:
            description: hash of the path
            returned: success, path exists and user can read stats and path supports hashing
            returned: success, path exists, user can read stats, path supports hashing and supplied checksum algorithm is available
            type: string
            sample: 50ba294cdf28c0d5bcde25708df53346825a429f
        pw_name:
@@ -281,7 +291,8 @@ def main():
            path = dict(required=True),
            follow = dict(default='no', type='bool'),
            get_md5 = dict(default='yes', type='bool'),
            get_checksum = dict(default='yes', type='bool')
            get_checksum = dict(default='yes', type='bool'),
            checksum_algorithm = dict(default='sha1', type='str', choices=['sha1', 'sha224', 'sha256', 'sha384', 'sha512'])
        ),
        supports_check_mode = True
    )
@@ -291,6 +302,7 @@ def main():
    follow = module.params.get('follow')
    get_md5 = module.params.get('get_md5')
    get_checksum = module.params.get('get_checksum')
    checksum_algorithm = module.params.get('checksum_algorithm')

    try:
        if follow:
@@ -351,8 +363,7 @@ def main():
        d['md5'] = None

    if S_ISREG(mode) and get_checksum and os.access(path, os.R_OK):
        d['checksum'] = module.sha1(path)

        d['checksum'] = module.digest_from_file(path, checksum_algorithm)

    try:
        pw = pwd.getpwuid(st.st_uid)
@@ -370,4 +381,4 @@ def main():
# import module snippets
from ansible.module_utils.basic import *

main()
main()

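# --- Illustrative sketch (not part of this commit): digest_from_file selects the
# hash by name; conceptually it behaves like this plain-hashlib helper.
import hashlib

def file_digest(path, algorithm='sha1'):
    h = hashlib.new(algorithm)  # raises ValueError if the host lacks the algorithm
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            h.update(chunk)
    return h.hexdigest()

# e.g. file_digest('/etc/hostname', 'sha256')
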
@@ -25,6 +25,8 @@ import shutil
import tempfile
import base64
import datetime
from distutils.version import LooseVersion

try:
    import json
except ImportError:
@@ -44,7 +46,6 @@ options:
      - HTTP or HTTPS URL in the form (http|https)://host.domain[:port]/path
    required: true
    default: null
    aliases: []
  dest:
    description:
      - path of where to download the file to (if desired). If I(dest) is a directory, the basename of the file on the remote server will be used.
@@ -74,7 +75,7 @@ options:
    version_added: "2.0"
  method:
    description:
      - The HTTP method of the request or response.
      - The HTTP method of the request or response. It MUST be uppercase.
    required: false
    choices: [ "GET", "POST", "PUT", "HEAD", "DELETE", "OPTIONS", "PATCH", "TRACE", "CONNECT", "REFRESH" ]
    default: "GET"
@@ -144,7 +145,8 @@ options:
    version_added: '1.9.2'

# informational: requirements for nodes
requirements: [ urlparse, httplib2 ]
requirements:
  - httplib2 >= 0.7.0
author: "Romeo Theriault (@romeotheriault)"
'''
@@ -199,11 +201,15 @@ EXAMPLES = '''

'''

HAS_HTTPLIB2 = True
HAS_HTTPLIB2 = False

try:
    import httplib2
except ImportError:
    HAS_HTTPLIB2 = False
    if LooseVersion(httplib2.__version__) >= LooseVersion('0.7'):
        HAS_HTTPLIB2 = True
except (ImportError, AttributeError):
    # AttributeError if __version__ is not present
    pass

HAS_URLPARSE = True

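# --- Illustrative sketch (not part of this commit): LooseVersion compares dotted
# version strings numerically, so '0.10' correctly sorts above '0.7'.
from distutils.version import LooseVersion

print(LooseVersion('0.10.3') >= LooseVersion('0.7'))  # True
print(LooseVersion('0.6.9') >= LooseVersion('0.7'))   # False
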
@@ -383,7 +389,7 @@ def main():
    )

    if not HAS_HTTPLIB2:
        module.fail_json(msg="httplib2 is not installed")
        module.fail_json(msg="httplib2 >= 0.7 is not installed")
    if not HAS_URLPARSE:
        module.fail_json(msg="urlparse is not installed")
@@ -474,7 +480,7 @@ def main():
        content_type, params = cgi.parse_header(uresp['content_type'])
        if 'charset' in params:
            content_encoding = params['charset']
        u_content = unicode(content, content_encoding, errors='xmlcharrefreplace')
        u_content = unicode(content, content_encoding, errors='replace')
        if content_type.startswith('application/json') or \
           content_type.startswith('text/json'):
            try:
@@ -483,7 +489,7 @@ def main():
            except:
                pass
    else:
        u_content = unicode(content, content_encoding, errors='xmlcharrefreplace')
        u_content = unicode(content, content_encoding, errors='replace')

    if resp['status'] not in status_code:
        module.fail_json(msg="Status code was not " + str(status_code), content=u_content, **uresp)

@@ -559,7 +559,7 @@ def main():

    if not HAS_PYTHON_APT:
        try:
            module.run_command('apt-get update && apt-get install python-apt -y -q', use_unsafe_shell=True, check_rc=True)
            module.run_command('apt-get update && apt-get install python-apt -y -q --force-yes', use_unsafe_shell=True, check_rc=True)
            global apt, apt_pkg
            import apt
            import apt.debfile

@@ -130,6 +130,15 @@ notes:
    that the other packages come from (such as epel-release) then that package
    needs to be installed in a separate task. This mimics yum's command line
    behaviour.
  - 'Yum itself has two types of groups. "Package groups" are specified in the
    rpm itself while "environment groups" are specified in a separate file
    (usually by the distribution). Unfortunately, this division becomes
    apparent to ansible users because ansible needs to operate on the group
    of packages in a single transaction and yum requires groups to be specified
    in different ways when used in that way. Package groups are specified as
    "@development-tools" and environment groups are "@^gnome-desktop-environment".
    Use the "yum group list" command to see which category of group the group
    you want to install falls into.'
# informational: requirements for nodes
requirements: [ yum ]
author:
@@ -161,6 +170,9 @@ EXAMPLES = '''

- name: install the 'Development tools' package group
  yum: name="@Development tools" state=present

- name: install the 'Gnome desktop' environment group
  yum: name="@^gnome-desktop-environment" state=present
'''

# 64k. Number of bytes to read at a time when manually downloading pkgs via a url
@@ -755,7 +767,11 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):

    if update_all:
        cmd = yum_basecmd + ['update']
        will_update = set(updates.keys())
        will_update_from_other_package = dict()
    else:
        will_update = set()
        will_update_from_other_package = dict()
        for spec in items:
            # some guess work involved with groups. update @<group> will install the group if missing
            if spec.startswith('@'):
@@ -779,8 +795,19 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
                    nothing_to_do = False
                    break

                if spec in pkgs['update'] and spec in updates.keys():
                    nothing_to_do = False
                # this contains the full NVR and spec could contain wildcards
                # or virtual provides (like "python-*" or "smtp-daemon") while
                # updates contains name only.
                this_name_only = '-'.join(this.split('-')[:-2])
                if spec in pkgs['update'] and this_name_only in updates.keys():
                    nothing_to_do = False
                    will_update.add(spec)
                    # Massage the updates list
                    if spec != this_name_only:
                        # For reporting what packages would be updated more
                        # succinctly
                        will_update_from_other_package[spec] = this_name_only
                    break

            if nothing_to_do:
                res['results'].append("All packages providing %s are up to date" % spec)

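# --- Illustrative sketch (not part of this commit): stripping version and
# release from a full NVR to get the bare package name, as done above.
this = 'bash-4.2.46-12.el7'
this_name_only = '-'.join(this.split('-')[:-2])
print(this_name_only)  # bash
# Multi-part names survive: 'python-lxml-3.2.1-4.el7' -> 'python-lxml'
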
@@ -793,12 +820,6 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
            res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
            module.fail_json(**res)

    # list of package updates
    if update_all:
        will_update = updates.keys()
    else:
        will_update = [u for u in pkgs['update'] if u in updates.keys() or u.startswith('@')]

    # check_mode output
    if module.check_mode:
        to_update = []
@@ -806,6 +827,9 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
            if w.startswith('@'):
                to_update.append((w, None))
                msg = '%s will be updated' % w
            elif w not in updates:
                other_pkg = will_update_from_other_package[w]
                to_update.append((w, 'because of (at least) %s-%s.%s from %s' % (other_pkg, updates[other_pkg]['version'], updates[other_pkg]['dist'], updates[other_pkg]['repo'])))
            else:
                to_update.append((w, '%s.%s from %s' % (updates[w]['version'], updates[w]['dist'], updates[w]['repo'])))

@@ -453,7 +453,7 @@ def is_local_branch(git_path, module, dest, branch):
def is_not_a_branch(git_path, module, dest):
    branches = get_branches(git_path, module, dest)
    for b in branches:
        if b.startswith('* ') and 'no branch' in b:
        if b.startswith('* ') and ('no branch' in b or 'detached from' in b):
            return True
    return False

@@ -171,9 +171,10 @@ class Subversion(object):
        '''True if revisioned files have been added or modified. Unrevisioned files are ignored.'''
        lines = self._exec(["status", "--quiet", "--ignore-externals", self.dest])
        # The --quiet option will return only modified files.

        # Match only revisioned files, i.e. ignore status '?'.
        regex = re.compile(r'^[^?X]')
        # Has local mods if more than 0 modified revisioned files.
        return len(filter(len, lines)) > 0
        return len(filter(regex.match, lines)) > 0

    def needs_update(self):
        curr, url = self.get_revision()

@@ -67,6 +67,7 @@ options:
  cron_file:
    description:
      - If specified, uses this file in cron.d instead of an individual user's crontab.
        To use the C(cron_file) parameter you must specify the C(user) as well.
    required: false
    default: null
  backup:

@@ -481,6 +481,15 @@ class ScientificLinuxHostname(Hostname):
    else:
        strategy_class = RedHatStrategy

class OracleLinuxHostname(Hostname):
    platform = 'Linux'
    distribution = 'Oracle linux server'
    distribution_version = get_distribution_version()
    if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"):
        strategy_class = SystemdStrategy
    else:
        strategy_class = RedHatStrategy

class AmazonLinuxHostname(Hostname):
    platform = 'Linux'
    distribution = 'Amazon'
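The version gate relies on distutils' LooseVersion ordering to pick a strategy. A quick check of the boundary:

    from distutils.version import LooseVersion

    print(LooseVersion('7.1') >= LooseVersion('7'))  # True  -> SystemdStrategy
    print(LooseVersion('6.7') >= LooseVersion('7'))  # False -> RedHatStrategy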
@@ -23,19 +23,20 @@ DOCUMENTATION = '''
---
module: ping
version_added: historical
short_description: Try to connect to host and return C(pong) on success.
short_description: Try to connect to host, verify a usable python and return C(pong) on success.
description:
   - A trivial test module, this module always returns C(pong) on successful
     contact. It does not make sense in playbooks, but it is useful from
     C(/usr/bin/ansible)
     C(/usr/bin/ansible) to verify the ability to log in and that a usable python is configured.
   - This is NOT ICMP ping, this is just a trivial test module.
options: {}
author:
    - "Ansible Core Team"
    - "Michael DeHaan"
'''

EXAMPLES = '''
# Test 'webservers' status
# Test we can log on to 'webservers' and execute python with json lib.
ansible webservers -m ping
'''
@@ -395,7 +395,7 @@ class LinuxService(Service):
        location = dict()

        for binary in binaries:
            location[binary] = self.module.get_bin_path(binary)
            location[binary] = self.module.get_bin_path(binary, opt_dirs=paths)

        for initdir in initpaths:
            initscript = "%s/%s" % (initdir, self.name)
@@ -403,25 +403,31 @@ class LinuxService(Service):
                self.svc_initscript = initscript

        def check_systemd():
            # verify systemd is installed (by finding systemctl)
            if not location.get('systemctl', False):
                return False

            # Check if init is the systemd command, using comm as cmdline could be symlink
            try:
                f = open('/proc/1/comm', 'r')
            except IOError, err:
                # If comm doesn't exist, old kernel, no systemd
                return False
            # tools must be installed
            if location.get('systemctl', False):

                for line in f:
                    if 'systemd' in line:
                        return True
                # this should show if systemd is the boot init system
                # these mirror systemd's own sd_booted test http://www.freedesktop.org/software/systemd/man/sd_booted.html
                for canary in ["/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"]:
                    if os.path.exists(canary):
                        return True

                # If all else fails, check if init is the systemd command, using comm as cmdline could be symlink
                try:
                    f = open('/proc/1/comm', 'r')
                except IOError:
                    # If comm doesn't exist, old kernel, no systemd
                    return False

                for line in f:
                    if 'systemd' in line:
                        return True

            return False

        # Locate a tool to enable/disable a service
        if location.get('systemctl', False) and check_systemd():
        if check_systemd():
            # service is managed by systemd
            self.__systemd_unit = self.name
            self.svc_cmd = location['systemctl']
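A minimal standalone sketch of the canary test the new code leans on, using the same paths outside the module:

    import os

    def sd_booted():
        # systemd creates /run/systemd/system/ early in boot; per the
        # sd_booted() docs the diff cites, its presence indicates systemd
        # is the running init (the /dev/.* paths cover older layouts)
        return any(os.path.exists(p) for p in
                   ("/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"))

    print(sd_booted())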
@@ -699,7 +705,8 @@ class LinuxService(Service):
            (rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name))
            if not self.name in out:
                self.module.fail_json(msg="service %s does not support chkconfig" % self.name)
            state = out.split()[-1]
            #TODO: look back on why this is here
            #state = out.split()[-1]

            # Check if we're already in the correct state
            if "3:%s" % action in out and "5:%s" % action in out:
@@ -961,7 +968,6 @@ class FreeBsdService(Service):
            self.rcconf_file = rcfile

        rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments))
        cmd = "%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments)
        try:
            rcvars = shlex.split(stdout, comments=True)
        except:
@@ -123,6 +123,8 @@ class SysctlModule(object):

    def process(self):

        self.platform = get_platform().lower()

        # Whitespace is bad
        self.args['name'] = self.args['name'].strip()
        self.args['value'] = self._parse_value(self.args['value'])
@@ -206,7 +208,11 @@ class SysctlModule(object):

    # Use the sysctl command to find the current value
    def get_token_curr_value(self, token):
        thiscmd = "%s -e -n %s" % (self.sysctl_cmd, token)
        if self.platform == 'openbsd':
            # openbsd doesn't support -e, just drop it
            thiscmd = "%s -n %s" % (self.sysctl_cmd, token)
        else:
            thiscmd = "%s -e -n %s" % (self.sysctl_cmd, token)
        rc, out, err = self.module.run_command(thiscmd)
        if rc != 0:
            return None
@@ -217,7 +223,11 @@ class SysctlModule(object):
    def set_token_value(self, token, value):
        if len(value.split()) > 0:
            value = '"' + value + '"'
        thiscmd = "%s -w %s=%s" % (self.sysctl_cmd, token, value)
        if self.platform == 'openbsd':
            # openbsd doesn't accept -w, but since it's not needed, just drop it
            thiscmd = "%s %s=%s" % (self.sysctl_cmd, token, value)
        else:
            thiscmd = "%s -w %s=%s" % (self.sysctl_cmd, token, value)
        rc, out, err = self.module.run_command(thiscmd)
        if rc != 0:
            self.module.fail_json(msg='setting %s failed: %s' % (token, out + err))
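A small sketch of the read and write command strings the two hunks above produce; the binary path, key and value are made up:

    sysctl_cmd, token, value = '/sbin/sysctl', 'kern.maxfiles', '65536'
    for platform in ('linux', 'openbsd'):
        if platform == 'openbsd':
            read_cmd = "%s -n %s" % (sysctl_cmd, token)        # no -e support
            write_cmd = "%s %s=%s" % (sysctl_cmd, token, value)  # no -w support
        else:
            read_cmd = "%s -e -n %s" % (sysctl_cmd, token)
            write_cmd = "%s -w %s=%s" % (sysctl_cmd, token, value)
        print(platform, '->', read_cmd, '|', write_cmd)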
@@ -227,9 +237,20 @@ class SysctlModule(object):
    # Run sysctl -p
    def reload_sysctl(self):
        # do it
        if get_platform().lower() == 'freebsd':
        if self.platform == 'freebsd':
            # freebsd doesn't support -p, so reload the sysctl service
            rc, out, err = self.module.run_command('/etc/rc.d/sysctl reload')
        elif self.platform == 'openbsd':
            # openbsd doesn't support -p and doesn't have a sysctl service,
            # so we have to set every value with its own sysctl call
            for k, v in self.file_values.items():
                rc = 0
                if k != self.args['name']:
                    rc = self.set_token_value(k, v)
                    if rc != 0:
                        break
            if rc == 0 and self.args['state'] == "present":
                rc = self.set_token_value(self.args['name'], self.args['value'])
        else:
            # system supports reloading via the -p flag to sysctl, so we'll use that
            sysctl_args = [self.sysctl_cmd, '-p', self.sysctl_file]
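In effect, where Linux reloads everything with a single `sysctl -p <file>` and FreeBSD restarts its sysctl rc service, the OpenBSD branch replays each key from the managed file through set_token_value one call at a time, handling the requested key separately at the end.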
@@ -1352,20 +1352,21 @@ class SunOS(User):
            cmd.append('-s')
            cmd.append(self.shell)

        if self.module.check_mode:
            return (0, '', '')
        else:
            # modify the user if cmd will do anything
            if cmd_len != len(cmd):
        # modify the user if cmd will do anything
        if cmd_len != len(cmd):
            (rc, out, err) = (0, '', '')
            if not self.module.check_mode:
                cmd.append(self.name)
                (rc, out, err) = self.execute_command(cmd)
                if rc is not None and rc != 0:
                    self.module.fail_json(name=self.name, msg=err, rc=rc)
            else:
                (rc, out, err) = (None, '', '')
        else:
            (rc, out, err) = (None, '', '')

        # we have to set the password by editing the /etc/shadow file
        if self.update_password == 'always' and self.password is not None and info[1] != self.password:
        # we have to set the password by editing the /etc/shadow file
        if self.update_password == 'always' and self.password is not None and info[1] != self.password:
            (rc, out, err) = (0, '', '')
            if not self.module.check_mode:
                try:
                    lines = []
                    for line in open(self.SHADOWFILE, 'rb').readlines():
@@ -1382,7 +1383,7 @@ class SunOS(User):
                except Exception, err:
                    self.module.fail_json(msg="failed to update users password: %s" % str(err))

            return (rc, out, err)
        return (rc, out, err)

# ===========================================
class DarwinUser(User):
@@ -2044,7 +2045,7 @@ def main():
            comment=dict(default=None, type='str'),
            home=dict(default=None, type='str'),
            shell=dict(default=None, type='str'),
            password=dict(default=None, type='str'),
            password=dict(default=None, type='str', no_log=True),
            login_class=dict(default=None, type='str'),
            # following options are specific to userdel
            force=dict(default='no', type='bool'),
@@ -2062,7 +2063,7 @@ def main():
            ssh_key_type=dict(default=ssh_defaults['type'], type='str'),
            ssh_key_file=dict(default=None, type='str'),
            ssh_key_comment=dict(default=ssh_defaults['comment'], type='str'),
            ssh_key_passphrase=dict(default=None, type='str'),
            ssh_key_passphrase=dict(default=None, type='str', no_log=True),
            update_password=dict(default='always', choices=['always', 'on_create'], type='str'),
            expires=dict(default=None, type='float'),
        ),
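For context, no_log=True marks a parameter as sensitive so AnsibleModule censors its value in syslog/journald and verbose output. A minimal sketch of the pattern; AnsibleModule expects to be invoked by Ansible, so this is illustrative rather than a standalone script:

    from ansible.module_utils.basic import AnsibleModule

    module = AnsibleModule(
        argument_spec=dict(
            # no_log=True keeps the value out of logs and invocation records
            password=dict(default=None, type='str', no_log=True),
        ),
    )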
@@ -2160,4 +2161,5 @@ def main():

# import module snippets
from ansible.module_utils.basic import *
main()
if __name__ == '__main__':
    main()
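With the guard in place, importing a module source file, as the new unit tests below do for os_server, no longer runs main() as an import side effect.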
2
test-requirements.txt
Normal file

@@ -0,0 +1,2 @@
mock
pytest
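These two entries back the new unit tests below: mock stands in for the shade cloud object and the Ansible module plumbing, and pytest is the test runner.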
221
test/unit/cloud/openstack/test_os_server.py
Normal file

@@ -0,0 +1,221 @@
import mock
import pytest
import yaml
import inspect
import collections

from cloud.openstack import os_server


class AnsibleFail(Exception):
    pass


class AnsibleExit(Exception):
    pass


def params_from_doc(func):
    '''This function extracts the docstring from the specified function,
    parses it as a YAML document, and returns parameters for the os_server
    module.'''

    doc = inspect.getdoc(func)
    cfg = yaml.load(doc)

    for task in cfg:
        for module, params in task.items():
            for k, v in params.items():
                if k in ['nics'] and type(v) == str:
                    params[k] = [v]
            task[module] = collections.defaultdict(str, params)

    return cfg[0]['os_server']

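A quick sketch of what params_from_doc yields for one of the docstrings below; the demo function is hypothetical and runs standalone:

    import inspect

    import yaml

    def demo():
        '''
        - os_server:
            nics: net-id=1234
        '''

    cfg = yaml.load(inspect.getdoc(demo))
    print(cfg)  # -> [{'os_server': {'nics': 'net-id=1234'}}]
    # after the nics-to-list massage above, the module params become
    # defaultdict(str, {'nics': ['net-id=1234']})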
class FakeCloud(object):
    ports = [
        {'name': 'port1', 'id': '1234'},
        {'name': 'port2', 'id': '4321'},
    ]

    networks = [
        {'name': 'network1', 'id': '5678'},
        {'name': 'network2', 'id': '8765'},
    ]

    images = [
        {'name': 'cirros', 'id': '1'},
        {'name': 'fedora', 'id': '2'},
    ]

    flavors = [
        {'name': 'm1.small', 'id': '1', 'flavor_ram': 1024},
        {'name': 'm1.tiny', 'id': '2', 'flavor_ram': 512},
    ]

    def _find(self, source, name):
        for item in source:
            if item['name'] == name or item['id'] == name:
                return item

    def get_image_id(self, name, exclude=None):
        image = self._find(self.images, name)
        if image:
            return image['id']

    def get_flavor(self, name):
        return self._find(self.flavors, name)

    def get_flavor_by_ram(self, ram, include=None):
        for flavor in self.flavors:
            if flavor['flavor_ram'] >= ram and (include is None or include in
                                                flavor['name']):
                return flavor

    def get_port(self, name):
        return self._find(self.ports, name)

    def get_network(self, name):
        return self._find(self.networks, name)

    create_server = mock.MagicMock()

class TestNetworkArgs(object):
    '''This class exercises the _network_args function of the
    os_server module. For each test, we parse the YAML document
    contained in the docstring to retrieve the module parameters for the
    test.'''

    def setup_method(self, method):
        self.cloud = FakeCloud()
        self.module = mock.MagicMock()
        self.module.params = params_from_doc(method)

    def test_nics_string_net_id(self):
        '''
        - os_server:
            nics: net-id=1234
        '''
        args = os_server._network_args(self.module, self.cloud)
        assert(args[0]['net-id'] == '1234')

    def test_nics_string_net_id_list(self):
        '''
        - os_server:
            nics: net-id=1234,net-id=4321
        '''
        args = os_server._network_args(self.module, self.cloud)
        assert(args[0]['net-id'] == '1234')
        assert(args[1]['net-id'] == '4321')

    def test_nics_string_port_id(self):
        '''
        - os_server:
            nics: port-id=1234
        '''
        args = os_server._network_args(self.module, self.cloud)
        assert(args[0]['port-id'] == '1234')

    def test_nics_string_net_name(self):
        '''
        - os_server:
            nics: net-name=network1
        '''
        args = os_server._network_args(self.module, self.cloud)
        assert(args[0]['net-id'] == '5678')

    def test_nics_string_port_name(self):
        '''
        - os_server:
            nics: port-name=port1
        '''
        args = os_server._network_args(self.module, self.cloud)
        assert(args[0]['port-id'] == '1234')

    def test_nics_structured_net_id(self):
        '''
        - os_server:
            nics:
              - net-id: '1234'
        '''
        args = os_server._network_args(self.module, self.cloud)
        assert(args[0]['net-id'] == '1234')

    def test_nics_structured_mixed(self):
        '''
        - os_server:
            nics:
              - net-id: '1234'
              - port-name: port1
              - 'net-name=network1,port-id=4321'
        '''
        args = os_server._network_args(self.module, self.cloud)
        assert(args[0]['net-id'] == '1234')
        assert(args[1]['port-id'] == '1234')
        assert(args[2]['net-id'] == '5678')
        assert(args[3]['port-id'] == '4321')

class TestCreateServer(object):
    def setup_method(self, method):
        self.cloud = FakeCloud()
        self.module = mock.MagicMock()
        self.module.params = params_from_doc(method)
        self.module.fail_json.side_effect = AnsibleFail()
        self.module.exit_json.side_effect = AnsibleExit()

        self.meta = mock.MagicMock()
        self.meta.get_hostvars_from_server.return_value = {
            'id': '1234'
        }
        os_server.meta = self.meta

    def test_create_server(self):
        '''
        - os_server:
            image: cirros
            flavor: m1.tiny
            nics:
              - net-name: network1
        '''
        with pytest.raises(AnsibleExit):
            os_server._create_server(self.module, self.cloud)

        assert(self.cloud.create_server.call_count == 1)
        assert(self.cloud.create_server.call_args[1]['image']
               == self.cloud.get_image_id('cirros'))
        assert(self.cloud.create_server.call_args[1]['flavor']
               == self.cloud.get_flavor('m1.tiny')['id'])
        assert(self.cloud.create_server.call_args[1]['nics'][0]['net-id']
               == self.cloud.get_network('network1')['id'])

    def test_create_server_bad_flavor(self):
        '''
        - os_server:
            image: cirros
            flavor: missing_flavor
            nics:
              - net-name: network1
        '''
        with pytest.raises(AnsibleFail):
            os_server._create_server(self.module, self.cloud)

        assert('missing_flavor' in
               self.module.fail_json.call_args[1]['msg'])

    def test_create_server_bad_nic(self):
        '''
        - os_server:
            image: cirros
            flavor: m1.tiny
            nics:
              - net-name: missing_network
        '''
        with pytest.raises(AnsibleFail):
            os_server._create_server(self.module, self.cloud)

        assert('missing_network' in
               self.module.fail_json.call_args[1]['msg'])
@@ -56,7 +56,7 @@ def _disable_module(module):

    result, stdout, stderr = module.run_command("%s %s" % (a2dismod_binary, name))

    if re.match(r'.*\b' + name + r' already disabled', stdout, re.S):
    if re.match(r'.*\b' + name + r' already disabled', stdout, re.S|re.M):
        module.exit_json(changed=False, result="Success")
    elif result != 0:
        module.fail_json(msg="Failed to disable module %s: %s" % (name, stdout))

@@ -71,7 +71,7 @@ def _enable_module(module):

    result, stdout, stderr = module.run_command("%s %s" % (a2enmod_binary, name))

    if re.match(r'.*\b' + name + r' already enabled', stdout, re.S):
    if re.match(r'.*\b' + name + r' already enabled', stdout, re.S|re.M):
        module.exit_json(changed=False, result="Success")
    elif result != 0:
        module.fail_json(msg="Failed to enable module %s: %s" % (name, stdout))
@@ -64,6 +64,15 @@ Set-Attr $result.ansible_facts "ansible_os_name" ($win32_os.Name.Split('|')[0]).
Set-Attr $result.ansible_facts "ansible_distribution" $osversion.VersionString
Set-Attr $result.ansible_facts "ansible_distribution_version" $osversion.Version.ToString()

$date = New-Object psobject
Set-Attr $date "date" (Get-Date -format d)
Set-Attr $date "year" (Get-Date -format yyyy)
Set-Attr $date "month" (Get-Date -format MM)
Set-Attr $date "day" (Get-Date -format dd)
Set-Attr $date "hour" (Get-Date -format HH)
Set-Attr $date "iso8601" (Get-Date -format s)
Set-Attr $result.ansible_facts "ansible_date_time" $date

Set-Attr $result.ansible_facts "ansible_totalmem" $capacity

Set-Attr $result.ansible_facts "ansible_lastboot" $win32_os.lastbootuptime.ToString("u")
@@ -71,18 +71,15 @@ If (Test-Path $path)
}
Else
{
    # Only files have the .Directory attribute.
    If ( $state -eq "directory" -and $fileinfo.Directory )
    If ( $state -eq "directory" -and -not $fileinfo.PsIsContainer )
    {
        Fail-Json (New-Object psobject) "path is not a directory"
    }

    # Only files have the .Directory attribute.
    If ( $state -eq "file" -and -not $fileinfo.Directory )
    If ( $state -eq "file" -and $fileinfo.PsIsContainer )
    {
        Fail-Json (New-Object psobject) "path is not a file"
    }

}
}
Else
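PSIsContainer is exposed on both file and directory items, so the check no longer leans on the .Directory property, which, as the removed comments noted, exists only on FileInfo objects.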
@@ -387,8 +387,11 @@ Elseif (Test-Path $dest) {
    $found = $FALSE;
    Foreach ($encoding in $sortedlist.GetValueList()) {
        $preamble = $encoding.GetPreamble();
        If ($preamble) {
            Foreach ($i in 0..$preamble.Length) {
        If ($preamble -and $bom) {
            Foreach ($i in 0..($preamble.Length - 1)) {
                If ($i -ge $bom.Length) {
                    break;
                }
                If ($preamble[$i] -ne $bom[$i]) {
                    break;
                }
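Two fixes land in this hunk: PowerShell's 0..N range operator is inclusive of N, so the old loop indexed one byte past the end of the preamble, and nothing guarded against a preamble longer than the bytes read into $bom; the corrected bounds and the $i -ge $bom.Length break close both holes.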
@@ -427,7 +430,7 @@ If ($state -eq "present") {
}
Else {

    If ($regex -eq $FALSE -and $line -eq $FALSE) {
    If ($regexp -eq $FALSE -and $line -eq $FALSE) {
        Fail-Json (New-Object psobject) "one of line= or regexp= is required with state=absent";
    }