diff --git a/VERSION b/VERSION
new file mode 100644
index 00000000000..8b31b2b4fdb
--- /dev/null
+++ b/VERSION
@@ -0,0 +1 @@
+2.0.0-0.3.beta1
diff --git a/cloud/amazon/cloudformation.py b/cloud/amazon/cloudformation.py
index 236bc89000d..4d0db148261 100644
--- a/cloud/amazon/cloudformation.py
+++ b/cloud/amazon/cloudformation.py
@@ -241,7 +241,7 @@ def main():
stack_name=dict(required=True),
template_parameters=dict(required=False, type='dict', default={}),
state=dict(default='present', choices=['present', 'absent']),
- template=dict(default=None, required=False),
+ template=dict(default=None, required=False, type='path'),
notification_arns=dict(default=None, required=False),
stack_policy=dict(default=None, required=False),
disable_rollback=dict(default=False, type='bool'),
@@ -368,6 +368,16 @@ def main():
for output in stack.outputs:
stack_outputs[output.key] = output.value
result['stack_outputs'] = stack_outputs
+ stack_resources = []
+ for res in cfn.list_stack_resources(stack_name):
+ stack_resources.append({
+ "last_updated_time": res.last_updated_time,
+ "logical_resource_id": res.logical_resource_id,
+ "physical_resource_id": res.physical_resource_id,
+ "status": res.resource_status,
+ "status_reason": res.resource_status_reason,
+ "resource_type": res.resource_type })
+ result['stack_resources'] = stack_resources
# absent state is different because of the way delete_stack works.
# problem is it it doesn't give an error if stack isn't found
diff --git a/cloud/amazon/ec2.py b/cloud/amazon/ec2.py
index 0b0d3c91127..ed36b855480 100644
--- a/cloud/amazon/ec2.py
+++ b/cloud/amazon/ec2.py
@@ -216,7 +216,7 @@ options:
volumes:
version_added: "1.5"
description:
- - "a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume. Encrypt the volume by passing 'encrypted: true' in the volume dict."
+ - a list of hash/dictionaries of volumes to add to the new instance; '[{"key":"value", "key":"value"}]'; keys allowed are - device_name (str; required), delete_on_termination (bool; False), device_type (deprecated), ephemeral (str), encrypted (bool; False), snapshot (str), volume_type (str), iops (int) - device_type is deprecated use volume_type, iops must be set when volume_type='io1', ephemeral and snapshot are mutually exclusive.
required: false
default: null
aliases: []
@@ -295,7 +295,7 @@ EXAMPLES = '''
volumes:
- device_name: /dev/sdb
snapshot: snap-abcdef12
- device_type: io1
+ volume_type: io1
iops: 1000
volume_size: 100
delete_on_termination: true
@@ -710,11 +710,21 @@ def create_block_device(module, ec2, volume):
# Not aware of a way to determine this programatically
# http://aws.amazon.com/about-aws/whats-new/2013/10/09/ebs-provisioned-iops-maximum-iops-gb-ratio-increased-to-30-1/
MAX_IOPS_TO_SIZE_RATIO = 30
+
+ # device_type has been used historically to represent volume_type,
+ # however ec2_vol uses volume_type, as does the BlockDeviceType, so
+ # we add handling for either/or but not both
+ if all(key in volume for key in ['device_type','volume_type']):
+ module.fail_json(msg = 'device_type is a deprecated name for volume_type. Do not use both device_type and volume_type')
+
+ # get whichever one is set, or NoneType if neither are set
+ volume_type = volume.get('device_type') or volume.get('volume_type')
+
if 'snapshot' not in volume and 'ephemeral' not in volume:
if 'volume_size' not in volume:
module.fail_json(msg = 'Size must be specified when creating a new volume or modifying the root volume')
if 'snapshot' in volume:
- if 'device_type' in volume and volume.get('device_type') == 'io1' and 'iops' not in volume:
+ if volume_type == 'io1' and 'iops' not in volume:
module.fail_json(msg = 'io1 volumes must have an iops value set')
if 'iops' in volume:
snapshot = ec2.get_all_snapshots(snapshot_ids=[volume['snapshot']])[0]
@@ -729,10 +739,11 @@ def create_block_device(module, ec2, volume):
return BlockDeviceType(snapshot_id=volume.get('snapshot'),
ephemeral_name=volume.get('ephemeral'),
size=volume.get('volume_size'),
- volume_type=volume.get('device_type'),
+ volume_type=volume_type,
delete_on_termination=volume.get('delete_on_termination', False),
iops=volume.get('iops'),
encrypted=volume.get('encrypted', None))
+
def boto_supports_param_in_spot_request(ec2, param):
"""
Check if Boto library has a in its request_spot_instances() method. For example, the placement_group parameter wasn't added until 2.3.0.
@@ -1215,8 +1226,10 @@ def startstop_instances(module, ec2, instance_ids, state, instance_tags):
     wait = module.params.get('wait')
     wait_timeout = int(module.params.get('wait_timeout'))
+    source_dest_check = module.params.get('source_dest_check')
+    termination_protection = module.params.get('termination_protection')
     changed = False
     instance_dict_array = []
if not isinstance(instance_ids, list) or len(instance_ids) < 1:
# Fail unless the user defined instance tags
diff --git a/cloud/amazon/ec2_ami.py b/cloud/amazon/ec2_ami.py
index 0d504ee3b0c..53b21a519ba 100644
--- a/cloud/amazon/ec2_ami.py
+++ b/cloud/amazon/ec2_ami.py
@@ -47,12 +47,6 @@ options:
- create or deregister/delete image
required: false
default: 'present'
- region:
- description:
- - The AWS region to use. Must be specified if ec2_url is not used. If not specified then the value of the EC2_REGION environment variable, if any, is used.
- required: false
- default: null
- aliases: [ 'aws_region', 'ec2_region' ]
description:
description:
- An optional human-readable string describing the contents and purpose of the AMI.
@@ -72,7 +66,8 @@ options:
device_mapping:
version_added: "2.0"
description:
- - An optional list of devices with custom configurations (same block-device-mapping parameters)
+ - An optional list of device hashes/dictionaries with custom configurations (same block-device-mapping parameters)
+ - "Valid properties include: device_name, volume_type, size (in GB), delete_on_termination (boolean), no_device (boolean), snapshot_id, iops (for io1 volume_type)"
required: false
default: null
delete_snapshot:
@@ -88,7 +83,9 @@ options:
version_added: "2.0"
author: "Evan Duffield (@scicoin-project) "
-extends_documentation_fragment: aws
+extends_documentation_fragment:
+ - aws
+ - ec2
'''
# Thank you to iAcquire for sponsoring development of this module.
@@ -133,6 +130,21 @@ EXAMPLES = '''
volume_type: gp2
register: instance
+# AMI Creation, excluding a volume attached at /dev/sdb
+- ec2_ami:
+ aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
+ aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+ instance_id: i-xxxxxx
+ name: newtest
+ device_mapping:
+ - device_name: /dev/sda1
+ size: XXX
+ delete_on_termination: true
+ volume_type: gp2
+ - device_name: /dev/sdb
+ no_device: yes
+ register: instance
+
# Deregister/Delete AMI
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
diff --git a/cloud/amazon/ec2_asg.py b/cloud/amazon/ec2_asg.py
index 7d03a7b35f4..f6e71b4baf8 100644
--- a/cloud/amazon/ec2_asg.py
+++ b/cloud/amazon/ec2_asg.py
@@ -80,11 +80,6 @@ options:
required: false
version_added: "1.8"
default: True
- region:
- description:
- - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
- required: false
- aliases: ['aws_region', 'ec2_region']
vpc_zone_identifier:
description:
- List of VPC subnets to use
@@ -134,7 +129,9 @@ options:
default: Default
choices: ['OldestInstance', 'NewestInstance', 'OldestLaunchConfiguration', 'ClosestToNextInstanceHour', 'Default']
version_added: "2.0"
-extends_documentation_fragment: aws
+extends_documentation_fragment:
+ - aws
+ - ec2
"""
EXAMPLES = '''
@@ -258,9 +255,10 @@ def get_properties(autoscaling_group):
properties['viable_instances'] = 0
properties['terminating_instances'] = 0
+ instance_facts = {}
+
if autoscaling_group.instances:
properties['instances'] = [i.instance_id for i in autoscaling_group.instances]
- instance_facts = {}
for i in autoscaling_group.instances:
instance_facts[i.instance_id] = {'health_status': i.health_status,
'lifecycle_state': i.lifecycle_state,
@@ -277,7 +275,7 @@ def get_properties(autoscaling_group):
properties['terminating_instances'] += 1
if i.lifecycle_state == 'Pending':
properties['pending_instances'] += 1
- properties['instance_facts'] = instance_facts
+ properties['instance_facts'] = instance_facts
properties['load_balancers'] = autoscaling_group.load_balancers
if getattr(autoscaling_group, "tags", None):
diff --git a/cloud/amazon/ec2_eip.py b/cloud/amazon/ec2_eip.py
index 4975f37a250..020ec67a497 100644
--- a/cloud/amazon/ec2_eip.py
+++ b/cloud/amazon/ec2_eip.py
@@ -40,12 +40,6 @@ options:
required: false
choices: ['present', 'absent']
default: present
- region:
- description:
- - the EC2 region to use
- required: false
- default: null
- aliases: [ ec2_region ]
in_vpc:
description:
- allocate an EIP inside a VPC or not
@@ -64,7 +58,9 @@ options:
required: false
default: false
version_added: "2.0"
-extends_documentation_fragment: aws
+extends_documentation_fragment:
+ - aws
+ - ec2
author: "Lorin Hochstein (@lorin) "
author: "Rick Mendes (@rickmendes) "
notes:
diff --git a/cloud/amazon/ec2_elb.py b/cloud/amazon/ec2_elb.py
index 6530a00bcb9..9f333764a5d 100644
--- a/cloud/amazon/ec2_elb.py
+++ b/cloud/amazon/ec2_elb.py
@@ -41,11 +41,6 @@ options:
- List of ELB names, required for registration. The ec2_elbs fact should be used if there was a previous de-register.
required: false
default: None
- region:
- description:
- - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
- required: false
- aliases: ['aws_region', 'ec2_region']
enable_availability_zone:
description:
- Whether to enable the availability zone of the instance on the target ELB if the availability zone has not already
@@ -73,7 +68,9 @@ options:
required: false
default: 0
version_added: "1.6"
-extends_documentation_fragment: aws
+extends_documentation_fragment:
+ - aws
+ - ec2
"""
EXAMPLES = """
@@ -85,7 +82,7 @@ pre_tasks:
local_action:
module: ec2_elb
instance_id: "{{ ansible_ec2_instance_id }}"
- state: 'absent'
+ state: absent
roles:
- myrole
post_tasks:
@@ -94,7 +91,7 @@ post_tasks:
module: ec2_elb
instance_id: "{{ ansible_ec2_instance_id }}"
ec2_elbs: "{{ item }}"
- state: 'present'
+ state: present
with_items: ec2_elbs
"""
diff --git a/cloud/amazon/ec2_elb_lb.py b/cloud/amazon/ec2_elb_lb.py
index 2a0a41a9af9..aaccc1b2b57 100644
--- a/cloud/amazon/ec2_elb_lb.py
+++ b/cloud/amazon/ec2_elb_lb.py
@@ -29,6 +29,7 @@ options:
state:
description:
- Create or destroy the ELB
+ choices: ["present", "absent"]
required: true
name:
description:
@@ -74,11 +75,6 @@ options:
- An associative array of access logs configuration settings (see example)
require: false
default: None
- region:
- description:
- - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
- required: false
- aliases: ['aws_region', 'ec2_region']
subnets:
description:
- A list of VPC subnets to use when creating ELB. Zones should be empty if using this.
@@ -126,7 +122,9 @@ options:
required: false
version_added: "2.0"
-extends_documentation_fragment: aws
+extends_documentation_fragment:
+ - aws
+ - ec2
"""
EXAMPLES = """
diff --git a/cloud/amazon/ec2_group.py b/cloud/amazon/ec2_group.py
index d2fe04c968d..6ec9086db05 100644
--- a/cloud/amazon/ec2_group.py
+++ b/cloud/amazon/ec2_group.py
@@ -45,12 +45,6 @@ options:
- List of firewall outbound rules to enforce in this group (see example). If none are supplied, a default all-out rule is assumed. If an empty list is supplied, no outbound rules will be enabled.
required: false
version_added: "1.6"
- region:
- description:
- - the EC2 region to use
- required: false
- default: null
- aliases: []
state:
version_added: "1.4"
description:
@@ -74,7 +68,9 @@ options:
default: 'true'
aliases: []
-extends_documentation_fragment: aws
+extends_documentation_fragment:
+ - aws
+ - ec2
notes:
- If a rule declares a group_name and that group doesn't exist, it will be
@@ -116,6 +112,10 @@ EXAMPLES = '''
from_port: 10051
to_port: 10051
group_id: sg-12345678
+ - proto: icmp
+ from_port: 8 # icmp type, -1 = any type
+      to_port: -1 # icmp code, -1 = any code
+ cidr_ip: 10.0.0.0/8
- proto: all
# the containing group name may be specified here
group_name: example
diff --git a/cloud/amazon/ec2_key.py b/cloud/amazon/ec2_key.py
index fc33257cf34..3fe7b959f71 100644
--- a/cloud/amazon/ec2_key.py
+++ b/cloud/amazon/ec2_key.py
@@ -31,12 +31,6 @@ options:
description:
- Public key material.
required: false
- region:
- description:
- - the EC2 region to use
- required: false
- default: null
- aliases: []
state:
description:
- create or delete keypair
@@ -58,7 +52,9 @@ options:
aliases: []
version_added: "1.6"
-extends_documentation_fragment: aws
+extends_documentation_fragment:
+ - aws
+ - ec2
author: "Vincent Viallet (@zbal)"
'''
diff --git a/cloud/amazon/ec2_lc.py b/cloud/amazon/ec2_lc.py
index fa6c64490ad..41b7effa502 100644
--- a/cloud/amazon/ec2_lc.py
+++ b/cloud/amazon/ec2_lc.py
@@ -55,11 +55,6 @@ options:
description:
- A list of security groups into which instances should be found
required: false
- region:
- description:
- - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
- required: false
- aliases: ['aws_region', 'ec2_region']
volumes:
description:
- a list of volume dicts, each containing device name and optionally ephemeral id or snapshot id. Size and type (and number of iops for io device type) must be specified for a new volume or a root volume, and may be passed for a snapshot volume. For any volume, a volume size less than 1 will be interpreted as a request not to create the volume.
@@ -128,7 +123,9 @@ options:
required: false
default: null
version_added: "2.0"
-extends_documentation_fragment: aws
+extends_documentation_fragment:
+ - aws
+ - ec2
"""
EXAMPLES = '''
diff --git a/cloud/amazon/ec2_metric_alarm.py b/cloud/amazon/ec2_metric_alarm.py
index b9ac1524794..94f303212ae 100644
--- a/cloud/amazon/ec2_metric_alarm.py
+++ b/cloud/amazon/ec2_metric_alarm.py
@@ -89,7 +89,9 @@ options:
description:
- A list of the names of action(s) to take when the alarm is in the 'ok' status
required: false
-extends_documentation_fragment: aws
+extends_documentation_fragment:
+ - aws
+ - ec2
"""
EXAMPLES = '''
@@ -115,9 +117,6 @@ EXAMPLES = '''
import sys
-from ansible.module_utils.basic import *
-from ansible.module_utils.ec2 import *
-
try:
import boto.ec2.cloudwatch
from boto.ec2.cloudwatch import CloudWatchConnection, MetricAlarm
@@ -184,7 +183,7 @@ def create_metric_alarm(connection, module):
comparisons = {'<=' : 'LessThanOrEqualToThreshold', '<' : 'LessThanThreshold', '>=' : 'GreaterThanOrEqualToThreshold', '>' : 'GreaterThanThreshold'}
alarm.comparison = comparisons[comparison]
- dim1 = module.params.get('dimensions', {})
+ dim1 = module.params.get('dimensions')
dim2 = alarm.dimensions
for keys in dim1:
@@ -255,12 +254,11 @@ def main():
unit=dict(type='str', choices=['Seconds', 'Microseconds', 'Milliseconds', 'Bytes', 'Kilobytes', 'Megabytes', 'Gigabytes', 'Terabytes', 'Bits', 'Kilobits', 'Megabits', 'Gigabits', 'Terabits', 'Percent', 'Count', 'Bytes/Second', 'Kilobytes/Second', 'Megabytes/Second', 'Gigabytes/Second', 'Terabytes/Second', 'Bits/Second', 'Kilobits/Second', 'Megabits/Second', 'Gigabits/Second', 'Terabits/Second', 'Count/Second', 'None']),
evaluation_periods=dict(type='int'),
description=dict(type='str'),
- dimensions=dict(type='dict'),
+ dimensions=dict(type='dict', default={}),
alarm_actions=dict(type='list'),
insufficient_data_actions=dict(type='list'),
ok_actions=dict(type='list'),
state=dict(default='present', choices=['present', 'absent']),
- region=dict(aliases=['aws_region', 'ec2_region']),
)
)
@@ -272,14 +270,22 @@ def main():
state = module.params.get('state')
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
- try:
- connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
- except (boto.exception.NoAuthHandlerFound, StandardError), e:
- module.fail_json(msg=str(e))
+
+ if region:
+ try:
+ connection = connect_to_aws(boto.ec2.cloudwatch, region, **aws_connect_params)
+ except (boto.exception.NoAuthHandlerFound, StandardError), e:
+ module.fail_json(msg=str(e))
+ else:
+ module.fail_json(msg="region must be specified")
if state == 'present':
create_metric_alarm(connection, module)
elif state == 'absent':
delete_metric_alarm(connection, module)
+
+from ansible.module_utils.basic import *
+from ansible.module_utils.ec2 import *
+
main()
diff --git a/cloud/amazon/ec2_scaling_policy.py b/cloud/amazon/ec2_scaling_policy.py
index 2856644ee9c..220fa325582 100644
--- a/cloud/amazon/ec2_scaling_policy.py
+++ b/cloud/amazon/ec2_scaling_policy.py
@@ -53,7 +53,9 @@ options:
description:
- The minimum period of time between which autoscaling actions can take place
required: false
-extends_documentation_fragment: aws
+extends_documentation_fragment:
+ - aws
+ - ec2
"""
EXAMPLES = '''
diff --git a/cloud/amazon/ec2_snapshot.py b/cloud/amazon/ec2_snapshot.py
index 29fd559bea5..09fa0d90389 100644
--- a/cloud/amazon/ec2_snapshot.py
+++ b/cloud/amazon/ec2_snapshot.py
@@ -22,11 +22,6 @@ description:
- creates an EC2 snapshot from an existing EBS volume
version_added: "1.5"
options:
- region:
- description:
- - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
- required: false
- aliases: ['aws_region', 'ec2_region']
volume_id:
description:
- volume from which to take the snapshot
@@ -82,7 +77,9 @@ options:
version_added: "1.9"
author: "Will Thames (@willthames)"
-extends_documentation_fragment: aws
+extends_documentation_fragment:
+ - aws
+ - ec2
'''
EXAMPLES = '''
diff --git a/cloud/amazon/ec2_tag.py b/cloud/amazon/ec2_tag.py
index 7f5aa9ab4b5..0e005f0fb48 100644
--- a/cloud/amazon/ec2_tag.py
+++ b/cloud/amazon/ec2_tag.py
@@ -22,12 +22,6 @@ description:
- Creates, removes and lists tags from any EC2 resource. The resource is referenced by its resource id (e.g. an instance being i-XXXXXXX). It is designed to be used with complex args (tags), see the examples. This module has a dependency on python-boto.
version_added: "1.3"
options:
- region:
- description:
- - region in which the resource exists.
- required: false
- default: null
- aliases: ['aws_region', 'ec2_region']
resource:
description:
- The EC2 resource id.
@@ -49,7 +43,9 @@ options:
aliases: []
author: "Lester Wade (@lwade)"
-extends_documentation_fragment: aws
+extends_documentation_fragment:
+ - aws
+ - ec2
'''
EXAMPLES = '''
@@ -73,7 +69,7 @@ tasks:
Env: production
exact_count: 1
group: "{{ security_group }}"
- keypair: ""{{ keypair }}"
+ keypair: "{{ keypair }}"
image: "{{ image_id }}"
instance_tags:
Name: dbserver
diff --git a/cloud/amazon/ec2_vol.py b/cloud/amazon/ec2_vol.py
index 228bb12cfbc..083dcc3d05f 100644
--- a/cloud/amazon/ec2_vol.py
+++ b/cloud/amazon/ec2_vol.py
@@ -74,12 +74,6 @@ options:
required: false
default: null
aliases: []
- region:
- description:
- - The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
- required: false
- default: null
- aliases: ['aws_region', 'ec2_region']
zone:
description:
- zone in which to create the volume, if unset uses the zone the instance is in (if set)
@@ -108,7 +102,9 @@ options:
choices: ['absent', 'present', 'list']
version_added: "1.6"
author: "Lester Wade (@lwade)"
-extends_documentation_fragment: aws
+extends_documentation_fragment:
+ - aws
+ - ec2
'''
EXAMPLES = '''
@@ -409,7 +405,8 @@ def main():
'attachment_set': {
'attach_time': attachment.attach_time,
'device': attachment.device,
- 'status': attachment.status
+ 'status': attachment.status,
+ 'deleteOnTermination': attachment.deleteOnTermination
}
})
diff --git a/cloud/amazon/ec2_vpc.py b/cloud/amazon/ec2_vpc.py
index 611251e307f..a3003a6dcc6 100644
--- a/cloud/amazon/ec2_vpc.py
+++ b/cloud/amazon/ec2_vpc.py
@@ -94,14 +94,10 @@ options:
required: true
default: present
aliases: []
- region:
- description:
- - The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
- required: true
- default: null
- aliases: ['aws_region', 'ec2_region']
author: "Carson Gee (@carsongee)"
-extends_documentation_fragment: aws
+extends_documentation_fragment:
+ - aws
+ - ec2
'''
EXAMPLES = '''
diff --git a/cloud/amazon/ec2_vpc_net.py b/cloud/amazon/ec2_vpc_net.py
index 2ee730f59cb..51acbcaae37 100644
--- a/cloud/amazon/ec2_vpc_net.py
+++ b/cloud/amazon/ec2_vpc_net.py
@@ -72,7 +72,9 @@ options:
default: false
required: false
-extends_documentation_fragment: aws
+extends_documentation_fragment:
+ - aws
+ - ec2
'''
EXAMPLES = '''
diff --git a/cloud/amazon/elasticache.py b/cloud/amazon/elasticache.py
index 31ed4696628..5d09521ae0c 100644
--- a/cloud/amazon/elasticache.py
+++ b/cloud/amazon/elasticache.py
@@ -57,6 +57,12 @@ options:
- The port number on which each of the cache nodes will accept connections
required: false
default: none
+ parameter_group:
+ description:
+ - Specify non-default parameter group names to be associated with cache cluster
+ required: false
+ default: None
+ version_added: "2.0"
cache_subnet_group:
description:
- The subnet group name to associate with. Only use if inside a vpc. Required if inside a vpc
@@ -91,13 +97,9 @@ options:
required: false
default: no
choices: [ "yes", "no" ]
- region:
- description:
- - The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
- required: true
- default: null
- aliases: ['aws_region', 'ec2_region']
-extends_documentation_fragment: aws
+extends_documentation_fragment:
+ - aws
+ - ec2
"""
EXAMPLES = """
@@ -148,7 +150,7 @@ class ElastiCacheManager(object):
EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying']
def __init__(self, module, name, engine, cache_engine_version, node_type,
- num_nodes, cache_port, cache_subnet_group,
+ num_nodes, cache_port, parameter_group, cache_subnet_group,
cache_security_groups, security_group_ids, zone, wait,
hard_modify, region, **aws_connect_kwargs):
self.module = module
@@ -158,6 +160,7 @@ class ElastiCacheManager(object):
self.node_type = node_type
self.num_nodes = num_nodes
self.cache_port = cache_port
+ self.parameter_group = parameter_group
self.cache_subnet_group = cache_subnet_group
self.cache_security_groups = cache_security_groups
self.security_group_ids = security_group_ids
@@ -216,6 +219,7 @@ class ElastiCacheManager(object):
engine_version=self.cache_engine_version,
cache_security_group_names=self.cache_security_groups,
security_group_ids=self.security_group_ids,
+ cache_parameter_group_name=self.parameter_group,
cache_subnet_group_name=self.cache_subnet_group,
preferred_availability_zone=self.zone,
port=self.cache_port)
@@ -291,6 +295,7 @@ class ElastiCacheManager(object):
num_cache_nodes=self.num_nodes,
cache_node_ids_to_remove=nodes_to_remove,
cache_security_group_names=self.cache_security_groups,
+ cache_parameter_group_name=self.parameter_group,
security_group_ids=self.security_group_ids,
apply_immediately=True,
engine_version=self.cache_engine_version)
@@ -437,6 +442,7 @@ class ElastiCacheManager(object):
def _refresh_data(self, cache_cluster_data=None):
"""Refresh data about this cache cluster"""
+
if cache_cluster_data is None:
try:
response = self.conn.describe_cache_clusters(cache_cluster_id=self.name,
@@ -480,6 +486,7 @@ def main():
cache_engine_version={'required': False},
node_type={'required': False, 'default': 'cache.m1.small'},
num_nodes={'required': False, 'default': None, 'type': 'int'},
+ parameter_group={'required': False, 'default': None},
cache_port={'required': False, 'type': 'int'},
cache_subnet_group={'required': False, 'default': None},
cache_security_groups={'required': False, 'default': [default],
@@ -514,6 +521,7 @@ def main():
zone = module.params['zone']
wait = module.params['wait']
hard_modify = module.params['hard_modify']
+ parameter_group = module.params['parameter_group']
if cache_subnet_group and cache_security_groups == [default]:
cache_security_groups = []
@@ -532,6 +540,7 @@ def main():
elasticache_manager = ElastiCacheManager(module, name, engine,
cache_engine_version, node_type,
num_nodes, cache_port,
+ parameter_group,
cache_subnet_group,
cache_security_groups,
security_group_ids, zone, wait,
diff --git a/cloud/amazon/elasticache_subnet_group.py b/cloud/amazon/elasticache_subnet_group.py
index 4ea7e8aba16..0dcf126b170 100644
--- a/cloud/amazon/elasticache_subnet_group.py
+++ b/cloud/amazon/elasticache_subnet_group.py
@@ -42,13 +42,10 @@ options:
- List of subnet IDs that make up the Elasticache subnet group.
required: false
default: null
- region:
- description:
- - The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
- required: true
- aliases: ['aws_region', 'ec2_region']
author: "Tim Mahoney (@timmahoney)"
-extends_documentation_fragment: aws
+extends_documentation_fragment:
+ - aws
+ - ec2
'''
EXAMPLES = '''
diff --git a/cloud/amazon/iam.py b/cloud/amazon/iam.py
index df09b7ed614..8864cb10a6f 100644
--- a/cloud/amazon/iam.py
+++ b/cloud/amazon/iam.py
@@ -97,10 +97,12 @@ options:
aliases: [ 'ec2_access_key', 'access_key' ]
notes:
- 'Currently boto does not support the removal of Managed Policies, the module will error out if your user/group/role has managed policies when you try to do state=absent. They will need to be removed manually.'
-author:
+author:
- "Jonathan I. Davila (@defionscode)"
- "Paul Seiffert (@seiffert)"
-extends_documentation_fragment: aws
+extends_documentation_fragment:
+ - aws
+ - ec2
'''
EXAMPLES = '''
diff --git a/cloud/amazon/iam_cert.py b/cloud/amazon/iam_cert.py
index 1f58be753c8..0c36abef322 100644
--- a/cloud/amazon/iam_cert.py
+++ b/cloud/amazon/iam_cert.py
@@ -85,7 +85,9 @@ options:
requirements: [ "boto" ]
author: Jonathan I. Davila
-extends_documentation_fragment: aws
+extends_documentation_fragment:
+ - aws
+ - ec2
'''
EXAMPLES = '''
@@ -241,13 +243,10 @@ def main():
if not HAS_BOTO:
module.fail_json(msg="Boto is required for this module")
- ec2_url, aws_access_key, aws_secret_key, region = get_ec2_creds(module)
+ region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
try:
- iam = boto.iam.connection.IAMConnection(
- aws_access_key_id=aws_access_key,
- aws_secret_access_key=aws_secret_key,
- )
+ iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))
diff --git a/cloud/amazon/iam_policy.py b/cloud/amazon/iam_policy.py
index 04fc8086939..0d2ed506457 100644
--- a/cloud/amazon/iam_policy.py
+++ b/cloud/amazon/iam_policy.py
@@ -27,58 +27,40 @@ options:
required: true
default: null
choices: [ "user", "group", "role"]
- aliases: []
iam_name:
description:
- Name of IAM resource you wish to target for policy actions. In other words, the user name, group name or role name.
required: true
- aliases: []
policy_name:
description:
- The name label for the policy to create or remove.
- required: false
- aliases: []
+ required: true
policy_document:
description:
- The path to the properly json formatted policy file (mutually exclusive with C(policy_json))
required: false
- aliases: []
policy_json:
description:
- A properly json formatted policy as string (mutually exclusive with C(policy_document), see https://github.com/ansible/ansible/issues/7005#issuecomment-42894813 on how to use it properly)
required: false
- aliases: []
state:
description:
- Whether to create or delete the IAM policy.
required: true
default: null
choices: [ "present", "absent"]
- aliases: []
skip_duplicates:
description:
- By default the module looks for any policies that match the document you pass in, if there is a match it will not make a new policy object with the same rules. You can override this by specifying false which would allow for two policy objects with different names but same rules.
required: false
default: "/"
- aliases: []
- aws_secret_key:
- description:
- - AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
- required: false
- default: null
- aliases: [ 'ec2_secret_key', 'secret_key' ]
- aws_access_key:
- description:
- - AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
- required: false
- default: null
- aliases: [ 'ec2_access_key', 'access_key' ]
-requirements: [ "boto" ]
notes:
- 'Currently boto does not support the removal of Managed Policies, the module will not work removing/adding managed policies.'
author: "Jonathan I. Davila (@defionscode)"
-extends_documentation_fragment: aws
+extends_documentation_fragment:
+ - aws
+ - ec2
'''
EXAMPLES = '''
@@ -289,7 +271,7 @@ def main():
iam_name=dict(default=None, required=False),
policy_name=dict(default=None, required=True),
policy_document=dict(default=None, required=False),
- policy_json=dict(type='str', default=None, required=False),
+ policy_json=dict(default=None, required=False),
skip_duplicates=dict(type='bool', default=True, required=False)
))
diff --git a/cloud/amazon/rds.py b/cloud/amazon/rds.py
index d56c4ae12de..d509bdf4fa0 100644
--- a/cloud/amazon/rds.py
+++ b/cloud/amazon/rds.py
@@ -917,6 +917,7 @@ def validate_parameters(required_vars, valid_vars, module):
'subnet': 'db_subnet_group_name',
'license_model': 'license_model',
'option_group': 'option_group_name',
+ 'size': 'allocated_storage',
'iops': 'iops',
'new_instance_name': 'new_instance_id',
'apply_immediately': 'apply_immediately',
diff --git a/cloud/amazon/rds_param_group.py b/cloud/amazon/rds_param_group.py
index 7b875304810..b34e3090b53 100644
--- a/cloud/amazon/rds_param_group.py
+++ b/cloud/amazon/rds_param_group.py
@@ -61,14 +61,10 @@ options:
default: null
aliases: []
choices: [ 'mysql5.1', 'mysql5.5', 'mysql5.6', 'oracle-ee-11.2', 'oracle-se-11.2', 'oracle-se1-11.2', 'postgres9.3', 'postgres9.4', 'sqlserver-ee-10.5', 'sqlserver-ee-11.0', 'sqlserver-ex-10.5', 'sqlserver-ex-11.0', 'sqlserver-se-10.5', 'sqlserver-se-11.0', 'sqlserver-web-10.5', 'sqlserver-web-11.0']
- region:
- description:
- - The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
- required: true
- default: null
- aliases: ['aws_region', 'ec2_region']
author: "Scott Anderson (@tastychutney)"
-extends_documentation_fragment: aws
+extends_documentation_fragment:
+ - aws
+ - ec2
'''
EXAMPLES = '''
diff --git a/cloud/amazon/rds_subnet_group.py b/cloud/amazon/rds_subnet_group.py
index 3b998c34225..90f10027744 100644
--- a/cloud/amazon/rds_subnet_group.py
+++ b/cloud/amazon/rds_subnet_group.py
@@ -47,14 +47,10 @@ options:
required: false
default: null
aliases: []
- region:
- description:
- - The AWS region to use. If not specified then the value of the AWS_REGION or EC2_REGION environment variable, if any, is used.
- required: true
- default: null
- aliases: ['aws_region', 'ec2_region']
author: "Scott Anderson (@tastychutney)"
-extends_documentation_fragment: aws
+extends_documentation_fragment:
+ - aws
+ - ec2
'''
EXAMPLES = '''
diff --git a/cloud/amazon/s3.py b/cloud/amazon/s3.py
index 64f53cc042a..550998915d4 100644
--- a/cloud/amazon/s3.py
+++ b/cloud/amazon/s3.py
@@ -486,7 +486,7 @@ def main():
# First, we check to see if the bucket exists, we get "bucket" returned.
bucketrtn = bucket_check(module, s3, bucket)
if bucketrtn is False:
- module.fail_json(msg="Target bucket cannot be found", failed=True)
+ module.fail_json(msg="Source bucket cannot be found", failed=True)
# Next, we check to see if the key in the bucket exists. If it exists, it also returns key_matches md5sum check.
keyrtn = key_check(module, s3, bucket, obj, version=version)
diff --git a/cloud/azure/azure.py b/cloud/azure/azure.py
index c4fa41a6eb1..01a6240cf87 100644
--- a/cloud/azure/azure.py
+++ b/cloud/azure/azure.py
@@ -249,22 +249,29 @@ AZURE_ROLE_SIZES = ['ExtraSmall',
'Standard_G4',
'Standard_G5']
+from distutils.version import LooseVersion
+
try:
import azure as windows_azure
- from azure import WindowsAzureError, WindowsAzureMissingResourceError
+ if hasattr(windows_azure, '__version__') and LooseVersion(windows_azure.__version__) <= "0.11.1":
+ from azure import WindowsAzureError as AzureException
+ from azure import WindowsAzureMissingResourceError as AzureMissingException
+ else:
+ from azure.common import AzureException as AzureException
+ from azure.common import AzureMissingResourceHttpError as AzureMissingException
+
from azure.servicemanagement import (ServiceManagementService, OSVirtualHardDisk, SSH, PublicKeys,
PublicKey, LinuxConfigurationSet, ConfigurationSetInputEndpoints,
ConfigurationSetInputEndpoint, Listener, WindowsConfigurationSet)
+
HAS_AZURE = True
except ImportError:
HAS_AZURE = False
-from distutils.version import LooseVersion
from types import MethodType
import json
-
def _wait_for_completion(azure, promise, wait_timeout, msg):
if not promise: return
wait_timeout = time.time() + wait_timeout
@@ -274,7 +281,7 @@ def _wait_for_completion(azure, promise, wait_timeout, msg):
if operation_result.status == "Succeeded":
return
- raise WindowsAzureError('Timed out waiting for async operation ' + msg + ' "' + str(promise.request_id) + '" to complete.')
+ raise AzureException('Timed out waiting for async operation ' + msg + ' "' + str(promise.request_id) + '" to complete.')
def _delete_disks_when_detached(azure, wait_timeout, disk_names):
def _handle_timeout(signum, frame):
@@ -289,7 +296,7 @@ def _delete_disks_when_detached(azure, wait_timeout, disk_names):
if disk.attached_to is None:
azure.delete_disk(disk.name, True)
disk_names.remove(disk_name)
- except WindowsAzureError, e:
+ except AzureException, e:
module.fail_json(msg="failed to get or delete disk, error was: %s" % (disk_name, str(e)))
finally:
signal.alarm(0)
@@ -347,13 +354,13 @@ def create_virtual_machine(module, azure):
result = azure.create_hosted_service(service_name=name, label=name, location=location)
_wait_for_completion(azure, result, wait_timeout, "create_hosted_service")
changed = True
- except WindowsAzureError, e:
+ except AzureException, e:
module.fail_json(msg="failed to create the new service, error was: %s" % str(e))
try:
# check to see if a vm with this name exists; if so, do nothing
azure.get_role(name, name, name)
- except WindowsAzureMissingResourceError:
+ except AzureMissingException:
# vm does not exist; create it
if os_type == 'linux':
@@ -419,13 +426,13 @@ def create_virtual_machine(module, azure):
virtual_network_name=virtual_network_name)
_wait_for_completion(azure, result, wait_timeout, "create_virtual_machine_deployment")
changed = True
- except WindowsAzureError, e:
+ except AzureException, e:
module.fail_json(msg="failed to create the new virtual machine, error was: %s" % str(e))
try:
deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
return (changed, urlparse(deployment.url).hostname, deployment)
- except WindowsAzureError, e:
+ except AzureException, e:
module.fail_json(msg="failed to lookup the deployment information for %s, error was: %s" % (name, str(e)))
@@ -453,9 +460,9 @@ def terminate_virtual_machine(module, azure):
disk_names = []
try:
deployment = azure.get_deployment_by_name(service_name=name, deployment_name=name)
- except WindowsAzureMissingResourceError, e:
+ except AzureMissingException, e:
pass # no such deployment or service
- except WindowsAzureError, e:
+ except AzureException, e:
module.fail_json(msg="failed to find the deployment, error was: %s" % str(e))
# Delete deployment
@@ -468,13 +475,13 @@ def terminate_virtual_machine(module, azure):
role_props = azure.get_role(name, deployment.name, role.role_name)
if role_props.os_virtual_hard_disk.disk_name not in disk_names:
disk_names.append(role_props.os_virtual_hard_disk.disk_name)
- except WindowsAzureError, e:
+ except AzureException, e:
module.fail_json(msg="failed to get the role %s, error was: %s" % (role.role_name, str(e)))
try:
result = azure.delete_deployment(name, deployment.name)
_wait_for_completion(azure, result, wait_timeout, "delete_deployment")
- except WindowsAzureError, e:
+ except AzureException, e:
module.fail_json(msg="failed to delete the deployment %s, error was: %s" % (deployment.name, str(e)))
# It's unclear when disks associated with terminated deployment get detatched.
@@ -482,14 +489,14 @@ def terminate_virtual_machine(module, azure):
# become detatched by polling the list of remaining disks and examining the state.
try:
_delete_disks_when_detached(azure, wait_timeout, disk_names)
- except (WindowsAzureError, TimeoutError), e:
+ except (AzureException, TimeoutError), e:
module.fail_json(msg=str(e))
try:
# Now that the vm is deleted, remove the cloud service
result = azure.delete_hosted_service(service_name=name)
_wait_for_completion(azure, result, wait_timeout, "delete_hosted_service")
- except WindowsAzureError, e:
+ except AzureException, e:
module.fail_json(msg="failed to delete the service %s, error was: %s" % (name, str(e)))
public_dns_name = urlparse(deployment.url).hostname
@@ -545,7 +552,8 @@ def main():
subscription_id, management_cert_path = get_azure_creds(module)
wait_timeout_redirects = int(module.params.get('wait_timeout_redirects'))
- if LooseVersion(windows_azure.__version__) <= "0.8.0":
+
+ if hasattr(windows_azure, '__version__') and LooseVersion(windows_azure.__version__) <= "0.8.0":
# wrapper for handling redirects which the sdk <= 0.8.0 is not following
azure = Wrapper(ServiceManagementService(subscription_id, management_cert_path), wait_timeout_redirects)
else:
@@ -597,7 +605,7 @@ class Wrapper(object):
while wait_timeout > time.time():
try:
return f()
- except WindowsAzureError, e:
+ except AzureException, e:
if not str(e).lower().find("temporary redirect") == -1:
time.sleep(5)
pass
diff --git a/cloud/digital_ocean/digital_ocean_domain.py b/cloud/digital_ocean/digital_ocean_domain.py
index 905b6dae2d0..3b7a2dce236 100644
--- a/cloud/digital_ocean/digital_ocean_domain.py
+++ b/cloud/digital_ocean/digital_ocean_domain.py
@@ -195,7 +195,7 @@ def core(module):
records = domain.records()
at_record = None
for record in records:
- if record.name == "@":
+ if record.name == "@" and record.record_type == 'A':
at_record = record
if not at_record.data == getkeyordie("ip"):
diff --git a/cloud/docker/docker.py b/cloud/docker/docker.py
index 0ab564208ba..e038aeb0239 100644
--- a/cloud/docker/docker.py
+++ b/cloud/docker/docker.py
@@ -97,9 +97,12 @@ options:
- You can specify a different logging driver for the container than for the daemon.
"json-file" Default logging driver for Docker. Writes JSON messages to file.
docker logs command is available only for this logging driver.
- "none" disables any logging for the container. docker logs won't be available with this driver.
+ "none" disables any logging for the container.
"syslog" Syslog logging driver for Docker. Writes log messages to syslog.
docker logs command is not available for this logging driver.
+ "journald" Journald logging driver for Docker. Writes log messages to "journald".
+ "gelf" Graylog Extended Log Format (GELF) logging driver for Docker. Writes log messages to a GELF endpoint like Graylog or Logstash.
+ "fluentd" Fluentd logging driver for Docker. Writes log messages to "fluentd" (forward input).
If not defined explicitly, the Docker daemon's default ("json-file") will apply.
Requires docker >= 1.6.0.
required: false
@@ -108,11 +111,14 @@ options:
- json-file
- none
- syslog
+ - journald
+ - gelf
+ - fluentd
version_added: "2.0"
log_opt:
description:
- - Additional options to pass to the logging driver selected above. See Docker log-driver
- documentation for more information (https://docs.docker.com/reference/logging/overview/).
+ - Additional options to pass to the logging driver selected above. See Docker log-driver
+ documentation for more information.
Requires docker >=1.7.0.
required: false
default: null
@@ -1056,11 +1062,11 @@ class DockerManager(object):
continue
# EXPOSED PORTS
- expected_exposed_ports = set((image['ContainerConfig']['ExposedPorts'] or {}).keys())
+ expected_exposed_ports = set((image['ContainerConfig'].get('ExposedPorts') or {}).keys())
for p in (self.exposed_ports or []):
expected_exposed_ports.add("/".join(p))
- actually_exposed_ports = set((container["Config"]["ExposedPorts"] or {}).keys())
+ actually_exposed_ports = set((container["Config"].get("ExposedPorts") or {}).keys())
if actually_exposed_ports != expected_exposed_ports:
self.reload_reasons.append('exposed_ports ({0} => {1})'.format(actually_exposed_ports, expected_exposed_ports))
@@ -1386,6 +1392,11 @@ class DockerManager(object):
changes = list(self.client.pull(image, tag=tag, stream=True, **extra_params))
try:
last = changes[-1]
+ # seems Docker 1.8 puts an empty dict at the end of the
+ # stream; catch that and get the previous instead
+ # https://github.com/ansible/ansible-modules-core/issues/2043
+ if last.strip() == '{}':
+ last = changes[-2]
except IndexError:
last = '{}'
status = json.loads(last).get('status', '')
@@ -1662,7 +1673,7 @@ def main():
net = dict(default=None),
pid = dict(default=None),
insecure_registry = dict(default=False, type='bool'),
- log_driver = dict(default=None, choices=['json-file', 'none', 'syslog']),
+ log_driver = dict(default=None, choices=['json-file', 'none', 'syslog', 'journald', 'gelf', 'fluentd']),
log_opt = dict(default=None, type='dict'),
cpu_set = dict(default=None),
cap_add = dict(default=None, type='list'),
diff --git a/cloud/openstack/quantum_router.py b/cloud/openstack/_quantum_router.py
similarity index 99%
rename from cloud/openstack/quantum_router.py
rename to cloud/openstack/_quantum_router.py
index ba94773bbe4..252e1618d90 100644
--- a/cloud/openstack/quantum_router.py
+++ b/cloud/openstack/_quantum_router.py
@@ -31,6 +31,7 @@ DOCUMENTATION = '''
module: quantum_router
version_added: "1.2"
author: "Benno Joy (@bennojoy)"
+deprecated: Deprecated in 2.0. Use os_router instead
short_description: Create or Remove router from openstack
description:
- Create or Delete routers from OpenStack
diff --git a/cloud/openstack/quantum_router_gateway.py b/cloud/openstack/_quantum_router_gateway.py
similarity index 99%
rename from cloud/openstack/quantum_router_gateway.py
rename to cloud/openstack/_quantum_router_gateway.py
index 48248662ed7..891cee55a09 100644
--- a/cloud/openstack/quantum_router_gateway.py
+++ b/cloud/openstack/_quantum_router_gateway.py
@@ -31,6 +31,7 @@ DOCUMENTATION = '''
module: quantum_router_gateway
version_added: "1.2"
author: "Benno Joy (@bennojoy)"
+deprecated: Deprecated in 2.0. Use os_router instead
short_description: set/unset a gateway interface for the router with the specified external network
description:
- Creates/Removes a gateway interface from the router, used to associate a external network with a router to route external traffic.
diff --git a/cloud/openstack/quantum_router_interface.py b/cloud/openstack/_quantum_router_interface.py
similarity index 99%
rename from cloud/openstack/quantum_router_interface.py
rename to cloud/openstack/_quantum_router_interface.py
index 7374b542390..4073c7d3b10 100644
--- a/cloud/openstack/quantum_router_interface.py
+++ b/cloud/openstack/_quantum_router_interface.py
@@ -31,6 +31,7 @@ DOCUMENTATION = '''
module: quantum_router_interface
version_added: "1.2"
author: "Benno Joy (@bennojoy)"
+deprecated: Deprecated in 2.0. Use os_router instead
short_description: Attach/Dettach a subnet's interface to a router
description:
- Attach/Dettach a subnet interface to a router, to provide a gateway for the subnet.
diff --git a/cloud/openstack/os_image_facts.py b/cloud/openstack/os_image_facts.py
new file mode 100644
index 00000000000..a54537172eb
--- /dev/null
+++ b/cloud/openstack/os_image_facts.py
@@ -0,0 +1,158 @@
+#!/usr/bin/python
+
+# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+DOCUMENTATION = '''
+module: os_image_facts
+short_description: Retrieve facts about an image within OpenStack.
+version_added: "2.0"
+author: "Davide Agnello (@dagnello)"
+description:
+ - Retrieve facts about an image from OpenStack.
+notes:
+ - Facts are placed in the C(openstack) variable.
+requirements:
+ - "python >= 2.6"
+ - "shade"
+options:
+ image:
+ description:
+ - Name or ID of the image
+ required: true
+extends_documentation_fragment: openstack
+'''
+
+EXAMPLES = '''
+# Gather facts about a previously created image named image1
+- os_image_facts:
+ auth:
+ auth_url: https://your_api_url.com:9000/v2.0
+ username: user
+ password: password
+ project_name: someproject
+ image: image1
+- debug: var=openstack
+'''
+
+RETURN = '''
+openstack_image:
+ description: has all the openstack facts about the image
+ returned: always, but can be null
+ type: complex
+ contains:
+ id:
+ description: Unique UUID.
+ returned: success
+ type: string
+ name:
+ description: Name given to the image.
+ returned: success
+ type: string
+ status:
+ description: Image status.
+ returned: success
+ type: string
+ created_at:
+ description: Image created at timestamp.
+ returned: success
+ type: string
+ deleted:
+ description: Image deleted flag.
+ returned: success
+ type: boolean
+ container_format:
+ description: Container format of the image.
+ returned: success
+ type: string
+ min_ram:
+ description: Min amount of RAM required for this image.
+ returned: success
+ type: int
+ disk_format:
+ description: Disk format of the image.
+ returned: success
+ type: string
+ updated_at:
+ description: Image updated at timestamp.
+ returned: success
+ type: string
+ properties:
+ description: Additional properties associated with the image.
+ returned: success
+ type: dict
+ min_disk:
+ description: Min amount of disk space required for this image.
+ returned: success
+ type: int
+ protected:
+ description: Image protected flag.
+ returned: success
+ type: boolean
+ checksum:
+ description: Checksum for the image.
+ returned: success
+ type: string
+ owner:
+ description: Owner for the image.
+ returned: success
+ type: string
+ is_public:
+ description: Is public flag of the image.
+ returned: success
+ type: boolean
+ deleted_at:
+ description: Image deleted at timestamp.
+ returned: success
+ type: string
+ size:
+ description: Size of the image.
+ returned: success
+ type: int
+'''
+
+
+def main():
+
+ argument_spec = openstack_full_argument_spec(
+ image=dict(required=True),
+ )
+ module_kwargs = openstack_module_kwargs()
+ module = AnsibleModule(argument_spec, **module_kwargs)
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+
+ try:
+ cloud = shade.openstack_cloud(**module.params)
+ image = cloud.get_image(module.params['image'])
+ module.exit_json(changed=False, ansible_facts=dict(
+ openstack_image=image))
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=e.message)
+
+# this is magic, see lib/ansible/module_common.py
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+if __name__ == '__main__':
+ main()
+
diff --git a/cloud/openstack/os_network.py b/cloud/openstack/os_network.py
index f911ce71af1..bc41d3870f4 100644
--- a/cloud/openstack/os_network.py
+++ b/cloud/openstack/os_network.py
@@ -25,12 +25,12 @@ except ImportError:
DOCUMENTATION = '''
---
module: os_network
-short_description: Creates/Removes networks from OpenStack
+short_description: Creates/removes networks from OpenStack
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- - Add or Remove network from OpenStack.
+ - Add or remove network from OpenStack.
options:
name:
description:
@@ -46,6 +46,11 @@ options:
- Whether the state should be marked as up or down.
required: false
default: true
+ external:
+ description:
+ - Whether this network is externally accessible.
+ required: false
+ default: false
state:
description:
- Indicate desired state of the resource.
@@ -56,14 +61,60 @@ requirements: ["shade"]
'''
EXAMPLES = '''
+# Create an externally accessible network named 'ext_network'.
- os_network:
- name: t1network
+ cloud: mycloud
state: present
- auth:
- auth_url: https://your_api_url.com:9000/v2.0
- username: user
- password: password
- project_name: someproject
+ name: ext_network
+ external: true
+'''
+
+RETURN = '''
+network:
+ description: Dictionary describing the network.
+ returned: On success when I(state) is 'present'.
+ type: dictionary
+ contains:
+ id:
+ description: Network ID.
+ type: string
+ sample: "4bb4f9a5-3bd2-4562-bf6a-d17a6341bb56"
+ name:
+ description: Network name.
+ type: string
+ sample: "ext_network"
+ shared:
+ description: Indicates whether this network is shared across all tenants.
+ type: bool
+ sample: false
+ status:
+ description: Network status.
+ type: string
+ sample: "ACTIVE"
+ mtu:
+ description: The MTU of a network resource.
+ type: integer
+ sample: 0
+ admin_state_up:
+ description: The administrative state of the network.
+ type: bool
+ sample: true
+ port_security_enabled:
+ description: The port security status
+ type: bool
+ sample: true
+ router:external:
+ description: Indicates whether this network is externally accessible.
+ type: bool
+ sample: true
+ tenant_id:
+ description: The tenant ID.
+ type: string
+ sample: "06820f94b9f54b119636be2728d216fc"
+ subnets:
+ description: The associated subnets.
+ type: list
+ sample: []
'''
@@ -72,6 +123,7 @@ def main():
name=dict(required=True),
shared=dict(default=False, type='bool'),
admin_state_up=dict(default=True, type='bool'),
+ external=dict(default=False, type='bool'),
state=dict(default='present', choices=['absent', 'present']),
)
@@ -85,6 +137,7 @@ def main():
name = module.params['name']
shared = module.params['shared']
admin_state_up = module.params['admin_state_up']
+ external = module.params['external']
try:
cloud = shade.openstack_cloud(**module.params)
@@ -92,7 +145,7 @@ def main():
if state == 'present':
if not net:
- net = cloud.create_network(name, shared, admin_state_up)
+ net = cloud.create_network(name, shared, admin_state_up, external)
module.exit_json(changed=False, network=net, id=net['id'])
elif state == 'absent':
@@ -109,4 +162,5 @@ def main():
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
-main()
+if __name__ == "__main__":
+ main()
diff --git a/cloud/openstack/os_networks_facts.py b/cloud/openstack/os_networks_facts.py
new file mode 100644
index 00000000000..6ac8786463d
--- /dev/null
+++ b/cloud/openstack/os_networks_facts.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+
+# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+DOCUMENTATION = '''
+---
+module: os_networks_facts
+short_description: Retrieve facts about one or more OpenStack networks.
+version_added: "2.0"
+author: "Davide Agnello (@dagnello)"
+description:
+ - Retrieve facts about one or more networks from OpenStack.
+requirements:
+ - "python >= 2.6"
+ - "shade"
+options:
+ network:
+ description:
+ - Name or ID of the Network
+ required: false
+ filters:
+ description:
+ - A dictionary of meta data to use for further filtering. Elements of
+ this dictionary may be additional dictionaries.
+ required: false
+extends_documentation_fragment: openstack
+'''
+
+EXAMPLES = '''
+# Gather facts about previously created networks
+- os_networks_facts:
+ auth:
+ auth_url: https://your_api_url.com:9000/v2.0
+ username: user
+ password: password
+ project_name: someproject
+- debug: var=openstack_networks
+
+# Gather facts about a previously created network by name
+- os_networks_facts:
+ auth:
+ auth_url: https://your_api_url.com:9000/v2.0
+ username: user
+ password: password
+ project_name: someproject
+ name: network1
+- debug: var=openstack_networks
+
+# Gather facts about a previously created network with filter (note: name and
+ filters parameters are Not mutually exclusive)
+- os_networks_facts:
+ auth:
+ auth_url: https://your_api_url.com:9000/v2.0
+ username: user
+ password: password
+ project_name: someproject
+ filters:
+ tenant_id: 55e2ce24b2a245b09f181bf025724cbe
+ subnets:
+ - 057d4bdf-6d4d-4728-bb0f-5ac45a6f7400
+ - 443d4dc0-91d4-4998-b21c-357d10433483
+- debug: var=openstack_networks
+'''
+
+RETURN = '''
+openstack_networks:
+ description: has all the openstack facts about the networks
+ returned: always, but can be null
+ type: complex
+ contains:
+ id:
+ description: Unique UUID.
+ returned: success
+ type: string
+ name:
+ description: Name given to the network.
+ returned: success
+ type: string
+ status:
+ description: Network status.
+ returned: success
+ type: string
+ subnets:
+ description: Subnet(s) included in this network.
+ returned: success
+ type: list of strings
+ tenant_id:
+ description: Tenant id associated with this network.
+ returned: success
+ type: string
+ shared:
+ description: Network shared flag.
+ returned: success
+ type: boolean
+'''
+
+def main():
+
+ argument_spec = openstack_full_argument_spec(
+ name=dict(required=False, default=None),
+ filters=dict(required=False, default=None)
+ )
+ module = AnsibleModule(argument_spec)
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+
+ try:
+ cloud = shade.openstack_cloud(**module.params)
+ networks = cloud.search_networks(module.params['name'],
+ module.params['filters'])
+ module.exit_json(changed=False, ansible_facts=dict(
+ openstack_networks=networks))
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=e.message)
+
+# this is magic, see lib/ansible/module_common.py
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+if __name__ == '__main__':
+ main()
diff --git a/cloud/openstack/os_port.py b/cloud/openstack/os_port.py
new file mode 100644
index 00000000000..8564b07c914
--- /dev/null
+++ b/cloud/openstack/os_port.py
@@ -0,0 +1,395 @@
+#!/usr/bin/python
+
+# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+
+DOCUMENTATION = '''
+---
+module: os_port
+short_description: Add/Update/Delete ports from an OpenStack cloud.
+extends_documentation_fragment: openstack
+author: "Davide Agnello (@dagnello)"
+version_added: "2.0"
+description:
+ - Add, Update or Remove ports from an OpenStack cloud. A state=present,
+ will ensure the port is created or updated if required.
+options:
+ network:
+ description:
+ - Network ID or name this port belongs to.
+ required: true
+ name:
+ description:
+ - Name that has to be given to the port.
+ required: false
+ default: None
+ fixed_ips:
+ description:
+ - Desired IP and/or subnet for this port. Subnet is referenced by
+ subnet_id and IP is referenced by ip_address.
+ required: false
+ default: None
+ admin_state_up:
+ description:
+ - Sets admin state.
+ required: false
+ default: None
+ mac_address:
+ description:
+ - MAC address of this port.
+ required: false
+ default: None
+ security_groups:
+ description:
+ - Security group(s) ID(s) or name(s) associated with the port (comma
+ separated for multiple security groups - no spaces between comma(s)
+ or YAML list).
+ required: false
+ default: None
+ no_security_groups:
+ description:
+ - Do not associate a security group with this port.
+ required: false
+ default: False
+ allowed_address_pairs:
+ description:
+ - Allowed address pairs list. Allowed address pairs are supported with
+ dictionary structure.
+ e.g. allowed_address_pairs:
+ - ip_address: 10.1.0.12
+ mac_address: ab:cd:ef:12:34:56
+ - ip_address: ...
+ required: false
+ default: None
+ extra_dhcp_opt:
+ description:
+ - Extra dhcp options to be assigned to this port. Extra options are
+ supported with dictionary structure.
+ e.g. extra_dhcp_opt:
+ - opt_name: opt name1
+ opt_value: value1
+ - opt_name: ...
+ required: false
+ default: None
+ device_owner:
+ description:
+ - The ID of the entity that uses this port.
+ required: false
+ default: None
+ device_id:
+ description:
+ - Device ID of device using this port.
+ required: false
+ default: None
+ state:
+ description:
+ - Should the resource be present or absent.
+ choices: [present, absent]
+ default: present
+'''
+
+EXAMPLES = '''
+# Create a port
+- os_port:
+ state: present
+ auth:
+ auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
+ username: admin
+ password: admin
+ project_name: admin
+ name: port1
+ network: foo
+
+# Create a port with a static IP
+- os_port:
+ state: present
+ auth:
+ auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
+ username: admin
+ password: admin
+ project_name: admin
+ name: port1
+ network: foo
+ fixed_ips:
+ - ip_address: 10.1.0.21
+
+# Create a port with No security groups
+- os_port:
+ state: present
+ auth:
+ auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
+ username: admin
+ password: admin
+ project_name: admin
+ name: port1
+ network: foo
+ no_security_groups: True
+
+# Update the existing 'port1' port with multiple security groups (version 1)
+- os_port:
+ state: present
+ auth:
+ auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/d
+ username: admin
+ password: admin
+ project_name: admin
+ name: port1
+ security_groups: 1496e8c7-4918-482a-9172-f4f00fc4a3a5,057d4bdf-6d4d-472...
+
+# Update the existing 'port1' port with multiple security groups (version 2)
+- os_port:
+ state: present
+ auth:
+ auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/d
+ username: admin
+ password: admin
+ project_name: admin
+ name: port1
+ security_groups:
+ - 1496e8c7-4918-482a-9172-f4f00fc4a3a5
+ - 057d4bdf-6d4d-472...
+'''
+
+RETURN = '''
+id:
+ description: Unique UUID.
+ returned: success
+ type: string
+name:
+ description: Name given to the port.
+ returned: success
+ type: string
+network_id:
+ description: Network ID this port belongs in.
+ returned: success
+ type: string
+security_groups:
+ description: Security group(s) associated with this port.
+ returned: success
+ type: list of strings
+status:
+ description: Port's status.
+ returned: success
+ type: string
+fixed_ips:
+ description: Fixed ip(s) associated with this port.
+ returned: success
+ type: list of dicts
+tenant_id:
+ description: Tenant id associated with this port.
+ returned: success
+ type: string
+allowed_address_pairs:
+ description: Allowed address pairs with this port.
+ returned: success
+ type: list of dicts
+admin_state_up:
+ description: Admin state up flag for this port.
+ returned: success
+ type: bool
+'''
+
+
+def _needs_update(module, port, cloud):
+ """Check for differences in the updatable values.
+
+ NOTE: We don't currently allow name updates.
+ """
+ compare_simple = ['admin_state_up',
+ 'mac_address',
+ 'device_owner',
+ 'device_id']
+ compare_dict = ['allowed_address_pairs',
+ 'extra_dhcp_opt']
+ compare_comma_separated_list = ['security_groups']
+
+ for key in compare_simple:
+ if module.params[key] is not None and module.params[key] != port[key]:
+ return True
+ for key in compare_dict:
+ if module.params[key] is not None and cmp(module.params[key],
+ port[key]) != 0:
+ return True
+ for key in compare_comma_separated_list:
+ if module.params[key] is not None and (set(module.params[key]) !=
+ set(port[key])):
+ return True
+
+ # NOTE: if port was created or updated with 'no_security_groups=True',
+ # subsequent updates without 'no_security_groups' flag or
+ # 'no_security_groups=False' and no specified 'security_groups', will not
+ # result in an update to the port where the default security group is
+ # applied.
+ if module.params['no_security_groups'] and port['security_groups'] != []:
+ return True
+
+ if module.params['fixed_ips'] is not None:
+ for item in module.params['fixed_ips']:
+ if 'ip_address' in item:
+ # if ip_address in request does not match any in existing port,
+ # update is required.
+ if not any(match['ip_address'] == item['ip_address']
+ for match in port['fixed_ips']):
+ return True
+ if 'subnet_id' in item:
+ return True
+ for item in port['fixed_ips']:
+ # if ip_address in existing port does not match any in request,
+ # update is required.
+ if not any(match.get('ip_address') == item['ip_address']
+ for match in module.params['fixed_ips']):
+ return True
+
+ return False
+
+
+def _system_state_change(module, port, cloud):
+ state = module.params['state']
+ if state == 'present':
+ if not port:
+ return True
+ return _needs_update(module, port, cloud)
+ if state == 'absent' and port:
+ return True
+ return False
+
+
+def _compose_port_args(module, cloud):
+ port_kwargs = {}
+ optional_parameters = ['name',
+ 'fixed_ips',
+ 'admin_state_up',
+ 'mac_address',
+ 'security_groups',
+ 'allowed_address_pairs',
+ 'extra_dhcp_opt',
+ 'device_owner',
+ 'device_id']
+ for optional_param in optional_parameters:
+ if module.params[optional_param] is not None:
+ port_kwargs[optional_param] = module.params[optional_param]
+
+ if module.params['no_security_groups']:
+ port_kwargs['security_groups'] = []
+
+ return port_kwargs
+
+
+def get_security_group_id(module, cloud, security_group_name_or_id):
+ security_group = cloud.get_security_group(security_group_name_or_id)
+ if not security_group:
+ module.fail_json(msg="Security group: %s, was not found"
+ % security_group_name_or_id)
+ return security_group['id']
+
+
+def main():
+ argument_spec = openstack_full_argument_spec(
+ network=dict(required=False),
+ name=dict(required=False),
+ fixed_ips=dict(default=None),
+ admin_state_up=dict(default=None),
+ mac_address=dict(default=None),
+ security_groups=dict(default=None),
+ no_security_groups=dict(default=False, type='bool'),
+ allowed_address_pairs=dict(default=None),
+ extra_dhcp_opt=dict(default=None),
+ device_owner=dict(default=None),
+ device_id=dict(default=None),
+ state=dict(default='present', choices=['absent', 'present']),
+ )
+
+ module_kwargs = openstack_module_kwargs(
+ mutually_exclusive=[
+ ['no_security_groups', 'security_groups'],
+ ]
+ )
+
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True,
+ **module_kwargs)
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+ name = module.params['name']
+ state = module.params['state']
+
+ try:
+ cloud = shade.openstack_cloud(**module.params)
+ if module.params['security_groups']:
+ if type(module.params['security_groups']) == str:
+ module.params['security_groups'] = module.params[
+ 'security_groups'].split(',')
+ # translate security_groups to UUID's if names where provided
+ module.params['security_groups'] = map(
+ lambda v: get_security_group_id(module, cloud, v),
+ module.params['security_groups'])
+
+ port = None
+ network_id = None
+ if name:
+ port = cloud.get_port(name)
+
+ if module.check_mode:
+ module.exit_json(changed=_system_state_change(module, port, cloud))
+
+ changed = False
+ if state == 'present':
+ if not port:
+ network = module.params['network']
+ if not network:
+ module.fail_json(
+ msg="Parameter 'network' is required in Port Create"
+ )
+ port_kwargs = _compose_port_args(module, cloud)
+ network_object = cloud.get_network(network)
+
+ if network_object:
+ network_id = network_object['id']
+ else:
+ module.fail_json(
+ msg="Specified network was not found."
+ )
+
+ port = cloud.create_port(network_id, **port_kwargs)
+ changed = True
+ else:
+ if _needs_update(module, port, cloud):
+ port_kwargs = _compose_port_args(module, cloud)
+ port = cloud.update_port(port['id'], **port_kwargs)
+ changed = True
+ module.exit_json(changed=changed, id=port['id'], port=port)
+
+ if state == 'absent':
+ if port:
+ cloud.delete_port(port['id'])
+ changed = True
+ module.exit_json(changed=changed)
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=e.message)
+
+# this is magic, see lib/ansible/module_common.py
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+if __name__ == '__main__':
+ main()
diff --git a/cloud/openstack/os_router.py b/cloud/openstack/os_router.py
new file mode 100644
index 00000000000..3d4218d2d14
--- /dev/null
+++ b/cloud/openstack/os_router.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+
+DOCUMENTATION = '''
+---
+module: os_router
+short_description: Create or delete routers from OpenStack
+extends_documentation_fragment: openstack
+version_added: "2.0"
+author: "David Shrewsbury (@Shrews)"
+description:
+ - Create or Delete routers from OpenStack. Although Neutron allows
+ routers to share the same name, this module enforces name uniqueness
+ to be more user friendly.
+options:
+ state:
+ description:
+ - Indicate desired state of the resource
+ choices: ['present', 'absent']
+ default: present
+ name:
+ description:
+ - Name to be given to the router
+ required: true
+ admin_state_up:
+ description:
+ - Desired admin state of the created or existing router.
+ required: false
+ default: true
+ enable_snat:
+ description:
+ - Enable Source NAT (SNAT) attribute.
+ required: false
+ default: true
+ network:
+ description:
+ - Unique name or ID of the external gateway network.
+ type: string
+ required: true when I(interfaces) or I(enable_snat) are provided,
+ false otherwise.
+ default: None
+ interfaces:
+ description:
+ - List of subnets to attach to the router. Each is a dictionary with
+ the subnet name or ID (subnet) and the IP address to assign on that
+ subnet (ip). If no IP is specified, one is automatically assigned from
+ that subnet.
+ required: false
+ default: None
+requirements: ["shade"]
+'''
+
+EXAMPLES = '''
+# Create a simple router, not attached to a gateway or subnets.
+- os_router:
+ cloud: mycloud
+ state: present
+ name: simple_router
+
+# Creates a router attached to ext_network1 and one subnet interface.
+# An IP address from subnet1's IP range will automatically be assigned
+# to that interface.
+- os_router:
+ cloud: mycloud
+ state: present
+ name: router1
+ network: ext_network1
+ interfaces:
+ - subnet: subnet1
+
+# Update existing router1 to include subnet2 (10.5.5.0/24), specifying
+# the IP address within subnet2's IP range we'd like for that interface.
+- os_router:
+ cloud: mycloud
+ state: present
+ name: router1
+ network: ext_network1
+ interfaces:
+ - subnet: subnet1
+ - subnet: subnet2
+ ip: 10.5.5.1
+
+# Delete router1
+- os_router:
+ cloud: mycloud
+ state: absent
+ name: router1
+'''
+
+RETURN = '''
+router:
+ description: Dictionary describing the router.
+ returned: On success when I(state) is 'present'
+ type: dictionary
+ contains:
+ id:
+ description: Router ID.
+ type: string
+ sample: "474acfe5-be34-494c-b339-50f06aa143e4"
+ name:
+ description: Router name.
+ type: string
+ sample: "router1"
+ admin_state_up:
+ description: Administrative state of the router.
+ type: boolean
+ sample: true
+ status:
+ description: The router status.
+ type: string
+ sample: "ACTIVE"
+ tenant_id:
+ description: The tenant ID.
+ type: string
+ sample: "861174b82b43463c9edc5202aadc60ef"
+ external_gateway_info:
+ description: The external gateway parameters.
+ type: dictionary
+ sample: {
+ "enable_snat": true,
+ "external_fixed_ips": [
+ {
+ "ip_address": "10.6.6.99",
+ "subnet_id": "4272cb52-a456-4c20-8f3c-c26024ecfa81"
+ }
+ ]
+ }
+ routes:
+ description: The extra routes configuration for L3 router.
+ type: list
+'''
+
+
+def _needs_update(cloud, module, router, network):
+ """Decide if the given router needs an update.
+ """
+ if router['admin_state_up'] != module.params['admin_state_up']:
+ return True
+ if router['external_gateway_info']['enable_snat'] != module.params['enable_snat']:
+ return True
+ if network:
+ if router['external_gateway_info']['network_id'] != network['id']:
+ return True
+
+ # check subnet interfaces
+ for new_iface in module.params['interfaces']:
+ subnet = cloud.get_subnet(new_iface['subnet'])
+ if not subnet:
+ module.fail_json(msg='subnet %s not found' % new_iface['subnet'])
+ exists = False
+
+ # compare the requested interface with existing, looking for an existing match
+ for existing_iface in router['external_gateway_info']['external_fixed_ips']:
+ if existing_iface['subnet_id'] == subnet['id']:
+ if 'ip' in new_iface:
+ if existing_iface['ip_address'] == new_iface['ip']:
+ # both subnet id and ip address match
+ exists = True
+ break
+ else:
+ # only the subnet was given, so ip doesn't matter
+ exists = True
+ break
+
+ # this interface isn't present on the existing router
+ if not exists:
+ return True
+
+ return False
+
+def _system_state_change(cloud, module, router, network):
+ """Check if the system state would be changed."""
+ state = module.params['state']
+ if state == 'absent' and router:
+ return True
+ if state == 'present':
+ if not router:
+ return True
+ return _needs_update(cloud, module, router, network)
+ return False
+
+def _build_kwargs(cloud, module, router, network):
+ kwargs = {
+ 'admin_state_up': module.params['admin_state_up'],
+ }
+
+ if router:
+ kwargs['name_or_id'] = router['id']
+ else:
+ kwargs['name'] = module.params['name']
+
+ if network:
+ kwargs['ext_gateway_net_id'] = network['id']
+ # can't send enable_snat unless we have a network
+ kwargs['enable_snat'] = module.params['enable_snat']
+
+ if module.params['interfaces']:
+ kwargs['ext_fixed_ips'] = []
+ for iface in module.params['interfaces']:
+ subnet = cloud.get_subnet(iface['subnet'])
+ if not subnet:
+ module.fail_json(msg='subnet %s not found' % iface['subnet'])
+ d = {'subnet_id': subnet['id']}
+ if 'ip' in iface:
+ d['ip_address'] = iface['ip']
+ kwargs['ext_fixed_ips'].append(d)
+
+ return kwargs
+
+def main():
+ argument_spec = openstack_full_argument_spec(
+ state=dict(default='present', choices=['absent', 'present']),
+ name=dict(required=True),
+ admin_state_up=dict(type='bool', default=True),
+ enable_snat=dict(type='bool', default=True),
+ network=dict(default=None),
+ interfaces=dict(type='list', default=None)
+ )
+
+ module_kwargs = openstack_module_kwargs()
+ module = AnsibleModule(argument_spec,
+ supports_check_mode=True,
+ **module_kwargs)
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+
+ state = module.params['state']
+ name = module.params['name']
+ network = module.params['network']
+
+ if module.params['interfaces'] and not network:
+ module.fail_json(msg='network is required when supplying interfaces')
+
+ try:
+ cloud = shade.openstack_cloud(**module.params)
+ router = cloud.get_router(name)
+
+ net = None
+ if network:
+ net = cloud.get_network(network)
+ if not net:
+ module.fail_json(msg='network %s not found' % network)
+
+ if module.check_mode:
+ module.exit_json(
+ changed=_system_state_change(cloud, module, router, net)
+ )
+
+ if state == 'present':
+ changed = False
+
+ if not router:
+ kwargs = _build_kwargs(cloud, module, router, net)
+ router = cloud.create_router(**kwargs)
+ changed = True
+ else:
+ if _needs_update(cloud, module, router, net):
+ kwargs = _build_kwargs(cloud, module, router, net)
+ router = cloud.update_router(**kwargs)
+ changed = True
+
+ module.exit_json(changed=changed, router=router)
+
+ elif state == 'absent':
+ if not router:
+ module.exit_json(changed=False)
+ else:
+ cloud.delete_router(name)
+ module.exit_json(changed=True)
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=e.message)
+
+
+# this is magic, see lib/ansible/module_common.py
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+if __name__ == '__main__':
+ main()
diff --git a/cloud/openstack/os_server.py b/cloud/openstack/os_server.py
index cd893536eaa..55d841ce7d5 100644
--- a/cloud/openstack/os_server.py
+++ b/cloud/openstack/os_server.py
@@ -82,7 +82,10 @@ options:
nics:
description:
- A list of networks to which the instance's interface should
- be attached. Networks may be referenced by net-id or net-name.
+ be attached. Networks may be referenced by net-id/net-name/port-id
+ or port-name.
+ - 'Also this accepts a string containing a list of net-id/port-id.
+ Eg: nics: "net-id=uuid-1,net-id=uuid-2"'
required: false
default: None
public_ip:
@@ -108,7 +111,8 @@ options:
meta:
description:
- A list of key value pairs that should be provided as a metadata to
- the new instance.
+ the new instance or a string containing a list of key-value pairs.
+ Eg: meta: "key1=value1,key2=value2"
required: false
default: None
wait:
@@ -241,6 +245,44 @@ EXAMPLES = '''
image: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)
flavor_ram: 4096
flavor_include: Performance
+
+# Creates a new instance and attaches to multiple network
+- name: launch a compute instance
+ hosts: localhost
+ tasks:
+ - name: launch an instance with a string
+ os_server:
+ name: vm1
+ auth:
+ auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
+ username: admin
+ password: admin
+ project_name: admin
+ name: vm1
+ image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
+ key_name: ansible_key
+ timeout: 200
+ flavor: 4
+ nics: "net-id=4cb08b20-62fe-11e5-9d70-feff819cdc9f,net-id=542f0430-62fe-11e5-9d70-feff819cdc9f..."
+
+# Creates a new instance and attaches to a network and passes metadata to
+# the instance
+- os_server:
+ state: present
+ auth:
+ auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
+ username: admin
+ password: admin
+ project_name: admin
+ name: vm1
+ image: 4f905f38-e52a-43d2-b6ec-754a13ffb529
+ key_name: ansible_key
+ timeout: 200
+ flavor: 4
+ nics:
+ - net-id: 34605f38-e52a-25d2-b6ec-754a13ffb723
+ - net-name: another_network
+ meta: "hostname=test1,group=uge_master"
'''
@@ -252,25 +294,33 @@ def _exit_hostvars(module, cloud, server, changed=True):
def _network_args(module, cloud):
args = []
- for net in module.params['nics']:
- if net.get('net-id'):
- args.append(net)
- elif net.get('net-name'):
- by_name = cloud.get_network(net['net-name'])
- if not by_name:
- module.fail_json(
- msg='Could not find network by net-name: %s' %
- net['net-name'])
- args.append({'net-id': by_name['id']})
- elif net.get('port-id'):
- args.append(net)
- elif net.get('port-name'):
- by_name = cloud.get_port(net['port-name'])
- if not by_name:
- module.fail_json(
- msg='Could not find port by port-name: %s' %
- net['port-name'])
- args.append({'port-id': by_name['id']})
+ nics = module.params['nics']
+ if type(nics) == str :
+ for kv_str in nics.split(","):
+ nic = {}
+ k, v = kv_str.split("=")
+ nic[k] = v
+ args.append(nic)
+ else:
+ for net in module.params['nics']:
+ if net.get('net-id'):
+ args.append(net)
+ elif net.get('net-name'):
+ by_name = cloud.get_network(net['net-name'])
+ if not by_name:
+ module.fail_json(
+ msg='Could not find network by net-name: %s' %
+ net['net-name'])
+ args.append({'net-id': by_name['id']})
+ elif net.get('port-id'):
+ args.append(net)
+ elif net.get('port-name'):
+ by_name = cloud.get_port(net['port-name'])
+ if not by_name:
+ module.fail_json(
+ msg='Could not find port by port-name: %s' %
+ net['port-name'])
+ args.append({'port-id': by_name['id']})
return args
@@ -305,6 +355,13 @@ def _create_server(module, cloud):
nics = _network_args(module, cloud)
+ if type(module.params['meta']) is str:
+ metas = {}
+ for kv_str in module.params['meta'].split(","):
+ k, v = kv_str.split("=")
+ metas[k] = v
+ module.params['meta'] = metas
+
bootkwargs = dict(
name=module.params['name'],
image=image_id,
diff --git a/cloud/openstack/os_subnet.py b/cloud/openstack/os_subnet.py
index 22876c80869..d54268f415a 100644
--- a/cloud/openstack/os_subnet.py
+++ b/cloud/openstack/os_subnet.py
@@ -112,23 +112,23 @@ requirements:
EXAMPLES = '''
# Create a new (or update an existing) subnet on the specified network
- os_subnet:
- state=present
- network_name=network1
- name=net1subnet
- cidr=192.168.0.0/24
+ state: present
+ network_name: network1
+ name: net1subnet
+ cidr: 192.168.0.0/24
dns_nameservers:
- 8.8.8.7
- 8.8.8.8
host_routes:
- destination: 0.0.0.0/0
- nexthop: 123.456.78.9
+ nexthop: 12.34.56.78
- destination: 192.168.0.0/24
nexthop: 192.168.0.1
# Delete a subnet
- os_subnet:
- state=absent
- name=net1subnet
+ state: absent
+ name: net1subnet
# Create an ipv6 stateless subnet
- os_subnet:
diff --git a/cloud/openstack/os_subnets_facts.py b/cloud/openstack/os_subnets_facts.py
new file mode 100644
index 00000000000..ca94aa187f9
--- /dev/null
+++ b/cloud/openstack/os_subnets_facts.py
@@ -0,0 +1,152 @@
+#!/usr/bin/python
+
+# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
+#
+# This module is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This software is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this software. If not, see <http://www.gnu.org/licenses/>.
+
+try:
+ import shade
+ HAS_SHADE = True
+except ImportError:
+ HAS_SHADE = False
+
+DOCUMENTATION = '''
+---
+module: os_subnets_facts
+short_description: Retrieve facts about one or more OpenStack subnets.
+version_added: "2.0"
+author: "Davide Agnello (@dagnello)"
+description:
+ - Retrieve facts about one or more subnets from OpenStack.
+requirements:
+ - "python >= 2.6"
+ - "shade"
+options:
+ subnet:
+ description:
+ - Name or ID of the subnet
+ required: false
+ filters:
+ description:
+ - A dictionary of meta data to use for further filtering. Elements of
+ this dictionary may be additional dictionaries.
+ required: false
+extends_documentation_fragment: openstack
+'''
+
+EXAMPLES = '''
+# Gather facts about previously created subnets
+- os_subnets_facts:
+ auth:
+ auth_url: https://your_api_url.com:9000/v2.0
+ username: user
+ password: password
+ project_name: someproject
+- debug: var=openstack_subnets
+
+# Gather facts about a previously created subnet by name
+- os_subnets_facts:
+ auth:
+ auth_url: https://your_api_url.com:9000/v2.0
+ username: user
+ password: password
+ project_name: someproject
+ name: subnet1
+- debug: var=openstack_subnets
+
+# Gather facts about a previously created subnet with filter (note: name and
+ filters parameters are Not mutually exclusive)
+- os_subnets_facts:
+ auth:
+ auth_url: https://your_api_url.com:9000/v2.0
+ username: user
+ password: password
+ project_name: someproject
+ filters:
+ tenant_id: 55e2ce24b2a245b09f181bf025724cbe
+- debug: var=openstack_subnets
+'''
+
+RETURN = '''
+This module registers subnet details in facts named: openstack_subnets. If a
+subnet name/id and/or filter does not result in a subnet found, an empty list
+is set in openstack_subnets.
+id:
+ description: Unique UUID.
+ returned: success
+ type: string
+name:
+ description: Name given to the subnet.
+ returned: success
+ type: string
+network_id:
+ description: Network ID this subnet belongs in.
+ returned: success
+ type: string
+cidr:
+ description: Subnet's CIDR.
+ returned: success
+ type: string
+gateway_ip:
+ description: Subnet's gateway ip.
+ returned: success
+ type: string
+enable_dhcp:
+ description: DHCP enable flag for this subnet.
+ returned: success
+ type: bool
+ip_version:
+ description: IP version for this subnet.
+ returned: success
+ type: int
+tenant_id:
+ description: Tenant id associated with this subnet.
+ returned: success
+ type: string
+dns_nameservers:
+ description: DNS name servers for this subnet.
+ returned: success
+ type: list of strings
+allocation_pools:
+ description: Allocation pools associated with this subnet.
+ returned: success
+ type: list of dicts
+'''
+
+def main():
+
+ argument_spec = openstack_full_argument_spec(
+ name=dict(required=False, default=None),
+ filters=dict(required=False, default=None)
+ )
+ module = AnsibleModule(argument_spec)
+
+ if not HAS_SHADE:
+ module.fail_json(msg='shade is required for this module')
+
+ try:
+ cloud = shade.openstack_cloud(**module.params)
+ subnets = cloud.search_subnets(module.params['name'],
+ module.params['filters'])
+ module.exit_json(changed=False, ansible_facts=dict(
+ openstack_subnets=subnets))
+
+ except shade.OpenStackCloudException as e:
+ module.fail_json(msg=e.message)
+
+# this is magic, see lib/ansible/module_common.py
+from ansible.module_utils.basic import *
+from ansible.module_utils.openstack import *
+if __name__ == '__main__':
+ main()
diff --git a/cloud/rackspace/rax_cdb.py b/cloud/rackspace/rax_cdb.py
index 6abadd2ebf4..ea5b16528e6 100644
--- a/cloud/rackspace/rax_cdb.py
+++ b/cloud/rackspace/rax_cdb.py
@@ -38,6 +38,16 @@ options:
description:
- Volume size of the database 1-150GB
default: 2
+ cdb_type:
+ description:
+ - type of instance (i.e. MySQL, MariaDB, Percona)
+ default: MySQL
+ version_added: "2.0"
+ cdb_version:
+ description:
+ - version of database (MySQL supports 5.1 and 5.6, MariaDB supports 10, Percona supports 5.6)
+ choices: ['5.1', '5.6', '10']
+ version_added: "2.0"
state:
description:
- Indicate desired state of the resource
@@ -68,6 +78,8 @@ EXAMPLES = '''
name: db-server1
flavor: 1
volume: 2
+ cdb_type: MySQL
+ cdb_version: 5.6
wait: yes
state: present
register: rax_db_server
@@ -91,10 +103,12 @@ def find_instance(name):
return False
-def save_instance(module, name, flavor, volume, wait, wait_timeout):
+def save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
+ wait_timeout):
for arg, value in dict(name=name, flavor=flavor,
- volume=volume).iteritems():
+ volume=volume, type=cdb_type, version=cdb_version
+ ).iteritems():
if not value:
module.fail_json(msg='%s is required for the "rax_cdb"'
' module' % arg)
@@ -118,7 +132,8 @@ def save_instance(module, name, flavor, volume, wait, wait_timeout):
if not instance:
action = 'create'
try:
- instance = cdb.create(name=name, flavor=flavor, volume=volume)
+ instance = cdb.create(name=name, flavor=flavor, volume=volume,
+ type=cdb_type, version=cdb_version)
except Exception, e:
module.fail_json(msg='%s' % e.message)
else:
@@ -189,11 +204,13 @@ def delete_instance(module, name, wait, wait_timeout):
cdb=rax_to_dict(instance))
-def rax_cdb(module, state, name, flavor, volume, wait, wait_timeout):
+def rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait,
+ wait_timeout):
# act on the state
if state == 'present':
- save_instance(module, name, flavor, volume, wait, wait_timeout)
+ save_instance(module, name, flavor, volume, cdb_type, cdb_version, wait,
+ wait_timeout)
elif state == 'absent':
delete_instance(module, name, wait, wait_timeout)
@@ -205,6 +222,8 @@ def main():
name=dict(type='str', required=True),
flavor=dict(type='int', default=1),
volume=dict(type='int', default=2),
+ cdb_type=dict(type='str', default='MySQL'),
+ cdb_version=dict(type='str', default='5.6'),
state=dict(default='present', choices=['present', 'absent']),
wait=dict(type='bool', default=False),
wait_timeout=dict(type='int', default=300),
@@ -222,12 +241,14 @@ def main():
name = module.params.get('name')
flavor = module.params.get('flavor')
volume = module.params.get('volume')
+ cdb_type = module.params.get('type')
+ cdb_version = module.params.get('version')
state = module.params.get('state')
wait = module.params.get('wait')
wait_timeout = module.params.get('wait_timeout')
setup_rax_module(module, pyrax)
- rax_cdb(module, state, name, flavor, volume, wait, wait_timeout)
+ rax_cdb(module, state, name, flavor, volume, cdb_type, cdb_version, wait, wait_timeout)
# import module snippets
diff --git a/cloud/rackspace/rax_files_objects.py b/cloud/rackspace/rax_files_objects.py
index 0274a79004d..6da7b1882f0 100644
--- a/cloud/rackspace/rax_files_objects.py
+++ b/cloud/rackspace/rax_files_objects.py
@@ -271,7 +271,7 @@ def upload(module, cf, container, src, dest, meta, expires):
if path != src:
prefix = path.split(src)[-1].lstrip('/')
filenames = [os.path.join(prefix, name) for name in filenames
- if not os.path.isdir(name)]
+ if not os.path.isdir(os.path.join(path, name))]
objs += filenames
_objs = []
diff --git a/cloud/rackspace/rax_scaling_group.py b/cloud/rackspace/rax_scaling_group.py
index e6c14fdef0f..79b7395f400 100644
--- a/cloud/rackspace/rax_scaling_group.py
+++ b/cloud/rackspace/rax_scaling_group.py
@@ -105,6 +105,18 @@ options:
- Data to be uploaded to the servers config drive. This option implies
I(config_drive). Can be a file path or a string
version_added: 1.8
+ wait:
+ description:
+ - wait for the scaling group to finish provisioning the minimum amount of
+ servers
+ default: "no"
+ choices:
+ - "yes"
+ - "no"
+ wait_timeout:
+ description:
+ - how long before wait gives up, in seconds
+ default: 300
author: "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace
'''
@@ -144,7 +156,7 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None,
image=None, key_name=None, loadbalancers=[], meta={},
min_entities=0, max_entities=0, name=None, networks=[],
server_name=None, state='present', user_data=None,
- config_drive=False):
+ config_drive=False, wait=True, wait_timeout=300):
changed = False
au = pyrax.autoscale
@@ -315,6 +327,16 @@ def rax_asg(module, cooldown=300, disk_config=None, files={}, flavor=None,
sg.get()
+ if wait:
+ end_time = time.time() + wait_timeout
+ infinite = wait_timeout == 0
+ while infinite or time.time() < end_time:
+ state = sg.get_state()
+ if state["pending_capacity"] == 0:
+ break
+
+ time.sleep(5)
+
module.exit_json(changed=changed, autoscale_group=rax_to_dict(sg))
else:
@@ -350,6 +372,8 @@ def main():
server_name=dict(required=True),
state=dict(default='present', choices=['present', 'absent']),
user_data=dict(no_log=True),
+ wait=dict(default=False, type='bool'),
+ wait_timeout=dict(default=300),
)
)
diff --git a/files/copy.py b/files/copy.py
index 8f6d3d32f28..da976f9a692 100644
--- a/files/copy.py
+++ b/files/copy.py
@@ -77,6 +77,13 @@ options:
already existed.
required: false
version_added: "1.5"
+ remote_src:
+ description:
+ - If False, it will search for src at originating/master machine, if True it will go to the remote/target machine for the src. Default is False.
+ choices: [ "True", "False" ]
+ required: false
+ default: "False"
+ version_added: "2.0"
extends_documentation_fragment:
- files
- validate
diff --git a/files/file.py b/files/file.py
index c3267f7f18b..8219990d1f6 100644
--- a/files/file.py
+++ b/files/file.py
@@ -93,10 +93,10 @@ EXAMPLES = '''
# change file ownership, group and mode. When specifying mode using octal numbers, first digit should always be 0.
- file: path=/etc/foo.conf owner=foo group=foo mode=0644
- file: src=/file/to/link/to dest=/path/to/symlink owner=foo group=foo state=link
-- file: src=/tmp/{{ item.path }} dest={{ item.dest }} state=link
+- file: src=/tmp/{{ item.src }} dest={{ item.dest }} state=link
with_items:
- - { path: 'x', dest: 'y' }
- - { path: 'z', dest: 'k' }
+ - { src: 'x', dest: 'y' }
+ - { src: 'z', dest: 'k' }
# touch a file, using symbolic modes to set the permissions (equivalent to 0644)
- file: path=/etc/foo.conf state=touch mode="u=rw,g=r,o=r"
diff --git a/files/synchronize.py b/files/synchronize.py
index 73b0bb13364..0332c7580d2 100644
--- a/files/synchronize.py
+++ b/files/synchronize.py
@@ -204,10 +204,11 @@ synchronize: mode=pull src=some/relative/path dest=/some/absolute/path
# Synchronization of src on delegate host to dest on the current inventory host.
# If delegate_to is set to the current inventory host, this can be used to synchronize
-# two directories on that host.
-synchronize: >
- src=some/relative/path dest=/some/absolute/path
- delegate_to: delegate.host
+# two directories on that host.
+synchronize:
+ src: some/relative/path
+ dest: /some/absolute/path
+delegate_to: delegate.host
# Synchronize and delete files in dest on the remote host that are not found in src of localhost.
synchronize: src=some/relative/path dest=/some/absolute/path delete=yes
@@ -222,7 +223,12 @@ synchronize: src=some/relative/path dest=/some/absolute/path rsync_path="sudo rs
+ /var/conf # include /var/conf even though it was previously excluded
# Synchronize passing in extra rsync options
-synchronize: src=/tmp/helloworld dest=/var/www/helloword rsync_opts=--no-motd,--exclude=.git
+synchronize:
+ src: /tmp/helloworld
+ dest: /var/www/helloword
+ rsync_opts:
+ - "--no-motd"
+ - "--exclude=.git"
'''
diff --git a/network/basics/get_url.py b/network/basics/get_url.py
index fad0d58f878..41af97a5520 100644
--- a/network/basics/get_url.py
+++ b/network/basics/get_url.py
@@ -84,7 +84,11 @@ options:
Format: :, e.g.: checksum="sha256:D98291AC[...]B6DC7B97"
If you worry about portability, only the sha1 algorithm is available
on all platforms and python versions. The third party hashlib
- library can be installed for access to additional algorithms.'
+ library can be installed for access to additional algorithms.
+ Additionally, if a checksum is passed to this parameter, and the file exists under
+ the C(dest) location, the destination_checksum will be calculated, and if
+ checksum equals destination_checksum, the file download will be skipped
+ (unless C(force) is true). '
version_added: "2.0"
required: false
default: null
diff --git a/packaging/language/pip.py b/packaging/language/pip.py
index 8bbae35038d..a4af27ccee5 100644
--- a/packaging/language/pip.py
+++ b/packaging/language/pip.py
@@ -239,7 +239,7 @@ def main():
virtualenv_python=dict(default=None, required=False, type='str'),
use_mirrors=dict(default='yes', type='bool'),
extra_args=dict(default=None, required=False),
- chdir=dict(default=None, required=False),
+ chdir=dict(default=None, required=False, type='path'),
executable=dict(default=None, required=False),
),
required_one_of=[['name', 'requirements']],
@@ -258,6 +258,10 @@ def main():
if state == 'latest' and version is not None:
module.fail_json(msg='version is incompatible with state=latest')
+ if chdir is None:
+ # this is done to avoid permissions issues with privilege escalation and virtualenvs
+ chdir = tempfile.gettempdir()
+
err = ''
out = ''
@@ -285,10 +289,7 @@ def main():
cmd += ' -p%s' % virtualenv_python
cmd = "%s %s" % (cmd, env)
- this_dir = tempfile.gettempdir()
- if chdir:
- this_dir = os.path.join(this_dir, chdir)
- rc, out_venv, err_venv = module.run_command(cmd, cwd=this_dir)
+ rc, out_venv, err_venv = module.run_command(cmd, cwd=chdir)
out += out_venv
err += err_venv
if rc != 0:
@@ -328,9 +329,6 @@ def main():
elif requirements:
cmd += ' -r %s' % requirements
- this_dir = tempfile.gettempdir()
- if chdir:
- this_dir = os.path.join(this_dir, chdir)
if module.check_mode:
if extra_args or requirements or state == 'latest' or not name:
@@ -340,7 +338,8 @@ def main():
module.exit_json(changed=True)
freeze_cmd = '%s freeze' % pip
- rc, out_pip, err_pip = module.run_command(freeze_cmd, cwd=this_dir)
+
+ rc, out_pip, err_pip = module.run_command(freeze_cmd, cwd=chdir)
if rc != 0:
module.exit_json(changed=True)
@@ -353,7 +352,7 @@ def main():
changed = (state == 'present' and not is_present) or (state == 'absent' and is_present)
module.exit_json(changed=changed, cmd=freeze_cmd, stdout=out, stderr=err)
- rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=this_dir)
+ rc, out_pip, err_pip = module.run_command(cmd, path_prefix=path_prefix, cwd=chdir)
out += out_pip
err += err_pip
if rc == 1 and state == 'absent' and \
diff --git a/packaging/os/redhat_subscription.py b/packaging/os/redhat_subscription.py
index 8e1482a8c4f..2e6887164cf 100644
--- a/packaging/os/redhat_subscription.py
+++ b/packaging/os/redhat_subscription.py
@@ -7,7 +7,7 @@ short_description: Manage Red Hat Network registration and subscriptions using t
description:
- Manage registration and subscription to the Red Hat Network entitlement platform.
version_added: "1.2"
-author: "James Laska (@jlaska)"
+author: "Barnaby Court (@barnabycourt)"
notes:
- In order to register a system, subscription-manager requires either a username and password, or an activationkey.
requirements:
diff --git a/packaging/os/rpm_key.py b/packaging/os/rpm_key.py
index 1d2d208e4be..dde35df7aad 100644
--- a/packaging/os/rpm_key.py
+++ b/packaging/os/rpm_key.py
@@ -61,7 +61,6 @@ EXAMPLES = '''
- rpm_key: state=absent key=DEADB33F
'''
import re
-import syslog
import os.path
import urllib2
import tempfile
@@ -74,7 +73,6 @@ def is_pubkey(string):
class RpmKey:
def __init__(self, module):
- self.syslogging = False
# If the key is a url, we need to check if it's present to be idempotent,
# to do that, we need to check the keyid, which we can get from the armor.
keyfile = None
@@ -163,9 +161,6 @@ class RpmKey:
return re.match('(0x)?[0-9a-f]{8}', keystr, flags=re.IGNORECASE)
def execute_command(self, cmd):
- if self.syslogging:
- syslog.openlog('ansible-%s' % os.path.basename(__file__))
- syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd))
rc, stdout, stderr = self.module.run_command(cmd)
if rc != 0:
self.module.fail_json(msg=stderr)
diff --git a/packaging/os/yum.py b/packaging/os/yum.py
index c66e73ad98b..bcf9b283a95 100644
--- a/packaging/os/yum.py
+++ b/packaging/os/yum.py
@@ -26,7 +26,6 @@ import traceback
import os
import yum
import rpm
-import syslog
import platform
import tempfile
import shutil
@@ -169,10 +168,6 @@ BUFSIZE = 65536
def_qf = "%{name}-%{version}-%{release}.%{arch}"
-def log(msg):
- syslog.openlog('ansible-yum', 0, syslog.LOG_USER)
- syslog.syslog(syslog.LOG_NOTICE, msg)
-
def yum_base(conf_file=None):
my = yum.YumBase()
@@ -760,7 +755,11 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
if update_all:
cmd = yum_basecmd + ['update']
+ will_update = set(updates.keys())
+ will_update_from_other_package = dict()
else:
+ will_update = set()
+ will_update_from_other_package = dict()
for spec in items:
# some guess work involved with groups. update @ will install the group if missing
if spec.startswith('@'):
@@ -784,8 +783,19 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
nothing_to_do = False
break
- if spec in pkgs['update'] and spec in updates.keys():
- nothing_to_do = False
+ # this contains the full NVR and spec could contain wildcards
+ # or virtual provides (like "python-*" or "smtp-daemon") while
+ # updates contains name only.
+ this_name_only = '-'.join(this.split('-')[:-2])
+ if spec in pkgs['update'] and this_name_only in updates.keys():
+ nothing_to_do = False
+ will_update.add(spec)
+ # Massage the updates list
+ if spec != this_name_only:
+ # For reporting what packages would be updated more
+ # succinctly
+ will_update_from_other_package[spec] = this_name_only
+ break
if nothing_to_do:
res['results'].append("All packages providing %s are up to date" % spec)
@@ -798,12 +808,6 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
res['msg'] += "The following packages have pending transactions: %s" % ", ".join(conflicts)
module.fail_json(**res)
- # list of package updates
- if update_all:
- will_update = updates.keys()
- else:
- will_update = [u for u in pkgs['update'] if u in updates.keys() or u.startswith('@')]
-
# check_mode output
if module.check_mode:
to_update = []
@@ -811,6 +815,9 @@ def latest(module, items, repoq, yum_basecmd, conf_file, en_repos, dis_repos):
if w.startswith('@'):
to_update.append((w, None))
msg = '%s will be updated' % w
+ elif w not in updates:
+ other_pkg = will_update_from_other_package[w]
+ to_update.append((w, 'because of (at least) %s-%s.%s from %s' % (other_pkg, updates[other_pkg]['version'], updates[other_pkg]['dist'], updates[other_pkg]['repo'])))
else:
to_update.append((w, '%s.%s from %s' % (updates[w]['version'], updates[w]['dist'], updates[w]['repo'])))
diff --git a/source_control/git.py b/source_control/git.py
index 4b1620392a0..d42b284abc5 100644
--- a/source_control/git.py
+++ b/source_control/git.py
@@ -453,7 +453,7 @@ def is_local_branch(git_path, module, dest, branch):
def is_not_a_branch(git_path, module, dest):
branches = get_branches(git_path, module, dest)
for b in branches:
- if b.startswith('* ') and 'no branch' in b:
+ if b.startswith('* ') and ('no branch' in b or 'detached from' in b):
return True
return False
diff --git a/source_control/subversion.py b/source_control/subversion.py
index 24cc065c5a4..64a1b38b044 100644
--- a/source_control/subversion.py
+++ b/source_control/subversion.py
@@ -171,9 +171,10 @@ class Subversion(object):
'''True if revisioned files have been added or modified. Unrevisioned files are ignored.'''
lines = self._exec(["status", "--quiet", "--ignore-externals", self.dest])
# The --quiet option will return only modified files.
-
+ # Match only revisioned files, i.e. ignore status '?'.
+ regex = re.compile(r'^[^?X]')
# Has local mods if more than 0 modifed revisioned files.
- return len(filter(len, lines)) > 0
+ return len(filter(regex.match, lines)) > 0
def needs_update(self):
curr, url = self.get_revision()
diff --git a/system/cron.py b/system/cron.py
index 0415fb252a2..63319096c42 100644
--- a/system/cron.py
+++ b/system/cron.py
@@ -67,6 +67,7 @@ options:
cron_file:
description:
- If specified, uses this file in cron.d instead of an individual user's crontab.
+ To use the C(cron_file) parameter you must specify the C(user) as well.
required: false
default: null
backup:
@@ -178,9 +179,6 @@ class CronTab(object):
self.lines = None
self.ansible = "#Ansible: "
- # select whether we dump additional debug info through syslog
- self.syslogging = False
-
if cron_file:
self.cron_file = '/etc/cron.d/%s' % cron_file
else:
@@ -218,10 +216,6 @@ class CronTab(object):
self.lines.append(l)
count += 1
- def log_message(self, message):
- if self.syslogging:
- syslog.syslog(syslog.LOG_NOTICE, 'ansible: "%s"' % message)
-
def is_empty(self):
if len(self.lines) == 0:
return True
@@ -458,9 +452,7 @@ def main():
os.umask(022)
crontab = CronTab(module, user, cron_file)
- if crontab.syslogging:
- syslog.openlog('ansible-%s' % os.path.basename(__file__))
- syslog.syslog(syslog.LOG_NOTICE, 'cron instantiated - name: "%s"' % name)
+ module.debug('cron instantiated - name: "%s"' % name)
# --- user input validation ---
@@ -495,6 +487,7 @@ def main():
(backuph, backup_file) = tempfile.mkstemp(prefix='crontab')
crontab.write(backup_file)
+
if crontab.cron_file and not name and not do_install:
changed = crontab.remove_job_file()
module.exit_json(changed=changed,cron_file=cron_file,state=state)
diff --git a/system/group.py b/system/group.py
index ab542d9bc47..8edb93a1d0a 100644
--- a/system/group.py
+++ b/system/group.py
@@ -57,7 +57,6 @@ EXAMPLES = '''
'''
import grp
-import syslog
import platform
class Group(object):
@@ -86,13 +85,8 @@ class Group(object):
self.name = module.params['name']
self.gid = module.params['gid']
self.system = module.params['system']
- self.syslogging = False
def execute_command(self, cmd):
- if self.syslogging:
- syslog.openlog('ansible-%s' % os.path.basename(__file__))
- syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd))
-
return self.module.run_command(cmd)
def group_del(self):
@@ -395,11 +389,9 @@ def main():
group = Group(module)
- if group.syslogging:
- syslog.openlog('ansible-%s' % os.path.basename(__file__))
- syslog.syslog(syslog.LOG_NOTICE, 'Group instantiated - platform %s' % group.platform)
- if user.distribution:
- syslog.syslog(syslog.LOG_NOTICE, 'Group instantiated - distribution %s' % group.distribution)
+ module.debug('Group instantiated - platform %s' % group.platform)
+ if group.distribution:
+ module.debug('Group instantiated - distribution %s' % group.distribution)
rc = None
out = ''
diff --git a/system/service.py b/system/service.py
index 4255ecb83ab..f9a8b1e24c1 100644
--- a/system/service.py
+++ b/system/service.py
@@ -74,14 +74,6 @@ options:
description:
- Additional arguments provided on the command line
aliases: [ 'args' ]
- must_exist:
- required: false
- default: true
- version_added: "2.0"
- description:
- - Avoid a module failure if the named service does not exist. Useful
- for opportunistically starting/stopping/restarting a list of
- potential services.
'''
EXAMPLES = '''
@@ -106,8 +98,6 @@ EXAMPLES = '''
# Example action to restart network service for interface eth0
- service: name=network state=restarted args=eth0
-# Example action to restart nova-compute if it exists
-- service: name=nova-compute state=restarted must_exist=no
'''
import platform
@@ -169,9 +159,6 @@ class Service(object):
self.rcconf_value = None
self.svc_change = False
- # select whether we dump additional debug info through syslog
- self.syslogging = False
-
# ===========================================
# Platform specific methods (must be replaced by subclass).
@@ -191,9 +178,6 @@ class Service(object):
# Generic methods that should be used on all platforms.
def execute_command(self, cmd, daemonize=False):
- if self.syslogging:
- syslog.openlog('ansible-%s' % os.path.basename(__file__))
- syslog.syslog(syslog.LOG_NOTICE, 'Command %s, daemonize %r' % (cmd, daemonize))
# Most things don't need to be daemonized
if not daemonize:
@@ -481,11 +465,8 @@ class LinuxService(Service):
self.enable_cmd = location['chkconfig']
if self.enable_cmd is None:
- if self.module.params['must_exist']:
- self.module.fail_json(msg="no service or tool found for: %s" % self.name)
- else:
- # exiting without change on non-existent service
- self.module.exit_json(changed=False, exists=False)
+ # exiting without change on non-existent service
+ self.module.exit_json(changed=False, exists=False)
# If no service control tool selected yet, try to see if 'service' is available
if self.svc_cmd is None and location.get('service', False):
@@ -493,11 +474,7 @@ class LinuxService(Service):
# couldn't find anything yet
if self.svc_cmd is None and not self.svc_initscript:
- if self.module.params['must_exist']:
- self.module.fail_json(msg='cannot find \'service\' binary or init script for service, possible typo in service name?, aborting')
- else:
- # exiting without change on non-existent service
- self.module.exit_json(changed=False, exists=False)
+ self.module.exit_json(changed=False, exists=False)
if location.get('initctl', False):
self.svc_initctl = location['initctl']
@@ -1442,7 +1419,6 @@ def main():
enabled = dict(type='bool'),
runlevel = dict(required=False, default='default'),
arguments = dict(aliases=['args'], default=''),
- must_exist = dict(type='bool', default=True),
),
supports_check_mode=True
)
@@ -1451,11 +1427,9 @@ def main():
service = Service(module)
- if service.syslogging:
- syslog.openlog('ansible-%s' % os.path.basename(__file__))
- syslog.syslog(syslog.LOG_NOTICE, 'Service instantiated - platform %s' % service.platform)
- if service.distribution:
- syslog.syslog(syslog.LOG_NOTICE, 'Service instantiated - distribution %s' % service.distribution)
+ module.debug('Service instantiated - platform %s' % service.platform)
+ if service.distribution:
+ module.debug('Service instantiated - distribution %s' % service.distribution)
rc = 0
out = ''
@@ -1527,4 +1501,5 @@ def main():
module.exit_json(**result)
from ansible.module_utils.basic import *
+
main()
diff --git a/system/sysctl.py b/system/sysctl.py
index e48d5df74c5..db1652955fc 100644
--- a/system/sysctl.py
+++ b/system/sysctl.py
@@ -123,6 +123,8 @@ class SysctlModule(object):
def process(self):
+ self.platform = get_platform().lower()
+
# Whitespace is bad
self.args['name'] = self.args['name'].strip()
self.args['value'] = self._parse_value(self.args['value'])
@@ -206,7 +208,11 @@ class SysctlModule(object):
# Use the sysctl command to find the current value
def get_token_curr_value(self, token):
- thiscmd = "%s -e -n %s" % (self.sysctl_cmd, token)
+ if self.platform == 'openbsd':
+ # openbsd doesn't support -e, just drop it
+ thiscmd = "%s -n %s" % (self.sysctl_cmd, token)
+ else:
+ thiscmd = "%s -e -n %s" % (self.sysctl_cmd, token)
rc,out,err = self.module.run_command(thiscmd)
if rc != 0:
return None
@@ -217,7 +223,11 @@ class SysctlModule(object):
def set_token_value(self, token, value):
if len(value.split()) > 0:
value = '"' + value + '"'
- thiscmd = "%s -w %s=%s" % (self.sysctl_cmd, token, value)
+ if self.platform == 'openbsd':
+ # openbsd doesn't accept -w, but since it's not needed, just drop it
+ thiscmd = "%s %s=%s" % (self.sysctl_cmd, token, value)
+ else:
+ thiscmd = "%s -w %s=%s" % (self.sysctl_cmd, token, value)
rc,out,err = self.module.run_command(thiscmd)
if rc != 0:
self.module.fail_json(msg='setting %s failed: %s' % (token, out + err))
@@ -227,9 +237,20 @@ class SysctlModule(object):
# Run sysctl -p
def reload_sysctl(self):
# do it
- if get_platform().lower() == 'freebsd':
+ if self.platform == 'freebsd':
# freebsd doesn't support -p, so reload the sysctl service
rc,out,err = self.module.run_command('/etc/rc.d/sysctl reload')
+ elif self.platform == 'openbsd':
+ # openbsd doesn't support -p and doesn't have a sysctl service,
+ # so we have to set every value with its own sysctl call
+ for k, v in self.file_values.items():
+ rc = 0
+ if k != self.args['name']:
+ rc = self.set_token_value(k, v)
+ if rc != 0:
+ break
+ if rc == 0 and self.args['state'] == "present":
+ rc = self.set_token_value(self.args['name'], self.args['value'])
else:
# system supports reloading via the -p flag to sysctl, so we'll use that
sysctl_args = [self.sysctl_cmd, '-p', self.sysctl_file]
diff --git a/system/user.py b/system/user.py
index 45ce77381ce..499228953b2 100755
--- a/system/user.py
+++ b/system/user.py
@@ -83,7 +83,7 @@ options:
description:
- Optionally set the user's password to this crypted value. See
the user example in the github examples directory for what this looks
- like in a playbook. See U(http://docs.ansible.com/faq.html#how-do-i-generate-crypted-passwords-for-the-user-module)
+ like in a playbook. See U(http://docs.ansible.com/ansible/faq.html#how-do-i-generate-crypted-passwords-for-the-user-module)
for details on various ways to generate these password values.
Note on Darwin system, this value has to be cleartext.
Beware of security issues.
@@ -212,7 +212,6 @@ EXAMPLES = '''
import os
import pwd
import grp
-import syslog
import platform
import socket
import time
@@ -290,15 +289,8 @@ class User(object):
else:
self.ssh_file = os.path.join('.ssh', 'id_%s' % self.ssh_type)
- # select whether we dump additional debug info through syslog
- self.syslogging = False
-
def execute_command(self, cmd, use_unsafe_shell=False, data=None):
- if self.syslogging:
- syslog.openlog('ansible-%s' % os.path.basename(__file__))
- syslog.syslog(syslog.LOG_NOTICE, 'Command %s' % '|'.join(cmd))
-
return self.module.run_command(cmd, use_unsafe_shell=use_unsafe_shell, data=data)
def remove_user_userdel(self):
@@ -2079,11 +2071,9 @@ def main():
user = User(module)
- if user.syslogging:
- syslog.openlog('ansible-%s' % os.path.basename(__file__))
- syslog.syslog(syslog.LOG_NOTICE, 'User instantiated - platform %s' % user.platform)
- if user.distribution:
- syslog.syslog(syslog.LOG_NOTICE, 'User instantiated - distribution %s' % user.distribution)
+ module.debug('User instantiated - platform %s' % user.platform)
+ if user.distribution:
+ module.debug('User instantiated - distribution %s' % user.distribution)
rc = None
out = ''
diff --git a/utilities/helper/accelerate.py b/utilities/helper/accelerate.py
index 8ae8ab263be..e9e3b2ef5f6 100644
--- a/utilities/helper/accelerate.py
+++ b/utilities/helper/accelerate.py
@@ -272,6 +272,7 @@ class LocalSocketThread(Thread):
pass
def terminate(self):
+ super(LocalSocketThread, self).terminate()
self.terminated = True
self.s.shutdown(socket.SHUT_RDWR)
self.s.close()
@@ -311,7 +312,6 @@ class ThreadedTCPServer(SocketServer.ThreadingTCPServer):
SocketServer.ThreadingTCPServer.__init__(self, server_address, RequestHandlerClass)
def shutdown(self):
- self.local_thread.terminate()
self.running = False
SocketServer.ThreadingTCPServer.shutdown(self)
@@ -472,8 +472,6 @@ class ThreadedTCPRequestHandler(SocketServer.BaseRequestHandler):
def command(self, data):
if 'cmd' not in data:
return dict(failed=True, msg='internal error: cmd is required')
- if 'tmp_path' not in data:
- return dict(failed=True, msg='internal error: tmp_path is required')
vvvv("executing: %s" % data['cmd'])
@@ -601,15 +599,14 @@ def daemonize(module, password, port, timeout, minutes, use_ipv6, pid_file):
server.shutdown()
else:
# reschedule the check
- vvvv("daemon idle for %d seconds (timeout=%d)" % (total_seconds,minutes*60))
- signal.alarm(30)
+ signal.alarm(1)
except:
pass
finally:
server.last_event_lock.release()
signal.signal(signal.SIGALRM, timer_handler)
- signal.alarm(30)
+ signal.alarm(1)
tries = 5
while tries > 0:
diff --git a/web_infrastructure/supervisorctl.py b/web_infrastructure/supervisorctl.py
index 9bc4c0b8afa..4da81ed6e0e 100644
--- a/web_infrastructure/supervisorctl.py
+++ b/web_infrastructure/supervisorctl.py
@@ -122,7 +122,7 @@ def main():
if supervisorctl_path:
supervisorctl_path = os.path.expanduser(supervisorctl_path)
- if os.path.exists(supervisorctl_path) and module.is_executable(supervisorctl_path):
+ if os.path.exists(supervisorctl_path) and is_executable(supervisorctl_path):
supervisorctl_args = [supervisorctl_path]
else:
module.fail_json(
@@ -239,5 +239,6 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
-
-main()
+# is_executable from basic
+if __name__ == '__main__':
+ main()
diff --git a/windows/setup.ps1 b/windows/setup.ps1
index 3e3317d0450..0b3e2c897e3 100644
--- a/windows/setup.ps1
+++ b/windows/setup.ps1
@@ -64,6 +64,15 @@ Set-Attr $result.ansible_facts "ansible_os_name" ($win32_os.Name.Split('|')[0]).
Set-Attr $result.ansible_facts "ansible_distribution" $osversion.VersionString
Set-Attr $result.ansible_facts "ansible_distribution_version" $osversion.Version.ToString()
+$date = New-Object psobject
+Set-Attr $date "date" (Get-Date -format d)
+Set-Attr $date "year" (Get-Date -format yyyy)
+Set-Attr $date "month" (Get-Date -format MM)
+Set-Attr $date "day" (Get-Date -format dd)
+Set-Attr $date "hour" (Get-Date -format HH)
+Set-Attr $date "iso8601" (Get-Date -format s)
+Set-Attr $result.ansible_facts "ansible_date_time" $date
+
Set-Attr $result.ansible_facts "ansible_totalmem" $capacity
Set-Attr $result.ansible_facts "ansible_lastboot" $win32_os.lastbootuptime.ToString("u")
@@ -77,6 +86,10 @@ $psversion = $PSVersionTable.PSVersion.Major
Set-Attr $result.ansible_facts "ansible_powershell_version" $psversion
$winrm_https_listener_parent_path = Get-ChildItem -Path WSMan:\localhost\Listener -Recurse | Where-Object {$_.PSChildName -eq "Transport" -and $_.Value -eq "HTTPS"} | select PSParentPath
+$winrm_https_listener_path = $null
+$https_listener = $null
+$winrm_cert_thumbprint = $null
+$uppercase_cert_thumbprint = $null
if ($winrm_https_listener_parent_path ) {
$winrm_https_listener_path = $winrm_https_listener_parent_path.PSParentPath.Substring($winrm_https_listener_parent_path.PSParentPath.LastIndexOf("\"))
diff --git a/windows/win_file.ps1 b/windows/win_file.ps1
index f8416120abf..f387780123c 100644
--- a/windows/win_file.ps1
+++ b/windows/win_file.ps1
@@ -71,18 +71,15 @@ If (Test-Path $path)
}
Else
{
- # Only files have the .Directory attribute.
- If ( $state -eq "directory" -and $fileinfo.Directory )
+ If ( $state -eq "directory" -and -not $fileinfo.PsIsContainer )
{
Fail-Json (New-Object psobject) "path is not a directory"
}
- # Only files have the .Directory attribute.
- If ( $state -eq "file" -and -not $fileinfo.Directory )
+ If ( $state -eq "file" -and $fileinfo.PsIsContainer )
{
Fail-Json (New-Object psobject) "path is not a file"
}
-
}
}
Else
diff --git a/windows/win_get_url.ps1 b/windows/win_get_url.ps1
index 18977bff1ef..a83ad2633b0 100644
--- a/windows/win_get_url.ps1
+++ b/windows/win_get_url.ps1
@@ -30,7 +30,7 @@ If ($params.url) {
$url = $params.url
}
Else {
- Fail-Json $result "mising required argument: url"
+ Fail-Json $result "missing required argument: url"
}
If ($params.dest) {
diff --git a/windows/win_lineinfile.ps1 b/windows/win_lineinfile.ps1
index ddf1d4e3000..4ba9086a6e9 100644
--- a/windows/win_lineinfile.ps1
+++ b/windows/win_lineinfile.ps1
@@ -387,8 +387,11 @@ Elseif (Test-Path $dest) {
$found = $FALSE;
Foreach ($encoding in $sortedlist.GetValueList()) {
$preamble = $encoding.GetPreamble();
- If ($preamble) {
- Foreach ($i in 0..$preamble.Length) {
+ If ($preamble -and $bom) {
+ Foreach ($i in 0..($preamble.Length - 1)) {
+ If ($i -ge $bom.Length) {
+ break;
+ }
If ($preamble[$i] -ne $bom[$i]) {
break;
}
@@ -427,7 +430,7 @@ If ($state -eq "present") {
}
Else {
- If ($regex -eq $FALSE -and $line -eq $FALSE) {
+ If ($regexp -eq $FALSE -and $line -eq $FALSE) {
Fail-Json (New-Object psobject) "one of line= or regexp= is required with state=absent";
}