Merge remote-tracking branch 'upstream/devel' into devel

commit c57669a39f
9 changed files with 1884 additions and 13 deletions
@@ -143,10 +143,15 @@ def create_or_update_dynamo_table(connection, module):
     read_capacity = module.params.get('read_capacity')
     write_capacity = module.params.get('write_capacity')
 
-    schema = [
-        HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type)),
-        RangeKey(range_key_name, DYNAMO_TYPE_MAP.get(range_key_type))
-    ]
+    if range_key_name:
+        schema = [
+            HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type)),
+            RangeKey(range_key_name, DYNAMO_TYPE_MAP.get(range_key_type))
+        ]
+    else:
+        schema = [
+            HashKey(hash_key_name, DYNAMO_TYPE_MAP.get(hash_key_type))
+        ]
     throughput = {
         'read': read_capacity,
         'write': write_capacity
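
For context only (not part of the diff): a minimal sketch of how boto.dynamodb2 consumes a schema/throughput pair shaped like the one built above, showing why the range key can now be optional. The table names, key names and capacities below are made up.

# Sketch: boto.dynamodb2 table creation with and without a range key.
import boto.dynamodb2
from boto.dynamodb2.fields import HashKey, RangeKey
from boto.dynamodb2.table import Table

conn = boto.dynamodb2.connect_to_region('us-east-1')

# Hash-key-only table (the new "else" branch above)
Table.create('example-hash-only',
             schema=[HashKey('id')],
             throughput={'read': 1, 'write': 1},
             connection=conn)

# Hash + range key table (the original schema shape)
Table.create('example-hash-range',
             schema=[HashKey('id'), RangeKey('created')],
             throughput={'read': 1, 'write': 1},
             connection=conn)
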
@@ -133,7 +133,7 @@ def main():
 
     if region:
         try:
-            connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
+            connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
         except (boto.exception.NoAuthHandlerFound, StandardError), e:
             module.fail_json(msg=str(e))
     else:
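
For context only (not part of the diff): the practical difference the one-word change makes. boto.vpc returns a VPCConnection, which extends EC2Connection and adds the VPC-specific calls this module needs; the region name below is illustrative.

# Sketch: VPCConnection vs EC2Connection.
import boto.ec2
import boto.vpc

ec2_conn = boto.ec2.connect_to_region('us-east-1')
vpc_conn = boto.vpc.connect_to_region('us-east-1')

vpc_conn.get_all_vpcs()     # available on VPCConnection
# ec2_conn.get_all_vpcs()   # would fail: EC2Connection has no VPC calls
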
@@ -20,10 +20,10 @@ short_description: Manage s3 buckets in AWS
 description:
     - Manage s3 buckets in AWS
 version_added: "2.0"
-author: Rob White (@wimnat)
+author: "Rob White (@wimnat)"
 options:
   force:
     description:
       - When trying to delete a bucket, delete all keys in the bucket first (an s3 bucket must be empty for a successful deletion)
     required: false
     default: no
@@ -40,11 +40,12 @@ options:
     default: null
   region:
     description:
       - AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard.
     required: false
     default: null
   s3_url:
-    description: S3 URL endpoint for usage with Eucalypus, fakes3, etc. Otherwise assumes AWS
+    description:
+      - S3 URL endpoint for usage with Eucalypus, fakes3, etc. Otherwise assumes AWS
     default: null
     aliases: [ S3_URL ]
   requester_pays:
@@ -65,12 +66,12 @@ options:
     required: false
     default: null
   versioning:
     description:
       - Whether versioning is enabled or disabled (note that once versioning is enabled, it can only be suspended)
     required: false
     default: no
     choices: [ 'yes', 'no' ]
 
 extends_documentation_fragment: aws
 '''
 
@@ -387,4 +388,4 @@ from ansible.module_utils.basic import *
 from ansible.module_utils.ec2 import *
 
 if __name__ == '__main__':
     main()
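
For context only (not part of the diff): the boto behaviour the region option above describes. With no location the bucket lands in "US Standard"; otherwise a Location constant (or plain region string) is passed through. The bucket names below are made up.

# Sketch: boto bucket creation and the US Standard default.
import boto
from boto.s3.connection import Location

conn = boto.connect_s3()
conn.create_bucket('example-bucket-us-standard')               # Location.DEFAULT ('')
conn.create_bucket('example-bucket-eu', location=Location.EU)  # region-pinned bucket
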
cloud/amazon/s3_lifecycle.py (new file, 421 lines)
@@ -0,0 +1,421 @@
#!/usr/bin/python
#
# This is a free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This Ansible library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this library. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: s3_lifecycle
short_description: Manage s3 bucket lifecycle rules in AWS
description:
    - Manage s3 bucket lifecycle rules in AWS
version_added: "2.0"
author: Rob White (@wimnat)
notes:
  - If specifying expiration time as days then transition time must also be specified in days
  - If specifying expiration time as a date then transition time must also be specified as a date
requirements:
  - python-dateutil
options:
  name:
    description:
      - "Name of the s3 bucket"
    required: true
  expiration_date:
    description:
      - "Indicates the lifetime of the objects that are subject to the rule by the date they will expire. The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified."
    required: false
    default: null
  expiration_days:
    description:
      - "Indicates the lifetime, in days, of the objects that are subject to the rule. The value must be a non-zero positive integer."
    required: false
    default: null
  prefix:
    description:
      - "Prefix identifying one or more objects to which the rule applies. If no prefix is specified, the rule will apply to the whole bucket."
    required: false
    default: null
  region:
    description:
      - "AWS region to create the bucket in. If not set then the value of the AWS_REGION and EC2_REGION environment variables are checked, followed by the aws_region and ec2_region settings in the Boto config file. If none of those are set the region defaults to the S3 Location: US Standard."
    required: false
    default: null
  rule_id:
    description:
      - "Unique identifier for the rule. The value cannot be longer than 255 characters. A unique value for the rule will be generated if no value is provided."
    required: false
    default: null
  state:
    description:
      - "Create or remove the lifecycle rule"
    required: false
    default: present
    choices: [ 'present', 'absent' ]
  status:
    description:
      - "If 'enabled', the rule is currently being applied. If 'disabled', the rule is not currently being applied."
    required: false
    default: enabled
    choices: [ 'enabled', 'disabled' ]
  storage_class:
    description:
      - "The storage class to transition to. Currently there is only one valid value - 'glacier'."
    required: false
    default: glacier
    choices: [ 'glacier' ]
  transition_date:
    description:
      - "Indicates the lifetime of the objects that are subject to the rule by the date they will transition to a different storage class. The value must be ISO-8601 format, the time must be midnight and a GMT timezone must be specified. If transition_days is not specified, this parameter is required."
    required: false
    default: null
  transition_days:
    description:
      - "Indicates when, in days, an object transitions to a different storage class. If transition_date is not specified, this parameter is required."
    required: false
    default: null

extends_documentation_fragment: aws
'''

EXAMPLES = '''
# Note: These examples do not set authentication details, see the AWS Guide for details.

# Configure a lifecycle rule on a bucket to expire (delete) items with a prefix of /logs/ after 30 days
- s3_lifecycle:
    name: mybucket
    expiration_days: 30
    prefix: /logs/
    status: enabled
    state: present

# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier after 7 days and then delete after 90 days
- s3_lifecycle:
    name: mybucket
    transition_days: 7
    expiration_days: 90
    prefix: /logs/
    status: enabled
    state: present

# Configure a lifecycle rule to transition all items with a prefix of /logs/ to glacier on 31 Dec 2020 and then delete on 31 Dec 2030. Note that midnight GMT must be specified.
# Be sure to quote your date strings
- s3_lifecycle:
    name: mybucket
    transition_date: "2020-12-30T00:00:00.000Z"
    expiration_date: "2030-12-30T00:00:00.000Z"
    prefix: /logs/
    status: enabled
    state: present

# Disable the rule created above
- s3_lifecycle:
    name: mybucket
    prefix: /logs/
    status: disabled
    state: present

# Delete the lifecycle rule created above
- s3_lifecycle:
    name: mybucket
    prefix: /logs/
    state: absent

'''

import xml.etree.ElementTree as ET
import copy
import datetime

try:
    import dateutil.parser
    HAS_DATEUTIL = True
except ImportError:
    HAS_DATEUTIL = False

try:
    import boto.ec2
    from boto.s3.connection import OrdinaryCallingFormat, Location
    from boto.s3.lifecycle import Lifecycle, Rule, Expiration, Transition
    from boto.exception import BotoServerError, S3CreateError, S3ResponseError
    HAS_BOTO = True
except ImportError:
    HAS_BOTO = False


def create_lifecycle_rule(connection, module):

    name = module.params.get("name")
    expiration_date = module.params.get("expiration_date")
    expiration_days = module.params.get("expiration_days")
    prefix = module.params.get("prefix")
    rule_id = module.params.get("rule_id")
    status = module.params.get("status")
    storage_class = module.params.get("storage_class")
    transition_date = module.params.get("transition_date")
    transition_days = module.params.get("transition_days")
    changed = False

    try:
        bucket = connection.get_bucket(name)
    except S3ResponseError, e:
        module.fail_json(msg=e.message)

    # Get the bucket's current lifecycle rules
    try:
        current_lifecycle_obj = bucket.get_lifecycle_config()
    except S3ResponseError, e:
        if e.error_code == "NoSuchLifecycleConfiguration":
            current_lifecycle_obj = Lifecycle()
        else:
            module.fail_json(msg=e.message)

    # Create expiration
    if expiration_days is not None:
        expiration_obj = Expiration(days=expiration_days)
    elif expiration_date is not None:
        expiration_obj = Expiration(date=expiration_date)
    else:
        expiration_obj = None

    # Create transition
    if transition_days is not None:
        transition_obj = Transition(days=transition_days, storage_class=storage_class.upper())
    elif transition_date is not None:
        transition_obj = Transition(date=transition_date, storage_class=storage_class.upper())
    else:
        transition_obj = None

    # Create rule
    rule = Rule(rule_id, prefix, status.title(), expiration_obj, transition_obj)

    # Create lifecycle
    lifecycle_obj = Lifecycle()

    appended = False
    # If current_lifecycle_obj is not None then we have rules to compare, otherwise just add the rule
    if current_lifecycle_obj:
        # If rule ID exists, use that for comparison otherwise compare based on prefix
        for existing_rule in current_lifecycle_obj:
            if rule.id == existing_rule.id:
                if compare_rule(rule, existing_rule):
                    lifecycle_obj.append(rule)
                    appended = True
                else:
                    lifecycle_obj.append(rule)
                    changed = True
                    appended = True
            elif rule.prefix == existing_rule.prefix:
                existing_rule.id = None
                if compare_rule(rule, existing_rule):
                    lifecycle_obj.append(rule)
                    appended = True
                else:
                    lifecycle_obj.append(rule)
                    changed = True
                    appended = True
        # If nothing appended then append now as the rule must not exist
        if not appended:
            lifecycle_obj.append(rule)
            changed = True
    else:
        lifecycle_obj.append(rule)
        changed = True

    # Write lifecycle to bucket
    try:
        bucket.configure_lifecycle(lifecycle_obj)
    except S3ResponseError, e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed)


def compare_rule(rule_a, rule_b):

    # Copy objects
    rule1 = copy.deepcopy(rule_a)
    rule2 = copy.deepcopy(rule_b)

    # Delete Rule from Rule
    try:
        del rule1.Rule
    except AttributeError:
        pass

    try:
        del rule2.Rule
    except AttributeError:
        pass

    # Extract Expiration and Transition objects
    rule1_expiration = rule1.expiration
    rule1_transition = rule1.transition
    rule2_expiration = rule2.expiration
    rule2_transition = rule2.transition

    # Delete the Expiration and Transition objects from the Rule objects
    del rule1.expiration
    del rule1.transition
    del rule2.expiration
    del rule2.transition

    # Compare
    if rule1_transition is None:
        rule1_transition = Transition()
    if rule2_transition is None:
        rule2_transition = Transition()
    if rule1_expiration is None:
        rule1_expiration = Expiration()
    if rule2_expiration is None:
        rule2_expiration = Expiration()

    if (rule1.__dict__ == rule2.__dict__) and (rule1_expiration.__dict__ == rule2_expiration.__dict__) and (rule1_transition.__dict__ == rule2_transition.__dict__):
        return True
    else:
        return False


def destroy_lifecycle_rule(connection, module):

    name = module.params.get("name")
    prefix = module.params.get("prefix")
    rule_id = module.params.get("rule_id")
    changed = False

    if prefix is None:
        prefix = ""

    try:
        bucket = connection.get_bucket(name)
    except S3ResponseError, e:
        module.fail_json(msg=e.message)

    # Get the bucket's current lifecycle rules
    try:
        current_lifecycle_obj = bucket.get_lifecycle_config()
    except S3ResponseError, e:
        if e.error_code == "NoSuchLifecycleConfiguration":
            module.exit_json(changed=changed)
        else:
            module.fail_json(msg=e.message)

    # Create lifecycle
    lifecycle_obj = Lifecycle()

    # Check if rule exists
    # If an ID exists, use that otherwise compare based on prefix
    if rule_id is not None:
        for existing_rule in current_lifecycle_obj:
            if rule_id == existing_rule.id:
                # We're not keeping the rule (i.e. deleting) so mark as changed
                changed = True
            else:
                lifecycle_obj.append(existing_rule)
    else:
        for existing_rule in current_lifecycle_obj:
            if prefix == existing_rule.prefix:
                # We're not keeping the rule (i.e. deleting) so mark as changed
                changed = True
            else:
                lifecycle_obj.append(existing_rule)

    # Write lifecycle to bucket or, if there no rules left, delete lifecycle configuration
    try:
        if lifecycle_obj:
            bucket.configure_lifecycle(lifecycle_obj)
        else:
            bucket.delete_lifecycle_configuration()
    except BotoServerError, e:
        module.fail_json(msg=e.message)

    module.exit_json(changed=changed)


def main():

    argument_spec = ec2_argument_spec()
    argument_spec.update(
        dict(
            name = dict(required=True),
            expiration_days = dict(default=None, required=False, type='int'),
            expiration_date = dict(default=None, required=False, type='str'),
            prefix = dict(default=None, required=False),
            requester_pays = dict(default='no', type='bool'),
            rule_id = dict(required=False),
            state = dict(default='present', choices=['present', 'absent']),
            status = dict(default='enabled', choices=['enabled', 'disabled']),
            storage_class = dict(default='glacier', choices=['glacier']),
            transition_days = dict(default=None, required=False, type='int'),
            transition_date = dict(default=None, required=False, type='str')
        )
    )

    module = AnsibleModule(argument_spec=argument_spec,
                           mutually_exclusive = [
                               [ 'expiration_days', 'expiration_date' ],
                               [ 'expiration_days', 'transition_date' ],
                               [ 'transition_days', 'transition_date' ],
                               [ 'transition_days', 'expiration_date' ]
                           ]
                           )

    if not HAS_BOTO:
        module.fail_json(msg='boto required for this module')

    if not HAS_DATEUTIL:
        module.fail_json(msg='dateutil required for this module')

    region, ec2_url, aws_connect_params = get_aws_connection_info(module)

    if region in ('us-east-1', '', None):
        # S3ism for the US Standard region
        location = Location.DEFAULT
    else:
        # Boto uses symbolic names for locations but region strings will
        # actually work fine for everything except us-east-1 (US Standard)
        location = region
    try:
        connection = boto.s3.connect_to_region(location, is_secure=True, calling_format=OrdinaryCallingFormat(), **aws_connect_params)
        # use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
        if connection is None:
            connection = boto.connect_s3(**aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, StandardError), e:
        module.fail_json(msg=str(e))

    expiration_date = module.params.get("expiration_date")
    transition_date = module.params.get("transition_date")
    state = module.params.get("state")

    # If expiration_date set, check string is valid
    if expiration_date is not None:
        try:
            datetime.datetime.strptime(expiration_date, "%Y-%m-%dT%H:%M:%S.000Z")
        except ValueError, e:
            module.fail_json(msg="expiration_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")

    if transition_date is not None:
        try:
            datetime.datetime.strptime(transition_date, "%Y-%m-%dT%H:%M:%S.000Z")
        except ValueError, e:
            module.fail_json(msg="expiration_date is not a valid ISO-8601 format. The time must be midnight and a timezone of GMT must be included")

    if state == 'present':
        create_lifecycle_rule(connection, module)
    elif state == 'absent':
        destroy_lifecycle_rule(connection, module)

from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

if __name__ == '__main__':
    main()
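
For context only (not part of the diff): a minimal sketch of the boto lifecycle objects the new module wires together, used directly against a bucket, plus the strptime check its date validation relies on. The bucket name, rule id, prefix and dates are illustrative only.

# Sketch: boto S3 lifecycle configuration by hand.
import datetime

import boto
from boto.s3.lifecycle import Lifecycle, Rule, Expiration, Transition

conn = boto.connect_s3()
bucket = conn.get_bucket('example-bucket')

lifecycle = Lifecycle()
lifecycle.append(Rule('logs-rule', '/logs/', 'Enabled',
                      Expiration(days=90),
                      Transition(days=7, storage_class='GLACIER')))
bucket.configure_lifecycle(lifecycle)

# The module's date validation is essentially this strptime call: the
# milliseconds must be ".000" and the trailing "Z" is matched literally.
datetime.datetime.strptime("2020-12-30T00:00:00.000Z", "%Y-%m-%dT%H:%M:%S.000Z")
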
@@ -53,6 +53,21 @@ options:
       - If not set, first found service offering is used.
     required: false
     default: null
+  cpu:
+    description:
+      - The number of CPUs to allocate to the instance, used with custom service offerings
+    required: false
+    default: null
+  cpu_speed:
+    description:
+      - The clock speed/shares allocated to the instance, used with custom service offerings
+    required: false
+    default: null
+  memory:
+    description:
+      - The memory allocated to the instance, used with custom service offerings
+    required: false
+    default: null
   template:
     description:
       - Name or id of the template to be used for creating the new instance.
@@ -547,6 +562,18 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
             user_data = base64.b64encode(user_data)
         return user_data
 
+    def get_details(self):
+        res = None
+        cpu = self.module.params.get('cpu')
+        cpu_speed = self.module.params.get('cpu_speed')
+        memory = self.module.params.get('memory')
+        if all([cpu, cpu_speed, memory]):
+            res = [{
+                'cpuNumber': cpu,
+                'cpuSpeed': cpu_speed,
+                'memory': memory,
+            }]
+        return res
 
     def deploy_instance(self, start_vm=True):
         self.result['changed'] = True
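
For context only (not part of the diff): the shape get_details() produces when all three new parameters are supplied; the values below are illustrative. CloudStack consumes this as the details map for custom (unconstrained) service offerings.

# Sketch: the details payload built by get_details().
details = [{
    'cpuNumber': 2,     # from the new "cpu" module parameter
    'cpuSpeed': 2000,   # from "cpu_speed"
    'memory': 2048,     # from "memory"
}]
# If any of the three is missing, get_details() returns None and the
# argument is simply left out of the deploy call.
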
@@ -577,6 +604,7 @@ class AnsibleCloudStackInstance(AnsibleCloudStack):
         args['rootdisksize'] = self.module.params.get('root_disk_size')
         args['securitygroupnames'] = ','.join(self.module.params.get('security_groups'))
         args['affinitygroupnames'] = ','.join(self.module.params.get('affinity_groups'))
+        args['details'] = self.get_details()
 
         template_iso = self.get_template_or_iso()
         if 'hypervisor' not in template_iso:
@@ -798,6 +826,9 @@ def main():
             group = dict(default=None),
             state = dict(choices=['present', 'deployed', 'started', 'stopped', 'restarted', 'absent', 'destroyed', 'expunged'], default='present'),
             service_offering = dict(default=None),
+            cpu = dict(default=None, type='int'),
+            cpu_speed = dict(default=None, type='int'),
+            memory = dict(default=None, type='int'),
             template = dict(default=None),
             iso = dict(default=None),
             networks = dict(type='list', aliases=[ 'network' ], default=None),
@@ -832,6 +863,7 @@ def main():
         ),
         required_together = (
             ['api_key', 'api_secret', 'api_url'],
+            ['cpu', 'cpu_speed', 'memory'],
         ),
         supports_check_mode=True
     )
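
For context only (not part of the diff): what the new required_together entry enforces. The standalone module below is hypothetical; the check itself comes from AnsibleModule, which fails fast (roughly "parameters are required together: cpu, cpu_speed, memory") when only a subset is supplied.

# Sketch: required_together validation in isolation.
from ansible.module_utils.basic import AnsibleModule

module = AnsibleModule(
    argument_spec=dict(
        cpu=dict(type='int'),
        cpu_speed=dict(type='int'),
        memory=dict(type='int'),
    ),
    required_together=[['cpu', 'cpu_speed', 'memory']],
)
# Passing only "cpu: 2" now fails during argument validation instead of
# sending a half-specified custom offering to CloudStack.
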
@@ -37,6 +37,13 @@ options:
     required: false
     default: no
     choices: [ "yes", "no" ]
+  production:
+    description:
+      - Install with --production flag
+    required: false
+    default: no
+    choices: [ "yes", "no" ]
+    version_added: "2.0"
   path:
     description:
       - The base path where to install the bower packages
@@ -76,6 +83,7 @@ class Bower(object):
         self.module = module
         self.name = kwargs['name']
         self.offline = kwargs['offline']
+        self.production = kwargs['production']
         self.path = kwargs['path']
         self.version = kwargs['version']
 
@@ -94,6 +102,9 @@ class Bower(object):
         if self.offline:
             cmd.append('--offline')
 
+        if self.production:
+            cmd.append('--production')
+
         # If path is specified, cd into that path and run the command.
         cwd = None
         if self.path:
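
For context only (not part of the diff): the flag-assembly pattern this hunk extends, shown standalone; the base command and values are illustrative, not the module's exact command line.

# Sketch: conditional CLI flag assembly.
cmd = ['bower', 'install']
offline = False
production = True

if offline:
    cmd.append('--offline')
if production:
    cmd.append('--production')

print(' '.join(cmd))  # bower install --production
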
@@ -148,6 +159,7 @@ def main():
     arg_spec = dict(
         name=dict(default=None),
         offline=dict(default='no', type='bool'),
+        production=dict(default='no', type='bool'),
         path=dict(required=True),
         state=dict(default='present', choices=['present', 'absent', 'latest', ]),
         version=dict(default=None),
@@ -158,6 +170,7 @@ def main():
 
     name = module.params['name']
     offline = module.params['offline']
+    production = module.params['production']
     path = os.path.expanduser(module.params['path'])
     state = module.params['state']
     version = module.params['version']
@@ -165,7 +178,7 @@ def main():
     if state == 'absent' and not name:
         module.fail_json(msg='uninstalling a package is only available for named packages')
 
-    bower = Bower(module, name=name, offline=offline, path=path, version=version)
+    bower = Bower(module, name=name, offline=offline, production=production, path=path, version=version)
 
     changed = False
     if state == 'present':
@@ -52,6 +52,13 @@ LOCALE_NORMALIZATION = {
     ".utf8": ".UTF-8",
     ".eucjp": ".EUC-JP",
     ".iso885915": ".ISO-8859-15",
+    ".cp1251": ".CP1251",
+    ".koi8r": ".KOI8-R",
+    ".armscii8": ".ARMSCII-8",
+    ".euckr": ".EUC-KR",
+    ".gbk": ".GBK",
+    ".gb18030": ".GB18030",
+    ".euctw": ".EUC-TW",
 }
 
 # ===========================================
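
For context only (not part of the diff): how a map like LOCALE_NORMALIZATION is typically applied, so the new encodings can be matched against user input. The helper below is illustrative, not the module's actual function, and uses a trimmed copy of the map.

# Sketch: normalising a locale's encoding suffix via the map.
LOCALE_NORMALIZATION = {
    ".utf8": ".UTF-8",
    ".eucjp": ".EUC-JP",
    ".iso885915": ".ISO-8859-15",
    ".cp1251": ".CP1251",
    ".koi8r": ".KOI8-R",
}

def normalize(locale):
    # Split "language.encoding" and swap in the canonical encoding spelling.
    if "." in locale:
        lang, encoding = locale.split(".", 1)
        return lang + LOCALE_NORMALIZATION.get("." + encoding, "." + encoding)
    return locale

print(normalize("ru_RU.koi8r"))  # ru_RU.KOI8-R
print(normalize("de_DE.utf8"))   # de_DE.UTF-8
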
windows/win_package.ps1 (new file, 1305 lines)
File diff suppressed because it is too large
windows/win_package.py (new file, 87 lines)
@@ -0,0 +1,87 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2014, Trond Hindenes <trond@hindenes.com>, and others
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name

DOCUMENTATION = '''
---
module: win_package
version_added: "1.7"
short_description: Installs/Uninstalls a installable package, either from local file system or url
description:
    - Installs or uninstalls a package
options:
  path:
    description:
      - Location of the package to be installed (either on file system, network share or url)
    required: true
    default: null
    aliases: []
  name:
    description:
      - name of the package. Just for logging reasons, will use the value of path if name isn't specified
    required: false
    default: null
    aliases: []
  product_id:
    description:
      - product id of the installed package (used for checking if already installed)
    required: false
    default: null
    aliases: []
  arguments:
    description:
      - Any arguments the installer needs
    default: null
    aliases: []
  state:
    description:
      - Install or Uninstall
    choices:
      - present
      - absent
    default: present
    aliases: [ensure]
  user_name:
    description:
      - Username of an account with access to the package if its located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_password for this to function properly.
    default: null
    aliases: []
  user_password:
    description:
      - Password of an account with access to the package if its located on a file share. Only needed if the winrm user doesn't have access to the package. Also specify user_name for this to function properly.
    default: null
    aliases: []
author: Trond Hindenes
'''

EXAMPLES = '''
# Playbook example
  - name: Install the vc thingy
    win_package:
      name="Microsoft Visual C thingy"
      path="http://download.microsoft.com/download/1/6/B/16B06F60-3B20-4FF2-B699-5E9B7962F9AE/VSU_4/vcredist_x64.exe"
      ProductId="{CF2BEA3C-26EA-32F8-AA9B-331F7E34BA97}"
      Arguments="/install /passive /norestart"

'''