Merge remote-tracking branch 'refs/remotes/ansible/devel' into devel

commit a550644d9f
47 changed files with 2788 additions and 237 deletions
@ -49,6 +49,8 @@ Docker: @cove @joshuaconner @softzilla @smashwilson
Red Hat Network: @barnabycourt @vritant @flossware

Zabbix: @cove @harrisongu @abulimov

PR Process
=======
VERSION (2 changes)
@ -1 +1 @@
2.0.0-0.4.beta2
2.0.0-0.5.beta3
@ -268,8 +268,7 @@ def main():
try:
connection = connect_to_aws(boto.dynamodb2, region, **aws_connect_params)
except (NoAuthHandlerFound, AnsibleAWSError), e:
except (NoAuthHandlerFound, StandardError), e:
module.fail_json(msg=str(e))

state = module.params.get('state')
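The hunks above and below repeatedly change which exceptions this connection block catches (StandardError versus AnsibleAWSError) while keeping the Python 2-only "except ..., e" comma syntax. A minimal, self-contained sketch of the same multi-exception catch written with the "as" form that also runs on Python 3; the parse_port helper is purely illustrative and not part of any module here.

    def parse_port(value):
        try:
            return int(value)
        # the modules above spell this:  except (TypeError, ValueError), e:
        except (TypeError, ValueError) as e:
            raise SystemExit("invalid port: %s" % e)

    parse_port("8080")   # returns 8080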
@ -184,7 +184,7 @@ def main():
if region:
try:
connection = connect_to_aws(boto.ec2.elb, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")

@ -194,4 +194,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

main()
if __name__ == '__main__':
main()
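Several of these hunks replace a bare module-level main() call with the standard entry-point guard. A minimal sketch of the pattern, using nothing beyond the standard library:

    def main():
        # module logic would run here
        print("running as a script")

    if __name__ == '__main__':
        # only executed when run directly, not when the file is imported
        main()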
@ -195,9 +195,6 @@ def create_eni(connection, module):
instance_id = module.params.get("instance_id")
if instance_id == 'None':
instance_id = None
do_detach = True
else:
do_detach = False
device_index = module.params.get("device_index")
subnet_id = module.params.get('subnet_id')
private_ip_address = module.params.get('private_ip_address')

@ -212,7 +209,7 @@ def create_eni(connection, module):
if instance_id is not None:
try:
eni.attach(instance_id, device_index)
except BotoServerError as ex:
except BotoServerError:
eni.delete()
raise
# Wait to allow creation / attachment to finish
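The create_eni hunk above keeps the idiom of deleting a freshly created interface when the attach step fails, and simply drops the unused "ex" binding. A rough, self-contained sketch of that cleanup idiom with a hypothetical factory and resource object, not the module's actual classes:

    def create_and_attach(factory, instance_id, device_index):
        resource = factory()                  # e.g. create the network interface
        try:
            resource.attach(instance_id, device_index)
        except Exception:
            resource.delete()                 # do not leak the half-created resource
            raise
        return resource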
@ -236,8 +233,6 @@ def modify_eni(connection, module):
else:
do_detach = False
device_index = module.params.get("device_index")
subnet_id = module.params.get('subnet_id')
private_ip_address = module.params.get('private_ip_address')
description = module.params.get('description')
security_groups = module.params.get('security_groups')
force_detach = module.params.get("force_detach")

@ -245,7 +240,6 @@ def modify_eni(connection, module):
delete_on_termination = module.params.get("delete_on_termination")
changed = False

try:
# Get the eni with the eni_id specified
eni_result_set = connection.get_all_network_interfaces(eni_id)
@ -376,7 +370,7 @@ def main():
if region:
try:
connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")

@ -403,4 +397,5 @@ from ansible.module_utils.ec2 import *
# this is magic, see lib/ansible/module_common.py
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>

main()
if __name__ == '__main__':
main()
@ -113,7 +113,7 @@ def main():
if region:
try:
connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
@ -77,6 +77,12 @@ def get_instance_info(instance):
for interface in instance.interfaces:
interfaces.append({ 'id': interface.id, 'mac_address': interface.mac_address }.copy())

# If an instance is terminated, sourceDestCheck is no longer returned
try:
source_dest_check = instance.sourceDestCheck
except AttributeError:
source_dest_check = None

instance_info = { 'id': instance.id,
'kernel': instance.kernel,
'instance_profile': instance.instance_profile,

@ -90,7 +96,7 @@ def get_instance_info(instance):
'ramdisk': instance.ramdisk,
'tags': instance.tags,
'key_name': instance.key_name,
'source_destination_check': instance.sourceDestCheck,
'source_destination_check': source_dest_check,
'image_id': instance.image_id,
'groups': groups,
'interfaces': interfaces,

@ -148,7 +154,7 @@ def main():
if region:
try:
connection = connect_to_aws(boto.ec2, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")

@ -161,4 +167,3 @@ from ansible.module_utils.ec2 import *

if __name__ == '__main__':
main()
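The get_instance_info hunk above wraps the sourceDestCheck lookup because terminated instances no longer return that attribute. The same guard can also be written with getattr and a default; a small sketch using a plain stand-in object rather than a real boto instance:

    class FakeInstance(object):
        pass                                  # terminated instance: attribute missing

    instance = FakeInstance()
    try:
        source_dest_check = instance.sourceDestCheck
    except AttributeError:
        source_dest_check = None

    # equivalent one-liner
    source_dest_check = getattr(instance, 'sourceDestCheck', None)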
@ -134,7 +134,7 @@ def main():
if region:
try:
connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
@ -571,7 +571,7 @@ def main():
if region:
try:
connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")

@ -598,4 +598,3 @@ from ansible.module_utils.ec2 import * # noqa

if __name__ == '__main__':
main()
@ -111,7 +111,7 @@ def main():
if region:
try:
connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
@ -164,7 +164,7 @@ def ensure_tags(vpc_conn, resource_id, tags, add_only, check_mode):
if to_delete and not add_only:
vpc_conn.delete_tags(resource_id, to_delete, dry_run=check_mode)

to_add = dict((k, tags[k]) for k in tags if k not in cur_tags)
to_add = dict((k, tags[k]) for k in tags if k not in cur_tags or cur_tags[k] != tags[k])
if to_add:
vpc_conn.create_tags(resource_id, to_add, dry_run=check_mode)
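The ensure_tags change above makes the module re-apply a tag whose value has drifted, not only tags that are missing entirely. A tiny worked example of the new expression with made-up tag data:

    cur_tags = {'env': 'dev', 'owner': 'ops'}
    tags     = {'env': 'prod', 'team': 'net'}

    to_add = dict((k, tags[k]) for k in tags
                  if k not in cur_tags or cur_tags[k] != tags[k])
    # the old expression would give {'team': 'net'}; the new one gives
    # {'env': 'prod', 'team': 'net'} because the value of 'env' changed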
@ -242,7 +242,7 @@ def main():
if region:
try:
connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")

@ -270,4 +270,3 @@ from ansible.module_utils.ec2 import * # noqa

if __name__ == '__main__':
main()
@ -111,7 +111,7 @@ def main():
if region:
try:
connection = connect_to_aws(boto.vpc, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
@ -25,7 +25,7 @@ description:
- Creates or terminates ecs clusters.
version_added: "2.0"
author: Mark Chance(@Java1Guy)
requirements: [ json, time, boto, boto3 ]
requirements: [ boto, boto3 ]
options:
state:
description:
@ -100,8 +100,9 @@ status:
returned: ACTIVE
type: string
'''
import time

try:
import json, time
import boto
HAS_BOTO = True
except ImportError:
@ -147,7 +148,7 @@ class EcsClusterManager:
c = self.find_in_array(response['clusters'], cluster_name)
if c:
return c
raise StandardError("Unknown problem describing cluster %s." % cluster_name)
raise Exception("Unknown problem describing cluster %s." % cluster_name)

def create_cluster(self, clusterName = 'default'):
response = self.ecs.create_cluster(clusterName=clusterName)
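StandardError exists only on Python 2, so raising the built-in Exception, as the hunk above does, keeps describe_cluster usable on either interpreter. A stripped-down illustration of the same control flow, where find_result stands in for the cluster lookup:

    def describe(find_result, cluster_name):
        if find_result:
            return find_result
        raise Exception("Unknown problem describing cluster %s." % cluster_name)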
@ -175,8 +176,6 @@ def main():
if not HAS_BOTO3:
module.fail_json(msg='boto3 is required.')

cluster_name = module.params['name']

cluster_mgr = EcsClusterManager(module)
try:
existing = cluster_mgr.describe_cluster(module.params['name'])
cloud/amazon/route53_facts.py (new file, 434 lines)
@ -0,0 +1,434 @@
||||||
|
#!/usr/bin/python
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
module: route53_facts
|
||||||
|
short_description: Retrieves route53 details using AWS methods
|
||||||
|
description:
|
||||||
|
- Gets various details related to Route53 zone, record set or health check details
|
||||||
|
version_added: "2.0"
|
||||||
|
options:
|
||||||
|
query:
|
||||||
|
description:
|
||||||
|
- specifies the query action to take
|
||||||
|
required: True
|
||||||
|
choices: [
|
||||||
|
'change',
|
||||||
|
'checker_ip_range',
|
||||||
|
'health_check',
|
||||||
|
'hosted_zone',
|
||||||
|
'record_sets',
|
||||||
|
'reusable_delegation_set',
|
||||||
|
]
|
||||||
|
change_id:
|
||||||
|
description:
|
||||||
|
- The ID of the change batch request.
|
||||||
|
The value that you specify here is the value that
|
||||||
|
ChangeResourceRecordSets returned in the Id element
|
||||||
|
when you submitted the request.
|
||||||
|
required: false
|
||||||
|
hosted_zone_id:
|
||||||
|
description:
|
||||||
|
- The Hosted Zone ID of the DNS zone
|
||||||
|
required: false
|
||||||
|
max_items:
|
||||||
|
description:
|
||||||
|
- Maximum number of items to return for various get/list requests
|
||||||
|
required: false
|
||||||
|
next_marker:
|
||||||
|
description:
|
||||||
|
- "Some requests such as list_command: hosted_zones will return a maximum
|
||||||
|
number of entries - EG 100. If the number of entries exceeds this maximum
|
||||||
|
another request can be sent using the NextMarker entry from the first response
|
||||||
|
to get the next page of results"
|
||||||
|
required: false
|
||||||
|
delegation_set_id:
|
||||||
|
description:
|
||||||
|
- The DNS Zone delegation set ID
|
||||||
|
required: false
|
||||||
|
start_record_name:
|
||||||
|
description:
|
||||||
|
- "The first name in the lexicographic ordering of domain names that you want
|
||||||
|
the list_command: record_sets to start listing from"
|
||||||
|
required: false
|
||||||
|
type:
|
||||||
|
description:
|
||||||
|
- The type of DNS record
|
||||||
|
required: false
|
||||||
|
choices: [ 'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS' ]
|
||||||
|
dns_name:
|
||||||
|
description:
|
||||||
|
- The first name in the lexicographic ordering of domain names that you want
|
||||||
|
the list_command to start listing from
|
||||||
|
required: false
|
||||||
|
resource_id:
|
||||||
|
description:
|
||||||
|
- The ID/s of the specified resource/s
|
||||||
|
required: false
|
||||||
|
aliases: ['resource_ids']
|
||||||
|
health_check_id:
|
||||||
|
description:
|
||||||
|
- The ID of the health check
|
||||||
|
required: false
|
||||||
|
hosted_zone_method:
|
||||||
|
description:
|
||||||
|
- "This is used in conjunction with query: hosted_zone.
|
||||||
|
It allows for listing details, counts or tags of various
|
||||||
|
hosted zone details."
|
||||||
|
required: false
|
||||||
|
choices: [
|
||||||
|
'details',
|
||||||
|
'list',
|
||||||
|
'list_by_name',
|
||||||
|
'count',
|
||||||
|
'tags',
|
||||||
|
]
|
||||||
|
default: 'list'
|
||||||
|
health_check_method:
|
||||||
|
description:
|
||||||
|
- "This is used in conjunction with query: health_check.
|
||||||
|
It allows for listing details, counts or tags of various
|
||||||
|
health check details."
|
||||||
|
required: false
|
||||||
|
choices: [
|
||||||
|
'list',
|
||||||
|
'details',
|
||||||
|
'status',
|
||||||
|
'failure_reason',
|
||||||
|
'count',
|
||||||
|
'tags',
|
||||||
|
]
|
||||||
|
default: 'list'
|
||||||
|
author: Karen Cheng(@Etherdaemon)
|
||||||
|
extends_documentation_fragment: aws
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
# Simple example of listing all hosted zones
|
||||||
|
- name: List all hosted zones
|
||||||
|
route53_facts:
|
||||||
|
query: hosted_zone
|
||||||
|
register: hosted_zones
|
||||||
|
|
||||||
|
# Getting a count of hosted zones
|
||||||
|
- name: Return a count of all hosted zones
|
||||||
|
route53_facts:
|
||||||
|
query: hosted_zone
|
||||||
|
hosted_zone_method: count
|
||||||
|
register: hosted_zone_count
|
||||||
|
|
||||||
|
- name: List the first 20 resource record sets in a given hosted zone
|
||||||
|
route53_facts:
|
||||||
|
profile: account_name
|
||||||
|
query: record_sets
|
||||||
|
hosted_zone_id: 'ZZZ1111112222'
|
||||||
|
max_items: 20
|
||||||
|
register: record_sets
|
||||||
|
|
||||||
|
- name: List first 20 health checks
|
||||||
|
route53_facts:
|
||||||
|
query: health_check
|
||||||
|
health_check_method: list
|
||||||
|
max_items: 20
|
||||||
|
register: health_checks
|
||||||
|
|
||||||
|
- name: Get health check last failure_reason
|
||||||
|
route53_facts:
|
||||||
|
query: health_check
|
||||||
|
health_check_method: failure_reason
|
||||||
|
health_check_id: '00000000-1111-2222-3333-12345678abcd'
|
||||||
|
register: health_check_failure_reason
|
||||||
|
|
||||||
|
- name: Retrieve reusable delegation set details
|
||||||
|
route53_facts:
|
||||||
|
query: reusable_delegation_set
|
||||||
|
delegation_set_id: 'delegation id'
|
||||||
|
register: delegation_sets
|
||||||
|
|
||||||
|
'''
|
||||||
|
try:
|
||||||
|
import json
|
||||||
|
import boto
|
||||||
|
import botocore
|
||||||
|
HAS_BOTO = True
|
||||||
|
except ImportError:
|
||||||
|
HAS_BOTO = False
|
||||||
|
|
||||||
|
try:
|
||||||
|
import boto3
|
||||||
|
HAS_BOTO3 = True
|
||||||
|
except ImportError:
|
||||||
|
HAS_BOTO3 = False
|
||||||
|
|
||||||
|
|
||||||
|
def get_hosted_zone(client, module):
|
||||||
|
params = dict()
|
||||||
|
|
||||||
|
if module.params.get('hosted_zone_id'):
|
||||||
|
params['HostedZoneId'] = module.params.get('hosted_zone_id')
|
||||||
|
else:
|
||||||
|
module.fail_json(msg="Hosted Zone Id is required")
|
||||||
|
|
||||||
|
results = client.get_hosted_zone(**params)
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def reusable_delegation_set_details(client, module):
|
||||||
|
params = dict()
|
||||||
|
if not module.params.get('delegation_set_id'):
|
||||||
|
if module.params.get('max_items'):
|
||||||
|
params['MaxItems'] = module.params.get('max_items')
|
||||||
|
|
||||||
|
if module.params.get('next_marker'):
|
||||||
|
params['Marker'] = module.params.get('next_marker')
|
||||||
|
|
||||||
|
results = client.list_reusable_delegation_sets(**params)
|
||||||
|
else:
|
||||||
|
params['DelegationSetId'] = module.params.get('delegation_set_id')
|
||||||
|
results = client.get_reusable_delegation_set(**params)
|
||||||
|
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def list_hosted_zones(client, module):
|
||||||
|
params = dict()
|
||||||
|
|
||||||
|
if module.params.get('max_items'):
|
||||||
|
params['MaxItems'] = module.params.get('max_items')
|
||||||
|
|
||||||
|
if module.params.get('next_marker'):
|
||||||
|
params['Marker'] = module.params.get('next_marker')
|
||||||
|
|
||||||
|
if module.params.get('delegation_set_id'):
|
||||||
|
params['DelegationSetId'] = module.params.get('delegation_set_id')
|
||||||
|
|
||||||
|
results = client.list_hosted_zones(**params)
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def list_hosted_zones_by_name(client, module):
|
||||||
|
params = dict()
|
||||||
|
|
||||||
|
if module.params.get('hosted_zone_id'):
|
||||||
|
params['HostedZoneId'] = module.params.get('hosted_zone_id')
|
||||||
|
|
||||||
|
if module.params.get('dns_name'):
|
||||||
|
params['DNSName'] = module.params.get('dns_name')
|
||||||
|
|
||||||
|
if module.params.get('max_items'):
|
||||||
|
params['MaxItems'] = module.params.get('max_items')
|
||||||
|
|
||||||
|
results = client.list_hosted_zones_by_name(**params)
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def change_details(client, module):
|
||||||
|
params = dict()
|
||||||
|
|
||||||
|
if module.params.get('change_id'):
|
||||||
|
params['Id'] = module.params.get('change_id')
|
||||||
|
else:
|
||||||
|
module.fail_json(msg="change_id is required")
|
||||||
|
|
||||||
|
results = client.get_change(**params)
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def checker_ip_range_details(client, module):
|
||||||
|
results = client.get_checker_ip_ranges()
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def get_count(client, module):
|
||||||
|
if module.params.get('query') == 'health_check':
|
||||||
|
results = client.get_health_check_count()
|
||||||
|
else:
|
||||||
|
results = client.get_hosted_zone_count()
|
||||||
|
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def get_health_check(client, module):
|
||||||
|
params = dict()
|
||||||
|
|
||||||
|
if not module.params.get('health_check_id'):
|
||||||
|
module.fail_json(msg="health_check_id is required")
|
||||||
|
else:
|
||||||
|
params['HealthCheckId'] = module.params.get('health_check_id')
|
||||||
|
|
||||||
|
if module.params.get('health_check_method') == 'details':
|
||||||
|
results = client.get_health_check(**params)
|
||||||
|
elif module.params.get('health_check_method') == 'failure_reason':
|
||||||
|
results = client.get_health_check_last_failure_reason(**params)
|
||||||
|
elif module.params.get('health_check_method') == 'status':
|
||||||
|
results = client.get_health_check_status(**params)
|
||||||
|
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def get_resource_tags(client, module):
|
||||||
|
params = dict()
|
||||||
|
|
||||||
|
if module.params.get('resource_id'):
|
||||||
|
params['ResourceIds'] = module.params.get('resource_id')
|
||||||
|
else:
|
||||||
|
module.fail_json(msg="resource_id or resource_ids is required")
|
||||||
|
|
||||||
|
if module.params.get('query') == 'health_check':
|
||||||
|
params['ResourceType'] = 'healthcheck'
|
||||||
|
else:
|
||||||
|
params['ResourceType'] = 'hostedzone'
|
||||||
|
|
||||||
|
results = client.list_tags_for_resources(**params)
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def list_health_checks(client, module):
|
||||||
|
params = dict()
|
||||||
|
|
||||||
|
if module.params.get('max_items'):
|
||||||
|
params['MaxItems'] = module.params.get('max_items')
|
||||||
|
|
||||||
|
if module.params.get('next_marker'):
|
||||||
|
params['Marker'] = module.params.get('next_marker')
|
||||||
|
|
||||||
|
results = client.list_health_checks(**params)
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def record_sets_details(client, module):
|
||||||
|
params = dict()
|
||||||
|
|
||||||
|
if module.params.get('hosted_zone_id'):
|
||||||
|
params['HostedZoneId'] = module.params.get('hosted_zone_id')
|
||||||
|
else:
|
||||||
|
module.fail_json(msg="Hosted Zone Id is required")
|
||||||
|
|
||||||
|
if module.params.get('start_record_name'):
|
||||||
|
params['StartRecordName'] = module.params.get('start_record_name')
|
||||||
|
|
||||||
|
if module.params.get('type') and not module.params.get('start_record_name'):
|
||||||
|
module.fail_json(msg="start_record_name must be specified if type is set")
|
||||||
|
elif module.params.get('type'):
|
||||||
|
params['StartRecordType'] = module.params.get('type')
|
||||||
|
|
||||||
|
results = client.list_resource_record_sets(**params)
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def health_check_details(client, module):
|
||||||
|
health_check_invocations = {
|
||||||
|
'list': list_health_checks,
|
||||||
|
'details': get_health_check,
|
||||||
|
'status': get_health_check,
|
||||||
|
'failure_reason': get_health_check,
|
||||||
|
'count': get_count,
|
||||||
|
'tags': get_resource_tags,
|
||||||
|
}
|
||||||
|
|
||||||
|
results = health_check_invocations[module.params.get('health_check_method')](client, module)
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def hosted_zone_details(client, module):
|
||||||
|
hosted_zone_invocations = {
|
||||||
|
'details': get_hosted_zone,
|
||||||
|
'list': list_hosted_zones,
|
||||||
|
'list_by_name': list_hosted_zones_by_name,
|
||||||
|
'count': get_count,
|
||||||
|
'tags': get_resource_tags,
|
||||||
|
}
|
||||||
|
|
||||||
|
results = hosted_zone_invocations[module.params.get('hosted_zone_method')](client, module)
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def main():
|
||||||
|
argument_spec = ec2_argument_spec()
|
||||||
|
argument_spec.update(dict(
|
||||||
|
query=dict(choices=[
|
||||||
|
'change',
|
||||||
|
'checker_ip_range',
|
||||||
|
'health_check',
|
||||||
|
'hosted_zone',
|
||||||
|
'record_sets',
|
||||||
|
'reusable_delegation_set',
|
||||||
|
], required=True),
|
||||||
|
change_id=dict(),
|
||||||
|
hosted_zone_id=dict(),
|
||||||
|
max_items=dict(type='str'),
|
||||||
|
next_marker=dict(),
|
||||||
|
delegation_set_id=dict(),
|
||||||
|
start_record_name=dict(),
|
||||||
|
type=dict(choices=[
|
||||||
|
'A', 'CNAME', 'MX', 'AAAA', 'TXT', 'PTR', 'SRV', 'SPF', 'NS'
|
||||||
|
]),
|
||||||
|
dns_name=dict(),
|
||||||
|
resource_id=dict(type='list', aliases=['resource_ids']),
|
||||||
|
health_check_id=dict(),
|
||||||
|
hosted_zone_method=dict(choices=[
|
||||||
|
'details',
|
||||||
|
'list',
|
||||||
|
'list_by_name',
|
||||||
|
'count',
|
||||||
|
'tags'
|
||||||
|
], default='list'),
|
||||||
|
health_check_method=dict(choices=[
|
||||||
|
'list',
|
||||||
|
'details',
|
||||||
|
'status',
|
||||||
|
'failure_reason',
|
||||||
|
'count',
|
||||||
|
'tags',
|
||||||
|
], default='list'),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
module = AnsibleModule(
|
||||||
|
argument_spec=argument_spec,
|
||||||
|
mutually_exclusive=[
|
||||||
|
['hosted_zone_method', 'health_check_method'],
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
||||||
|
# Validate Requirements
|
||||||
|
if not (HAS_BOTO or HAS_BOTO3):
|
||||||
|
module.fail_json(msg='json and boto/boto3 is required.')
|
||||||
|
|
||||||
|
try:
|
||||||
|
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
|
||||||
|
route53 = boto3_conn(module, conn_type='client', resource='route53', region=region, endpoint=ec2_url, **aws_connect_kwargs)
|
||||||
|
except boto.exception.NoAuthHandlerFound, e:
|
||||||
|
module.fail_json(msg="Can't authorize connection - "+str(e))
|
||||||
|
|
||||||
|
invocations = {
|
||||||
|
'change': change_details,
|
||||||
|
'checker_ip_range': checker_ip_range_details,
|
||||||
|
'health_check': health_check_details,
|
||||||
|
'hosted_zone': hosted_zone_details,
|
||||||
|
'record_sets': record_sets_details,
|
||||||
|
'reusable_delegation_set': reusable_delegation_set_details,
|
||||||
|
}
|
||||||
|
results = invocations[module.params.get('query')](route53, module)
|
||||||
|
|
||||||
|
module.exit_json(**results)
|
||||||
|
|
||||||
|
# import module snippets
|
||||||
|
from ansible.module_utils.basic import *
|
||||||
|
from ansible.module_utils.ec2 import *
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
main()
|
|
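The new route53_facts module above routes each query through a dict of handler functions ("invocations") rather than an if/elif chain. A minimal, self-contained sketch of that dispatch pattern with placeholder handlers, not the module's real ones:

    def change_details(client, module):
        return {'query': 'change'}

    def hosted_zone_details(client, module):
        return {'query': 'hosted_zone'}

    invocations = {
        'change': change_details,
        'hosted_zone': hosted_zone_details,
    }

    results = invocations['hosted_zone'](None, None)   # {'query': 'hosted_zone'}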
@ -129,11 +129,10 @@ def create_tags_container(tags):
tags_obj.add_tag_set(tag_set)
return tags_obj

def create_bucket(connection, module):
def create_bucket(connection, module, location):

policy = module.params.get("policy")
name = module.params.get("name")
region = module.params.get("region")
requester_pays = module.params.get("requester_pays")
tags = module.params.get("tags")
versioning = module.params.get("versioning")

@ -143,7 +142,7 @@ def create_bucket(connection, module):
bucket = connection.get_bucket(name)
except S3ResponseError, e:
try:
bucket = connection.create_bucket(name, location=region)
bucket = connection.create_bucket(name, location=location)
changed = True
except S3CreateError, e:
module.fail_json(msg=e.message)

@ -376,7 +375,7 @@ def main():
state = module.params.get("state")

if state == 'present':
create_bucket(connection, module)
create_bucket(connection, module, location)
elif state == 'absent':
destroy_bucket(connection, module)
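The s3_bucket change above has the caller resolve the bucket location once and pass it into create_bucket as a parameter, instead of create_bucket re-reading the region from module params. A hedged sketch of the shape of that call, simplified and not the module's exact code:

    def create_bucket(connection, module, location):
        name = module.params.get("name")
        # the caller decides the location; this function just uses it
        return connection.create_bucket(name, location=location)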
@ -324,7 +324,6 @@ def destroy_lifecycle_rule(connection, module):
else:
lifecycle_obj.append(existing_rule)

# Write lifecycle to bucket or, if there no rules left, delete lifecycle configuration
try:
if lifecycle_obj:

@ -385,7 +384,7 @@ def main():
# use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
if connection is None:
connection = boto.connect_s3(**aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))

expiration_date = module.params.get("expiration_date")
@ -162,10 +162,9 @@ def main():
# use this as fallback because connect_to_region seems to fail in boto + non 'classic' aws accounts in some cases
if connection is None:
connection = boto.connect_s3(**aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))

state = module.params.get("state")

if state == 'present':
cloud/amazon/sns_topic.py (new executable file, 261 lines)
@ -0,0 +1,261 @@
||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
DOCUMENTATION = """
|
||||||
|
module: sns_topic
|
||||||
|
short_description: Manages AWS SNS topics and subscriptions
|
||||||
|
description:
|
||||||
|
- The M(sns_topic) module allows you to create, delete, and manage subscriptions for AWS SNS topics.
|
||||||
|
version_added: 2.0
|
||||||
|
author: "Joel Thompson (@joelthompson)"
|
||||||
|
options:
|
||||||
|
name:
|
||||||
|
description:
|
||||||
|
- The name or ARN of the SNS topic to converge
|
||||||
|
required: true
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- Whether to create or destroy an SNS topic
|
||||||
|
required: false
|
||||||
|
default: present
|
||||||
|
choices: ["absent", "present"]
|
||||||
|
display_name:
|
||||||
|
description:
|
||||||
|
- Display name of the topic
|
||||||
|
required: False
|
||||||
|
policy:
|
||||||
|
description:
|
||||||
|
- Policy to apply to the SNS topic
|
||||||
|
required: False
|
||||||
|
delivery_policy:
|
||||||
|
description:
|
||||||
|
- Delivery policy to apply to the SNS topic
|
||||||
|
required: False
|
||||||
|
subscriptions:
|
||||||
|
description:
|
||||||
|
- List of subscriptions to apply to the topic. Note that AWS requires
|
||||||
|
subscriptions to be confirmed, so you will need to confirm any new
|
||||||
|
subscriptions.
|
||||||
|
purge_subscriptions:
|
||||||
|
description:
|
||||||
|
- "Whether to purge any subscriptions not listed here. NOTE: AWS does not
|
||||||
|
allow you to purge any PendingConfirmation subscriptions, so if any
|
||||||
|
exist and would be purged, they are silently skipped. This means that
|
||||||
|
somebody could come back later and confirm the subscription. Sorry.
|
||||||
|
Blame Amazon."
|
||||||
|
default: True
|
||||||
|
extends_documentation_fragment: aws
|
||||||
|
requirements: [ "boto" ]
|
||||||
|
"""
|
||||||
|
|
||||||
|
EXAMPLES = """
|
||||||
|
|
||||||
|
- name: Create alarm SNS topic
|
||||||
|
sns_topic:
|
||||||
|
name: "alarms"
|
||||||
|
state: present
|
||||||
|
display_name: "alarm SNS topic"
|
||||||
|
delivery_policy:
|
||||||
|
http:
|
||||||
|
defaultHealthyRetryPolicy:
|
||||||
|
minDelayTarget: 2
|
||||||
|
maxDelayTarget: 4
|
||||||
|
numRetries: 3
|
||||||
|
numMaxDelayRetries: 5
|
||||||
|
backoffFunction: "<linear|arithmetic|geometric|exponential>"
|
||||||
|
disableSubscriptionOverrides: True
|
||||||
|
defaultThrottlePolicy:
|
||||||
|
maxReceivesPerSecond: 10
|
||||||
|
subscriptions:
|
||||||
|
- endpoint: "my_email_address@example.com"
|
||||||
|
protocol: "email"
|
||||||
|
- endpoint: "my_mobile_number"
|
||||||
|
protocol: "sms"
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
import sys
|
||||||
|
import time
|
||||||
|
import json
|
||||||
|
import re
|
||||||
|
|
||||||
|
try:
|
||||||
|
import boto
|
||||||
|
import boto.sns
|
||||||
|
except ImportError:
|
||||||
|
print "failed=True msg='boto required for this module'"
|
||||||
|
sys.exit(1)
|
||||||
|
|
||||||
|
|
||||||
|
def canonicalize_endpoint(protocol, endpoint):
|
||||||
|
if protocol == 'sms':
|
||||||
|
import re
|
||||||
|
return re.sub('[^0-9]*', '', endpoint)
|
||||||
|
return endpoint
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
def get_all_topics(connection):
|
||||||
|
next_token = None
|
||||||
|
topics = []
|
||||||
|
while True:
|
||||||
|
response = connection.get_all_topics(next_token)
|
||||||
|
topics.extend(response['ListTopicsResponse']['ListTopicsResult']['Topics'])
|
||||||
|
next_token = \
|
||||||
|
response['ListTopicsResponse']['ListTopicsResult']['NextToken']
|
||||||
|
if not next_token:
|
||||||
|
break
|
||||||
|
return [t['TopicArn'] for t in topics]
|
||||||
|
|
||||||
|
|
||||||
|
def arn_topic_lookup(connection, short_topic):
|
||||||
|
# topic names cannot have colons, so this captures the full topic name
|
||||||
|
all_topics = get_all_topics(connection)
|
||||||
|
lookup_topic = ':%s' % short_topic
|
||||||
|
for topic in all_topics:
|
||||||
|
if topic.endswith(lookup_topic):
|
||||||
|
return topic
|
||||||
|
return None
|
||||||
|
|
||||||
|
def main():
|
||||||
|
argument_spec = ec2_argument_spec()
|
||||||
|
argument_spec.update(
|
||||||
|
dict(
|
||||||
|
name=dict(type='str', required=True),
|
||||||
|
state=dict(type='str', default='present', choices=['present',
|
||||||
|
'absent']),
|
||||||
|
display_name=dict(type='str', required=False),
|
||||||
|
policy=dict(type='dict', required=False),
|
||||||
|
delivery_policy=dict(type='dict', required=False),
|
||||||
|
subscriptions=dict(type='list', required=False),
|
||||||
|
purge_subscriptions=dict(type='bool', default=True),
|
||||||
|
)
|
||||||
|
)
|
||||||
|
|
||||||
|
module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)
|
||||||
|
|
||||||
|
name = module.params.get('name')
|
||||||
|
state = module.params.get('state')
|
||||||
|
display_name = module.params.get('display_name')
|
||||||
|
policy = module.params.get('policy')
|
||||||
|
delivery_policy = module.params.get('delivery_policy')
|
||||||
|
subscriptions = module.params.get('subscriptions')
|
||||||
|
purge_subscriptions = module.params.get('purge_subscriptions')
|
||||||
|
check_mode = module.check_mode
|
||||||
|
changed = False
|
||||||
|
|
||||||
|
topic_created = False
|
||||||
|
attributes_set = []
|
||||||
|
subscriptions_added = []
|
||||||
|
subscriptions_deleted = []
|
||||||
|
|
||||||
|
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
|
||||||
|
if not region:
|
||||||
|
module.fail_json(msg="region must be specified")
|
||||||
|
try:
|
||||||
|
connection = connect_to_aws(boto.sns, region, **aws_connect_params)
|
||||||
|
except boto.exception.NoAuthHandlerFound, e:
|
||||||
|
module.fail_json(msg=str(e))
|
||||||
|
|
||||||
|
# topics cannot contain ':', so thats the decider
|
||||||
|
if ':' in name:
|
||||||
|
all_topics = get_all_topics(connection)
|
||||||
|
if name in all_topics:
|
||||||
|
arn_topic = name
|
||||||
|
elif state == 'absent':
|
||||||
|
module.exit_json(changed=False)
|
||||||
|
else:
|
||||||
|
module.fail_json(msg="specified an ARN for a topic but it doesn't"
|
||||||
|
" exist")
|
||||||
|
else:
|
||||||
|
arn_topic = arn_topic_lookup(connection, name)
|
||||||
|
if not arn_topic:
|
||||||
|
if state == 'absent':
|
||||||
|
module.exit_json(changed=False)
|
||||||
|
elif check_mode:
|
||||||
|
module.exit_json(changed=True, topic_created=True,
|
||||||
|
subscriptions_added=subscriptions,
|
||||||
|
subscriptions_deleted=[])
|
||||||
|
|
||||||
|
changed=True
|
||||||
|
topic_created = True
|
||||||
|
connection.create_topic(name)
|
||||||
|
arn_topic = arn_topic_lookup(connection, name)
|
||||||
|
while not arn_topic:
|
||||||
|
time.sleep(3)
|
||||||
|
arn_topic = arn_topic_lookup(connection, name)
|
||||||
|
|
||||||
|
if arn_topic and state == "absent":
|
||||||
|
if not check_mode:
|
||||||
|
connection.delete_topic(arn_topic)
|
||||||
|
module.exit_json(changed=True)
|
||||||
|
|
||||||
|
topic_attributes = connection.get_topic_attributes(arn_topic) \
|
||||||
|
['GetTopicAttributesResponse'] ['GetTopicAttributesResult'] \
|
||||||
|
['Attributes']
|
||||||
|
if display_name and display_name != topic_attributes['DisplayName']:
|
||||||
|
changed = True
|
||||||
|
attributes_set.append('display_name')
|
||||||
|
if not check_mode:
|
||||||
|
connection.set_topic_attributes(arn_topic, 'DisplayName',
|
||||||
|
display_name)
|
||||||
|
|
||||||
|
if policy and policy != json.loads(topic_attributes['policy']):
|
||||||
|
changed = True
|
||||||
|
attributes_set.append('policy')
|
||||||
|
if not check_mode:
|
||||||
|
connection.set_topic_attributes(arn_topic, 'Policy',
|
||||||
|
json.dumps(policy))
|
||||||
|
|
||||||
|
if delivery_policy and ('DeliveryPolicy' not in topic_attributes or \
|
||||||
|
delivery_policy != json.loads(topic_attributes['DeliveryPolicy'])):
|
||||||
|
changed = True
|
||||||
|
attributes_set.append('delivery_policy')
|
||||||
|
if not check_mode:
|
||||||
|
connection.set_topic_attributes(arn_topic, 'DeliveryPolicy',
|
||||||
|
json.dumps(delivery_policy))
|
||||||
|
|
||||||
|
|
||||||
|
next_token = None
|
||||||
|
aws_subscriptions = []
|
||||||
|
while True:
|
||||||
|
response = connection.get_all_subscriptions_by_topic(arn_topic,
|
||||||
|
next_token)
|
||||||
|
aws_subscriptions.extend(response['ListSubscriptionsByTopicResponse'] \
|
||||||
|
['ListSubscriptionsByTopicResult']['Subscriptions'])
|
||||||
|
next_token = response['ListSubscriptionsByTopicResponse'] \
|
||||||
|
['ListSubscriptionsByTopicResult']['NextToken']
|
||||||
|
if not next_token:
|
||||||
|
break
|
||||||
|
|
||||||
|
desired_subscriptions = [(sub['protocol'],
|
||||||
|
canonicalize_endpoint(sub['protocol'], sub['endpoint'])) for sub in
|
||||||
|
subscriptions]
|
||||||
|
aws_subscriptions_list = []
|
||||||
|
|
||||||
|
for sub in aws_subscriptions:
|
||||||
|
sub_key = (sub['Protocol'], sub['Endpoint'])
|
||||||
|
aws_subscriptions_list.append(sub_key)
|
||||||
|
if purge_subscriptions and sub_key not in desired_subscriptions and \
|
||||||
|
sub['SubscriptionArn'] != 'PendingConfirmation':
|
||||||
|
changed = True
|
||||||
|
subscriptions_deleted.append(sub_key)
|
||||||
|
if not check_mode:
|
||||||
|
connection.unsubscribe(sub['SubscriptionArn'])
|
||||||
|
|
||||||
|
for (protocol, endpoint) in desired_subscriptions:
|
||||||
|
if (protocol, endpoint) not in aws_subscriptions_list:
|
||||||
|
changed = True
|
||||||
|
subscriptions_added.append(sub)
|
||||||
|
if not check_mode:
|
||||||
|
connection.subscribe(arn_topic, protocol, endpoint)
|
||||||
|
|
||||||
|
module.exit_json(changed=changed, topic_created=topic_created,
|
||||||
|
attributes_set=attributes_set,
|
||||||
|
subscriptions_added=subscriptions_added,
|
||||||
|
subscriptions_deleted=subscriptions_deleted, sns_arn=arn_topic)
|
||||||
|
|
||||||
|
from ansible.module_utils.basic import *
|
||||||
|
from ansible.module_utils.ec2 import *
|
||||||
|
|
||||||
|
main()
|
|
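The new sns_topic module above pages through topics and subscriptions with a NextToken loop. A small, self-contained sketch of that pagination idiom, with a plain dict standing in for the SNS API:

    def get_all_items(fetch_page):
        items, next_token = [], None
        while True:
            page, next_token = fetch_page(next_token)
            items.extend(page)
            if not next_token:
                break
        return items

    pages = {None: ([1, 2], 'tok'), 'tok': ([3], None)}
    print(get_all_items(lambda token: pages[token]))    # [1, 2, 3]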
@ -216,7 +216,7 @@ def main():
try:
connection = connect_to_aws(boto.sqs, region, **aws_connect_params)

except (NoAuthHandlerFound, StandardError), e:
except (NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))

state = module.params.get('state')

@ -230,4 +230,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

main()
if __name__ == '__main__':
main()
@ -84,9 +84,6 @@ ec2_tag:

'''

import sys
import time

try:
import boto.sts
from boto.exception import BotoServerError

@ -138,7 +135,7 @@ def main():
if region:
try:
connection = connect_to_aws(boto.sts, region, **aws_connect_params)
except (boto.exception.NoAuthHandlerFound, StandardError), e:
except (boto.exception.NoAuthHandlerFound, AnsibleAWSError), e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")

@ -153,4 +150,5 @@ def main():
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

main()
if __name__ == '__main__':
main()
cloud/cloudstack/cs_volume.py (new file, 467 lines)
@ -0,0 +1,467 @@
||||||
|
#!/usr/bin/python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# (c) 2015, Jefferson Girão <jefferson@girao.net>
|
||||||
|
# (c) 2015, René Moser <mail@renemoser.net>
|
||||||
|
#
|
||||||
|
# This file is part of Ansible
|
||||||
|
#
|
||||||
|
# Ansible is free software: you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation, either version 3 of the License, or
|
||||||
|
# (at your option) any later version.
|
||||||
|
#
|
||||||
|
# Ansible is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||||
|
|
||||||
|
DOCUMENTATION = '''
|
||||||
|
---
|
||||||
|
module: cs_volume
|
||||||
|
short_description: Manages volumes on Apache CloudStack based clouds.
|
||||||
|
description:
|
||||||
|
- Create, destroy, attach, detach volumes.
|
||||||
|
version_added: "2.1"
|
||||||
|
author:
|
||||||
|
- "Jefferson Girão (@jeffersongirao)"
|
||||||
|
- "René Moser (@resmo)"
|
||||||
|
options:
|
||||||
|
name:
|
||||||
|
description:
|
||||||
|
- Name of the volume.
|
||||||
|
- C(name) can only contain ASCII letters.
|
||||||
|
required: true
|
||||||
|
account:
|
||||||
|
description:
|
||||||
|
- Account the volume is related to.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
custom_id:
|
||||||
|
description:
|
||||||
|
- Custom id to the resource.
|
||||||
|
- Allowed to Root Admins only.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
disk_offering:
|
||||||
|
description:
|
||||||
|
- Name of the disk offering to be used.
|
||||||
|
- Required one of C(disk_offering), C(snapshot) if volume is not already C(state=present).
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
display_volume:
|
||||||
|
description:
|
||||||
|
- Whether to display the volume to the end user or not.
|
||||||
|
- Allowed to Root Admins only.
|
||||||
|
required: false
|
||||||
|
default: true
|
||||||
|
domain:
|
||||||
|
description:
|
||||||
|
- Name of the domain the volume to be deployed in.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
max_iops:
|
||||||
|
description:
|
||||||
|
- Max iops
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
min_iops:
|
||||||
|
description:
|
||||||
|
- Min iops
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
project:
|
||||||
|
description:
|
||||||
|
- Name of the project the volume to be deployed in.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
size:
|
||||||
|
description:
|
||||||
|
- Size of disk in GB
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
snapshot:
|
||||||
|
description:
|
||||||
|
- The snapshot name for the disk volume.
|
||||||
|
- Required one of C(disk_offering), C(snapshot) if volume is not already C(state=present).
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
force:
|
||||||
|
description:
|
||||||
|
- Force removal of volume even it is attached to a VM.
|
||||||
|
- Considered on C(state=absnet) only.
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
vm:
|
||||||
|
description:
|
||||||
|
- Name of the virtual machine to attach the volume to.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
zone:
|
||||||
|
description:
|
||||||
|
- Name of the zone in which the volume should be deployed.
|
||||||
|
- If not set, default zone is used.
|
||||||
|
required: false
|
||||||
|
default: null
|
||||||
|
state:
|
||||||
|
description:
|
||||||
|
- State of the volume.
|
||||||
|
required: false
|
||||||
|
default: 'present'
|
||||||
|
choices: [ 'present', 'absent', 'attached', 'detached' ]
|
||||||
|
poll_async:
|
||||||
|
description:
|
||||||
|
- Poll async jobs until job has finished.
|
||||||
|
required: false
|
||||||
|
default: true
|
||||||
|
extends_documentation_fragment: cloudstack
|
||||||
|
'''
|
||||||
|
|
||||||
|
EXAMPLES = '''
|
||||||
|
# Create volume within project, zone with specified storage options
|
||||||
|
- local_action:
|
||||||
|
module: cs_volume
|
||||||
|
name: web-vm-1-volume
|
||||||
|
project: Integration
|
||||||
|
zone: ch-zrh-ix-01
|
||||||
|
disk_offering: PerfPlus Storage
|
||||||
|
size: 20
|
||||||
|
|
||||||
|
# Create/attach volume to instance
|
||||||
|
- local_action:
|
||||||
|
module: cs_volume
|
||||||
|
name: web-vm-1-volume
|
||||||
|
disk_offering: PerfPlus Storage
|
||||||
|
size: 20
|
||||||
|
vm: web-vm-1
|
||||||
|
state: attached
|
||||||
|
|
||||||
|
# Detach volume
|
||||||
|
- local_action:
|
||||||
|
module: cs_volume
|
||||||
|
name: web-vm-1-volume
|
||||||
|
state: detached
|
||||||
|
|
||||||
|
# Remove volume
|
||||||
|
- local_action:
|
||||||
|
module: cs_volume
|
||||||
|
name: web-vm-1-volume
|
||||||
|
state: absent
|
||||||
|
'''
|
||||||
|
|
||||||
|
RETURN = '''
|
||||||
|
id:
|
||||||
|
description: ID of the volume.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample:
|
||||||
|
name:
|
||||||
|
description: Name of the volume.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: web-volume-01
|
||||||
|
display_name:
|
||||||
|
description: Display name of the volume.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: web-volume-01
|
||||||
|
group:
|
||||||
|
description: Group the volume belongs to
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: web
|
||||||
|
domain:
|
||||||
|
description: Domain the volume belongs to
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: example domain
|
||||||
|
project:
|
||||||
|
description: Project the volume belongs to
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: Production
|
||||||
|
zone:
|
||||||
|
description: Name of zone the volume is in.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: ch-gva-2
|
||||||
|
created:
|
||||||
|
description: Date of the volume was created.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: 2014-12-01T14:57:57+0100
|
||||||
|
attached:
|
||||||
|
description: Date of the volume was attached.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: 2014-12-01T14:57:57+0100
|
||||||
|
type:
|
||||||
|
description: Disk volume type.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: DATADISK
|
||||||
|
size:
|
||||||
|
description: Size of disk volume.
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: 20
|
||||||
|
vm:
|
||||||
|
description: Name of the vm the volume is attached to (not returned when detached)
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: web-01
|
||||||
|
state:
|
||||||
|
description: State of the volume
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: Attached
|
||||||
|
device_id:
|
||||||
|
description: Id of the device on user vm the volume is attached to (not returned when detached)
|
||||||
|
returned: success
|
||||||
|
type: string
|
||||||
|
sample: 1
|
||||||
|
'''
|
||||||
|
|
||||||
|
try:
|
||||||
|
from cs import CloudStack, CloudStackException, read_config
|
||||||
|
has_lib_cs = True
|
||||||
|
except ImportError:
|
||||||
|
has_lib_cs = False
|
||||||
|
|
||||||
|
# import cloudstack common
|
||||||
|
from ansible.module_utils.cloudstack import *
|
||||||
|
|
||||||
|
|
||||||
|
class AnsibleCloudStackVolume(AnsibleCloudStack):
|
||||||
|
|
||||||
|
def __init__(self, module):
|
||||||
|
super(AnsibleCloudStackVolume, self).__init__(module)
|
||||||
|
self.returns = {
|
||||||
|
'group': 'group',
|
||||||
|
'attached': 'attached',
|
||||||
|
'vmname': 'vm',
|
||||||
|
'deviceid': 'device_id',
|
||||||
|
'type': 'type',
|
||||||
|
'size': 'size',
|
||||||
|
}
|
||||||
|
self.volume = None
|
||||||
|
|
||||||
|
#TODO implement in cloudstack utils
|
||||||
|
def get_disk_offering(self, key=None):
|
||||||
|
disk_offering = self.module.params.get('disk_offering')
|
||||||
|
if not disk_offering:
|
||||||
|
return None
|
||||||
|
|
||||||
|
args = {}
|
||||||
|
args['domainid'] = self.get_domain(key='id')
|
||||||
|
|
||||||
|
disk_offerings = self.cs.listDiskOfferings(**args)
|
||||||
|
if disk_offerings:
|
||||||
|
for d in disk_offerings['diskoffering']:
|
||||||
|
if disk_offering in [d['displaytext'], d['name'], d['id']]:
|
||||||
|
return self._get_by_key(key, d)
|
||||||
|
self.module.fail_json(msg="Disk offering '%s' not found" % disk_offering)
|
||||||
|
|
||||||
|
|
||||||
|
def get_volume(self):
|
||||||
|
if not self.volume:
|
||||||
|
args = {}
|
||||||
|
args['account'] = self.get_account(key='name')
|
||||||
|
args['domainid'] = self.get_domain(key='id')
|
||||||
|
args['projectid'] = self.get_project(key='id')
|
||||||
|
args['type'] = 'DATADISK'
|
||||||
|
|
||||||
|
volumes = self.cs.listVolumes(**args)
|
||||||
|
if volumes:
|
||||||
|
volume_name = self.module.params.get('name')
|
||||||
|
for v in volumes['volume']:
|
||||||
|
if volume_name.lower() == v['name'].lower():
|
||||||
|
self.volume = v
|
||||||
|
break
|
||||||
|
return self.volume
|
||||||
|
|
||||||
|
|
||||||
|
def get_snapshot(self, key=None):
|
||||||
|
snapshot = self.module.params.get('snapshot')
|
||||||
|
if not snapshot:
|
||||||
|
return None
|
||||||
|
|
||||||
|
args = {}
|
||||||
|
args['name'] = snapshot
|
||||||
|
args['account'] = self.get_account('name')
|
||||||
|
args['domainid'] = self.get_domain('id')
|
||||||
|
args['projectid'] = self.get_project('id')
|
||||||
|
|
||||||
|
snapshots = self.cs.listSnapshots(**args)
|
||||||
|
if snapshots:
|
||||||
|
return self._get_by_key(key, snapshots['snapshot'][0])
|
||||||
|
self.module.fail_json(msg="Snapshot with name %s not found" % snapshot)
|
||||||
|
|
||||||
|
|
||||||
|
    def present_volume(self):
        volume = self.get_volume()
        if not volume:
            disk_offering_id = self.get_disk_offering(key='id')
            snapshot_id = self.get_snapshot(key='id')

            if not disk_offering_id and not snapshot_id:
                self.module.fail_json(msg="Required one of: disk_offering,snapshot")

            self.result['changed'] = True

            args = {}
            args['name'] = self.module.params.get('name')
            args['account'] = self.get_account(key='name')
            args['domainid'] = self.get_domain(key='id')
            args['diskofferingid'] = disk_offering_id
            args['displayvolume'] = self.module.params.get('display_volume')
            args['maxiops'] = self.module.params.get('max_iops')
            args['miniops'] = self.module.params.get('min_iops')
            args['projectid'] = self.get_project(key='id')
            args['size'] = self.module.params.get('size')
            args['snapshotid'] = snapshot_id
            args['zoneid'] = self.get_zone(key='id')

            if not self.module.check_mode:
                res = self.cs.createVolume(**args)
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    volume = self.poll_job(res, 'volume')
        return volume
    def attached_volume(self):
        volume = self.present_volume()

        if volume.get('virtualmachineid') != self.get_vm(key='id'):
            self.result['changed'] = True

            if not self.module.check_mode:
                volume = self.detached_volume()

        if 'attached' not in volume:
            self.result['changed'] = True

            args = {}
            args['id'] = volume['id']
            args['virtualmachineid'] = self.get_vm(key='id')
            args['deviceid'] = self.module.params.get('device_id')

            if not self.module.check_mode:
                res = self.cs.attachVolume(**args)
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    volume = self.poll_job(res, 'volume')
        return volume
    def detached_volume(self):
        volume = self.present_volume()

        if volume:
            if 'attached' not in volume:
                return volume

            self.result['changed'] = True

            if not self.module.check_mode:
                res = self.cs.detachVolume(id=volume['id'])
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    volume = self.poll_job(res, 'volume')
        return volume
    def absent_volume(self):
        volume = self.get_volume()

        if volume:
            if 'attached' in volume:
                if self.module.params.get('force'):
                    self.detached_volume()
                else:
                    self.module.fail_json(msg="Volume '%s' is attached, use force=true for detaching and removing the volume." % volume.get('name'))

            self.result['changed'] = True
            if not self.module.check_mode:
                volume = self.detached_volume()

                res = self.cs.deleteVolume(id=volume['id'])
                if 'errortext' in res:
                    self.module.fail_json(msg="Failed: '%s'" % res['errortext'])
                poll_async = self.module.params.get('poll_async')
                if poll_async:
                    res = self.poll_job(res, 'volume')

        return volume
def main():
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        name = dict(required=True),
        disk_offering = dict(default=None),
        display_volume = dict(choices=BOOLEANS, default=True),
        max_iops = dict(type='int', default=None),
        min_iops = dict(type='int', default=None),
        size = dict(type='int', default=None),
        snapshot = dict(default=None),
        vm = dict(default=None),
        device_id = dict(type='int', default=None),
        custom_id = dict(default=None),
        force = dict(choices=BOOLEANS, default=False),
        state = dict(choices=['present', 'absent', 'attached', 'detached'], default='present'),
        zone = dict(default=None),
        domain = dict(default=None),
        account = dict(default=None),
        project = dict(default=None),
        poll_async = dict(choices=BOOLEANS, default=True),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        mutually_exclusive = (
            ['snapshot', 'disk_offering'],
        ),
        supports_check_mode=True
    )

    if not has_lib_cs:
        module.fail_json(msg="python library cs required: pip install cs")

    try:
        acs_vol = AnsibleCloudStackVolume(module)

        state = module.params.get('state')

        if state in ['absent']:
            volume = acs_vol.absent_volume()
        elif state in ['attached']:
            volume = acs_vol.attached_volume()
        elif state in ['detached']:
            volume = acs_vol.detached_volume()
        else:
            volume = acs_vol.present_volume()

        result = acs_vol.get_result(volume)

    except CloudStackException, e:
        module.fail_json(msg='CloudStackException: %s' % str(e))

    module.exit_json(**result)

# import module snippets
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
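Each CloudStack call in the methods above repeats the same pattern: fire the request, fail on 'errortext', then poll the async job when poll_async is set. A condensed sketch of that pattern follows; execute_and_poll is a hypothetical helper, not part of the module, and module, cs_api_call and poll_job stand in for the objects used above.

def execute_and_poll(module, cs_api_call, poll_job, **args):
    # Hypothetical helper illustrating the repeated call/check/poll pattern above.
    res = cs_api_call(**args)
    if 'errortext' in res:
        module.fail_json(msg="Failed: '%s'" % res['errortext'])
    if module.params.get('poll_async'):
        res = poll_job(res, 'volume')
    return res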
0 cloud/docker/__init__.py Normal file
261 cloud/docker/docker_login.py Normal file
@@ -0,0 +1,261 @@
#!/usr/bin/python
#

# (c) 2015, Olaf Kilian <olaf.kilian@symanex.com>
#
# This file is part of Ansible
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.

######################################################################

DOCUMENTATION = '''
---
module: docker_login
author: Olaf Kilian
version_added: "2.0"
short_description: Manage Docker registry logins
description:
  - Ansible version of the "docker login" CLI command.
  - This module allows you to log in to a Docker registry without directly pulling an image or performing any other actions.
  - It will write your login credentials to your local .dockercfg file, which is compatible with the Docker CLI client as well as docker-py and all other Docker-related modules that are based on docker-py.
options:
  registry:
    description:
      - "URL of the registry, defaults to: https://index.docker.io/v1/"
    required: false
    default: "https://index.docker.io/v1/"
  username:
    description:
      - The username for the registry account
    required: true
  password:
    description:
      - The plaintext password for the registry account
    required: true
  email:
    description:
      - The email address for the registry account. Note that private registries usually don't need this, but if you want to log into your Docker Hub account (default behaviour) you need to specify this in order to be able to log in.
    required: false
    default: None
  reauth:
    description:
      - Whether to refresh existing authentication on the Docker server (boolean)
    required: false
    default: false
  dockercfg_path:
    description:
      - Use a custom path for the .dockercfg file
    required: false
    default: ~/.docker/config.json
  docker_url:
    description:
      - Refers to the protocol+hostname+port where the Docker server is hosted
    required: false
    default: unix://var/run/docker.sock
  timeout:
    description:
      - The HTTP request timeout in seconds
    required: false
    default: 600

requirements: [ "python >= 2.6", "docker-py >= 1.1.0" ]
'''
EXAMPLES = '''
Log in to a Docker registry without performing any other action. Make sure that the user you are using is either in the docker group which owns the Docker socket, or use sudo to perform login actions:

- name: login to DockerHub remote registry using your account
  docker_login:
    username: docker
    password: rekcod
    email: docker@docker.io

- name: login to private Docker remote registry and force reauthentication
  docker_login:
    registry: your.private.registry.io
    username: yourself
    password: secrets3
    reauth: yes

- name: login to DockerHub remote registry using a custom dockercfg file location
  docker_login:
    username: docker
    password: rekcod
    email: docker@docker.io
    dockercfg_path: /tmp/.mydockercfg

'''

import os.path
import json
import base64
from urlparse import urlparse
from distutils.version import LooseVersion
try:
    import docker.client
    from docker.errors import APIError as DockerAPIError
    has_lib_docker = True
    if LooseVersion(docker.__version__) >= LooseVersion("1.1.0"):
        has_correct_lib_docker_version = True
    else:
        has_correct_lib_docker_version = False
except ImportError:
    has_lib_docker = False

try:
    import requests
    has_lib_requests = True
except ImportError:
    has_lib_requests = False
class DockerLoginManager:

    def __init__(self, module):

        self.module = module
        self.registry = self.module.params.get('registry')
        self.username = self.module.params.get('username')
        self.password = self.module.params.get('password')
        self.email = self.module.params.get('email')
        self.reauth = self.module.params.get('reauth')
        self.dockercfg_path = os.path.expanduser(self.module.params.get('dockercfg_path'))

        docker_url = urlparse(module.params.get('docker_url'))
        self.client = docker.Client(base_url=docker_url.geturl(), timeout=module.params.get('timeout'))

        self.changed = False
        self.response = False
        self.log = list()

    def login(self):

        if self.reauth:
            self.log.append("Enforcing reauthentication")

        # Connect to the registry and log in if not already logged in or reauth is enforced.
        try:
            self.response = self.client.login(
                self.username,
                password=self.password,
                email=self.email,
                registry=self.registry,
                reauth=self.reauth,
                dockercfg_path=self.dockercfg_path
            )
        except DockerAPIError as e:
            self.module.fail_json(msg="Docker API Error: %s" % e.explanation)
        except Exception as e:
            self.module.fail_json(msg="failed to login to the remote registry", error=repr(e))

        # Get status from registry response.
        if "Status" in self.response:
            self.log.append(self.response["Status"])

        # Update the dockercfg if not in check mode.
        if not self.module.check_mode:
            self.update_dockercfg()
    # This is what the underlying docker-py unfortunately doesn't do (yet).
    def update_dockercfg(self):

        # Create the dockercfg file if it does not exist.
        if not os.path.exists(self.dockercfg_path):
            dockercfg_path_dir = os.path.dirname(self.dockercfg_path)
            if not os.path.exists(dockercfg_path_dir):
                os.makedirs(dockercfg_path_dir)
            open(self.dockercfg_path, "w")
            self.log.append("Created new Docker config file at %s" % self.dockercfg_path)
        else:
            self.log.append("Found existing Docker config file at %s" % self.dockercfg_path)

        # Build a dict for the existing dockercfg.
        try:
            docker_config = json.load(open(self.dockercfg_path, "r"))
        except ValueError:
            docker_config = dict()
        if "auths" not in docker_config:
            docker_config["auths"] = dict()
        if self.registry not in docker_config["auths"]:
            docker_config["auths"][self.registry] = dict()

        # Calculate docker credentials based on current parameters.
        new_docker_config = dict(
            auth = base64.b64encode(self.username + b':' + self.password),
            email = self.email
        )

        # Update config if persisted credentials differ from current credentials.
        if new_docker_config != docker_config["auths"][self.registry]:
            docker_config["auths"][self.registry] = new_docker_config
            try:
                json.dump(docker_config, open(self.dockercfg_path, "w"), indent=4, sort_keys=True)
            except Exception as e:
                self.module.fail_json(msg="failed to write auth details to file", error=repr(e))
            self.log.append("Updated Docker config with new credentials.")
            self.changed = True

    # Compatible with docker-py auth.decode_docker_auth()
    def encode_docker_auth(self, auth):
        s = base64.b64decode(auth)
        login, pwd = s.split(b':', 1)
        return login.decode('ascii'), pwd.decode('ascii')

    def get_msg(self):
        return ". ".join(self.log)

    def has_changed(self):
        return self.changed
def main():

    module = AnsibleModule(
        argument_spec = dict(
            registry = dict(required=False, default='https://index.docker.io/v1/'),
            username = dict(required=True),
            password = dict(required=True, no_log=True),
            email = dict(required=False, default=None),
            reauth = dict(required=False, default=False, type='bool'),
            dockercfg_path = dict(required=False, default='~/.docker/config.json'),
            docker_url = dict(default='unix://var/run/docker.sock'),
            timeout = dict(default=10, type='int')
        ),
        supports_check_mode=True
    )

    if not has_lib_docker:
        module.fail_json(msg="python library docker-py required: pip install docker-py>=1.1.0")

    if not has_correct_lib_docker_version:
        module.fail_json(msg="your version of docker-py is outdated: pip install docker-py>=1.1.0")

    if not has_lib_requests:
        module.fail_json(msg="python library requests required: pip install requests")

    try:
        manager = DockerLoginManager(module)
        manager.login()
        module.exit_json(changed=manager.has_changed(), msg=manager.get_msg())

    except Exception as e:
        module.fail_json(msg="Module execution has failed due to an unexpected error", error=repr(e))

# import module snippets
from ansible.module_utils.basic import *

if __name__ == '__main__':
    main()
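For reference, a minimal sketch of the config.json structure that update_dockercfg() writes, assuming Python 2 string semantics as the module itself uses; the credentials are placeholders taken from the EXAMPLES above.

import base64
import json

username, password, email = 'docker', 'rekcod', 'docker@docker.io'  # placeholders
registry = 'https://index.docker.io/v1/'

docker_config = {
    'auths': {
        registry: {
            'auth': base64.b64encode(username + ':' + password),  # 'ZG9ja2VyOnJla2NvZA=='
            'email': email,
        }
    }
}
print json.dumps(docker_config, indent=4, sort_keys=True)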
201 cloud/openstack/os_project.py Normal file
@@ -0,0 +1,201 @@
#!/usr/bin/python
# Copyright (c) 2015 IBM Corporation
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.


try:
    import shade
    HAS_SHADE = True
except ImportError:
    HAS_SHADE = False
DOCUMENTATION = '''
---
module: os_project
short_description: Manage OpenStack Projects
extends_documentation_fragment: openstack
version_added: "2.0"
author: "Alberto Gireud (@agireud)"
description:
    - Manage OpenStack Projects. Projects can be created,
      updated or deleted using this module. A project will be updated
      if I(name) matches an existing project and I(state) is present.
      The value for I(name) cannot be updated without deleting and
      re-creating the project.
options:
  name:
    description:
      - Name for the project
    required: true
  description:
    description:
      - Description for the project
    required: false
    default: None
  domain_id:
    description:
      - Domain id to create the project in if the cloud supports domains
    required: false
    default: None
  enabled:
    description:
      - Is the project enabled
    required: false
    default: True
  state:
    description:
      - Should the resource be present or absent.
    choices: [present, absent]
    default: present
requirements:
    - "python >= 2.6"
    - "shade"
'''

EXAMPLES = '''
# Create a project
- os_project:
    cloud: mycloud
    state: present
    name: demoproject
    description: demodescription
    domain_id: demoid
    enabled: True

# Delete a project
- os_project:
    cloud: mycloud
    state: absent
    name: demoproject
'''


RETURN = '''
project:
    description: Dictionary describing the project.
    returned: On success when I(state) is 'present'
    type: dictionary
    contains:
        id:
            description: Project ID
            type: string
            sample: "f59382db809c43139982ca4189404650"
        name:
            description: Project name
            type: string
            sample: "demoproject"
        description:
            description: Project description
            type: string
            sample: "demodescription"
        enabled:
            description: Boolean to indicate if project is enabled
            type: bool
            sample: True
'''
def _needs_update(module, project):
    keys = ('description', 'enabled')
    for key in keys:
        if module.params[key] is not None and module.params[key] != project.get(key):
            return True

    return False

def _system_state_change(module, project):
    state = module.params['state']
    if state == 'present':
        if project is None:
            changed = True
        else:
            if _needs_update(module, project):
                changed = True
            else:
                changed = False

    elif state == 'absent':
        if project is None:
            changed = False
        else:
            changed = True

    return changed
def main():

    argument_spec = openstack_full_argument_spec(
        name=dict(required=True),
        description=dict(required=False, default=None),
        domain=dict(required=False, default=None),
        enabled=dict(default=True, type='bool'),
        state=dict(default='present', choices=['absent', 'present'])
    )

    module_kwargs = openstack_module_kwargs()
    module = AnsibleModule(
        argument_spec,
        supports_check_mode=True,
        **module_kwargs
    )

    if not HAS_SHADE:
        module.fail_json(msg='shade is required for this module')

    name = module.params['name']
    description = module.params['description']
    domain = module.params['domain']
    enabled = module.params['enabled']
    state = module.params['state']

    try:
        cloud = shade.openstack_cloud(**module.params)
        project = cloud.get_project(name)

        if module.check_mode:
            module.exit_json(changed=_system_state_change(module, project))

        if state == 'present':
            if project is None:
                project = cloud.create_project(
                    name=name, description=description,
                    domain_id=domain,
                    enabled=enabled)
                changed = True
            else:
                if _needs_update(module, project):
                    project = cloud.update_project(
                        project['id'], description=description,
                        enabled=enabled)
                    changed = True
                else:
                    changed = False
            module.exit_json(changed=changed, project=project)

        elif state == 'absent':
            if project is None:
                changed = False
            else:
                cloud.delete_project(project['id'])
                changed = True
            module.exit_json(changed=changed)

    except shade.OpenStackCloudException as e:
        module.fail_json(msg=e.message, extra_data=e.extra_data)

from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *

if __name__ == '__main__':
    main()
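A rough standalone sketch of the shade calls the module wraps, under the assumption of a configured cloud named 'mycloud'; the cloud and project names are placeholders, while the function names are taken from the code above.

import shade

cloud = shade.openstack_cloud(cloud='mycloud')   # placeholder cloud name
project = cloud.get_project('demoproject')       # placeholder project name
if project is None:
    # mirrors the state=present branch in main() above
    project = cloud.create_project(name='demoproject',
                                   description='demodescription',
                                   domain_id=None,
                                   enabled=True)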
@@ -26,7 +26,7 @@ options:
     description:
       - A list of ZooKeeper servers (format '[server]:[port]').
     required: true
-  path:
+  name:
     description:
       - The path of the znode.
     required: true

@@ -124,7 +124,7 @@ EXAMPLES = '''
 - mongodb_user: database=burgers name=joe password=12345 roles='readWriteAnyDatabase' state=present

 # add a user to database in a replica set, the primary server is automatically discovered and written to
-- mongodb_user: database=burgers name=bob replica_set=blecher password=12345 roles='readWriteAnyDatabase' state=present
+- mongodb_user: database=burgers name=bob replica_set=belcher password=12345 roles='readWriteAnyDatabase' state=present
 '''

 import ConfigParser

@@ -337,7 +337,8 @@ def main():
         else:
             db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password)
     except Exception, e:
-        module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials")
+        errno, errstr = e.args
+        module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials (%s: %s)" % (errno, errstr) )
     try:
         cursor = db_connection.cursor(cursorclass=MySQLdb.cursors.DictCursor)
     except Exception, e:
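The added errno/errstr unpacking above relies on MySQLdb connection errors carrying an (error number, error string) pair in e.args; a minimal Python 2 illustration with a stand-in exception rather than a real MySQLdb error:

try:
    raise Exception(1045, "Access denied for user")  # stand-in for a MySQLdb error
except Exception, e:
    errno, errstr = e.args
    print "unable to connect to database (%s: %s)" % (errno, errstr)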
292 files/blockinfile.py Normal file
@@ -0,0 +1,292 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2014, 2015 YAEGASHI Takeshi <yaegashi@debian.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

import re
import os
import tempfile
DOCUMENTATION = """
|
||||||
|
---
|
||||||
|
module: blockinfile
|
||||||
|
author:
|
||||||
|
- 'YAEGASHI Takeshi (@yaegashi)'
|
||||||
|
extends_documentation_fragment:
|
||||||
|
- files
|
||||||
|
- validate
|
||||||
|
short_description: Insert/update/remove a text block
|
||||||
|
surrounded by marker lines.
|
||||||
|
version_added: '2.0'
|
||||||
|
description:
|
||||||
|
- This module will insert/update/remove a block of multi-line text
|
||||||
|
surrounded by customizable marker lines.
|
||||||
|
notes:
|
||||||
|
- This module supports check mode.
|
||||||
|
options:
|
||||||
|
dest:
|
||||||
|
aliases: [ name, destfile ]
|
||||||
|
required: true
|
||||||
|
description:
|
||||||
|
- The file to modify.
|
||||||
|
state:
|
||||||
|
required: false
|
||||||
|
choices: [ present, absent ]
|
||||||
|
default: present
|
||||||
|
description:
|
||||||
|
- Whether the block should be there or not.
|
||||||
|
marker:
|
||||||
|
required: false
|
||||||
|
default: '# {mark} ANSIBLE MANAGED BLOCK'
|
||||||
|
description:
|
||||||
|
- The marker line template.
|
||||||
|
"{mark}" will be replaced with "BEGIN" or "END".
|
||||||
|
block:
|
||||||
|
aliases: [ content ]
|
||||||
|
required: false
|
||||||
|
default: ''
|
||||||
|
description:
|
||||||
|
- The text to insert inside the marker lines.
|
||||||
|
If it's missing or an empty string,
|
||||||
|
the block will be removed as if C(state) were specified to C(absent).
|
||||||
|
insertafter:
|
||||||
|
required: false
|
||||||
|
default: EOF
|
||||||
|
description:
|
||||||
|
- If specified, the block will be inserted after the last match of
|
||||||
|
specified regular expression. A special value is available; C(EOF) for
|
||||||
|
inserting the block at the end of the file. If specified regular
|
||||||
|
expresion has no matches, C(EOF) will be used instead.
|
||||||
|
choices: [ 'EOF', '*regex*' ]
|
||||||
|
insertbefore:
|
||||||
|
required: false
|
||||||
|
default: None
|
||||||
|
description:
|
||||||
|
- If specified, the block will be inserted before the last match of
|
||||||
|
specified regular expression. A special value is available; C(BOF) for
|
||||||
|
inserting the block at the beginning of the file. If specified regular
|
||||||
|
expresion has no matches, the block will be inserted at the end of the
|
||||||
|
file.
|
||||||
|
choices: [ 'BOF', '*regex*' ]
|
||||||
|
create:
|
||||||
|
required: false
|
||||||
|
default: 'no'
|
||||||
|
choices: [ 'yes', 'no' ]
|
||||||
|
description:
|
||||||
|
- Create a new file if it doesn't exist.
|
||||||
|
backup:
|
||||||
|
required: false
|
||||||
|
default: 'no'
|
||||||
|
choices: [ 'yes', 'no' ]
|
||||||
|
description:
|
||||||
|
- Create a backup file including the timestamp information so you can
|
||||||
|
get the original file back if you somehow clobbered it incorrectly.
|
||||||
|
"""
|
||||||
|
|
||||||
|
EXAMPLES = r"""
|
||||||
|
- name: insert/update "Match User" configuation block in /etc/ssh/sshd_config
|
||||||
|
blockinfile:
|
||||||
|
dest: /etc/ssh/sshd_config
|
||||||
|
block: |
|
||||||
|
Match User ansible-agent
|
||||||
|
PasswordAuthentication no
|
||||||
|
|
||||||
|
- name: insert/update eth0 configuration stanza in /etc/network/interfaces
|
||||||
|
(it might be better to copy files into /etc/network/interfaces.d/)
|
||||||
|
blockinfile:
|
||||||
|
dest: /etc/network/interfaces
|
||||||
|
block: |
|
||||||
|
iface eth0 inet static
|
||||||
|
address 192.168.0.1
|
||||||
|
netmask 255.255.255.0
|
||||||
|
|
||||||
|
- name: insert/update HTML surrounded by custom markers after <body> line
|
||||||
|
blockinfile:
|
||||||
|
dest: /var/www/html/index.html
|
||||||
|
marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
|
||||||
|
insertafter: "<body>"
|
||||||
|
content: |
|
||||||
|
<h1>Welcome to {{ansible_hostname}}</h1>
|
||||||
|
<p>Last updated on {{ansible_date_time.iso8601}}</p>
|
||||||
|
|
||||||
|
- name: remove HTML as well as surrounding markers
|
||||||
|
blockinfile:
|
||||||
|
dest: /var/www/html/index.html
|
||||||
|
marker: "<!-- {mark} ANSIBLE MANAGED BLOCK -->"
|
||||||
|
content: ""
|
||||||
|
"""
|
||||||
|
|
||||||
|
|
||||||
|
def write_changes(module, contents, dest):

    tmpfd, tmpfile = tempfile.mkstemp()
    f = os.fdopen(tmpfd, 'wb')
    f.write(contents)
    f.close()

    validate = module.params.get('validate', None)
    valid = not validate
    if validate:
        if "%s" not in validate:
            module.fail_json(msg="validate must contain %%s: %s" % (validate))
        (rc, out, err) = module.run_command(validate % tmpfile)
        valid = rc == 0
        if rc != 0:
            module.fail_json(msg='failed to validate: '
                                 'rc:%s error:%s' % (rc, err))
    if valid:
        module.atomic_move(tmpfile, dest)


def check_file_attrs(module, changed, message):

    file_args = module.load_file_common_arguments(module.params)
    if module.set_file_attributes_if_different(file_args, False):

        if changed:
            message += " and "
        changed = True
        message += "ownership, perms or SE linux context changed"

    return message, changed
def main():
    module = AnsibleModule(
        argument_spec=dict(
            dest=dict(required=True, aliases=['name', 'destfile']),
            state=dict(default='present', choices=['absent', 'present']),
            marker=dict(default='# {mark} ANSIBLE MANAGED BLOCK', type='str'),
            block=dict(default='', type='str', aliases=['content']),
            insertafter=dict(default=None),
            insertbefore=dict(default=None),
            create=dict(default=False, type='bool'),
            backup=dict(default=False, type='bool'),
            validate=dict(default=None, type='str'),
        ),
        mutually_exclusive=[['insertbefore', 'insertafter']],
        add_file_common_args=True,
        supports_check_mode=True
    )

    params = module.params
    dest = os.path.expanduser(params['dest'])
    if module.boolean(params.get('follow', None)):
        dest = os.path.realpath(dest)

    if os.path.isdir(dest):
        module.fail_json(rc=256,
                         msg='Destination %s is a directory !' % dest)

    if not os.path.exists(dest):
        if not module.boolean(params['create']):
            module.fail_json(rc=257,
                             msg='Destination %s does not exist !' % dest)
        original = None
        lines = []
    else:
        f = open(dest, 'rb')
        original = f.read()
        f.close()
        lines = original.splitlines()

    insertbefore = params['insertbefore']
    insertafter = params['insertafter']
    block = params['block']
    marker = params['marker']
    present = params['state'] == 'present'

    if insertbefore is None and insertafter is None:
        insertafter = 'EOF'

    if insertafter not in (None, 'EOF'):
        insertre = re.compile(insertafter)
    elif insertbefore not in (None, 'BOF'):
        insertre = re.compile(insertbefore)
    else:
        insertre = None

    marker0 = re.sub(r'{mark}', 'BEGIN', marker)
    marker1 = re.sub(r'{mark}', 'END', marker)
    if present and block:
        # Escape sequences like '\n' need to be handled in Ansible 1.x
        if ANSIBLE_VERSION.startswith('1.'):
            block = re.sub('', block, '')
        blocklines = [marker0] + block.splitlines() + [marker1]
    else:
        blocklines = []

    n0 = n1 = None
    for i, line in enumerate(lines):
        if line.startswith(marker0):
            n0 = i
        if line.startswith(marker1):
            n1 = i

    if None in (n0, n1):
        n0 = None
        if insertre is not None:
            for i, line in enumerate(lines):
                if insertre.search(line):
                    n0 = i
            if n0 is None:
                n0 = len(lines)
            elif insertafter is not None:
                n0 += 1
        elif insertbefore is not None:
            n0 = 0           # insertbefore=BOF
        else:
            n0 = len(lines)  # insertafter=EOF
    elif n0 < n1:
        lines[n0:n1+1] = []
    else:
        lines[n1:n0+1] = []
        n0 = n1

    lines[n0:n0] = blocklines

    if lines:
        result = '\n'.join(lines)+'\n'
    else:
        result = ''
    if original == result:
        msg = ''
        changed = False
    elif original is None:
        msg = 'File created'
        changed = True
    elif not blocklines:
        msg = 'Block removed'
        changed = True
    else:
        msg = 'Block inserted'
        changed = True

    if changed and not module.check_mode:
        if module.boolean(params['backup']) and os.path.exists(dest):
            module.backup_local(dest)
        write_changes(module, result, dest)

    msg, changed = check_file_attrs(module, changed, msg)
    module.exit_json(changed=changed, msg=msg)

# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.splitter import *
if __name__ == '__main__':
    main()
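A quick sketch of what the marker handling above produces, directly mirroring the re.sub calls and the blocklines assembly in main():

import re

marker = '# {mark} ANSIBLE MANAGED BLOCK'   # the module default
block = 'Match User ansible-agent\nPasswordAuthentication no'

marker0 = re.sub(r'{mark}', 'BEGIN', marker)
marker1 = re.sub(r'{mark}', 'END', marker)
print '\n'.join([marker0] + block.splitlines() + [marker1])
# # BEGIN ANSIBLE MANAGED BLOCK
# Match User ansible-agent
# PasswordAuthentication no
# # END ANSIBLE MANAGED BLOCK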
@@ -131,9 +131,9 @@ def main():
         module.params['login_host'],
         module.params['login_port'],
         urllib.quote(module.params['vhost'],''),
-        module.params['name'],
+        urllib.quote(module.params['name'],''),
         dest_type,
-        module.params['destination'],
+        urllib.quote(module.params['destination'],''),
         urllib.quote(module.params['routing_key'],'')
     )

@@ -173,9 +173,9 @@ def main():
         module.params['login_host'],
         module.params['login_port'],
         urllib.quote(module.params['vhost'],''),
-        module.params['name'],
+        urllib.quote(module.params['name'],''),
         dest_type,
-        module.params['destination']
+        urllib.quote(module.params['destination'],'')
     )

     r = requests.post(

@@ -133,7 +133,7 @@ def main():
         module.params['login_host'],
         module.params['login_port'],
         urllib.quote(module.params['vhost'],''),
-        module.params['name']
+        urllib.quote(module.params['name'],'')
     )

     # Check if exchange already exists
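These hunks URL-encode the name and destination the same way the vhost already was; a short Python 2 illustration of why the second argument (the 'safe' characters, here '') matters. The values are placeholders:

import urllib

vhost = '/'            # the default RabbitMQ vhost
name = 'my queue'      # placeholder name

print urllib.quote(vhost, '')   # '%2F'  -- '/' is escaped because safe=''
print urllib.quote(vhost)       # '/'    -- the default safe='/' leaves it untouched
print urllib.quote(name, '')    # 'my%20queue'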
@@ -54,7 +54,7 @@ options:
     default: null
     choices: ['metric alert', 'service check']
   query:
-    description: ["he monitor query to notify on with syntax varying depending on what type of monitor you are creating."]
+    description: ["The monitor query to notify on with syntax varying depending on what type of monitor you are creating."]
     required: false
     default: null
   name:
@@ -266,7 +266,7 @@ def main():
             module.fail_json(msg='no command passed for command action')
     ##################################################################
     if not cmdfile:
-        module.fail_json('unable to locate nagios.cfg')
+        module.fail_json(msg='unable to locate nagios.cfg')

     ##################################################################
     ansible_nagios = Nagios(module, **module.params)
@@ -91,6 +91,13 @@ options:
       - 'https://www.zabbix.com/documentation/2.0/manual/appendix/api/hostinterface/definitions#host_interface'
     required: false
     default: []
+  force:
+    description:
+      - Overwrite the host configuration, even if already present
+    required: false
+    default: "yes"
+    choices: [ "yes", "no" ]
+    version_added: "2.0"
 '''

 EXAMPLES = '''

@@ -370,6 +377,7 @@ def main():
             state=dict(default="present", choices=['present', 'absent']),
             timeout=dict(type='int', default=10),
             interfaces=dict(required=False),
+            force=dict(default=True, type='bool'),
             proxy=dict(required=False)
         ),
         supports_check_mode=True

@@ -388,6 +396,7 @@ def main():
     state = module.params['state']
     timeout = module.params['timeout']
     interfaces = module.params['interfaces']
+    force = module.params['force']
     proxy = module.params['proxy']

     # convert enabled to 0; disabled to 1

@@ -439,6 +448,9 @@ def main():
         if not group_ids:
             module.fail_json(msg="Specify at least one group for updating host '%s'." % host_name)

+        if not force:
+            module.fail_json(changed=False, result="Host present, Can't update configuration without force")
+
         # get exist host's interfaces
         exist_interfaces = host._zapi.hostinterface.get({'output': 'extend', 'hostids': host_id})
         exist_interfaces_copy = copy.deepcopy(exist_interfaces)
@@ -140,7 +140,7 @@ class OVSPort(object):

     def set(self, set_opt):
         """ Set attributes on a port. """
-        self.module("set called %s" % set_opt)
+        self.module.log("set called %s" % set_opt)
         if (not set_opt):
             return False
@@ -56,9 +56,11 @@ options:
   color:
     description:
       - Text color for the message. ("none" is a valid option in 1.6 or later, in 1.6 and prior, the default color is black, not "none").
+        Added 11 more colors in version 2.0.
     required: false
     default: "none"
-    choices: [ "none", "yellow", "red", "green", "blue", "black" ]
+    choices: [ "none", "white", "black", "blue", "green", "red", "brown", "purple", "orange", "yellow", "light_green", "teal", "light_cyan",
+               "light_blue", "pink", "gray", "light_gray"]
   channel:
     description:
       - Channel name. One of nick_to or channel needs to be set. When both are set, the message will be sent to both of them.

@@ -95,6 +97,13 @@ options:
       Useful for when using a faux bot and not wanting join/parts between messages.
     default: True
     version_added: "2.0"
+  style:
+    description:
+      - Text style for the message. Note italic does not work on some clients
+    default: None
+    required: False
+    choices: [ "bold", "underline", "reverse", "italic" ]
+    version_added: "2.0"

 # informational: requirements for nodes
 requirements: [ socket ]

@@ -134,24 +143,47 @@ from time import sleep


 def send_msg(msg, server='localhost', port='6667', channel=None, nick_to=[], key=None, topic=None,
-             nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False, part=True):
+             nick="ansible", color='none', passwd=False, timeout=30, use_ssl=False, part=True, style=None):
     '''send message to IRC'''

     colornumbers = {
+        'white': "00",
         'black': "01",
+        'blue': "02",
+        'green': "03",
         'red': "04",
-        'green': "09",
+        'brown': "05",
+        'purple': "06",
+        'orange': "07",
         'yellow': "08",
-        'blue': "12",
+        'light_green': "09",
+        'teal': "10",
+        'light_cyan': "11",
+        'light_blue': "12",
+        'pink': "13",
+        'gray': "14",
+        'light_gray': "15",
     }

+    stylechoices = {
+        'bold': "\x02",
+        'underline': "\x1F",
+        'reverse': "\x16",
+        'italic': "\x1D",
+    }
+
+    try:
+        styletext = stylechoices[style]
+    except:
+        styletext = ""
+
     try:
         colornumber = colornumbers[color]
         colortext = "\x03" + colornumber
     except:
         colortext = ""

-    message = colortext + msg
+    message = styletext + colortext + msg

     irc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
     if use_ssl:

@@ -219,8 +251,13 @@ def main():
             nick=dict(default='ansible'),
             nick_to=dict(required=False, type='list'),
             msg=dict(required=True),
-            color=dict(default="none", choices=["yellow", "red", "green",
-                                                "blue", "black", "none"]),
+            color=dict(default="none", aliases=['colour'], choices=["white", "black", "blue",
+                                                                    "green", "red", "brown",
+                                                                    "purple", "orange", "yellow",
+                                                                    "light_green", "teal", "light_cyan",
+                                                                    "light_blue", "pink", "gray",
+                                                                    "light_gray", "none"]),
+            style=dict(default="none", choices=["underline", "reverse", "bold", "italic", "none"]),
             channel=dict(required=False),
             key=dict(),
             topic=dict(),

@@ -248,9 +285,10 @@ def main():
     timeout = module.params["timeout"]
     use_ssl = module.params["use_ssl"]
     part = module.params["part"]
+    style = module.params["style"]

     try:
-        send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl, part)
+        send_msg(msg, server, port, channel, nick_to, key, topic, nick, color, passwd, timeout, use_ssl, part, style)
     except Exception, e:
         module.fail_json(msg="unable to send to IRC: %s" % e)

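How the new style handling combines with the existing colour handling when the message is built; a condensed sketch using a couple of entries from the tables in the diff (the control characters are the usual mIRC formatting codes):

colornumbers = {'red': "04", 'light_blue': "12"}
stylechoices = {'bold': "\x02", 'underline': "\x1F"}

def format_message(msg, color='none', style=None):
    # same assembly order as the diff: style code, then colour code, then text
    styletext = stylechoices.get(style, "")
    colortext = "\x03" + colornumbers[color] if color in colornumbers else ""
    return styletext + colortext + msg

print repr(format_message("deploy finished", color='red', style='bold'))
# '\x02\x0304deploy finished'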
@@ -24,33 +24,34 @@ version_added: "2.0"
 module: sendgrid
 short_description: Sends an email with the SendGrid API
 description:
-   - Sends an email with a SendGrid account through their API, not through
-     the SMTP service.
+   - "Sends an email with a SendGrid account through their API, not through
+     the SMTP service."
 notes:
-   - This module is non-idempotent because it sends an email through the
-     external API. It is idempotent only in the case that the module fails.
-   - Like the other notification modules, this one requires an external
+   - "This module is non-idempotent because it sends an email through the
+     external API. It is idempotent only in the case that the module fails."
+   - "Like the other notification modules, this one requires an external
      dependency to work. In this case, you'll need an active SendGrid
-     account.
+     account."
 options:
   username:
     description:
-      username for logging into the SendGrid account
+      - username for logging into the SendGrid account
     required: true
   password:
-    description: password that corresponds to the username
+    description:
+      - password that corresponds to the username
    required: true
   from_address:
     description:
-      the address in the "from" field for the email
+      - the address in the "from" field for the email
     required: true
   to_addresses:
     description:
-      a list with one or more recipient email addresses
+      - a list with one or more recipient email addresses
     required: true
   subject:
     description:
-      the desired subject for the email
+      - the desired subject for the email
     required: true

 author: "Matt Makai (@makaimc)"
@@ -250,7 +250,7 @@ def main():
             outdated = npm.list_outdated()
             if len(missing) or len(outdated):
                 changed = True
-                npm.install()
+                npm.update()
     else: #absent
         installed, missing = npm.list()
         if name in installed:
@@ -37,6 +37,11 @@ options:
       - name of package to install/remove
     required: false
     default: None
+  path:
+    description:
+      - "':' separated list of paths to search for 'brew' executable. Since A package (I(formula) in homebrew parlance) location is prefixed relative to the actual path of I(brew) command, providing an alternative I(brew) path enables managing different set of packages in an alternative location in the system."
+    required: false
+    default: '/usr/local/bin'
   state:
     description:
       - state of the package

@@ -64,10 +69,22 @@ options:
 notes: []
 '''
 EXAMPLES = '''
+# Install formula foo with 'brew' in default path (C(/usr/local/bin))
 - homebrew: name=foo state=present
+
+# Install formula foo with 'brew' in alternate path C(/my/other/location/bin)
+- homebrew: name=foo path=/my/other/location/bin state=present
+
+# Update homebrew first and install formula foo with 'brew' in default path
 - homebrew: name=foo state=present update_homebrew=yes
+
+# Update homebrew first and upgrade formula foo to latest available with 'brew' in default path
 - homebrew: name=foo state=latest update_homebrew=yes
+
+# Update homebrew and upgrade all packages
 - homebrew: update_homebrew=yes upgrade_all=yes
+
+# Miscellaneous other examples
 - homebrew: name=foo state=head
 - homebrew: name=foo state=linked
 - homebrew: name=foo state=absent

@@ -303,7 +320,7 @@ class Homebrew(object):
         return package
     # /class properties -------------------------------------------- }}}

-    def __init__(self, module, path=None, packages=None, state=None,
+    def __init__(self, module, path, packages=None, state=None,
                  update_homebrew=False, upgrade_all=False,
                  install_options=None):
         if not install_options:

@@ -329,13 +346,8 @@ class Homebrew(object):
             setattr(self, key, val)

     def _prep(self):
-        self._prep_path()
         self._prep_brew_path()

-    def _prep_path(self):
-        if not self.path:
-            self.path = ['/usr/local/bin']
-
     def _prep_brew_path(self):
         if not self.module:
             self.brew_path = None

@@ -770,7 +782,10 @@ def main():
             required=False,
             type='list',
         ),
-        path=dict(required=False),
+        path=dict(
+            default="/usr/local/bin",
+            required=False,
+        ),
         state=dict(
             default="present",
             choices=[

@@ -808,8 +823,6 @@ def main():
     path = p['path']
     if path:
         path = path.split(':')
-    else:
-        path = ['/usr/local/bin']

     state = p['state']
     if state in ('present', 'installed'):
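The new path option is simply split on ':' in main() (last hunk above); a one-line illustration with placeholder paths:

path = '/my/other/location/bin:/usr/local/bin'   # example value for the path option
print path.split(':')   # ['/my/other/location/bin', '/usr/local/bin']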
561 packaging/os/yumrepo.py Normal file
@@ -0,0 +1,561 @@
#!/usr/bin/python
# encoding: utf-8

# (c) 2015, Jiri Tyr <jiri.tyr@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.


import ConfigParser
import os
DOCUMENTATION = '''
---
module: yumrepo
author: Jiri Tyr (@jtyr)
version_added: '2.1'
short_description: Add and remove YUM repositories
description:
  - Add or remove YUM repositories in RPM-based Linux distributions.

options:
  bandwidth:
    required: false
    default: 0
    description:
      - Maximum available network bandwidth in bytes/second. Used with the
        I(throttle) option.
      - If I(throttle) is a percentage and bandwidth is C(0) then bandwidth
        throttling will be disabled. If I(throttle) is expressed as a data rate
        (bytes/sec) then this option is ignored. Default is C(0) (no bandwidth
        throttling).
  baseurl:
    required: false
    default: None
    description:
      - URL to the directory where the yum repository's 'repodata' directory
        lives.
      - This or the I(mirrorlist) parameter is required.
  cost:
    required: false
    default: 1000
    description:
      - Relative cost of accessing this repository. Useful for weighing one
        repo's packages as greater/less than any other.
  description:
    required: false
    default: None
    description:
      - A human readable string describing the repository.
  enabled:
    required: false
    choices: ['yes', 'no']
    default: 'yes'
    description:
      - This tells yum whether or not to use this repository.
  enablegroups:
    required: false
    choices: ['yes', 'no']
    default: 'yes'
    description:
      - Determines whether yum will allow the use of package groups for this
        repository.
  exclude:
    required: false
    default: None
    description:
      - List of packages to exclude from updates or installs. This should be a
        space separated list. Shell globs using wildcards (eg. C(*) and C(?))
        are allowed.
      - The list can also be a regular YAML array.
  failovermethod:
    required: false
    choices: [roundrobin, priority]
    default: roundrobin
    description:
      - C(roundrobin) randomly selects a URL out of the list of URLs to start
        with and proceeds through each of them as it encounters a failure
        contacting the host.
      - C(priority) starts from the first baseurl listed and reads through them
        sequentially.
  file:
    required: false
    default: None
    description:
      - File to use to save the repo in. Defaults to the value of I(name).
  gpgcakey:
    required: false
    default: None
    description:
      - A URL pointing to the ASCII-armored CA key file for the repository.
  gpgcheck:
    required: false
    choices: ['yes', 'no']
    default: 'no'
    description:
      - Tells yum whether or not it should perform a GPG signature check on
        packages.
  gpgkey:
    required: false
    default: None
    description:
      - A URL pointing to the ASCII-armored GPG key file for the repository.
  http_caching:
    required: false
    choices: [all, packages, none]
    default: all
    description:
      - Determines how upstream HTTP caches are instructed to handle any HTTP
        downloads that Yum does.
      - C(all) means that all HTTP downloads should be cached.
      - C(packages) means that only RPM package downloads should be cached (but
        not repository metadata downloads).
      - C(none) means that no HTTP downloads should be cached.
  includepkgs:
    required: false
    default: None
    description:
      - List of packages you want to only use from a repository. This should be
        a space separated list. Shell globs using wildcards (eg. C(*) and C(?))
        are allowed. Substitution variables (e.g. C($releasever)) are honored
        here.
      - The list can also be a regular YAML array.
  keepalive:
    required: false
    choices: ['yes', 'no']
    default: 'no'
    description:
      - This tells yum whether or not HTTP/1.1 keepalive should be used with
        this repository. This can improve transfer speeds by using one
        connection when downloading multiple files from a repository.
  metadata_expire:
    required: false
    default: 21600
    description:
      - Time (in seconds) after which the metadata will expire.
      - Default value is 6 hours.
  metalink:
    required: false
    default: None
    description:
      - Specifies a URL to a metalink file for the repomd.xml, a list of
        mirrors for the entire repository are generated by converting the
|
||||||
|
mirrors for the repomd.xml file to a baseurl.
|
||||||
|
mirrorlist:
|
||||||
|
required: false
|
||||||
|
default: None
|
||||||
|
description:
|
||||||
|
- Specifies a URL to a file containing a list of baseurls.
|
||||||
|
- This or the I(baseurl) parameter is required.
|
||||||
|
mirrorlist_expire:
|
||||||
|
required: false
|
||||||
|
default: 21600
|
||||||
|
description:
|
||||||
|
- Time (in seconds) after which the mirrorlist locally cached will
|
||||||
|
expire.
|
||||||
|
- Default value is 6 hours.
|
||||||
|
name:
|
||||||
|
required: true
|
||||||
|
description:
|
||||||
|
- Unique repository ID.
|
||||||
|
password:
|
||||||
|
required: false
|
||||||
|
default: None
|
||||||
|
description:
|
||||||
|
- Password to use with the username for basic authentication.
|
||||||
|
protect:
|
||||||
|
required: false
|
||||||
|
choices: ['yes', 'no']
|
||||||
|
default: 'no'
|
||||||
|
description:
|
||||||
|
- Protect packages from updates from other repositories.
|
||||||
|
proxy:
|
||||||
|
required: false
|
||||||
|
default: None
|
||||||
|
description:
|
||||||
|
- URL to the proxy server that yum should use.
|
||||||
  proxy_password:
    required: false
    default: None
    description:
      - Password to use for the proxy.
  proxy_username:
    required: false
    default: None
    description:
      - Username to use for the proxy.
  repo_gpgcheck:
    required: false
    choices: ['yes', 'no']
    default: 'no'
    description:
      - This tells yum whether or not it should perform a GPG signature check
        on the repodata from this repository.
  reposdir:
    required: false
    default: /etc/yum.repos.d
    description:
      - Directory where the C(.repo) files will be stored.
  retries:
    required: false
    default: 10
    description:
      - Set the number of times any attempt to retrieve a file should retry
        before returning an error. Setting this to C(0) makes yum try forever.
  skip_if_unavailable:
    required: false
    choices: ['yes', 'no']
    default: 'no'
    description:
      - If set to C(yes) yum will continue running if this repository cannot be
        contacted for any reason. This should be set carefully as all repos are
        consulted for any given command.
  sslcacert:
    required: false
    default: None
    description:
      - Path to the directory containing the databases of the certificate
        authorities yum should use to verify SSL certificates.
  ssl_check_cert_permissions:
    required: false
    choices: ['yes', 'no']
    default: 'no'
    description:
      - Whether yum should check the permissions on the paths for the
        certificates on the repository (both remote and local).
      - If we can't read any of the files then yum will force
        I(skip_if_unavailable) to be true. This is most useful for non-root
        processes which use yum on repos that have client cert files which are
        readable only by root.
  sslclientcert:
    required: false
    default: None
    description:
      - Path to the SSL client certificate yum should use to connect to
        repos/remote sites.
  sslclientkey:
    required: false
    default: None
    description:
      - Path to the SSL client key yum should use to connect to repos/remote
        sites.
  sslverify:
    required: false
    choices: ['yes', 'no']
    default: 'yes'
    description:
      - Defines whether yum should verify SSL certificates/hosts at all.
  state:
    required: false
    choices: [absent, present]
    default: present
    description:
      - Whether the repository definition should be present or absent.
  throttle:
    required: false
    default: None
    description:
      - Enable bandwidth throttling for downloads.
      - This option can be expressed as an absolute data rate in bytes/sec. An
        SI prefix (k, M or G) may be appended to the bandwidth value.
  timeout:
    required: false
    default: 30
    description:
      - Number of seconds to wait for a connection before timing out.
  username:
    required: false
    default: None
    description:
      - Username to use for basic authentication to a repo or really any url.

extends_documentation_fragment:
  - files

notes:
  - All comments will be removed if modifying an existing repo file.
  - Section order is preserved in an existing repo file.
  - Parameters in a section are ordered alphabetically in an existing repo
    file.
  - The repo file will be automatically deleted if it contains no repository.
'''

EXAMPLES = '''
- name: Add repository
  yumrepo:
    name: epel
    description: EPEL YUM repo
    baseurl: http://download.fedoraproject.org/pub/epel/$releasever/$basearch/

- name: Add multiple repositories into the same file (1/2)
  yumrepo:
    name: epel
    description: EPEL YUM repo
    file: external_repos
    baseurl: http://download.fedoraproject.org/pub/epel/$releasever/$basearch/
    gpgcheck: no

- name: Add multiple repositories into the same file (2/2)
  yumrepo:
    name: rpmforge
    description: RPMforge YUM repo
    file: external_repos
    baseurl: http://apt.sw.be/redhat/el7/en/$basearch/rpmforge
    mirrorlist: http://mirrorlist.repoforge.org/el7/mirrors-rpmforge
    enabled: no

- name: Remove repository
  yumrepo:
    name: epel
    state: absent

- name: Remove repository from a specific repo file
  yumrepo:
    name: epel
    file: external_repos
    state: absent
'''

RETURN = '''
repo:
  description: repository name
  returned: success
  type: string
  sample: "epel"
state:
  description: state of the target, after execution
  returned: success
  type: string
  sample: "present"
'''


class YumRepo(object):
    # Class global variables
    module = None
    params = None
    section = None
    repofile = ConfigParser.RawConfigParser()

    # List of parameters which will be allowed in the repo file output
    allowed_params = [
        'bandwidth', 'baseurl', 'cost', 'enabled', 'enablegroups', 'exclude',
        'failovermethod', 'gpgcakey', 'gpgcheck', 'gpgkey', 'http_caching',
        'includepkgs', 'keepalive', 'metadata_expire', 'metalink',
        'mirrorlist', 'mirrorlist_expire', 'name', 'password', 'protect',
        'proxy', 'proxy_password', 'proxy_username', 'repo_gpgcheck',
        'retries', 'skip_if_unavailable', 'sslcacert',
        'ssl_check_cert_permissions', 'sslclientcert', 'sslclientkey',
        'sslverify', 'throttle', 'timeout', 'username']

    # List of parameters which can be a list
    list_params = ['exclude', 'includepkgs']

    def __init__(self, module):
        # To be able to use fail_json
        self.module = module
        # Shortcut for the params
        self.params = self.module.params
        # Section is always the repoid
        self.section = self.params['repoid']

        # Check if repo directory exists
        repos_dir = self.params['reposdir']
        if not os.path.isdir(repos_dir):
            self.module.fail_json(
                msg='Repo directory "%s" does not exist.' % repos_dir)

        # Get the given or the default repo file name
        repo_file = self.params['repoid']
        if self.params['file'] is not None:
            repo_file = self.params['file']

        # Set dest; also used to set dest parameter for the FS attributes
        self.params['dest'] = os.path.join(repos_dir, "%s.repo" % repo_file)

        # Read the repo file if it exists
        if os.path.isfile(self.params['dest']):
            self.repofile.read(self.params['dest'])

    def add(self):
        # Remove already existing repo and create a new one
        if self.repofile.has_section(self.section):
            self.repofile.remove_section(self.section)

        # Add section
        self.repofile.add_section(self.section)

        # Baseurl/mirrorlist is not required because for removal we need only
        # the repo name. This is why we check if the baseurl/mirrorlist is
        # defined.
        if (self.params['baseurl'], self.params['mirrorlist']) == (None, None):
            self.module.fail_json(
                msg='Parameter "baseurl" or "mirrorlist" is required for '
                    'adding a new repo.')

        # Set options
        for key, value in sorted(self.params.items()):
            if key in self.list_params and isinstance(value, list):
                # Join items into one string for specific parameters
                value = ' '.join(value)
            elif isinstance(value, bool):
                # Convert boolean value to integer
                value = int(value)

            # Set the value only if it was defined (default is None)
            if value is not None and key in self.allowed_params:
                self.repofile.set(self.section, key, value)

    def save(self):
        if len(self.repofile.sections()):
            # Write data into the file
            try:
                fd = open(self.params['dest'], 'wb')
            except IOError:
                self.module.fail_json(
                    msg='Cannot open repo file %s.' %
                    self.params['dest'])

            try:
                try:
                    self.repofile.write(fd)
                except IOError:
                    self.module.fail_json(
                        msg='Cannot write repo file %s.' %
                        self.params['dest'])
            finally:
                fd.close()
        else:
            # Remove the file if there are no repos
            try:
                os.remove(self.params['dest'])
            except OSError:
                self.module.fail_json(
                    msg='Cannot remove empty repo file %s.' %
                    self.params['dest'])

    def remove(self):
        # Remove section if exists
        if self.repofile.has_section(self.section):
            self.repofile.remove_section(self.section)

    def dump(self):
        repo_string = ""

        # Compose the repo file
        for section in sorted(self.repofile.sections()):
            repo_string += "[%s]\n" % section

            for key, value in sorted(self.repofile.items(section)):
                repo_string += "%s = %s\n" % (key, value)

            repo_string += "\n"

        return repo_string


def main():
    # Module settings
    module = AnsibleModule(
        argument_spec=dict(
            bandwidth=dict(),
            baseurl=dict(),
            cost=dict(),
            description=dict(),
            enabled=dict(type='bool'),
            enablegroups=dict(type='bool'),
            exclude=dict(),
            failovermethod=dict(choices=['roundrobin', 'priority']),
            file=dict(),
            gpgcakey=dict(),
            gpgcheck=dict(type='bool'),
            gpgkey=dict(),
            http_caching=dict(choices=['all', 'packages', 'none']),
            includepkgs=dict(),
            keepalive=dict(type='bool'),
            metadata_expire=dict(),
            metalink=dict(),
            mirrorlist=dict(),
            mirrorlist_expire=dict(),
            name=dict(required=True),
            password=dict(no_log=True),
            protect=dict(type='bool'),
            proxy=dict(),
            proxy_password=dict(no_log=True),
            proxy_username=dict(),
            repo_gpgcheck=dict(type='bool'),
            reposdir=dict(default='/etc/yum.repos.d'),
            retries=dict(),
            skip_if_unavailable=dict(type='bool'),
            sslcacert=dict(),
            ssl_check_cert_permissions=dict(type='bool'),
            sslclientcert=dict(),
            sslclientkey=dict(),
            sslverify=dict(type='bool'),
            state=dict(choices=['present', 'absent'], default='present'),
            throttle=dict(),
            timeout=dict(),
            username=dict(),
        ),
        add_file_common_args=True,
        supports_check_mode=True,
    )

    name = module.params['name']
    state = module.params['state']

    # Rename "name" and "description" to ensure correct key sorting
    module.params['repoid'] = module.params['name']
    module.params['name'] = module.params['description']
    del module.params['description']

    # Instantiate the YumRepo object
    yumrepo = YumRepo(module)

    # Get repo status before change
    yumrepo_before = yumrepo.dump()

    # Perform action depending on the state
    if state == 'present':
        yumrepo.add()
    elif state == 'absent':
        yumrepo.remove()

    # Get repo status after change
    yumrepo_after = yumrepo.dump()

    # Compare repo states
    changed = yumrepo_before != yumrepo_after

    # Save the file only if not in check mode and if there was a change
    if not module.check_mode and changed:
        yumrepo.save()

    # Change file attributes if needed
    if os.path.isfile(module.params['dest']):
        file_args = module.load_file_common_arguments(module.params)
        changed = module.set_fs_attributes_if_different(file_args, changed)

    # Print status of the change
    module.exit_json(changed=changed, repo=name, state=state)


# Import module snippets
from ansible.module_utils.basic import *


if __name__ == '__main__':
    main()
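As an illustration of what the new module produces: based on the add(), dump() and save() logic above (keys sorted, the description stored under name, the file named after the repo ID when file is not given), the first task in EXAMPLES would write roughly the following to /etc/yum.repos.d/epel.repo:

[epel]
baseurl = http://download.fedoraproject.org/pub/epel/$releasever/$basearch/
name = EPEL YUM repo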
@@ -161,7 +161,7 @@ def get_package_state(m, packages):
     for stdoutline in stdout.splitlines():
         match = rpmoutput_re.match(stdoutline)
         if match == None:
-            return None
+            continue
         package = match.group(1)
         result = match.group(2)
         if result == 'is installed':
@@ -169,18 +169,13 @@ def get_package_state(m, packages):
         else:
             installed_state[package] = False
 
-    for package in packages:
-        if package not in installed_state:
-            print package + ' was not returned by rpm \n'
-            return None
-
     return installed_state
 
 # Function used to make sure a package is present.
 def package_present(m, name, installed_state, package_type, disable_gpg_check, disable_recommends, old_zypper):
     packages = []
     for package in name:
-        if installed_state[package] is False:
+        if package not in installed_state or installed_state[package] is False:
             packages.append(package)
     if len(packages) != 0:
         cmd = ['/usr/bin/zypper', '--non-interactive']
@@ -246,7 +241,7 @@ def package_latest(m, name, installed_state, package_type, disable_gpg_check, di
 def package_absent(m, name, installed_state, package_type, old_zypper):
     packages = []
     for package in name:
-        if installed_state[package] is True:
+        if package not in installed_state or installed_state[package] is True:
             packages.append(package)
     if len(packages) != 0:
         cmd = ['/usr/bin/zypper', '--non-interactive', 'remove', '-t', package_type]
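A minimal sketch (separate from the module) of why the membership guard above matters: the old code returned None from get_package_state() as soon as one rpm output line failed to parse, while the new code skips that line, so a requested package can simply be missing from the dict. The package names below are made up.

# Sketch only: mirrors the new "package not in installed_state" check from the hunk above.
installed_state = {'vim': True}          # the 'emacs' line was skipped during rpm parsing
requested = ['vim', 'emacs']
to_install = [p for p in requested
              if p not in installed_state or installed_state[p] is False]
print(to_install)                        # ['emacs'] -- no KeyError, the package still gets installed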
@@ -75,6 +75,7 @@ options:
     default: 0
 notes:
   - Not tested on any Debian based system.
+  - Requires the python2 bindings of firewalld, which may not be installed by default if the distribution switched to python 3
 requirements: [ 'firewalld >= 0.2.11' ]
 author: "Adam Miller (@maxamillion)"
 '''
@@ -251,7 +252,7 @@ def main():
         module.fail(msg='permanent is a required parameter')
 
     if not HAS_FIREWALLD:
-        module.fail_json(msg='firewalld required for this module')
+        module.fail_json(msg='firewalld and its python 2 module are required for this module')
 
     ## Pre-run version checking
     if FW_VERSION < "0.2.11":
@@ -208,6 +208,10 @@ options:
       - "ctstate is a list of the connection states to match in the conntrack module.
         Possible states are: 'INVALID', 'NEW', 'ESTABLISHED', 'RELATED', 'UNTRACKED', 'SNAT', 'DNAT'"
     required: false
+  limit:
+    description:
+      - "Specifies the maximum average number of matches to allow per second. The number can specify units explicitly, using `/second', `/minute', `/hour' or `/day', or parts of them (so `5/second' is the same as `5/s')."
+    required: false
 '''
 
 EXAMPLES = '''
@@ -242,7 +246,12 @@ def append_comm(rule, param):
 def append_conntrack(rule, param):
     if param:
         rule.extend(['-m'])
-        rule.extend(['conntrack'])
+        rule.extend(['state'])
+
+def append_limit(rule, param):
+    if param:
+        rule.extend(['-m'])
+        rule.extend(['limit'])
 
 
 def construct_rule(params):
@@ -262,8 +271,11 @@ def construct_rule(params):
     append_param(rule, params['to_ports'], '--to-ports', False)
     append_comm(rule, params['comment'])
     append_param(rule, params['comment'], '--comment', False)
+    if params['ctstate']:
         append_conntrack(rule, params['ctstate'])
-        append_param(rule, ','.join(params['ctstate']), '--ctstate', False)
+        append_param(rule, ','.join(params['ctstate']), '--state', False)
+    append_limit(rule, params['limit'])
+    append_param(rule, params['limit'], '--limit', False)
     return rule
 
 
@@ -313,7 +325,8 @@ def main():
         destination_port=dict(required=False, default=None, type='str'),
         to_ports=dict(required=False, default=None, type='str'),
         comment=dict(required=False, default=None, type='str'),
-        ctstate=dict(required=False, default=None, type='list'),
+        ctstate=dict(required=False, default=[], type='list'),
+        limit=dict(required=False, default=None, type='str'),
     ),
 )
 args = dict(
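A rough, self-contained sketch of the arguments the updated construct_rule() path would now emit for the conntrack and limit matches; the option values are made up and this is not the module code itself.

# Mirrors append_conntrack()/append_limit() plus the two append_param() calls above.
rule = []
ctstate = ['NEW', 'ESTABLISHED']   # hypothetical task value
limit = '5/s'                      # hypothetical task value
if ctstate:
    rule.extend(['-m', 'state', '--state', ','.join(ctstate)])
if limit:
    rule.extend(['-m', 'limit', '--limit', limit])
print(' '.join(rule))   # -m state --state NEW,ESTABLISHED -m limit --limit 5/s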
@@ -40,7 +40,7 @@ options:
     description:
       - Limit type, see C(man limits) for an explanation
     required: true
-    choices: [ "hard", "soft" ]
+    choices: [ "hard", "soft", "-" ]
   limit_item:
     description:
       - The limit to be set
@@ -78,14 +78,22 @@ options:
       - Modify the limits.conf path.
     required: false
     default: "/etc/security/limits.conf"
+  comment:
+    description:
+      - Comment associated with the limit.
+    required: false
+    default: ''
 '''
 
 EXAMPLES = '''
-# Add or modify limits for the user joe
+# Add or modify nofile soft limit for the user joe
 - pam_limits: domain=joe limit_type=soft limit_item=nofile value=64000
 
-# Add or modify limits for the user joe. Keep or set the maximal value
-- pam_limits: domain=joe limit_type=soft limit_item=nofile value=1000000
+# Add or modify fsize hard limit for the user smith. Keep or set the maximal value.
+- pam_limits: domain=smith limit_type=hard limit_item=fsize value=1000000 use_max=yes
+
+# Add or modify memlock, both soft and hard, limit for the user james with a comment.
+- pam_limits: domain=james limit_type=- limit_item=memlock value=unlimited comment="unlimited memory lock for james"
 '''
 
 def main():
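For reference, limits.conf entries follow the standard "domain type item value" layout, so the new memlock example above should end up as a line roughly like the one below; exactly how the new comment text is attached depends on module internals not shown in this hunk.

james    -    memlock    unlimited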
@@ -15,7 +15,6 @@
 # You should have received a copy of the GNU General Public License
 # along with this software. If not, see <http://www.gnu.org/licenses/>.
 
-import json
 import os
 import pipes
 import stat
@@ -197,7 +196,7 @@ def main():
             error=True, stdout=stdout, stderr=stderr)
     elif rc == 2:
         # success with changes
-        module.exit_json(rc=0, changed=True)
+        module.exit_json(rc=0, changed=True, stdout=stdout, stderr=stderr)
     elif rc == 124:
         # timeout
         module.exit_json(
@@ -352,7 +352,7 @@ Function Nssm-Update-Credentials
     }
     else {
         $fullUser = $user
-        If (-not($user -contains "@") -and ($user.Split("\").count -eq 1)) {
+        If (-Not($user.contains("@")) -And ($user.Split("\").count -eq 1)) {
             $fullUser = ".\" + $user
         }
 
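The reason for the change above: in PowerShell, -contains is a collection-membership operator, so $user -contains "@" only tests whether $user as a whole equals "@"; the string method .contains("@") actually checks for the character, which is what the domain-account detection needs.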
@@ -31,8 +31,6 @@ $state = Get-Attr -obj $params -name "state" -validateSet "present","absent" -de
 $registryData = Get-Attr -obj $params -name "data" -default $null
 $registryDataType = Get-Attr -obj $params -name "datatype" -validateSet "binary","dword","expandstring","multistring","string","qword" -default "string"
 
-$registryKey = "Registry::" + $registryKey
-
 If ($state -eq "present" -and $registryData -eq $null -and $registryValue -ne $null)
 {
     Fail-Json $result "missing required argument: data"
@@ -337,7 +337,7 @@ Function RunAsScheduledJob {
     $sw = [System.Diagnostics.Stopwatch]::StartNew()
 
     # NB: output from scheduled jobs is delayed after completion (including the sub-objects after the primary Output object is available)
-    While (($job.Output -eq $null -or -not $job.Output.Keys.Contains('job_output')) -and $sw.ElapsedMilliseconds -lt 15000) {
+    While (($job.Output -eq $null -or -not ($job.Output | Get-Member -Name Keys) -or -not $job.Output.Keys.Contains('job_output')) -and $sw.ElapsedMilliseconds -lt 15000) {
         Write-DebugLog "Waiting for job output to populate..."
         Start-Sleep -Milliseconds 500
     }