Merge branch 'devel' into mysql_anon_user

Commit ee0412c8b4 by Lee Hardy, 2015-11-17 09:09:05 +00:00
52 changed files with 963 additions and 393 deletions

View file

@ -1 +1 @@
2.0.0-0.3.beta1
2.0.0-0.5.beta3

View file

@ -66,7 +66,7 @@ options:
default: paravirtual
choices: ["paravirtual", "hvm"]
author: Lorin Hochstein
author: "Ansible Core Team (deprecated)"
'''
EXAMPLES = '''

View file

@ -225,7 +225,7 @@ options:
description:
- whether instance is using optimized EBS volumes, see U(http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSOptimized.html)
required: false
default: false
default: 'false'
exact_count:
version_added: "1.5"
description:
@ -481,6 +481,7 @@ EXAMPLES = '''
#
- ec2:
state: running
key_name: mykey
instance_type: c1.medium
image: ami-40603AD1
@ -498,6 +499,7 @@ EXAMPLES = '''
#
- ec2:
state: running
key_name: mykey
instance_type: c1.medium
image: ami-40603AD1

View file

@ -81,7 +81,12 @@ options:
required: false
default: null
version_added: "2.0"
launch_permissions:
description:
- Users and groups that should be able to launch the AMI. Expects a dictionary with a key of user_ids and/or group_names. user_ids should be a list of account IDs. group_names should be a list of groups; "all" is currently the only acceptable value.
required: false
default: null
version_added: "2.0"
author: "Evan Duffield (@scicoin-project) <eduffield@iacquire.com>"
extends_documentation_fragment:
- aws
@ -163,6 +168,25 @@ EXAMPLES = '''
delete_snapshot: False
state: absent
# Update AMI Launch Permissions, making it public
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: "{{ instance.image_id }}"
state: present
launch_permissions:
group_names: ['all']
# Allow AMI to be launched by another account
- ec2_ami:
aws_access_key: xxxxxxxxxxxxxxxxxxxxxxx
aws_secret_key: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
region: xxxxxx
image_id: "{{ instance.image_id }}"
state: present
launch_permissions:
user_ids: ['123456789012']
'''
import sys
@ -193,6 +217,7 @@ def create_image(module, ec2):
no_reboot = module.params.get('no_reboot')
device_mapping = module.params.get('device_mapping')
tags = module.params.get('tags')
launch_permissions = module.params.get('launch_permissions')
try:
params = {'instance_id': instance_id,
@ -253,6 +278,12 @@ def create_image(module, ec2):
ec2.create_tags(image_id, tags)
except boto.exception.EC2ResponseError, e:
module.fail_json(msg = "Image tagging failed => %s: %s" % (e.error_code, e.error_message))
if launch_permissions:
try:
img = ec2.get_image(image_id)
img.set_launch_permissions(**launch_permissions)
except boto.exception.BotoServerError, e:
module.fail_json(msg="%s: %s" % (e.error_code, e.error_message), image_id=image_id)
module.exit_json(msg="AMI creation operation complete", image_id=image_id, state=img.state, changed=True)
@ -293,6 +324,36 @@ def deregister_image(module, ec2):
sys.exit(0)
def update_image(module, ec2):
"""
Updates AMI
"""
image_id = module.params.get('image_id')
launch_permissions = module.params.get('launch_permissions')
if 'user_ids' in launch_permissions:
launch_permissions['user_ids'] = [str(user_id) for user_id in launch_permissions['user_ids']]
img = ec2.get_image(image_id)
if img == None:
module.fail_json(msg = "Image %s does not exist" % image_id, changed=False)
try:
set_permissions = img.get_launch_permissions()
if set_permissions != launch_permissions:
if ('user_ids' in launch_permissions and launch_permissions['user_ids']) or ('group_names' in launch_permissions and launch_permissions['group_names']):
res = img.set_launch_permissions(**launch_permissions)
elif ('user_ids' in set_permissions and set_permissions['user_ids']) or ('group_names' in set_permissions and set_permissions['group_names']):
res = img.remove_launch_permissions(**set_permissions)
else:
module.exit_json(msg="AMI not updated", launch_permissions=set_permissions, changed=False)
module.exit_json(msg="AMI launch permissions updated", launch_permissions=launch_permissions, set_perms=set_permissions, changed=True)
else:
module.exit_json(msg="AMI not updated", launch_permissions=set_permissions, changed=False)
except boto.exception.BotoServerError, e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
@ -306,7 +367,8 @@ def main():
no_reboot = dict(default=False, type="bool"),
state = dict(default='present'),
device_mapping = dict(type='list'),
tags = dict(type='dict')
tags = dict(type='dict'),
launch_permissions = dict(type='dict')
)
)
module = AnsibleModule(argument_spec=argument_spec)
@ -326,6 +388,10 @@ def main():
deregister_image(module, ec2)
elif module.params.get('state') == 'present':
if module.params.get('image_id') and module.params.get('launch_permissions'):
# Update image's launch permissions
update_image(module, ec2)
# Changed is always set to true when provisioning new AMI
if not module.params.get('instance_id'):
module.fail_json(msg='instance_id parameter is required for new image')

View file

@ -46,15 +46,15 @@ options:
required: true
min_size:
description:
- Minimum number of instances in group
- Minimum number of instances in group. If unspecified, the current group value will be used.
required: false
max_size:
description:
- Maximum number of instances in group
- Maximum number of instances in group. If unspecified, the current group value will be used.
required: false
desired_capacity:
description:
- Desired number of instances in group
- Desired number of instances in group. If unspecified, the current group value will be used.
required: false
replace_all_instances:
description:
@ -589,6 +589,13 @@ def replace(connection, module):
changed = False
return(changed, props)
#check if min_size/max_size/desired capacity have been specified and if not use ASG values
if min_size is None:
min_size = as_group.min_size
if max_size is None:
max_size = as_group.max_size
if desired_capacity is None:
desired_capacity = as_group.desired_capacity
# set temporary settings and wait for them to be reached
# This should get overridden if the number of instances left is less than the batch size.
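For reference, since min_size, max_size and desired_capacity now fall back to the group's current values when omitted, a resize task only needs to name the value it changes; a minimal, hypothetical sketch (group name and region are placeholders):

# Scale an existing auto scaling group to 4 instances;
# min_size and max_size keep their current values because they are omitted.
- ec2_asg:
    name: webapp-asg
    desired_capacity: 4
    region: us-east-1
    state: present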

View file

@ -61,7 +61,6 @@ options:
extends_documentation_fragment:
- aws
- ec2
author: "Lorin Hochstein (@lorin) <lorin@nimbisservices.com>"
author: "Rick Mendes (@rickmendes) <rmendes@illumina.com>"
notes:
- This module will return C(public_ip) on success, which will contain the

View file

@ -109,6 +109,11 @@ options:
required: false
aliases: []
version_added: "1.8"
idle_timeout:
description:
- ELB connections from clients and to servers are timed out after this amount of time
required: false
version_added: "2.0"
cross_az_load_balancing:
description:
- Distribute load across all configured Availability Zones
@ -243,13 +248,14 @@ EXAMPLES = """
load_balancer_port: 80
instance_port: 80
# Create an ELB with connection draining and cross availability
# Create an ELB with connection draining, increased idle timeout and cross availability
# zone load balancing
- local_action:
module: ec2_elb_lb
name: "New ELB"
state: present
connection_draining_timeout: 60
idle_timeout: 300
cross_az_load_balancing: "yes"
region: us-east-1
zones:
@ -316,6 +322,7 @@ class ElbManager(object):
zones=None, purge_zones=None, security_group_ids=None,
health_check=None, subnets=None, purge_subnets=None,
scheme="internet-facing", connection_draining_timeout=None,
idle_timeout=None,
cross_az_load_balancing=None, access_logs=None,
stickiness=None, region=None, **aws_connect_params):
@ -331,6 +338,7 @@ class ElbManager(object):
self.purge_subnets = purge_subnets
self.scheme = scheme
self.connection_draining_timeout = connection_draining_timeout
self.idle_timeout = idle_timeout
self.cross_az_load_balancing = cross_az_load_balancing
self.access_logs = access_logs
self.stickiness = stickiness
@ -359,6 +367,8 @@ class ElbManager(object):
# set them to avoid errors
if self._check_attribute_support('connection_draining'):
self._set_connection_draining_timeout()
if self._check_attribute_support('connecting_settings'):
self._set_idle_timeout()
if self._check_attribute_support('cross_zone_load_balancing'):
self._set_cross_az_load_balancing()
if self._check_attribute_support('access_log'):
@ -456,6 +466,9 @@ class ElbManager(object):
if self._check_attribute_support('connection_draining'):
info['connection_draining_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectionDraining').timeout
if self._check_attribute_support('connecting_settings'):
info['idle_timeout'] = self.elb_conn.get_lb_attribute(self.name, 'ConnectingSettings').idle_timeout
if self._check_attribute_support('cross_zone_load_balancing'):
is_cross_az_lb_enabled = self.elb_conn.get_lb_attribute(self.name, 'CrossZoneLoadBalancing')
if is_cross_az_lb_enabled:
@ -745,6 +758,12 @@ class ElbManager(object):
attributes.connection_draining.enabled = False
self.elb_conn.modify_lb_attribute(self.name, 'ConnectionDraining', attributes.connection_draining)
def _set_idle_timeout(self):
attributes = self.elb.get_attributes()
if self.idle_timeout is not None:
attributes.connecting_settings.idle_timeout = self.idle_timeout
self.elb_conn.modify_lb_attribute(self.name, 'ConnectingSettings', attributes.connecting_settings)
def _policy_name(self, policy_type):
return __file__.split('/')[-1].replace('_', '-') + '-' + policy_type
@ -869,6 +888,7 @@ def main():
purge_subnets={'default': False, 'required': False, 'type': 'bool'},
scheme={'default': 'internet-facing', 'required': False},
connection_draining_timeout={'default': None, 'required': False},
idle_timeout={'default': None, 'required': False},
cross_az_load_balancing={'default': None, 'required': False},
stickiness={'default': None, 'required': False, 'type': 'dict'},
access_logs={'default': None, 'required': False, 'type': 'dict'}
@ -901,6 +921,7 @@ def main():
purge_subnets = module.params['purge_subnets']
scheme = module.params['scheme']
connection_draining_timeout = module.params['connection_draining_timeout']
idle_timeout = module.params['idle_timeout']
cross_az_load_balancing = module.params['cross_az_load_balancing']
stickiness = module.params['stickiness']
@ -928,7 +949,8 @@ def main():
elb_man = ElbManager(module, name, listeners, purge_listeners, zones,
purge_zones, security_group_ids, health_check,
subnets, purge_subnets, scheme,
connection_draining_timeout, cross_az_load_balancing,
connection_draining_timeout, idle_timeout,
cross_az_load_balancing,
access_logs, stickiness,
region=region, **aws_connect_params)
@ -939,6 +961,9 @@ def main():
if connection_draining_timeout and not elb_man._check_attribute_support('connection_draining'):
module.fail_json(msg="You must install boto >= 2.28.0 to use the connection_draining_timeout attribute")
if idle_timeout and not elb_man._check_attribute_support('connecting_settings'):
module.fail_json(msg="You must install boto >= 2.33.0 to use the idle_timeout attribute")
if state == 'present':
elb_man.ensure_ok()
elif state == 'absent':

View file

@ -57,12 +57,13 @@ options:
- The port number on which each of the cache nodes will accept connections
required: false
default: none
parameter_group:
cache_parameter_group:
description:
- Specify non-default parameter group names to be associated with cache cluster
- The name of the cache parameter group to associate with this cache cluster. If this argument is omitted, the default cache parameter group for the specified engine will be used.
required: false
default: None
version_added: "2.0"
aliases: [ 'parameter_group' ]
cache_subnet_group:
description:
- The subnet group name to associate with. Only use if inside a vpc. Required if inside a vpc
@ -150,7 +151,7 @@ class ElastiCacheManager(object):
EXIST_STATUSES = ['available', 'creating', 'rebooting', 'modifying']
def __init__(self, module, name, engine, cache_engine_version, node_type,
num_nodes, cache_port, parameter_group, cache_subnet_group,
num_nodes, cache_port, cache_parameter_group, cache_subnet_group,
cache_security_groups, security_group_ids, zone, wait,
hard_modify, region, **aws_connect_kwargs):
self.module = module
@ -160,7 +161,7 @@ class ElastiCacheManager(object):
self.node_type = node_type
self.num_nodes = num_nodes
self.cache_port = cache_port
self.parameter_group = parameter_group
self.cache_parameter_group = cache_parameter_group
self.cache_subnet_group = cache_subnet_group
self.cache_security_groups = cache_security_groups
self.security_group_ids = security_group_ids
@ -219,7 +220,7 @@ class ElastiCacheManager(object):
engine_version=self.cache_engine_version,
cache_security_group_names=self.cache_security_groups,
security_group_ids=self.security_group_ids,
cache_parameter_group_name=self.parameter_group,
cache_parameter_group_name=self.cache_parameter_group,
cache_subnet_group_name=self.cache_subnet_group,
preferred_availability_zone=self.zone,
port=self.cache_port)
@ -295,7 +296,7 @@ class ElastiCacheManager(object):
num_cache_nodes=self.num_nodes,
cache_node_ids_to_remove=nodes_to_remove,
cache_security_group_names=self.cache_security_groups,
cache_parameter_group_name=self.parameter_group,
cache_parameter_group_name=self.cache_parameter_group,
security_group_ids=self.security_group_ids,
apply_immediately=True,
engine_version=self.cache_engine_version)
@ -486,7 +487,8 @@ def main():
cache_engine_version={'required': False},
node_type={'required': False, 'default': 'cache.m1.small'},
num_nodes={'required': False, 'default': None, 'type': 'int'},
parameter_group={'required': False, 'default': None},
# alias for compat with the original PR 1950
cache_parameter_group={'required': False, 'default': None, 'aliases': ['parameter_group']},
cache_port={'required': False, 'type': 'int'},
cache_subnet_group={'required': False, 'default': None},
cache_security_groups={'required': False, 'default': [default],
@ -521,7 +523,7 @@ def main():
zone = module.params['zone']
wait = module.params['wait']
hard_modify = module.params['hard_modify']
parameter_group = module.params['parameter_group']
cache_parameter_group = module.params['cache_parameter_group']
if cache_subnet_group and cache_security_groups == [default]:
cache_security_groups = []
@ -540,7 +542,7 @@ def main():
elasticache_manager = ElastiCacheManager(module, name, engine,
cache_engine_version, node_type,
num_nodes, cache_port,
parameter_group,
cache_parameter_group,
cache_subnet_group,
cache_security_groups,
security_group_ids, zone, wait,
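For reference, a minimal, hypothetical task using the renamed option (cluster name, parameter group and region are placeholders); the old name still works through the parameter_group alias:

- elasticache:
    name: test-cache
    state: present
    engine: memcached
    node_type: cache.m1.small
    num_nodes: 1
    cache_parameter_group: default.memcached1.4
    region: us-east-1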

View file

@ -565,7 +565,10 @@ def main():
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
try:
iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
if region:
iam = boto.iam.connect_to_region(region, **aws_connect_kwargs)
else:
iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))

View file

@ -107,6 +107,7 @@ import sys
try:
import boto
import boto.iam
import boto.ec2
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
@ -246,7 +247,10 @@ def main():
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
try:
iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
if region:
iam = boto.iam.connect_to_region(region, **aws_connect_kwargs)
else:
iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))

View file

@ -183,6 +183,14 @@ def role_action(module, iam, name, policy_name, skip, pdoc, state):
current_policies = [cp for cp in iam.list_role_policies(name).
list_role_policies_result.
policy_names]
except boto.exception.BotoServerError as e:
if e.error_code == "NoSuchEntity":
# Role doesn't exist so it's safe to assume the policy doesn't either
module.exit_json(changed=False)
else:
module.fail_json(e.message)
try:
for pol in current_policies:
if urllib.unquote(iam.get_role_policy(name, pol).
get_role_policy_result.policy_document) == pdoc:
@ -307,7 +315,10 @@ def main():
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
try:
iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
if region:
iam = boto.iam.connect_to_region(region, **aws_connect_kwargs)
else:
iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
except boto.exception.NoAuthHandlerFound, e:
module.fail_json(msg=str(e))

View file

@ -271,6 +271,33 @@ EXAMPLES = '''
command: reboot
instance_name: database
wait: yes
# Restore a Postgres db instance from a snapshot, wait for it to become available again, and
# then modify it to add your security group. Also, display the new endpoint.
# Note that the "publicly_accessible" option is allowed here just as it is in the AWS CLI
- local_action:
module: rds
command: restore
snapshot: mypostgres-snapshot
instance_name: MyNewInstanceName
region: us-west-2
zone: us-west-2b
subnet: default-vpc-xx441xxx
publicly_accessible: yes
wait: yes
wait_timeout: 600
tags:
Name: pg1_test_name_tag
register: rds
- local_action:
module: rds
command: modify
instance_name: MyNewInstanceName
region: us-west-2
vpc_security_groups: sg-xxx945xx
- debug: msg="The new db endpoint is {{ rds.instance.endpoint }}"
'''

View file

@ -388,8 +388,10 @@ def main():
# tripping of things like * and @.
decoded_name = rset.name.replace(r'\052', '*')
decoded_name = decoded_name.replace(r'\100', '@')
#Need to save these changes in rset, because rset.to_xml() is compared with wanted_rset.to_xml() in the next block
rset.name = decoded_name
if rset.type == type_in and decoded_name.lower() == record_in.lower() and rset.identifier == identifier_in:
if rset.type == type_in and decoded_name.lower() == record_in.lower() and rset.identifier == str(identifier_in):
found_record = True
record['zone'] = zone_in
record['type'] = rset.type

View file

@ -146,7 +146,6 @@ options:
requirements: [ "boto" ]
author:
- "Lester Wade (@lwade)"
- "Ralph Tice (@ralph-tice)"
extends_documentation_fragment: aws
'''

View file

@ -331,6 +331,8 @@ author:
- "Joshua Conner (@joshuaconner)"
- "Pavel Antonov (@softzilla)"
- "Ash Wilson (@smashwilson)"
- "Thomas Steinbach (@ThomasSteinbach)"
- "Philippe Jandot (@zfil)"
requirements:
- "python >= 2.6"
- "docker-py >= 0.3.0"
@ -1536,7 +1538,8 @@ def present(manager, containers, count, name):
delta = count - len(containers.deployed)
if delta > 0:
containers.notice_changed(manager.create_containers(delta))
created = manager.create_containers(delta)
containers.notice_changed(manager.get_inspect_containers(created))
if delta < 0:
# If both running and stopped containers exist, remove
@ -1551,8 +1554,8 @@ def present(manager, containers, count, name):
to_remove.append(c)
manager.stop_containers(to_stop)
containers.notice_changed(manager.get_inspect_containers(to_remove))
manager.remove_containers(to_remove)
containers.notice_changed(to_remove)
def started(manager, containers, count, name):
'''Ensure that exactly `count` matching containers exist and are running.'''
@ -1568,13 +1571,13 @@ def started(manager, containers, count, name):
created = manager.create_containers(delta)
manager.start_containers(created)
containers.notice_changed(created)
containers.notice_changed(manager.get_inspect_containers(created))
if delta < 0:
excess = containers.running[0:-delta]
containers.notice_changed(manager.get_inspect_containers(excess))
manager.stop_containers(excess)
manager.remove_containers(excess)
containers.notice_changed(excess)
def reloaded(manager, containers, count, name):
'''
@ -1608,7 +1611,7 @@ def stopped(manager, containers, count, name):
containers.refresh()
manager.stop_containers(containers.running)
containers.notice_changed(containers.running)
containers.notice_changed(manager.get_inspect_containers(containers.running))
def killed(manager, containers, count, name):
'''Kill any matching containers that are running.'''
@ -1616,7 +1619,7 @@ def killed(manager, containers, count, name):
containers.refresh()
manager.kill_containers(containers.running)
containers.notice_changed(containers.running)
containers.notice_changed(manager.get_inspect_containers(containers.running))
def absent(manager, containers, count, name):
'''Stop and remove any matching containers.'''
@ -1624,8 +1627,8 @@ def absent(manager, containers, count, name):
containers.refresh()
manager.stop_containers(containers.running)
containers.notice_changed(manager.get_inspect_containers(containers.deployed))
manager.remove_containers(containers.deployed)
containers.notice_changed(containers.deployed)
def main():
module = AnsibleModule(
@ -1738,9 +1741,8 @@ def main():
module.exit_json(changed=manager.has_changed(),
msg=manager.get_summary_message(),
summary=manager.counters,
containers=containers.changed,
reload_reasons=manager.get_reload_reason_message(),
ansible_facts=_ansible_facts(manager.get_inspect_containers(containers.changed)))
ansible_facts=_ansible_facts(containers.changed))
except DockerAPIError as e:
module.fail_json(changed=manager.has_changed(), msg="Docker API Error: %s" % e.explanation)

View file

@ -63,8 +63,43 @@ options:
description:
- URL of docker host to issue commands to
required: false
default: unix://var/run/docker.sock
default: ${DOCKER_HOST} or unix://var/run/docker.sock
aliases: []
use_tls:
description:
- Whether to use tls to connect to the docker server. "no" means not to
use tls (and ignore any other tls related parameters). "encrypt" means
to use tls to encrypt the connection to the server. "verify" means to
also verify that the server's certificate is valid for the server
(this both verifies the certificate against the CA and that the
certificate was issued for that host). If this is unspecified, tls will
only be used if one of the other tls options requires it.
choices: [ "no", "encrypt", "verify" ]
version_added: "2.0"
tls_client_cert:
description:
- Path to the PEM-encoded certificate used to authenticate the docker client.
If specified, tls_client_key must be valid
default: ${DOCKER_CERT_PATH}/cert.pem
version_added: "2.0"
tls_client_key:
description:
- Path to the PEM-encoded key used to authenticate the docker client. If
specified, tls_client_cert must be valid
default: ${DOCKER_CERT_PATH}/key.pem
version_added: "2.0"
tls_ca_cert:
description:
- Path to a PEM-encoded certificate authority to secure the Docker connection.
This has no effect if use_tls is encrypt.
default: ${DOCKER_CERT_PATH}/ca.pem
version_added: "2.0"
tls_hostname:
description:
- A hostname to verify against what is supplied in the docker server's
certificate. If unspecified, the hostname is taken from the docker_url.
default: Taken from docker_url
version_added: "2.0"
docker_api_version:
description:
- Remote API version to use. This defaults to the current default as
@ -118,6 +153,7 @@ Remove image from local docker storage:
'''
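For reference, a minimal, hypothetical task exercising the new TLS options (daemon URL, certificate paths and image name are placeholders):

# Build an image against a TLS-protected remote docker daemon,
# verifying the server certificate against a private CA.
- docker_image:
    name: registry.example.com/myapp
    path: /srv/myapp
    state: build
    docker_url: tcp://docker-host.example.com:2376
    use_tls: verify
    tls_client_cert: /etc/docker/cert.pem
    tls_client_key: /etc/docker/key.pem
    tls_ca_cert: /etc/docker/ca.pem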
import re
import os
from urlparse import urlparse
try:
@ -161,11 +197,90 @@ class DockerImageManager:
self.name = self.module.params.get('name')
self.tag = self.module.params.get('tag')
self.nocache = self.module.params.get('nocache')
docker_url = urlparse(module.params.get('docker_url'))
# Connect to the docker server using any configured host and TLS settings.
env_host = os.getenv('DOCKER_HOST')
env_docker_verify = os.getenv('DOCKER_TLS_VERIFY')
env_cert_path = os.getenv('DOCKER_CERT_PATH')
env_docker_hostname = os.getenv('DOCKER_TLS_HOSTNAME')
docker_url = module.params.get('docker_url')
if not docker_url:
if env_host:
docker_url = env_host
else:
docker_url = 'unix://var/run/docker.sock'
docker_api_version = module.params.get('docker_api_version')
tls_client_cert = module.params.get('tls_client_cert', None)
if not tls_client_cert and env_cert_path:
tls_client_cert = os.path.join(env_cert_path, 'cert.pem')
tls_client_key = module.params.get('tls_client_key', None)
if not tls_client_key and env_cert_path:
tls_client_key = os.path.join(env_cert_path, 'key.pem')
tls_ca_cert = module.params.get('tls_ca_cert')
if not tls_ca_cert and env_cert_path:
tls_ca_cert = os.path.join(env_cert_path, 'ca.pem')
tls_hostname = module.params.get('tls_hostname')
if tls_hostname is None:
if env_docker_hostname:
tls_hostname = env_docker_hostname
else:
parsed_url = urlparse(docker_url)
if ':' in parsed_url.netloc:
tls_hostname = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
else:
tls_hostname = parsed_url
if not tls_hostname:
tls_hostname = True
# use_tls can be one of four values:
# no: Do not use tls
# encrypt: Use tls. We may do client auth. We will not verify the server
# verify: Use tls. We may do client auth. We will verify the server
# None: Only use tls if the parameters for client auth were specified
# or tls_ca_cert (which requests verifying the server with
# a specific ca certificate)
use_tls = module.params.get('use_tls')
if use_tls is None and env_docker_verify is not None:
use_tls = 'verify'
tls_config = None
if use_tls != 'no':
params = {}
# Setup client auth
if tls_client_cert and tls_client_key:
params['client_cert'] = (tls_client_cert, tls_client_key)
# We're allowed to verify the connection to the server
if use_tls == 'verify' or (use_tls is None and tls_ca_cert):
if tls_ca_cert:
params['ca_cert'] = tls_ca_cert
params['verify'] = True
params['assert_hostname'] = tls_hostname
else:
params['verify'] = True
params['assert_hostname'] = tls_hostname
elif use_tls == 'encrypt':
params['verify'] = False
if params:
# See https://github.com/docker/docker-py/blob/d39da11/docker/utils/utils.py#L279-L296
docker_url = docker_url.replace('tcp://', 'https://')
tls_config = docker.tls.TLSConfig(**params)
self.client = docker.Client(
base_url=docker_url.geturl(),
base_url=docker_url,
version=module.params.get('docker_api_version'),
timeout=module.params.get('timeout'))
timeout=module.params.get('timeout'),
tls=tls_config)
self.changed = False
self.log = []
self.error_msg = None
@ -244,7 +359,12 @@ def main():
tag = dict(required=False, default="latest"),
nocache = dict(default=False, type='bool'),
state = dict(default='present', choices=['absent', 'present', 'build']),
docker_url = dict(default='unix://var/run/docker.sock'),
use_tls = dict(default=None, choices=['no', 'encrypt', 'verify']),
tls_client_cert = dict(required=False, default=None, type='str'),
tls_client_key = dict(required=False, default=None, type='str'),
tls_ca_cert = dict(required=False, default=None, type='str'),
tls_hostname = dict(required=False, type='str', default=None),
docker_url = dict(),
docker_api_version = dict(required=False,
default=DEFAULT_DOCKER_API_VERSION,
type='str'),
@ -286,6 +406,45 @@ def main():
module.exit_json(failed=failed, changed=manager.has_changed(), msg=msg, image_id=image_id)
except SSLError as e:
if get_platform() == "Darwin":
# Ensure that the environment variables have been set
if "DOCKER_HOST" not in os.environ:
environment_error = '''
It looks like you have not set your docker environment
variables. Please ensure that you have set the requested
variables as instructed when running boot2docker up. If
they are set in .bash_profile you will need to symlink
it to .bashrc.
'''
module.exit_json(failed=True, changed=manager.has_changed(), msg="SSLError: " + str(e) + environment_error)
# DOCKER_HOST is set, so the failure is likely a hostname mismatch
else:
environment_error = '''
You may need to ignore hostname mismatches by setting
tls_hostname=boot2docker in your role. If this does not
resolve the issue, please open an issue at
ansible/ansible-modules-core and ping michaeljs1990
'''
module.exit_json(failed=True, changed=manager.has_changed(), msg="SSLError: " + str(e) + environment_error)
# General error for non-Darwin users
else:
module.exit_json(failed=True, changed=manager.has_changed(), msg="SSLError: " + str(e))
except ConnectionError as e:
if get_platform() == "Darwin" and "DOCKER_HOST" not in os.environ:
# Ensure that the environment variables have been set
environment_error = '''
It looks like you have not set your docker environment
variables. Please ensure that you have set the requested
variables as instructed when running boot2docker up. If
they are set in .bash_profile you will need to symlink
it to .bashrc.
'''
module.exit_json(failed=True, changed=manager.has_changed(), msg="ConnectionError: " + str(e) + environment_error)
module.exit_json(failed=True, changed=manager.has_changed(), msg="ConnectionError: " + str(e))
except DockerAPIError as e:
module.exit_json(failed=True, changed=manager.has_changed(), msg="Docker API error: " + e.explanation)

View file

@ -90,7 +90,7 @@ options:
requirements:
- "python >= 2.6"
- python-keystoneclient
author: "Lorin Hochstein (@lorin)"
author: "Ansible Core Team (deprecated)"
'''
EXAMPLES = '''

View file

@ -122,10 +122,10 @@ def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True),
state=dict(default='present', choices=['absent', 'present']),
network=dict(required=False),
floating_ip_address=dict(required=False),
network=dict(required=False, default=None),
floating_ip_address=dict(required=False, default=None),
reuse=dict(required=False, type='bool', default=False),
fixed_address=dict(required=False),
fixed_address=dict(required=False, default=None),
wait=dict(required=False, type='bool', default=False),
timeout=dict(required=False, type='int', default=60),
)
@ -154,23 +154,12 @@ def main():
msg="server {0} not found".format(server_name_or_id))
if state == 'present':
if floating_ip_address is None:
if reuse:
f_ip = cloud.available_floating_ip(network=network)
else:
f_ip = cloud.create_floating_ip(network=network)
else:
f_ip = _get_floating_ip(cloud, floating_ip_address)
if f_ip is None:
module.fail_json(
msg="floating IP {0} not found".format(
floating_ip_address))
cloud.attach_ip_to_server(
server_id=server['id'], floating_ip_id=f_ip['id'],
cloud.add_ips_to_server(
server=server, ips=floating_ip_address, reuse=reuse,
fixed_address=fixed_address, wait=wait, timeout=timeout)
fip_address = cloud.get_server_public_ip(server)
# Update the floating IP status
f_ip = cloud.get_floating_ip(id=f_ip['id'])
f_ip = _get_floating_ip(cloud, fip_address)
module.exit_json(changed=True, floating_ip=f_ip)
elif state == 'absent':

View file

@ -146,10 +146,14 @@ def main():
" as offered. Delete key first." % name
)
else:
module.exit_json(changed=False, key=keypair)
changed = False
else:
keypair = cloud.create_keypair(name, public_key)
changed = True
new_key = cloud.create_keypair(name, public_key)
module.exit_json(changed=True, key=new_key)
module.exit_json(changed=changed,
key=keypair,
id=keypair['id'])
elif state == 'absent':
if keypair:

View file

@ -146,7 +146,10 @@ def main():
if state == 'present':
if not net:
net = cloud.create_network(name, shared, admin_state_up, external)
module.exit_json(changed=False, network=net, id=net['id'])
changed = True
else:
changed = False
module.exit_json(changed=changed, network=net, id=net['id'])
elif state == 'absent':
if not net:

View file

@ -217,8 +217,13 @@ def main():
rxtx_factor=module.params['rxtx_factor'],
is_public=module.params['is_public']
)
module.exit_json(changed=True, flavor=flavor)
module.exit_json(changed=False, flavor=flavor)
changed=True
else:
changed=False
module.exit_json(changed=changed,
flavor=flavor,
id=flavor['id'])
elif state == 'absent':
if flavor:

View file

@ -61,8 +61,7 @@ options:
security_groups:
description:
- Security group(s) ID(s) or name(s) associated with the port (comma
separated for multiple security groups - no spaces between comma(s)
or YAML list).
separated string or YAML list)
required: false
default: None
no_security_groups:
@ -220,7 +219,7 @@ def _needs_update(module, port, cloud):
'device_id']
compare_dict = ['allowed_address_pairs',
'extra_dhcp_opt']
compare_comma_separated_list = ['security_groups']
compare_list = ['security_groups']
for key in compare_simple:
if module.params[key] is not None and module.params[key] != port[key]:
@ -229,7 +228,7 @@ def _needs_update(module, port, cloud):
if module.params[key] is not None and cmp(module.params[key],
port[key]) != 0:
return True
for key in compare_comma_separated_list:
for key in compare_list:
if module.params[key] is not None and (set(module.params[key]) !=
set(port[key])):
return True
@ -309,7 +308,7 @@ def main():
fixed_ips=dict(default=None),
admin_state_up=dict(default=None),
mac_address=dict(default=None),
security_groups=dict(default=None),
security_groups=dict(default=None, type='list'),
no_security_groups=dict(default=False, type='bool'),
allowed_address_pairs=dict(default=None),
extra_dhcp_opt=dict(default=None),
@ -336,13 +335,11 @@ def main():
try:
cloud = shade.openstack_cloud(**module.params)
if module.params['security_groups']:
if type(module.params['security_groups']) == str:
module.params['security_groups'] = module.params[
'security_groups'].split(',')
# translate security_groups to UUID's if names where provided
module.params['security_groups'] = map(
lambda v: get_security_group_id(module, cloud, v),
module.params['security_groups'])
module.params['security_groups'] = [
get_security_group_id(module, cloud, v)
for v in module.params['security_groups']
]
port = None
network_id = None

View file

@ -164,10 +164,13 @@ def _needs_update(cloud, module, router, network, internal_subnet_ids):
"""
if router['admin_state_up'] != module.params['admin_state_up']:
return True
if router['external_gateway_info'].get('enable_snat', True) != module.params['enable_snat']:
return True
if router['external_gateway_info']:
if router['external_gateway_info'].get('enable_snat', True) != module.params['enable_snat']:
return True
if network:
if router['external_gateway_info']['network_id'] != network['id']:
if not router['external_gateway_info']:
return True
elif router['external_gateway_info']['network_id'] != network['id']:
return True
# check external interfaces
@ -332,7 +335,9 @@ def main():
changed = True
module.exit_json(changed=changed, router=router)
module.exit_json(changed=changed,
router=router,
id=router['id'])
elif state == 'absent':
if not router:

View file

@ -91,7 +91,7 @@ def _system_state_change(module, secgroup):
def main():
argument_spec = openstack_full_argument_spec(
name=dict(required=True),
description=dict(default=None),
description=dict(default=''),
state=dict(default='present', choices=['absent', 'present']),
)

View file

@ -76,7 +76,8 @@ options:
default: None
security_groups:
description:
- The name of the security group to which the instance should be added
- Names of the security groups to which the instance should be
added. This may be a YAML list or a comma separated string.
required: false
default: None
nics:
@ -366,7 +367,7 @@ def _create_server(module, cloud):
flavor=flavor_dict['id'],
nics=nics,
meta=module.params['meta'],
security_groups=module.params['security_groups'].split(','),
security_groups=module.params['security_groups'],
userdata=module.params['userdata'],
config_drive=module.params['config_drive'],
)
@ -459,7 +460,7 @@ def main():
flavor_ram = dict(default=None, type='int'),
flavor_include = dict(default=None),
key_name = dict(default=None),
security_groups = dict(default='default'),
security_groups = dict(default=['default'], type='list'),
nics = dict(default=[], type='list'),
meta = dict(default=None),
userdata = dict(default=None),

View file

@ -15,6 +15,8 @@
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import fnmatch
try:
import shade
from shade import meta
@ -25,36 +27,47 @@ except ImportError:
DOCUMENTATION = '''
---
module: os_server_facts
short_description: Retrieve facts about a compute instance
short_description: Retrieve facts about one or more compute instances
version_added: "2.0"
author: "Monty Taylor (@emonty)"
description:
- Retrieve facts about a server instance from OpenStack.
- Retrieve facts about server instances from OpenStack.
notes:
- Facts are placed in the C(openstack) variable.
- This module creates a new top-level C(openstack_servers) fact, which
contains a list of servers.
requirements:
- "python >= 2.6"
- "shade"
options:
server:
description:
- Name or ID of the instance
required: true
- Restrict results to servers with names matching
this glob expression (e.g., C(web*)).
required: false
default: None
detailed:
description:
- when true, return additional detail about servers at the expense
of additional API calls.
required: false
default: false
extends_documentation_fragment: openstack
'''
EXAMPLES = '''
# Gather facts about a previously created server named vm1
# Gather facts about all servers named C(web*):
- os_server_facts:
cloud: rax-dfw
server: vm1
- debug: var=openstack
server: web*
- debug:
var: openstack_servers
'''
def main():
argument_spec = openstack_full_argument_spec(
server=dict(required=True),
server=dict(required=False),
detailed=dict(required=False, type='bool'),
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(argument_spec, **module_kwargs)
@ -64,10 +77,16 @@ def main():
try:
cloud = shade.openstack_cloud(**module.params)
server = cloud.get_server(module.params['server'])
hostvars = dict(openstack=meta.get_hostvars_from_server(
cloud, server))
module.exit_json(changed=False, ansible_facts=hostvars)
openstack_servers = cloud.list_servers(
detailed=module.params['detailed'])
if module.params['server']:
# filter servers by name
pattern = module.params['server']
openstack_servers = [server for server in openstack_servers
if fnmatch.fnmatch(server['name'], pattern)]
module.exit_json(changed=False, ansible_facts=dict(
openstack_servers=openstack_servers))
except shade.OpenStackCloudException as e:
module.fail_json(msg=e.message)
@ -77,4 +96,3 @@ from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()

View file

@ -302,7 +302,9 @@ def main():
changed = True
else:
changed = False
module.exit_json(changed=changed)
module.exit_json(changed=changed,
subnet=subnet,
id=subnet['id'])
elif state == 'absent':
if not subnet:

View file

@ -743,6 +743,9 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name
# set the new RAM size
spec.set_element_memoryMB(int(vm_hardware['memory_mb']))
changes['memory'] = vm_hardware['memory_mb']
# ===( Reconfigure Network )====#
if vm_nic:
changed = reconfigure_net(vsphere_client, vm, module, esxi, resource_pool, guest, vm_nic, cluster_name)
# ====( Config Memory )====#
if 'num_cpus' in vm_hardware:
@ -814,6 +817,104 @@ def reconfigure_vm(vsphere_client, vm, module, esxi, resource_pool, cluster_name
module.exit_json(changed=False)
def reconfigure_net(vsphere_client, vm, module, esxi, resource_pool, guest, vm_nic, cluster_name=None):
s = vsphere_client
nics = {}
request = VI.ReconfigVM_TaskRequestMsg()
_this = request.new__this(vm._mor)
_this.set_attribute_type(vm._mor.get_attribute_type())
request.set_element__this(_this)
nic_changes = []
datacenter = esxi['datacenter']
# Datacenter managed object reference
dclist = [k for k,
v in vsphere_client.get_datacenters().items() if v == datacenter]
if dclist:
dcmor=dclist[0]
else:
vsphere_client.disconnect()
module.fail_json(msg="Cannot find datacenter named: %s" % datacenter)
dcprops = VIProperty(vsphere_client, dcmor)
nfmor = dcprops.networkFolder._obj
for k,v in vm_nic.iteritems():
nicNum = k[len(k) -1]
if vm_nic[k]['network_type'] == 'dvs':
portgroupKey = find_portgroup_key(module, s, nfmor, vm_nic[k]['network'])
todvs = True
elif vm_nic[k]['network_type'] == 'standard':
todvs = False
# Detect cards that need to be changed and network type (and act accordingly)
for dev in vm.properties.config.hardware.device:
if dev._type in ["VirtualE1000", "VirtualE1000e",
"VirtualPCNet32", "VirtualVmxnet",
"VirtualNmxnet2", "VirtualVmxnet3"]:
devNum = dev.deviceInfo.label[len(dev.deviceInfo.label) - 1]
if devNum == nicNum:
fromdvs = dev.deviceInfo.summary.split(':')[0] == 'DVSwitch'
if todvs and fromdvs:
if dev.backing.port._obj.get_element_portgroupKey() != portgroupKey:
nics[k] = (dev, portgroupKey, 1)
elif fromdvs and not todvs:
nics[k] = (dev, '', 2)
elif not fromdvs and todvs:
nics[k] = (dev, portgroupKey, 3)
elif not fromdvs and not todvs:
if dev.backing._obj.get_element_deviceName() != vm_nic[k]['network']:
nics[k] = (dev, '', 2)
else:
pass
else:
module.exit_json()
if len(nics) > 0:
for nic, obj in nics.iteritems():
"""
1,2 and 3 are used to mark which action should be taken
1 = from a distributed switch to a distributed switch
2 = to a standard switch
3 = to a distributed switch
"""
dev = obj[0]
pgKey = obj[1]
dvsKey = obj[2]
if dvsKey == 1:
dev.backing.port._obj.set_element_portgroupKey(pgKey)
dev.backing.port._obj.set_element_portKey('')
if dvsKey == 3:
dvswitch_uuid = find_dvswitch_uuid(module, s, nfmor, pgKey)
nic_backing_port = VI.ns0.DistributedVirtualSwitchPortConnection_Def(
"nic_backing_port").pyclass()
nic_backing_port.set_element_switchUuid(dvswitch_uuid)
nic_backing_port.set_element_portgroupKey(pgKey)
nic_backing_port.set_element_portKey('')
nic_backing = VI.ns0.VirtualEthernetCardDistributedVirtualPortBackingInfo_Def(
"nic_backing").pyclass()
nic_backing.set_element_port(nic_backing_port)
dev._obj.set_element_backing(nic_backing)
if dvsKey == 2:
nic_backing = VI.ns0.VirtualEthernetCardNetworkBackingInfo_Def(
"nic_backing").pyclass()
nic_backing.set_element_deviceName(vm_nic[nic]['network'])
dev._obj.set_element_backing(nic_backing)
for nic, obj in nics.iteritems():
dev = obj[0]
spec = request.new_spec()
nic_change = spec.new_deviceChange()
nic_change.set_element_device(dev._obj)
nic_change.set_element_operation("edit")
nic_changes.append(nic_change)
spec.set_element_deviceChange(nic_changes)
request.set_element_spec(spec)
ret = vsphere_client._proxy.ReconfigVM_Task(request)._returnval
task = VITask(ret, vsphere_client)
status = task.wait_for_state([task.STATE_SUCCESS, task.STATE_ERROR])
if status == task.STATE_SUCCESS:
return(True)
elif status == task.STATE_ERROR:
module.fail_json(msg="Could not change network %s" % task.get_error_message())
elif len(nics) == 0:
return(False)
def create_vm(vsphere_client, module, esxi, resource_pool, cluster_name, guest, vm_extra_config, vm_hardware, vm_disk, vm_nic, vm_hw_version, state):
datacenter = esxi['datacenter']
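For reference, reconfigure_net() is driven by the module's vm_nic structure; a hypothetical task moving an existing guest's NIC onto a distributed vSwitch portgroup (all host, guest and network names are placeholders):

- vsphere_guest:
    vcenter_hostname: vcenter.example.com
    username: admin
    password: secret
    guest: testvm-001
    state: reconfigured
    esxi:
      datacenter: DC1
      hostname: esx01.example.com
    vm_nic:
      nic1:
        type: vmxnet3
        network: Prod-DVS-Portgroup
        network_type: dvs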

View file

@ -68,7 +68,7 @@ options:
choices: [ "present", "absent", "dump", "import" ]
collation:
description:
- Collation mode
- Collation mode (sorting). This only applies to new tables/databases and does not update existing ones; this is a limitation of MySQL.
required: false
default: null
encoding:
@ -79,7 +79,7 @@ options:
target:
description:
- Location, on the remote host, of the dump file to read from or write to. Uncompressed SQL
files (C(.sql)) as well as bzip2 (C(.bz2)), gzip (C(.gz)) and xz compressed files are supported.
files (C(.sql)) as well as bzip2 (C(.bz2)), gzip (C(.gz)) and xz (Added in 2.0) compressed files are supported.
required: false
notes:
- Requires the MySQLdb Python package on the remote host. For Ubuntu, this
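For reference, since collation and encoding only take effect when a database is created, a minimal, hypothetical task setting both explicitly (database name is a placeholder):

- mysql_db:
    name: appdb
    state: present
    encoding: utf8
    collation: utf8_general_ci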

View file

@ -39,7 +39,7 @@ options:
default: no
password:
description:
- set the user's password
- set the user's password. (Required when adding a user)
required: false
default: null
host:

View file

@ -244,7 +244,8 @@ def main():
db_connection = MySQLdb.connect(host=module.params["login_host"], port=module.params["login_port"], user=login_user, passwd=login_password, db="mysql")
cursor = db_connection.cursor()
except Exception, e:
module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials")
errno, errstr = e.args
module.fail_json(msg="unable to connect to database, check login_user and login_password are correct or ~/.my.cnf has the credentials (ERROR: %s %s)" % (errno, errstr))
mysqlvar_val = getvariable(cursor, mysqlvar)
if mysqlvar_val is None:
module.fail_json(msg="Variable not available \"%s\"" % mysqlvar, changed=False)

View file

@ -95,7 +95,7 @@ notes:
- This module uses I(psycopg2), a Python PostgreSQL database adapter. You must ensure that psycopg2 is installed on
the host before using this module. If the remote host is the PostgreSQL server (which is the default case), then PostgreSQL must also be installed on the remote host. For Ubuntu-based systems, install the C(postgresql), C(libpq-dev), and C(python-psycopg2) packages on the remote host before using this module.
requirements: [ psycopg2 ]
author: "Lorin Hochstein (@lorin)"
author: "Ansible Core Team"
'''
EXAMPLES = '''

View file

@ -137,7 +137,7 @@ notes:
to all users. You may not specify password or role_attr_flags when the
PUBLIC user is specified.
requirements: [ psycopg2 ]
author: "Lorin Hochstein (@lorin)"
author: "Ansible Core Team"
'''
EXAMPLES = '''

View file

@ -19,7 +19,7 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import os
import time
import tempfile
DOCUMENTATION = '''
---
@ -214,7 +214,8 @@ def main():
backup = dict(default=False, type='bool'),
force = dict(default=True, aliases=['thirsty'], type='bool'),
validate = dict(required=False, type='str'),
directory_mode = dict(required=False)
directory_mode = dict(required=False),
remote_src = dict(required=False, type='bool'),
),
add_file_common_args=True,
supports_check_mode=True,
@ -228,6 +229,7 @@ def main():
validate = module.params.get('validate',None)
follow = module.params['follow']
mode = module.params['mode']
remote_src = module.params['remote_src']
if not os.path.exists(src):
module.fail_json(msg="Source %s failed to transfer" % (src))
@ -307,7 +309,12 @@ def main():
(rc,out,err) = module.run_command(validate % src)
if rc != 0:
module.fail_json(msg="failed to validate: rc:%s error:%s" % (rc,err))
module.atomic_move(src, dest)
if remote_src:
tmpdest = tempfile.mkstemp(dir=os.basedir(dest))
shutil.copy2(src, tmpdest)
module.atomic_move(tmpdest, dest)
else:
module.atomic_move(src, dest)
except IOError:
module.fail_json(msg="failed to copy: %s to %s" % (src, dest))
changed = True
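For reference, a minimal, hypothetical task using the new remote_src parameter to copy a file that already exists on the managed host (paths are placeholders):

- copy:
    src: /etc/app/config.sample
    dest: /etc/app/config.conf
    remote_src: yes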

View file

@ -25,8 +25,6 @@ import stat
import fnmatch
import time
import re
import shutil
DOCUMENTATION = '''
---
@ -50,17 +48,18 @@ options:
required: false
default: '*'
description:
- One or more (shell type) file glob patterns, which restrict the list of files to be returned to
those whose basenames match at least one of the patterns specified. Multiple patterns can be
specified using a list.
- One or more (shell or regex) patterns, whose type is controlled by the C(use_regex) option.
- The patterns restrict the list of files to be returned to those whose basenames match at
least one of the patterns specified. Multiple patterns can be specified using a list.
aliases: ['pattern']
contains:
required: false
default: null
description:
- One or more re patterns which should be matched against the file content
- One or more re patterns which should be matched against the file content
paths:
required: true
aliases: [ "name" ]
aliases: [ "name", "path" ]
description:
- List of paths to the file or directory to search. All paths must be fully qualified.
file_type:
@ -108,6 +107,12 @@ options:
choices: [ True, False ]
description:
- Set this to true to retrieve a file's sha1 checksum
use_regex:
required: false
default: "False"
choices: [ True, False ]
description:
- If false, the patterns are file globs (shell); if true, they are Python regexes.
'''
@ -121,8 +126,11 @@ EXAMPLES = '''
# Recursively find /var/tmp files with last access time greater than 3600 seconds
- find: paths="/var/tmp" age="3600" age_stamp=atime recurse=yes
# find /var/log files equal or greater than 10 megabytes ending with .log or .log.gz
- find: paths="/var/tmp" patterns="*.log","*.log.gz" size="10m"
# find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz
- find: paths="/var/tmp" patterns="'*.old','*.log.gz'" size="10m"
# find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz via regex
- find: paths="/var/tmp" patterns="^.*?\.(?:old|log\.gz)$" size="10m" use_regex=True
'''
RETURN = '''
@ -152,13 +160,23 @@ examined:
sample: 34
'''
def pfilter(f, patterns=None):
def pfilter(f, patterns=None, use_regex=False):
'''filter using glob patterns'''
if patterns is None:
return True
for p in patterns:
if fnmatch.fnmatch(f, p):
return True
if use_regex:
for p in patterns:
r = re.compile(p)
if r.match(f):
return True
else:
for p in patterns:
if fnmatch.fnmatch(f, p):
return True
return False
@ -236,8 +254,8 @@ def statinfo(st):
def main():
module = AnsibleModule(
argument_spec = dict(
paths = dict(required=True, aliases=['name'], type='list'),
patterns = dict(default=['*'], type='list'),
paths = dict(required=True, aliases=['name','path'], type='list'),
patterns = dict(default=['*'], type='list', aliases=['pattern']),
contains = dict(default=None, type='str'),
file_type = dict(default="file", choices=['file', 'directory'], type='str'),
age = dict(default=None, type='str'),
@ -247,7 +265,9 @@ def main():
hidden = dict(default="False", type='bool'),
follow = dict(default="False", type='bool'),
get_checksum = dict(default="False", type='bool'),
use_regex = dict(default="False", type='bool'),
),
supports_check_mode=True,
)
params = module.params
@ -292,16 +312,21 @@ def main():
if os.path.basename(fsname).startswith('.') and not params['hidden']:
continue
st = os.stat(fsname)
try:
st = os.stat(fsname)
except:
msg+="%s was skipped as it does not seem to be a valid file or it cannot be accessed\n" % fsname
continue
r = {'path': fsname}
if stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory':
if pfilter(fsobj, params['patterns']) and agefilter(st, now, age, params['age_stamp']):
if pfilter(fsobj, params['patterns'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
r.update(statinfo(st))
filelist.append(r)
elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file':
if pfilter(fsobj, params['patterns']) and \
if pfilter(fsobj, params['patterns'], params['use_regex']) and \
agefilter(st, now, age, params['age_stamp']) and \
sizefilter(st, size) and \
contentfilter(fsname, params['contains']):
@ -314,7 +339,7 @@ def main():
if not params['recurse']:
break
else:
msg+="%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n"
msg+="%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n" % npath
matched = len(filelist)
module.exit_json(files=filelist, changed=False, msg=msg, matched=matched, examined=looked)

View file

@ -2,6 +2,7 @@
# -*- coding: utf-8 -*-
# (c) 2012, Jan-Piet Mens <jpmens () gmail.com>
# (c) 2015, Ales Nosek <anosek.nosek () gmail.com>
#
# This file is part of Ansible
#
@ -28,8 +29,7 @@ description:
- Manage (add, remove, change) individual settings in an INI-style file without having
to manage the file as a whole with, say, M(template) or M(assemble). Adds missing
sections if they don't exist.
- Comments are discarded when the source file is read, and therefore will not
show up in the destination file.
- Before version 2.0, comments were discarded when the source file was read, and therefore would not show up in the destination file.
version_added: "0.9"
options:
dest:
@ -65,6 +65,12 @@ options:
description:
- all arguments accepted by the M(file) module also work here
required: false
state:
description:
- If set to C(absent), the option or section will be removed if present, rather than created.
required: false
default: "present"
choices: [ "present", "absent" ]
notes:
- While it is possible to add an I(option) without specifying a I(value), this makes
no sense.
@ -73,7 +79,9 @@ notes:
Either use M(template) to create a base INI file with a C([default]) section, or use
M(lineinfile) to add the missing line.
requirements: [ ConfigParser ]
author: "Jan-Piet Mens (@jpmens)"
author:
- "Jan-Piet Mens (@jpmens)"
- "Ales Nosek (@noseka1)"
'''
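For reference, a minimal, hypothetical task using the new state option to remove a single setting (file path, section and option are placeholders):

- ini_file:
    dest: /etc/app/app.ini
    section: main
    option: legacy_mode
    state: absent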
EXAMPLES = '''
@ -95,86 +103,83 @@ import sys
def do_ini(module, filename, section=None, option=None, value=None, state='present', backup=False):
changed = False
if (sys.version_info[0] == 2 and sys.version_info[1] >= 7) or sys.version_info[0] >= 3:
cp = ConfigParser.ConfigParser(allow_no_value=True)
else:
cp = ConfigParser.ConfigParser()
cp.optionxform = identity
ini_file = open(filename, 'r')
try:
f = open(filename)
cp.readfp(f)
except IOError:
pass
ini_lines = ini_file.readlines()
# append a fake section line to simplify the logic
ini_lines.append('[')
finally:
ini_file.close()
within_section = not section
section_start = 0
changed = False
if state == 'absent':
if option is None and value is None:
if cp.has_section(section):
cp.remove_section(section)
changed = True
for index, line in enumerate(ini_lines):
if line.startswith('[%s]' % section):
within_section = True
section_start = index
elif line.startswith('['):
if within_section:
if state == 'present':
# insert missing option line at the end of the section
ini_lines.insert(index, '%s = %s\n' % (option, value))
changed = True
elif state == 'absent' and not option:
# remove the entire section
del ini_lines[section_start:index]
changed = True
break
else:
if option is not None:
try:
if cp.get(section, option):
cp.remove_option(section, option)
if within_section and option:
if state == 'present':
# change the existing option line
if re.match('%s *=' % option, line) \
or re.match('# *%s *=' % option, line) \
or re.match('; *%s *=' % option, line):
newline = '%s = %s\n' % (option, value)
changed = ini_lines[index] != newline
ini_lines[index] = newline
if changed:
# remove all possible option occurrences from the rest of the section
index = index + 1
while index < len(ini_lines):
line = ini_lines[index]
if line.startswith('['):
break
if re.match('%s *=' % option, line):
del ini_lines[index]
else:
index = index + 1
break
else:
# comment out the existing option line
if re.match('%s *=' % option, line):
ini_lines[index] = '#%s' % ini_lines[index]
changed = True
except ConfigParser.InterpolationError:
cp.remove_option(section, option)
changed = True
except:
pass
break
if state == 'present':
# remove the fake section line
del ini_lines[-1:]
# DEFAULT section is always there by DEFAULT, so never try to add it.
if not cp.has_section(section) and section.upper() != 'DEFAULT':
if not within_section and option and state == 'present':
ini_lines.append('[%s]\n' % section)
ini_lines.append('%s = %s\n' % (option, value))
changed = True
cp.add_section(section)
changed = True
if option is not None and value is not None:
try:
oldvalue = cp.get(section, option)
if str(value) != str(oldvalue):
cp.set(section, option, value)
changed = True
except ConfigParser.NoSectionError:
cp.set(section, option, value)
changed = True
except ConfigParser.NoOptionError:
cp.set(section, option, value)
changed = True
except ConfigParser.InterpolationError:
cp.set(section, option, value)
changed = True
if changed and not module.check_mode:
if backup:
module.backup_local(filename)
ini_file = open(filename, 'w')
try:
f = open(filename, 'w')
cp.write(f)
except:
module.fail_json(msg="Can't create %s" % filename)
ini_file.writelines(ini_lines)
finally:
ini_file.close()
return changed
# ==============================================================
# identity
def identity(arg):
"""
This function simply returns its argument. It serves as a
replacement for ConfigParser.optionxform, which by default
changes arguments to lower case. The identity function is a
better choice than str() or unicode(), because it is
encoding-agnostic.
"""
return arg
# ==============================================================
# main
@ -212,4 +217,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
main()
if __name__ == '__main__':
main()

View file

@ -42,11 +42,19 @@ options:
aliases: []
get_checksum:
description:
- Whether to return a checksum of the file (currently sha1)
- Whether to return a checksum of the file (default sha1)
required: false
default: yes
aliases: []
version_added: "1.8"
checksum_algorithm:
description:
- Algorithm to determine checksum of file. Will throw an error if the host is unable to use the specified algorithm.
required: false
choices: [ 'sha1', 'sha224', 'sha256', 'sha384', 'sha512' ]
default: sha1
aliases: [ 'checksum_algo' ]
version_added: "2.0"
author: "Bruce Pennypacker (@bpennypacker)"
'''
@ -84,6 +92,9 @@ EXAMPLES = '''
# Don't do md5 checksum
- stat: path=/path/to/myhugefile get_md5=no
# Use sha256 to calculate checksum
- stat: path=/path/to/something checksum_algorithm=sha256
'''
RETURN = '''
@ -254,7 +265,7 @@ stat:
sample: f88fa92d8cf2eeecf4c0a50ccc96d0c0
checksum:
description: hash of the path
returned: success, path exists and user can read stats and path supports hashing
returned: success, path exists, user can read stats, path supports hashing and supplied checksum algorithm is available
type: string
sample: 50ba294cdf28c0d5bcde25708df53346825a429f
pw_name:
@ -281,7 +292,8 @@ def main():
path = dict(required=True),
follow = dict(default='no', type='bool'),
get_md5 = dict(default='yes', type='bool'),
get_checksum = dict(default='yes', type='bool')
get_checksum = dict(default='yes', type='bool'),
checksum_algorithm = dict(default='sha1', type='str', choices=['sha1', 'sha224', 'sha256', 'sha384', 'sha512'], aliases=['checksum_algo'])
),
supports_check_mode = True
)
@@ -291,6 +303,7 @@ def main():
follow = module.params.get('follow')
get_md5 = module.params.get('get_md5')
get_checksum = module.params.get('get_checksum')
checksum_algorithm = module.params.get('checksum_algorithm')
try:
if follow:
@@ -351,8 +364,7 @@ def main():
d['md5'] = None
if S_ISREG(mode) and get_checksum and os.access(path,os.R_OK):
d['checksum'] = module.sha1(path)
d['checksum'] = module.digest_from_file(path, checksum_algorithm)
try:
pw = pwd.getpwuid(st.st_uid)
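For reference, a rough sketch of how a per-algorithm checksum can be computed; module.digest_from_file is assumed to behave roughly like this (helper name and block size are illustrative):

import hashlib

def file_digest(path, algorithm='sha1', blocksize=65536):
    # hashlib.new() accepts the same names offered by checksum_algorithm
    digest = hashlib.new(algorithm)
    f = open(path, 'rb')
    try:
        block = f.read(blocksize)
        while block:
            digest.update(block)
            block = f.read(blocksize)
    finally:
        f.close()
    return digest.hexdigest()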

View file

@@ -25,6 +25,8 @@ import shutil
import tempfile
import base64
import datetime
from distutils.version import LooseVersion
try:
import json
except ImportError:
@@ -143,7 +145,8 @@ options:
version_added: '1.9.2'
# informational: requirements for nodes
requirements: [ urlparse, httplib2 ]
requirements:
- httplib2 >= 0.7.0
author: "Romeo Theriault (@romeotheriault)"
'''
@@ -198,11 +201,15 @@ EXAMPLES = '''
'''
HAS_HTTPLIB2 = True
HAS_HTTPLIB2 = False
try:
import httplib2
except ImportError:
HAS_HTTPLIB2 = False
if LooseVersion(httplib2.__version__) >= LooseVersion('0.7'):
HAS_HTTPLIB2 = True
except (ImportError, AttributeError):
# AttributeError if __version__ is not present
pass
HAS_URLPARSE = True
@@ -382,7 +389,7 @@ def main():
)
if not HAS_HTTPLIB2:
module.fail_json(msg="httplib2 is not installed")
module.fail_json(msg="httplib2 >= 0.7 is not installed")
if not HAS_URLPARSE:
module.fail_json(msg="urlparse is not installed")

packaging/language/pip.py Normal file → Executable file
View file

@@ -90,6 +90,12 @@ options:
required: false
default: null
version_added: "1.0"
editable:
description:
- Pass the editable (C(-e)) flag when installing from version control URLs (svn+, git+, hg+, bzr+).
required: false
default: yes
version_added: "2.0"
chdir:
description:
- cd into this directory before running the command
@@ -121,6 +127,9 @@ EXAMPLES = '''
# Install (MyApp) using one of the remote protocols (bzr+,hg+,git+,svn+). You do not have to supply '-e' option in extra_args.
- pip: name='svn+http://myrepo/svn/MyApp#egg=MyApp'
# Install MyApp using one of the remote protocols (bzr+,hg+,git+) in a non editable way.
- pip: name='git+http://myrepo/app/MyApp' editable=false
# Install (MyApp) from local tarball
- pip: name='file:///path/to/MyApp.tar.gz'
@@ -239,6 +248,7 @@ def main():
virtualenv_python=dict(default=None, required=False, type='str'),
use_mirrors=dict(default='yes', type='bool'),
extra_args=dict(default=None, required=False),
editable=dict(default='yes', type='bool', required=False),
chdir=dict(default=None, required=False, type='path'),
executable=dict(default=None, required=False),
),
@@ -312,15 +322,16 @@ def main():
# Automatically apply -e option to extra_args when source is a VCS url. VCS
# includes those beginning with svn+, git+, hg+ or bzr+
if name:
if name.startswith('svn+') or name.startswith('git+') or \
name.startswith('hg+') or name.startswith('bzr+'):
args_list = [] # used if extra_args is not used at all
if extra_args:
args_list = extra_args.split(' ')
if '-e' not in args_list:
args_list.append('-e')
# Ok, we will reconstruct the option string
extra_args = ' '.join(args_list)
if module.params['editable']:
if name.startswith('svn+') or name.startswith('git+') or \
name.startswith('hg+') or name.startswith('bzr+'):
args_list = [] # used if extra_args is not used at all
if extra_args:
args_list = extra_args.split(' ')
if '-e' not in args_list:
args_list.append('-e')
# Ok, we will reconstruct the option string
extra_args = ' '.join(args_list)
if extra_args:
cmd += ' %s' % extra_args
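Condensed, the editable/VCS handling above amounts to the following sketch (the helper name is hypothetical, not part of the module):

def vcs_extra_args(name, extra_args, editable=True):
    # Append '-e' only for VCS-style URLs, and only when editable is requested
    if editable and name and name.startswith(('svn+', 'git+', 'hg+', 'bzr+')):
        args_list = extra_args.split(' ') if extra_args else []
        if '-e' not in args_list:
            args_list.append('-e')
        return ' '.join(args_list)
    return extra_args

# vcs_extra_args('git+http://myrepo/app/MyApp', None)                 -> '-e'
# vcs_extra_args('git+http://myrepo/app/MyApp', None, editable=False) -> None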

View file

@@ -62,9 +62,9 @@ options:
default: null
install_recommends:
description:
- Corresponds to the C(--no-install-recommends) option for I(apt). Default behavior (C(yes)) replicates apt's default behavior; C(no) does not install recommended packages. Suggested packages are never installed.
- Corresponds to the C(--no-install-recommends) option for I(apt). C(yes) installs recommended packages. C(no) does not install recommended packages. By default, Ansible will use the same defaults as the operating system. Suggested packages are never installed.
required: false
default: yes
default: null
choices: [ "yes", "no" ]
force:
description:
@@ -231,7 +231,7 @@ def package_status(m, pkgname, version, cache, state):
provided_packages = cache.get_providing_packages(pkgname)
if provided_packages:
is_installed = False
# when virtual package providing only one package, look up status of target package
# when virtual package providing only one package, look up status of target package
if cache.is_virtual_package(pkgname) and len(provided_packages) == 1:
package = provided_packages[0]
installed, upgradable, has_files = package_status(m, package.name, version, cache, state='install')
@@ -339,7 +339,7 @@ def expand_pkgspec_from_fnmatches(m, pkgspec, cache):
return new_pkgspec
def install(m, pkgspec, cache, upgrade=False, default_release=None,
install_recommends=True, force=False,
install_recommends=None, force=False,
dpkg_options=expand_dpkg_options(DPKG_OPTIONS),
build_dep=False):
pkg_list = []
@@ -385,8 +385,12 @@ def install(m, pkgspec, cache, upgrade=False, default_release=None,
if default_release:
cmd += " -t '%s'" % (default_release,)
if not install_recommends:
cmd += " --no-install-recommends"
if install_recommends is False:
cmd += " -o APT::Install-Recommends=no"
elif install_recommends is True:
cmd += " -o APT::Install-Recommends=yes"
# install_recommends is None uses the OS default
rc, out, err = m.run_command(cmd)
if rc:
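The tri-state install_recommends behaviour introduced above can be summarised as follows (sketch only; the helper name is hypothetical):

def recommends_option(install_recommends):
    # True/False force a flag; None leaves the apt.conf / OS default untouched
    if install_recommends is False:
        return " -o APT::Install-Recommends=no"
    elif install_recommends is True:
        return " -o APT::Install-Recommends=yes"
    return ""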
@@ -547,7 +551,7 @@ def main():
package = dict(default=None, aliases=['pkg', 'name'], type='list'),
deb = dict(default=None),
default_release = dict(default=None, aliases=['default-release']),
install_recommends = dict(default='yes', aliases=['install-recommends'], type='bool'),
install_recommends = dict(default=None, aliases=['install-recommends'], type='bool'),
force = dict(default='no', type='bool'),
upgrade = dict(choices=['no', 'yes', 'safe', 'full', 'dist']),
dpkg_options = dict(default=DPKG_OPTIONS)
@@ -559,7 +563,7 @@ def main():
if not HAS_PYTHON_APT:
try:
module.run_command('apt-get update && apt-get install python-apt -y -q', use_unsafe_shell=True, check_rc=True)
module.run_command('apt-get update && apt-get install python-apt -y -q --force-yes', use_unsafe_shell=True, check_rc=True)
global apt, apt_pkg
import apt
import apt.debfile

View file

@@ -130,6 +130,15 @@ notes:
that the other packages come from (such as epel-release) then that package
needs to be installed in a separate task. This mimics yum's command line
behaviour.
- 'Yum itself has two types of groups. "Package groups" are specified in the
  rpm itself, while "environment groups" are specified in a separate file
  (usually by the distribution). Unfortunately, this division becomes
  apparent to ansible users because ansible needs to operate on the group
  of packages in a single transaction, and yum requires the two kinds of
  group to be specified in different ways. Package groups are written as
  "@development-tools" and environment groups as "@^gnome-desktop-environment".
  Use the "yum group list" command to see which category a given group
  falls into.'
# informational: requirements for nodes
requirements: [ yum ]
author:
@@ -161,6 +170,9 @@ EXAMPLES = '''
- name: install the 'Development tools' package group
yum: name="@Development tools" state=present
- name: install the 'Gnome desktop' environment group
yum: name="@^gnome-desktop-environment" state=present
'''
# 64k. Number of bytes to read at a time when manually downloading pkgs via a url

View file

@@ -82,7 +82,7 @@ options:
version_added: "1.9"
description:
- "Adds or removes authorized keys for particular user accounts"
author: "Brad Olson (@bradobro)"
author: "Ansible Core Team"
'''
EXAMPLES = '''

View file

@@ -42,6 +42,7 @@ EXAMPLES = '''
- hostname: name=web01
'''
import socket
from distutils.version import LooseVersion
# import module snippets
@@ -481,6 +482,15 @@ class ScientificLinuxHostname(Hostname):
else:
strategy_class = RedHatStrategy
class OracleLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Oracle linux server'
distribution_version = get_distribution_version()
if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"):
strategy_class = SystemdStrategy
else:
strategy_class = RedHatStrategy
class AmazonLinuxHostname(Hostname):
platform = 'Linux'
distribution = 'Amazon'
@@ -554,6 +564,10 @@ def main():
hostname.set_permanent_hostname(name)
changed = True
module.exit_json(changed=changed, name=name, ansible_facts=dict(ansible_hostname=name))
module.exit_json(changed=changed, name=name,
ansible_facts=dict(ansible_hostname=name.split('.')[0],
ansible_nodename=name,
ansible_fqdn=socket.getfqdn(),
ansible_domain='.'.join(socket.getfqdn().split('.')[1:])))
main()
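The extra facts returned here are simple decompositions of the fully-qualified name; for example (sketch, assuming socket.getfqdn() returns 'web01.example.com'):

name = 'web01.example.com'
short_name = name.split('.')[0]              # 'web01'       -> ansible_hostname
domain = '.'.join(name.split('.')[1:])       # 'example.com' -> ansible_domain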

View file

@@ -395,7 +395,7 @@ class LinuxService(Service):
location = dict()
for binary in binaries:
location[binary] = self.module.get_bin_path(binary)
location[binary] = self.module.get_bin_path(binary, opt_dirs=paths)
for initdir in initpaths:
initscript = "%s/%s" % (initdir,self.name)
@@ -403,25 +403,31 @@ class LinuxService(Service):
self.svc_initscript = initscript
def check_systemd():
# verify systemd is installed (by finding systemctl)
if not location.get('systemctl', False):
return False
# Check if init is the systemd command, using comm as cmdline could be symlink
try:
f = open('/proc/1/comm', 'r')
except IOError, err:
# If comm doesn't exist, old kernel, no systemd
return False
# tools must be installed
if location.get('systemctl',False):
for line in f:
if 'systemd' in line:
return True
# this should show if systemd is the boot init system
# these mirror systemd's own sd_boot test http://www.freedesktop.org/software/systemd/man/sd_booted.html
for canary in ["/run/systemd/system/", "/dev/.run/systemd/", "/dev/.systemd/"]:
if os.path.exists(canary):
return True
# If all else fails, check if init is the systemd command, using comm as cmdline could be symlink
try:
f = open('/proc/1/comm', 'r')
except IOError:
# If comm doesn't exist, old kernel, no systemd
return False
for line in f:
if 'systemd' in line:
return True
return False
# Locate a tool to enable/disable a service
if location.get('systemctl',False) and check_systemd():
if check_systemd():
# service is managed by systemd
self.__systemd_unit = self.name
self.svc_cmd = location['systemctl']
@@ -699,7 +705,8 @@ class LinuxService(Service):
(rc, out, err) = self.execute_command("%s --list %s" % (self.enable_cmd, self.name))
if not self.name in out:
self.module.fail_json(msg="service %s does not support chkconfig" % self.name)
state = out.split()[-1]
#TODO: look back on why this is here
#state = out.split()[-1]
# Check if we're already in the correct state
if "3:%s" % action in out and "5:%s" % action in out:
@@ -961,7 +968,6 @@ class FreeBsdService(Service):
self.rcconf_file = rcfile
rc, stdout, stderr = self.execute_command("%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments))
cmd = "%s %s %s %s" % (self.svc_cmd, self.name, 'rcvar', self.arguments)
try:
rcvars = shlex.split(stdout, comments=True)
except:

View file

@@ -27,15 +27,20 @@ import shlex
import os
import subprocess
import sys
import datetime
import traceback
import signal
import time
import syslog
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:]))
def notice(msg):
syslog.syslog(syslog.LOG_NOTICE, msg)
def daemonize_self():
# daemonizing code: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66012
# logger.info("cobblerd started")
try:
pid = os.fork()
if pid > 0:
@@ -65,50 +70,21 @@ def daemonize_self():
os.dup2(dev_null.fileno(), sys.stdout.fileno())
os.dup2(dev_null.fileno(), sys.stderr.fileno())
if len(sys.argv) < 3:
print json.dumps({
"failed" : True,
"msg" : "usage: async_wrapper <jid> <time_limit> <modulescript> <argsfile>. Humans, do not call directly!"
})
sys.exit(1)
jid = "%s.%d" % (sys.argv[1], os.getpid())
time_limit = sys.argv[2]
wrapped_module = sys.argv[3]
argsfile = sys.argv[4]
cmd = "%s %s" % (wrapped_module, argsfile)
def _run_module(wrapped_cmd, jid, job_path):
syslog.openlog('ansible-%s' % os.path.basename(__file__))
syslog.syslog(syslog.LOG_NOTICE, 'Invoked with %s' % " ".join(sys.argv[1:]))
# setup logging directory
logdir = os.path.expanduser("~/.ansible_async")
log_path = os.path.join(logdir, jid)
if not os.path.exists(logdir):
try:
os.makedirs(logdir)
except:
print json.dumps({
"failed" : 1,
"msg" : "could not create: %s" % logdir
})
def _run_command(wrapped_cmd, jid, log_path):
logfile = open(log_path, "w")
logfile.write(json.dumps({ "started" : 1, "ansible_job_id" : jid }))
logfile.close()
logfile = open(log_path, "w")
jobfile = open(job_path, "w")
jobfile.write(json.dumps({ "started" : 1, "ansible_job_id" : jid }))
jobfile.close()
jobfile = open(job_path, "w")
result = {}
outdata = ''
try:
cmd = shlex.split(wrapped_cmd)
script = subprocess.Popen(cmd, shell=False,
stdin=None, stdout=logfile, stderr=logfile)
script = subprocess.Popen(cmd, shell=False, stdin=None, stdout=jobfile, stderr=jobfile)
script.communicate()
outdata = file(log_path).read()
outdata = file(job_path).read()
result = json.loads(outdata)
except (OSError, IOError), e:
@@ -118,83 +94,109 @@ def _run_command(wrapped_cmd, jid, log_path):
"msg": str(e),
}
result['ansible_job_id'] = jid
logfile.write(json.dumps(result))
jobfile.write(json.dumps(result))
except:
result = {
"failed" : 1,
"cmd" : wrapped_cmd,
"data" : outdata, # temporary debug only
"data" : outdata, # temporary notice only
"msg" : traceback.format_exc()
}
result['ansible_job_id'] = jid
logfile.write(json.dumps(result))
logfile.close()
jobfile.write(json.dumps(result))
jobfile.close()
# immediately exit this process, leaving an orphaned process
# running which immediately forks a supervisory timing process
#import logging
#import logging.handlers
####################
## main ##
####################
if __name__ == '__main__':
#logger = logging.getLogger("ansible_async")
#logger.setLevel(logging.WARNING)
#logger.addHandler( logging.handlers.SysLogHandler("/dev/log") )
def debug(msg):
#logger.warning(msg)
pass
if len(sys.argv) < 3:
print json.dumps({
"failed" : True,
"msg" : "usage: async_wrapper <jid> <time_limit> <modulescript> <argsfile>. Humans, do not call directly!"
})
sys.exit(1)
try:
pid = os.fork()
if pid:
# Notify the overlord that the async process started
jid = "%s.%d" % (sys.argv[1], os.getpid())
time_limit = sys.argv[2]
wrapped_module = sys.argv[3]
argsfile = sys.argv[4]
cmd = "%s %s" % (wrapped_module, argsfile)
step = 5
# we need to not return immediately so that the launched command has a chance
# to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile)
# this probably could be done with some IPC later. Modules should always read
# the argsfile at the very first start of their execution anyway
time.sleep(1)
debug("Return async_wrapper task started.")
print json.dumps({ "started" : 1, "ansible_job_id" : jid, "results_file" : log_path })
sys.stdout.flush()
sys.exit(0)
else:
# The actual wrapper process
# setup job output directory
jobdir = os.path.expanduser("~/.ansible_async")
job_path = os.path.join(jobdir, jid)
# Daemonize, so we keep on running
daemonize_self()
if not os.path.exists(jobdir):
try:
os.makedirs(jobdir)
except:
print json.dumps({
"failed" : 1,
"msg" : "could not create: %s" % jobdir
})
# immediately exit this process, leaving an orphaned process
# running which immediately forks a supervisory timing process
# we are now daemonized, create a supervisory process
debug("Starting module and watcher")
try:
pid = os.fork()
if pid:
# Notify the overlord that the async process started
sub_pid = os.fork()
if sub_pid:
# the parent stops the process after the time limit
remaining = int(time_limit)
# set the child process group id to kill all children
os.setpgid(sub_pid, sub_pid)
debug("Start watching %s (%s)"%(sub_pid, remaining))
time.sleep(5)
while os.waitpid(sub_pid, os.WNOHANG) == (0, 0):
debug("%s still running (%s)"%(sub_pid, remaining))
time.sleep(5)
remaining = remaining - 5
if remaining <= 0:
debug("Now killing %s"%(sub_pid))
os.killpg(sub_pid, signal.SIGKILL)
debug("Sent kill to group %s"%sub_pid)
time.sleep(1)
sys.exit(0)
debug("Done in kid B.")
os._exit(0)
else:
# the child process runs the actual module
debug("Start module (%s)"%os.getpid())
_run_command(cmd, jid, log_path)
debug("Module complete (%s)"%os.getpid())
# we need to not return immediately so that the launched command has a chance
# to initialize PRIOR to ansible trying to clean up the launch directory (and argsfile)
# this probably could be done with some IPC later. Modules should always read
# the argsfile at the very first start of their execution anyway
notice("Return async_wrapper task started.")
print json.dumps({ "started" : 1, "ansible_job_id" : jid, "results_file" : job_path })
sys.stdout.flush()
time.sleep(1)
sys.exit(0)
else:
# The actual wrapper process
except Exception, err:
debug("error: %s"%(err))
raise err
# Daemonize, so we keep on running
daemonize_self()
# we are now daemonized, create a supervisory process
notice("Starting module and watcher")
sub_pid = os.fork()
if sub_pid:
# the parent stops the process after the time limit
remaining = int(time_limit)
# set the child process group id to kill all children
os.setpgid(sub_pid, sub_pid)
notice("Start watching %s (%s)"%(sub_pid, remaining))
time.sleep(step)
while os.waitpid(sub_pid, os.WNOHANG) == (0, 0):
notice("%s still running (%s)"%(sub_pid, remaining))
time.sleep(step)
remaining = remaining - step
if remaining <= 0:
notice("Now killing %s"%(sub_pid))
os.killpg(sub_pid, signal.SIGKILL)
notice("Sent kill to group %s"%sub_pid)
time.sleep(1)
sys.exit(0)
notice("Done in kid B.")
sys.exit(0)
else:
# the child process runs the actual module
notice("Start module (%s)"%os.getpid())
_run_module(cmd, jid, job_path)
notice("Module complete (%s)"%os.getpid())
sys.exit(0)
except Exception, err:
notice("error: %s"%(err))
print json.dumps({
"failed" : True,
"msg" : "FATAL ERROR: %s" % str(err)
})
sys.exit(1)
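In outline, the supervisory branch added here is a classic fork-and-watch loop; a condensed standalone sketch (step and names illustrative, not the module's exact code):

import os
import signal
import time

def watch(sub_pid, time_limit, step=5):
    # Put the child in its own process group so the whole tree can be killed,
    # then poll until it exits or the time limit runs out.
    os.setpgid(sub_pid, sub_pid)
    remaining = int(time_limit)
    time.sleep(step)
    while os.waitpid(sub_pid, os.WNOHANG) == (0, 0):
        time.sleep(step)
        remaining -= step
        if remaining <= 0:
            os.killpg(sub_pid, signal.SIGKILL)
            break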

View file

@@ -14,7 +14,7 @@ author: "Benno Joy (@bennojoy)"
module: include_vars
short_description: Load variables from files, dynamically within a task.
description:
- Loads variables from a YAML file dynamically during task runtime. It can work with conditionals, or use host specific variables to determine the path name to load from.
- Loads variables from a YAML/JSON file dynamically during task runtime. It can work with conditionals, or use host specific variables to determine the path name to load from.
options:
free-form:
description:

View file

@@ -24,9 +24,8 @@ author: "Dag Wieers (@dagwieers)"
module: set_fact
short_description: Set host facts from a task
description:
- This module allows setting new variables. Variables are set on a host-by-host basis
just like facts discovered by the setup module.
- These variables will survive between plays.
- This module allows setting new variables. Variables are set on a host-by-host basis just like facts discovered by the setup module.
- These variables will survive between plays during an Ansible run, but will not be saved across executions even if you use a fact cache.
options:
key_value:
description:

View file

@@ -18,12 +18,14 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import socket
import datetime
import time
import sys
import re
import binascii
import datetime
import math
import re
import select
import socket
import sys
import time
HAS_PSUTIL = False
try:
@@ -101,7 +103,7 @@ options:
notes:
- The ability to use search_regex with a port connection was added in 1.7.
requirements: []
author:
author:
- "Jeroen Hoekx (@jhoekx)"
- "John Jarvis (@jarv)"
- "Andrii Radyk (@AnderEnder)"
@@ -125,7 +127,7 @@ EXAMPLES = '''
- wait_for: path=/tmp/foo search_regex=completed
# wait until the lock file is removed
- wait_for: path=/var/lock/file.lock state=absent
- wait_for: path=/var/lock/file.lock state=absent
# wait until the process is finished and pid was destroyed
- wait_for: path=/proc/3466/status state=absent
@@ -320,6 +322,11 @@ def _create_connection( (host, port), connect_timeout):
connect_socket = socket.create_connection( (host, port), connect_timeout)
return connect_socket
def _timedelta_total_seconds(timedelta):
return (
timedelta.microseconds + 0.0 +
(timedelta.seconds + timedelta.days * 24 * 3600) * 10 ** 6) / 10 ** 6
def main():
module = AnsibleModule(
@@ -349,6 +356,10 @@ def main():
state = params['state']
path = params['path']
search_regex = params['search_regex']
if search_regex is not None:
compiled_search_re = re.compile(search_regex, re.MULTILINE)
else:
compiled_search_re = None
if port and path:
module.fail_json(msg="port and path parameter can not both be passed to wait_for")
@@ -404,55 +415,72 @@ def main():
if path:
try:
os.stat(path)
if search_regex:
try:
f = open(path)
try:
if re.search(search_regex, f.read(), re.MULTILINE):
break
else:
time.sleep(1)
finally:
f.close()
except IOError:
time.sleep(1)
pass
else:
break
except OSError, e:
# File not present
if e.errno == 2:
time.sleep(1)
else:
# If anything except file not present, throw an error
if e.errno != 2:
elapsed = datetime.datetime.now() - start
module.fail_json(msg="Failed to stat %s, %s" % (path, e.strerror), elapsed=elapsed.seconds)
# file doesn't exist yet, so continue
else:
# File exists. Are there additional things to check?
if not compiled_search_re:
# nope, succeed!
break
try:
f = open(path)
try:
if re.search(compiled_search_re, f.read()):
# String found, success!
break
finally:
f.close()
except IOError:
pass
elif port:
alt_connect_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.now()))
try:
s = _create_connection( (host, port), connect_timeout)
if search_regex:
s = _create_connection((host, port), min(connect_timeout, alt_connect_timeout))
except:
# Failed to connect by connect_timeout. wait and try again
pass
else:
# Connected -- are there additional conditions?
if compiled_search_re:
data = ''
matched = False
while 1:
data += s.recv(1024)
if not data:
while datetime.datetime.now() < end:
max_timeout = math.ceil(_timedelta_total_seconds(end - datetime.datetime.now()))
(readable, w, e) = select.select([s], [], [], max_timeout)
if not readable:
# No new data. Probably means our timeout
# expired
continue
response = s.recv(1024)
if not response:
# Server shutdown
break
elif re.search(search_regex, data, re.MULTILINE):
data += response
if re.search(compiled_search_re, data):
matched = True
break
# Shutdown the client socket
s.shutdown(socket.SHUT_RDWR)
s.close()
if matched:
s.shutdown(socket.SHUT_RDWR)
s.close()
# Found our string, success!
break
else:
# Connection established, success!
s.shutdown(socket.SHUT_RDWR)
s.close()
break
except:
time.sleep(1)
pass
else:
time.sleep(1)
else:
# Conditions not yet met, wait and try again
time.sleep(1)
else: # while-else
# Timeout expired
elapsed = datetime.datetime.now() - start
if port:
if search_regex:
@@ -485,4 +513,5 @@ def main():
# import module snippets
from ansible.module_utils.basic import *
main()
if __name__ == '__main__':
main()
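The new port + search_regex path boils down to a select()-driven read loop; a self-contained sketch (function name and defaults are illustrative, not from the module):

import re
import select
import socket

def wait_for_match(host, port, pattern, timeout=10):
    # Read from the socket until the pattern matches, the peer closes,
    # or a select() call times out.
    compiled = re.compile(pattern)
    s = socket.create_connection((host, port), timeout)
    data = ''
    try:
        while True:
            readable, _, _ = select.select([s], [], [], timeout)
            if not readable:
                return False            # timed out waiting for data
            chunk = s.recv(1024)
            if not chunk:
                return False            # server closed the connection
            data += chunk
            if compiled.search(data):
                return True             # pattern found
    finally:
        s.close()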

View file

@@ -69,7 +69,7 @@ notes:
- "On Debian, Ubuntu, or Fedora: install I(python-passlib)."
- "On RHEL or CentOS: Enable EPEL, then install I(python-passlib)."
requires: [ passlib>=1.6 ]
author: "Lorin Hochstein (@lorin)"
author: "Ansible Core Team"
"""
EXAMPLES = """

View file

@@ -26,11 +26,9 @@ $result = New-Object psobject @{
};
$win32_os = Get-CimInstance Win32_OperatingSystem
$win32_cs = Get-CimInstance Win32_ComputerSystem
$osversion = [Environment]::OSVersion
$memory = @()
$memory += Get-WmiObject win32_Physicalmemory
$capacity = 0
$memory | foreach {$capacity += $_.Capacity}
$capacity = $win32_cs.TotalPhysicalMemory # Win32_PhysicalMemory is empty on some virtual platforms
$netcfg = Get-WmiObject win32_NetworkAdapterConfiguration
$ActiveNetcfg = @(); $ActiveNetcfg+= $netcfg | where {$_.ipaddress -ne $null}

View file

@@ -34,6 +34,10 @@ options:
description:
- File system path to the MSI file to install
required: true
extra_args:
description:
- Additional arguments to pass to the msiexec.exe command
required: false
state:
description:
- Whether the MSI file should be installed or uninstalled

View file

@@ -15,7 +15,7 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
DOCUMENTATION = r'''
---
module: win_template
version_added: "1.9.2"
@@ -47,8 +47,8 @@ notes:
- "templates are loaded with C(trim_blocks=True)."
- By default, windows line endings are not created in the generated file.
- "In order to ensure windows line endings are in the generated file, add the following header
as the first line of your template: #jinja2: newline_sequence:'\r\n' and ensure each line
of the template ends with \r\n"
as the first line of your template: #jinja2: newline_sequence:'\\r\\n' and ensure each line
of the template ends with \\r\\n"
- Beware fetching files from windows machines when creating templates
because certain tools, such as Powershell ISE, and regedit's export facility
add a Byte Order Mark as the first character of the file, which can cause tracebacks.