Merge remote-tracking branch 'upstream/devel' into feature/iam_policy_present_state_includes_policy_changes
commit 6992d0d465
13 changed files with 93 additions and 50 deletions
VERSION
@@ -1 +1 @@
-2.0.0-0.3.beta1
+2.0.0-0.4.beta2
@@ -565,7 +565,10 @@ def main():
     region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
 
     try:
-        iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
+        if region:
+            iam = boto.iam.connect_to_region(region, **aws_connect_kwargs)
+        else:
+            iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
     except boto.exception.NoAuthHandlerFound, e:
         module.fail_json(msg=str(e))
@@ -107,6 +107,7 @@ import sys
 try:
     import boto
     import boto.iam
+    import boto.ec2
     HAS_BOTO = True
 except ImportError:
     HAS_BOTO = False
@@ -246,7 +247,10 @@ def main():
     region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
 
     try:
-        iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
+        if region:
+            iam = boto.iam.connect_to_region(region, **aws_connect_kwargs)
+        else:
+            iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
     except boto.exception.NoAuthHandlerFound, e:
         module.fail_json(msg=str(e))
@@ -307,7 +307,10 @@ def main():
     region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module)
 
     try:
-        iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
+        if region:
+            iam = boto.iam.connect_to_region(region, **aws_connect_kwargs)
+        else:
+            iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs)
     except boto.exception.NoAuthHandlerFound, e:
         module.fail_json(msg=str(e))
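Note: the three IAM hunks above apply the same connection change: when a region is supplied, the module connects via boto.iam.connect_to_region instead of always building a default IAMConnection. A minimal standalone sketch of that pattern, assuming boto 2.x; the helper name below is illustrative and not part of the commit:

import boto.iam
import boto.iam.connection

def connect_iam(region=None, **aws_connect_kwargs):
    # Hypothetical helper mirroring the pattern in the hunks above.
    if region:
        # Region-specific IAM endpoint; boto returns None for an unknown region name.
        return boto.iam.connect_to_region(region, **aws_connect_kwargs)
    # No region requested: fall back to the default IAM endpoint.
    return boto.iam.connection.IAMConnection(**aws_connect_kwargs)

In the modules themselves the call is wrapped in try/except NoAuthHandlerFound so missing credentials surface through module.fail_json rather than a traceback.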
@@ -146,10 +146,14 @@ def main():
                     " as offered. Delete key first." % name
                 )
             else:
-                module.exit_json(changed=False, key=keypair)
+                changed = False
         else:
-            new_key = cloud.create_keypair(name, public_key)
-            module.exit_json(changed=True, key=new_key)
+            keypair = cloud.create_keypair(name, public_key)
+            changed = True
+
+        module.exit_json(changed=changed,
+                         key=keypair,
+                         id=keypair['id'])
 
     elif state == 'absent':
         if keypair:
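Note: this keypair hunk, like the network, flavor and subnet hunks further down, replaces per-branch module.exit_json calls with a single exit that always reports the changed flag, the resource, and its id. A minimal sketch of the resulting flow, assuming the shade-style cloud object these modules use; the helper name and the cloud.get_keypair lookup are illustrative, not part of the commit:

def ensure_keypair(module, cloud, name, public_key):
    # Illustrative helper only.
    keypair = cloud.get_keypair(name)          # None when the key does not exist yet
    if keypair:
        changed = False                        # nothing created, but still return the key
    else:
        keypair = cloud.create_keypair(name, public_key)
        changed = True
    # Single exit point: callers always receive changed, the key, and its id.
    module.exit_json(changed=changed, key=keypair, id=keypair['id'])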
@@ -146,7 +146,10 @@ def main():
     if state == 'present':
         if not net:
             net = cloud.create_network(name, shared, admin_state_up, external)
-        module.exit_json(changed=False, network=net, id=net['id'])
+            changed = True
+        else:
+            changed = False
+        module.exit_json(changed=changed, network=net, id=net['id'])
 
     elif state == 'absent':
         if not net:
@@ -217,8 +217,13 @@ def main():
                 rxtx_factor=module.params['rxtx_factor'],
                 is_public=module.params['is_public']
             )
-            module.exit_json(changed=True, flavor=flavor)
+            changed=True
         else:
-            module.exit_json(changed=False, flavor=flavor)
+            changed=False
+
+        module.exit_json(changed=changed,
+                         flavor=flavor,
+                         id=flavor['id'])
+
     elif state == 'absent':
         if flavor:
@@ -91,7 +91,7 @@ def _system_state_change(module, secgroup):
 def main():
     argument_spec = openstack_full_argument_spec(
         name=dict(required=True),
-        description=dict(default=None),
+        description=dict(default=''),
         state=dict(default='present', choices=['absent', 'present']),
     )
 
@@ -76,7 +76,8 @@ options:
     default: None
   security_groups:
     description:
-      - The name of the security group to which the instance should be added
+      - Names of the security groups to which the instance should be
+        added. This may be a YAML list or a common separated string.
     required: false
     default: None
   nics:
@@ -366,7 +367,7 @@ def _create_server(module, cloud):
         flavor=flavor_dict['id'],
         nics=nics,
         meta=module.params['meta'],
-        security_groups=module.params['security_groups'].split(','),
+        security_groups=module.params['security_groups'],
         userdata=module.params['userdata'],
         config_drive=module.params['config_drive'],
     )
@@ -459,7 +460,7 @@ def main():
         flavor_ram = dict(default=None, type='int'),
         flavor_include = dict(default=None),
         key_name = dict(default=None),
-        security_groups = dict(default='default'),
+        security_groups = dict(default=['default'], type='list'),
         nics = dict(default=[], type='list'),
         meta = dict(default=None),
         userdata = dict(default=None),
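Note: with security_groups declared as type='list' (default ['default']), Ansible's argument handling turns either a YAML list or a comma separated string into a Python list, so _create_server can pass module.params['security_groups'] straight through instead of calling .split(','). A rough, illustrative approximation of that coercion, assuming a bare string means comma separated names:

def to_security_group_list(value):
    # Illustrative only: approximates what the type='list' argument spec does.
    if isinstance(value, list):
        return value
    # A bare string such as "default,web" is treated as comma separated names.
    return [item.strip() for item in str(value).split(',')]

# to_security_group_list('default,web') -> ['default', 'web']
# to_security_group_list(['default'])   -> ['default']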
@@ -302,7 +302,9 @@ def main():
             changed = True
         else:
             changed = False
-        module.exit_json(changed=changed)
+        module.exit_json(changed=changed,
+                         subnet=subnet,
+                         id=subnet['id'])
 
     elif state == 'absent':
         if not subnet:
@@ -25,8 +25,6 @@ import stat
 import fnmatch
 import time
 import re
-import shutil
-
 
 DOCUMENTATION = '''
 ---
@@ -50,17 +48,18 @@ options:
     required: false
     default: '*'
     description:
-      - One or more (shell type) file glob patterns, which restrict the list of files to be returned to
-        those whose basenames match at least one of the patterns specified. Multiple patterns can be
-        specified using a list.
+      - One or more (shell or regex) patterns, which type is controled by C(use_regex) option.
+      - The patterns restrict the list of files to be returned to those whose basenames match at
+        least one of the patterns specified. Multiple patterns can be specified using a list.
+    aliases: ['pattern']
   contains:
     required: false
     default: null
     description:
-      - One or more re patterns which should be matched against the file content
+      - One or more re patterns which should be matched against the file content
   paths:
     required: true
-    aliases: [ "name" ]
+    aliases: [ "name", "path" ]
     description:
       - List of paths to the file or directory to search. All paths must be fully qualified.
   file_type:
@@ -108,6 +107,12 @@ options:
     choices: [ True, False ]
     description:
       - Set this to true to retrieve a file's sha1 checksum
+  use_regex:
+    required: false
+    default: "False"
+    choices: [ True, False ]
+    description:
+      - If false the patterns are file globs (shell) if true they are python regexes
 '''
 
 
@@ -121,8 +126,11 @@ EXAMPLES = '''
 # Recursively find /var/tmp files with last access time greater than 3600 seconds
 - find: paths="/var/tmp" age="3600" age_stamp=atime recurse=yes
 
-# find /var/log files equal or greater than 10 megabytes ending with .log or .log.gz
-- find: paths="/var/tmp" patterns="*.log","*.log.gz" size="10m"
+# find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz
+- find: paths="/var/tmp" patterns="'*.old','*.log.gz'" size="10m"
+
+# find /var/log files equal or greater than 10 megabytes ending with .old or .log.gz via regex
+- find: paths="/var/tmp" patterns="^.*?\.(?:old|log\.gz)$" size="10m" use_regex=True
 '''
 
 RETURN = '''
@@ -152,13 +160,23 @@ examined:
     sample: 34
 '''
 
-def pfilter(f, patterns=None):
+def pfilter(f, patterns=None, use_regex=False):
     '''filter using glob patterns'''
+
     if patterns is None:
         return True
-    for p in patterns:
-        if fnmatch.fnmatch(f, p):
-            return True
+
+    if use_regex:
+        for p in patterns:
+            r = re.compile(p)
+            if r.match(f):
+                return True
+    else:
+
+        for p in patterns:
+            if fnmatch.fnmatch(f, p):
+                return True
+
     return False
 
 
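Note: with the new use_regex flag, pfilter treats each pattern as a Python regex applied with re.match (anchored at the start of the basename) when use_regex is true, and as an fnmatch glob otherwise. A small usage sketch of the updated function; the sample file names are made up:

# Glob mode (default): shell-style wildcards.
pfilter('error.log', patterns=['*.log'])     # True
pfilter('error.log', patterns=['*.txt'])     # False

# Regex mode: patterns are Python regexes matched from the start of the name.
pfilter('error.log.gz', patterns=[r'^.*?\.(?:old|log\.gz)$'], use_regex=True)   # True
pfilter('error.log', patterns=[r'\.log$'], use_regex=True)                      # False, re.match anchors at the start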
@@ -236,8 +254,8 @@ def statinfo(st):
 def main():
     module = AnsibleModule(
         argument_spec = dict(
-            paths = dict(required=True, aliases=['name'], type='list'),
-            patterns = dict(default=['*'], type='list'),
+            paths = dict(required=True, aliases=['name','path'], type='list'),
+            patterns = dict(default=['*'], type='list', aliases=['pattern']),
             contains = dict(default=None, type='str'),
             file_type = dict(default="file", choices=['file', 'directory'], type='str'),
             age = dict(default=None, type='str'),
@@ -247,6 +265,7 @@ def main():
             hidden = dict(default="False", type='bool'),
             follow = dict(default="False", type='bool'),
             get_checksum = dict(default="False", type='bool'),
+            use_regex = dict(default="False", type='bool'),
         ),
     )
 
@@ -292,16 +311,21 @@ def main():
                     if os.path.basename(fsname).startswith('.') and not params['hidden']:
                         continue
 
-                    st = os.stat(fsname)
+                    try:
+                        st = os.stat(fsname)
+                    except:
+                        msg+="%s was skipped as it does not seem to be a valid file or it cannot be accessed\n" % fsname
+                        continue
+
                     r = {'path': fsname}
                     if stat.S_ISDIR(st.st_mode) and params['file_type'] == 'directory':
-                        if pfilter(fsobj, params['patterns']) and agefilter(st, now, age, params['age_stamp']):
+                        if pfilter(fsobj, params['patterns'], params['use_regex']) and agefilter(st, now, age, params['age_stamp']):
 
                             r.update(statinfo(st))
                             filelist.append(r)
 
                     elif stat.S_ISREG(st.st_mode) and params['file_type'] == 'file':
-                        if pfilter(fsobj, params['patterns']) and \
+                        if pfilter(fsobj, params['patterns'], params['use_regex']) and \
                            agefilter(st, now, age, params['age_stamp']) and \
                            sizefilter(st, size) and \
                            contentfilter(fsname, params['contains']):
@@ -314,7 +338,7 @@ def main():
                 if not params['recurse']:
                     break
         else:
-            msg+="%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n"
+            msg+="%s was skipped as it does not seem to be a valid directory or it cannot be accessed\n" % npath
 
     matched = len(filelist)
     module.exit_json(files=filelist, changed=False, msg=msg, matched=matched, examined=looked)
@@ -481,6 +481,15 @@ class ScientificLinuxHostname(Hostname):
     else:
         strategy_class = RedHatStrategy
 
+class OracleLinuxHostname(Hostname):
+    platform = 'Linux'
+    distribution = 'Oracle linux server'
+    distribution_version = get_distribution_version()
+    if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"):
+        strategy_class = SystemdStrategy
+    else:
+        strategy_class = RedHatStrategy
+
 class AmazonLinuxHostname(Hostname):
     platform = 'Linux'
     distribution = 'Amazon'
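Note: the new OracleLinuxHostname class picks its strategy from the distribution version, using SystemdStrategy on Oracle Linux 7 and later and RedHatStrategy otherwise. A standalone sketch of that version check, with strings standing in for the strategy classes:

from distutils.version import LooseVersion

def pick_strategy(distribution_version):
    # Mirrors the class-level check above; return values are placeholders.
    if distribution_version and LooseVersion(distribution_version) >= LooseVersion("7"):
        return "SystemdStrategy"
    return "RedHatStrategy"

# pick_strategy("7.1") -> "SystemdStrategy"
# pick_strategy("6.7") -> "RedHatStrategy"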
@@ -403,22 +403,7 @@ class LinuxService(Service):
             self.svc_initscript = initscript
 
         def check_systemd():
-            # verify systemd is installed (by finding systemctl)
-            if not location.get('systemctl', False):
-                return False
-
-            # Check if init is the systemd command, using comm as cmdline could be symlink
-            try:
-                f = open('/proc/1/comm', 'r')
-            except IOError, err:
-                # If comm doesn't exist, old kernel, no systemd
-                return False
-
-            for line in f:
-                if 'systemd' in line:
-                    return True
-
-            return False
+            return os.path.exists("/run/systemd/system/") or os.path.exists("/dev/.run/systemd/") or os.path.exists("/dev/.systemd/")
 
         # Locate a tool to enable/disable a service
         if location.get('systemctl',False) and check_systemd():
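Note: the rewritten check_systemd no longer parses /proc/1/comm; it simply checks for the run-time directories systemd creates. A standalone sketch of the same probe:

import os

def is_systemd_running():
    # Same path probe as check_systemd above: /run/systemd/system is the
    # canonical marker, the /dev/.run and /dev/.systemd paths cover older layouts.
    return (os.path.exists("/run/systemd/system/")
            or os.path.exists("/dev/.run/systemd/")
            or os.path.exists("/dev/.systemd/"))

# Example: pick the systemctl backend only when systemd is actually running as init.
# if location.get('systemctl') and is_systemd_running(): use systemctl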