[cloud] new module lambda_policy (PR #24951)

- Fixes to lambda
- Reformatting and tests for lambda_facts
- lambda module integration test
- Switch lambda and lambda_facts to AnsibleAWSModule
- Get the account ID from STS, then IAM GetUser, and finally from the error message

parent c36c34ef7e
commit fbec5ab12d

16 changed files with 1481 additions and 123 deletions
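A condensed sketch (not taken from the diff; the argument spec, function name and messages below are illustrative only) of the AnsibleAWSModule / fail_json_aws() pattern that this commit moves lambda and lambda_facts to:

from ansible.module_utils.aws.core import AnsibleAWSModule
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn

try:
    from botocore.exceptions import ClientError
except ImportError:
    pass  # the missing dependency is reported by AnsibleAWSModule itself

def main():
    module = AnsibleAWSModule(argument_spec=dict(name=dict(required=True)),
                              supports_check_mode=True)
    region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
    client = boto3_conn(module, conn_type='client', resource='lambda',
                        region=region, endpoint=ec2_url, **aws_connect_kwargs)
    try:
        config = client.get_function_configuration(FunctionName=module.params['name'])
    except ClientError as e:
        # fail_json_aws attaches the botocore error details and traceback,
        # replacing the older fail_json(msg=str(e)) calls removed in this diff
        module.fail_json_aws(e, msg="Trying to describe function")
    module.exit_json(changed=False, configuration=config)

if __name__ == '__main__':
    main()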
|
@@ -262,7 +262,7 @@ def main():
|
|||
|
||||
if invoke_params['InvocationType'] == 'RequestResponse':
|
||||
try:
|
||||
results['output'] = json.loads(response['Payload'].read())
|
||||
results['output'] = json.loads(response['Payload'].read().decode('utf8'))
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Failed while decoding function return value", exception=traceback.format_exc())
|
||||
|
||||
|
|
|
@@ -36,19 +36,18 @@ options:
|
|||
state:
|
||||
description:
|
||||
- Create or delete Lambda function
|
||||
required: false
|
||||
default: present
|
||||
choices: [ 'present', 'absent' ]
|
||||
runtime:
|
||||
description:
|
||||
- The runtime environment for the Lambda function you are uploading. Required when creating a function. Use parameters as described in boto3 docs.
|
||||
Current example runtime environments are nodejs, nodejs4.3, java8 or python2.7
|
||||
required: true
|
||||
- Required when C(state=present)
|
||||
role:
|
||||
description:
|
||||
- The Amazon Resource Name (ARN) of the IAM role that Lambda assumes when it executes your function to access any other Amazon Web Services (AWS)
|
||||
resources. You may use the bare ARN if the role belongs to the same AWS account.
|
||||
default: null
|
||||
- Required when C(state=present)
|
||||
handler:
|
||||
description:
|
||||
- The function within your code that Lambda calls to begin execution
|
||||
|
@@ -56,17 +55,21 @@ options:
|
|||
zip_file:
|
||||
description:
|
||||
- A .zip file containing your deployment package
|
||||
- If C(state=present) then either zip_file or s3_bucket must be present.
|
||||
required: false
|
||||
default: null
|
||||
aliases: [ 'src' ]
|
||||
s3_bucket:
|
||||
description:
|
||||
- Amazon S3 bucket name where the .zip file containing your deployment package is stored
|
||||
- If C(state=present) then either zip_file or s3_bucket must be present.
|
||||
- s3_bucket and s3_key are required together
|
||||
required: false
|
||||
default: null
|
||||
s3_key:
|
||||
description:
|
||||
- The Amazon S3 object (the deployment package) key name you want to upload
|
||||
- s3_bucket and s3_key are required together
|
||||
required: false
|
||||
default: null
|
||||
s3_object_version:
|
||||
|
@@ -189,30 +192,53 @@ output:
|
|||
}
|
||||
'''
|
||||
|
||||
# Import from Python standard library
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.aws.core import AnsibleAWSModule
|
||||
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn, camel_dict_to_snake_dict
|
||||
import base64
|
||||
import hashlib
|
||||
import re
import traceback
|
||||
|
||||
try:
|
||||
import botocore
|
||||
HAS_BOTOCORE = True
|
||||
from botocore.exceptions import ClientError, ValidationError, ParamValidationError
|
||||
except ImportError:
|
||||
HAS_BOTOCORE = False
|
||||
pass # protected by AnsibleAWSModule
|
||||
|
||||
|
||||
def get_account_id(module, region=None, endpoint=None, **aws_connect_kwargs):
|
||||
"""return the account id we are currently working on
|
||||
|
||||
get_account_id tries to find out the account that we are working
|
||||
on. It's not guaranteed that this will be easy so we try in
|
||||
several different ways. Giving either IAM or STS privileges to
|
||||
the account should be enough to permit this.
|
||||
"""
|
||||
try:
|
||||
import boto3
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
sts_client = boto3_conn(module, conn_type='client', resource='sts',
|
||||
region=region, endpoint=endpoint, **aws_connect_kwargs)
|
||||
account_id = sts_client.get_caller_identity().get('Account')
|
||||
except ClientError:
|
||||
try:
|
||||
iam_client = boto3_conn(module, conn_type='client', resource='iam',
|
||||
region=region, endpoint=endpoint, **aws_connect_kwargs)
|
||||
account_id = iam_client.get_user()['User']['Arn'].split(':')[4]
|
||||
except ClientError as e:
|
||||
account_id = None
if e.response['Error']['Code'] == 'AccessDenied':
    except_msg = to_native(e.message)
    match = re.search(r"arn:aws:iam::([0-9]{12,32}):\w+/", except_msg)
    account_id = match.group(1) if match else None
if account_id is None:
    module.fail_json_aws(e, msg="getting account information")
|
||||
except Exception as e:
|
||||
module.fail_json_aws(e, msg="getting account information")
|
||||
return account_id
|
||||
|
||||
|
||||
def get_current_function(connection, function_name, qualifier=None):
|
||||
try:
|
||||
if qualifier is not None:
|
||||
return connection.get_function(FunctionName=function_name,
|
||||
Qualifier=qualifier)
|
||||
return connection.get_function(FunctionName=function_name, Qualifier=qualifier)
|
||||
return connection.get_function(FunctionName=function_name)
|
||||
except botocore.exceptions.ClientError:
|
||||
except ClientError:
|
||||
return None
|
||||
|
||||
|
||||
|
@@ -229,25 +255,23 @@ def sha256sum(filename):
|
|||
|
||||
|
||||
def main():
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
name=dict(type='str', required=True),
|
||||
state=dict(type='str', default='present', choices=['present', 'absent']),
|
||||
runtime=dict(type='str', required=True),
|
||||
role=dict(type='str', default=None),
|
||||
handler=dict(type='str', default=None),
|
||||
zip_file=dict(type='str', default=None, aliases=['src']),
|
||||
s3_bucket=dict(type='str'),
|
||||
s3_key=dict(type='str'),
|
||||
s3_object_version=dict(type='str', default=None),
|
||||
description=dict(type='str', default=''),
|
||||
argument_spec = dict(
|
||||
name=dict(required=True),
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
runtime=dict(),
|
||||
role=dict(),
|
||||
handler=dict(),
|
||||
zip_file=dict(aliases=['src']),
|
||||
s3_bucket=dict(),
|
||||
s3_key=dict(),
|
||||
s3_object_version=dict(),
|
||||
description=dict(default=''),
|
||||
timeout=dict(type='int', default=3),
|
||||
memory_size=dict(type='int', default=128),
|
||||
vpc_subnet_ids=dict(type='list', default=None),
|
||||
vpc_security_group_ids=dict(type='list', default=None),
|
||||
environment_variables=dict(type='dict', default=None),
|
||||
dead_letter_arn=dict(type='str', default=None),
|
||||
)
|
||||
vpc_subnet_ids=dict(type='list'),
|
||||
vpc_security_group_ids=dict(type='list'),
|
||||
environment_variables=dict(type='dict'),
|
||||
dead_letter_arn=dict(),
|
||||
)
|
||||
|
||||
mutually_exclusive = [['zip_file', 's3_key'],
|
||||
|
@@ -257,10 +281,13 @@ def main():
|
|||
required_together = [['s3_key', 's3_bucket'],
|
||||
['vpc_subnet_ids', 'vpc_security_group_ids']]
|
||||
|
||||
module = AnsibleModule(argument_spec=argument_spec,
|
||||
required_if = [['state', 'present', ['runtime', 'handler', 'role']]]
|
||||
|
||||
module = AnsibleAWSModule(argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
mutually_exclusive=mutually_exclusive,
|
||||
required_together=required_together)
|
||||
required_together=required_together,
|
||||
required_if=required_if)
|
||||
|
||||
name = module.params.get('name')
|
||||
state = module.params.get('state').lower()
|
||||
|
@@ -282,12 +309,6 @@ def main():
|
|||
check_mode = module.check_mode
|
||||
changed = False
|
||||
|
||||
if not HAS_BOTOCORE:
|
||||
module.fail_json(msg='Python module "botocore" is missing, please install it')
|
||||
|
||||
if not HAS_BOTO3:
|
||||
module.fail_json(msg='Python module "boto3" is missing, please install it')
|
||||
|
||||
region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module, boto3=True)
|
||||
if not region:
|
||||
module.fail_json(msg='region must be specified')
|
||||
|
@@ -295,20 +316,16 @@ def main():
|
|||
try:
|
||||
client = boto3_conn(module, conn_type='client', resource='lambda',
|
||||
region=region, endpoint=ec2_url, **aws_connect_kwargs)
|
||||
except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
|
||||
module.fail_json(msg=str(e))
|
||||
except (ClientError, ValidationError) as e:
|
||||
module.fail_json_aws(e, msg="Trying to connect to AWS")
|
||||
|
||||
if state == 'present':
|
||||
if role.startswith('arn:aws:iam'):
|
||||
role_arn = role
|
||||
else:
|
||||
# get account ID and assemble ARN
|
||||
try:
|
||||
iam_client = boto3_conn(module, conn_type='client', resource='iam',
|
||||
region=region, endpoint=ec2_url, **aws_connect_kwargs)
|
||||
account_id = iam_client.get_user()['User']['Arn'].split(':')[4]
|
||||
account_id = get_account_id(module, region=region, endpoint=ec2_url, **aws_connect_kwargs)
|
||||
role_arn = 'arn:aws:iam::{0}:role/{1}'.format(account_id, role)
|
||||
except (botocore.exceptions.ClientError, botocore.exceptions.ValidationError) as e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
# Get function configuration if present, False otherwise
|
||||
current_function = get_current_function(client, name)
|
||||
|
@@ -334,7 +351,8 @@ def main():
|
|||
func_kwargs.update({'Timeout': timeout})
|
||||
if memory_size and current_config['MemorySize'] != memory_size:
|
||||
func_kwargs.update({'MemorySize': memory_size})
|
||||
if (environment_variables is not None) and (current_config.get('Environment', {}).get('Variables', {}) != environment_variables):
|
||||
if (environment_variables is not None) and (current_config.get(
|
||||
'Environment', {}).get('Variables', {}) != environment_variables):
|
||||
func_kwargs.update({'Environment': {'Variables': environment_variables}})
|
||||
if dead_letter_arn is not None:
|
||||
if current_config.get('DeadLetterConfig'):
|
||||
|
@@ -350,11 +368,8 @@ def main():
|
|||
|
||||
# If VPC configuration is desired
|
||||
if vpc_subnet_ids or vpc_security_group_ids:
|
||||
if len(vpc_subnet_ids) < 1:
|
||||
module.fail_json(msg='At least 1 subnet is required')
|
||||
|
||||
if len(vpc_security_group_ids) < 1:
|
||||
module.fail_json(msg='At least 1 security group is required')
|
||||
if not vpc_subnet_ids or not vpc_security_group_ids:
|
||||
module.fail_json(msg='vpc connectivity requires at least one security group and one subnet')
|
||||
|
||||
if 'VpcConfig' in current_config:
|
||||
# Compare VPC config with current config
|
||||
|
@@ -365,13 +380,12 @@ def main():
|
|||
vpc_security_group_ids_changed = sorted(vpc_security_group_ids) != sorted(current_vpc_security_group_ids)
|
||||
|
||||
if 'VpcConfig' not in current_config or subnet_net_id_changed or vpc_security_group_ids_changed:
|
||||
func_kwargs.update({'VpcConfig':
|
||||
{'SubnetIds': vpc_subnet_ids,'SecurityGroupIds': vpc_security_group_ids}})
|
||||
new_vpc_config = {'SubnetIds': vpc_subnet_ids,
|
||||
'SecurityGroupIds': vpc_security_group_ids}
|
||||
func_kwargs.update({'VpcConfig': new_vpc_config})
|
||||
else:
|
||||
# No VPC configuration is desired, assure VPC config is empty when present in current config
|
||||
if ('VpcConfig' in current_config and
|
||||
'VpcId' in current_config['VpcConfig'] and
|
||||
current_config['VpcConfig']['VpcId'] != ''):
|
||||
if 'VpcConfig' in current_config and current_config['VpcConfig'].get('VpcId'):
|
||||
func_kwargs.update({'VpcConfig': {'SubnetIds': [], 'SecurityGroupIds': []}})
|
||||
|
||||
# Upload new configuration if configuration has changed
|
||||
|
@@ -381,8 +395,8 @@ def main():
|
|||
response = client.update_function_configuration(**func_kwargs)
|
||||
current_version = response['Version']
|
||||
changed = True
|
||||
except (botocore.exceptions.ParamValidationError, botocore.exceptions.ClientError) as e:
|
||||
module.fail_json(msg=str(e))
|
||||
except (ParamValidationError, ClientError) as e:
|
||||
module.fail_json_aws(e, msg="Trying to update lambda configuration")
|
||||
|
||||
# Update code configuration
|
||||
code_kwargs = {'FunctionName': name, 'Publish': True}
|
||||
|
@@ -408,7 +422,7 @@ def main():
|
|||
encoded_zip = f.read()
|
||||
code_kwargs.update({'ZipFile': encoded_zip})
|
||||
except IOError as e:
|
||||
module.fail_json(msg=str(e))
|
||||
module.fail_json(msg=str(e), exception=traceback.format_exc())
|
||||
|
||||
# Upload new code if needed (e.g. code checksum has changed)
|
||||
if len(code_kwargs) > 2:
|
||||
|
@@ -417,8 +431,8 @@ def main():
|
|||
response = client.update_function_code(**code_kwargs)
|
||||
current_version = response['Version']
|
||||
changed = True
|
||||
except (botocore.exceptions.ParamValidationError, botocore.exceptions.ClientError) as e:
|
||||
module.fail_json(msg=str(e))
|
||||
except (ParamValidationError, ClientError) as e:
|
||||
module.fail_json_aws(e, msg="Trying to upload new code")
|
||||
|
||||
# Describe function code and configuration
|
||||
response = get_current_function(client, name, qualifier=current_version)
|
||||
|
@@ -444,22 +458,26 @@ def main():
|
|||
|
||||
code = {'ZipFile': zip_content}
|
||||
except IOError as e:
|
||||
module.fail_json(msg=str(e))
|
||||
module.fail_json(msg=str(e), exception=traceback.format_exc())
|
||||
|
||||
else:
|
||||
module.fail_json(msg='Either S3 object or path to zipfile required')
|
||||
|
||||
func_kwargs = {'FunctionName': name,
|
||||
'Description': description,
|
||||
'Publish': True,
|
||||
'Runtime': runtime,
|
||||
'Role': role_arn,
|
||||
'Handler': handler,
|
||||
'Code': code,
|
||||
'Timeout': timeout,
|
||||
'MemorySize': memory_size,
|
||||
}
|
||||
|
||||
if description is not None:
|
||||
func_kwargs.update({'Description': description})
|
||||
|
||||
if handler is not None:
|
||||
func_kwargs.update({'Handler': handler})
|
||||
|
||||
if environment_variables:
|
||||
func_kwargs.update({'Environment': {'Variables': environment_variables}})
|
||||
|
||||
|
@@ -468,11 +486,8 @@ def main():
|
|||
|
||||
# If VPC configuration is given
|
||||
if vpc_subnet_ids or vpc_security_group_ids:
|
||||
if len(vpc_subnet_ids) < 1:
|
||||
module.fail_json(msg='At least 1 subnet is required')
|
||||
|
||||
if len(vpc_security_group_ids) < 1:
|
||||
module.fail_json(msg='At least 1 security group is required')
|
||||
if not vpc_subnet_ids or not vpc_security_group_ids:
|
||||
module.fail_json(msg='vpc connectivity requires at least one security group and one subnet')
|
||||
|
||||
func_kwargs.update({'VpcConfig': {'SubnetIds': vpc_subnet_ids,
|
||||
'SecurityGroupIds': vpc_security_group_ids}})
|
||||
|
@@ -483,8 +498,8 @@ def main():
|
|||
response = client.create_function(**func_kwargs)
|
||||
current_version = response['Version']
|
||||
changed = True
|
||||
except (botocore.exceptions.ParamValidationError, botocore.exceptions.ClientError) as e:
|
||||
module.fail_json(msg=str(e))
|
||||
except (ParamValidationError, ClientError) as e:
|
||||
module.fail_json_aws(e, msg="Trying to create function")
|
||||
|
||||
response = get_current_function(client, name, qualifier=current_version)
|
||||
if not response:
|
||||
|
@@ -497,8 +512,8 @@ def main():
|
|||
if not check_mode:
|
||||
client.delete_function(FunctionName=name)
|
||||
changed = True
|
||||
except (botocore.exceptions.ParamValidationError, botocore.exceptions.ClientError) as e:
|
||||
module.fail_json(msg=str(e))
|
||||
except (ParamValidationError, ClientError) as e:
|
||||
module.fail_json_aws(e, msg="Trying to delete Lambda function")
|
||||
|
||||
module.exit_json(changed=changed)
|
||||
|
||||
|
@@ -507,8 +522,5 @@ def main():
|
|||
module.exit_json(changed=changed)
|
||||
|
||||
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
|
|
@@ -96,15 +96,18 @@ lambda_facts.function.TheName:
|
|||
type: dict
|
||||
'''
|
||||
|
||||
from ansible.module_utils.aws.core import AnsibleAWSModule
|
||||
from ansible.module_utils.ec2 import camel_dict_to_snake_dict, get_aws_connection_info, boto3_conn
|
||||
import json
|
||||
import datetime
|
||||
import sys
|
||||
import re
|
||||
|
||||
|
||||
try:
|
||||
import boto3
|
||||
from botocore.exceptions import ClientError
|
||||
HAS_BOTO3 = True
|
||||
except ImportError:
|
||||
HAS_BOTO3 = False
|
||||
pass # protected by AnsibleAWSModule
|
||||
|
||||
|
||||
def fix_return(node):
|
||||
|
@@ -155,7 +158,7 @@ def alias_details(client, module):
|
|||
if e.response['Error']['Code'] == 'ResourceNotFoundException':
|
||||
lambda_facts.update(aliases=[])
|
||||
else:
|
||||
module.fail_json(msg='Unable to get {0} aliases, error: {1}'.format(function_name, e))
|
||||
module.fail_json_aws(e, msg="Trying to get aliases")
|
||||
else:
|
||||
module.fail_json(msg='Parameter function_name required for query=aliases.')
|
||||
|
||||
|
@@ -209,7 +212,7 @@ def config_details(client, module):
|
|||
if e.response['Error']['Code'] == 'ResourceNotFoundException':
|
||||
lambda_facts.update(function={})
|
||||
else:
|
||||
module.fail_json(msg='Unable to get {0} configuration, error: {1}'.format(function_name, e))
|
||||
module.fail_json_aws(e, msg="Trying to get {0} configuration".format(function_name))
|
||||
else:
|
||||
params = dict()
|
||||
if module.params.get('max_items'):
|
||||
|
@@ -224,7 +227,7 @@ def config_details(client, module):
|
|||
if e.response['Error']['Code'] == 'ResourceNotFoundException':
|
||||
lambda_facts.update(function_list=[])
|
||||
else:
|
||||
module.fail_json(msg='Unable to get function list, error: {0}'.format(e))
|
||||
module.fail_json_aws(e, msg="Trying to get function list")
|
||||
|
||||
functions = dict()
|
||||
for func in lambda_facts.pop('function_list', []):
|
||||
|
@@ -265,7 +268,7 @@ def mapping_details(client, module):
|
|||
if e.response['Error']['Code'] == 'ResourceNotFoundException':
|
||||
lambda_facts.update(mappings=[])
|
||||
else:
|
||||
module.fail_json(msg='Unable to get source event mappings, error: {0}'.format(e))
|
||||
module.fail_json_aws(e, msg="Trying to get source event mappings")
|
||||
|
||||
if function_name:
|
||||
return {function_name: camel_dict_to_snake_dict(lambda_facts)}
|
||||
|
@@ -296,7 +299,7 @@ def policy_details(client, module):
|
|||
if e.response['Error']['Code'] == 'ResourceNotFoundException':
|
||||
lambda_facts.update(policy={})
|
||||
else:
|
||||
module.fail_json(msg='Unable to get {0} policy, error: {1}'.format(function_name, e))
|
||||
module.fail_json_aws(e, msg="Trying to get {0} policy".format(function_name))
|
||||
else:
|
||||
module.fail_json(msg='Parameter function_name required for query=policy.')
|
||||
|
||||
|
@@ -329,7 +332,7 @@ def version_details(client, module):
|
|||
if e.response['Error']['Code'] == 'ResourceNotFoundException':
|
||||
lambda_facts.update(versions=[])
|
||||
else:
|
||||
module.fail_json(msg='Unable to get {0} versions, error: {1}'.format(function_name, e))
|
||||
module.fail_json_aws(e, msg="Trying to get {0} versions".format(function_name))
|
||||
else:
|
||||
module.fail_json(msg='Parameter function_name required for query=versions.')
|
||||
|
||||
|
@@ -342,26 +345,19 @@ def main():
|
|||
|
||||
:return dict: ansible facts
|
||||
"""
|
||||
argument_spec = ec2_argument_spec()
|
||||
argument_spec.update(
|
||||
dict(
|
||||
argument_spec = dict(
|
||||
function_name=dict(required=False, default=None, aliases=['function', 'name']),
|
||||
query=dict(required=False, choices=['aliases', 'all', 'config', 'mappings', 'policy', 'versions'], default='all'),
|
||||
event_source_arn=dict(required=False, default=None)
|
||||
)
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
module = AnsibleAWSModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
mutually_exclusive=[],
|
||||
required_together=[]
|
||||
)
|
||||
|
||||
# validate dependencies
|
||||
if not HAS_BOTO3:
|
||||
module.fail_json(msg='boto3 is required for this module.')
|
||||
|
||||
# validate function_name if present
|
||||
function_name = module.params['function_name']
|
||||
if function_name:
|
||||
|
@@ -381,7 +377,7 @@ def main():
|
|||
))
|
||||
client = boto3_conn(module, **aws_connect_kwargs)
|
||||
except ClientError as e:
|
||||
module.fail_json(msg="Can't authorize connection - {0}".format(e))
|
||||
module.fail_json_aws(e, "trying to set up boto connection")
|
||||
|
||||
this_module = sys.modules[__name__]
|
||||
|
||||
|
@@ -405,9 +401,5 @@ def main():
|
|||
module.exit_json(**results)
|
||||
|
||||
|
||||
# ansible import module(s) kept at ~eof as recommended
|
||||
from ansible.module_utils.basic import *
|
||||
from ansible.module_utils.ec2 import *
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
||||
|
|
lib/ansible/modules/cloud/amazon/lambda_policy.py (new file, 430 lines)
@@ -0,0 +1,430 @@
|
|||
#!/usr/bin/python
|
||||
# Copyright (c) 2016, Pierre Jodouin <pjodouin@virtualcomputing.solutions>
|
||||
# Copyright (c) 2017 Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
ANSIBLE_METADATA = {'status': ['preview'],
|
||||
'supported_by': 'community',
|
||||
'metadata_version': '1.1'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: lambda_policy
|
||||
short_description: Creates, updates or deletes AWS Lambda policy statements.
|
||||
description:
|
||||
- This module allows the management of AWS Lambda policy statements.
|
||||
It is idempotent and supports "Check" mode. Use module M(lambda) to manage the lambda
|
||||
function itself, M(lambda_alias) to manage function aliases, M(lambda_event) to manage event source mappings
|
||||
such as Kinesis streams, M(execute_lambda) to execute a lambda function and M(lambda_facts) to gather facts
|
||||
relating to one or more lambda functions.
|
||||
|
||||
version_added: "2.4"
|
||||
|
||||
author:
|
||||
- Pierre Jodouin (@pjodouin)
|
||||
- Michael De La Rue (@mikedlr)
|
||||
options:
|
||||
function_name:
|
||||
description:
|
||||
- "Name of the Lambda function whose resource policy you are updating by adding a new permission."
|
||||
- "You can specify a function name (for example, Thumbnail ) or you can specify Amazon Resource Name (ARN) of the"
|
||||
- "function (for example, arn:aws:lambda:us-west-2:account-id:function:ThumbNail ). AWS Lambda also allows you to"
|
||||
- "specify partial ARN (for example, account-id:Thumbnail ). Note that the length constraint applies only to the"
|
||||
- "ARN. If you specify only the function name, it is limited to 64 character in length."
|
||||
required: true
|
||||
aliases: ['lambda_function_arn', 'function_arn']
|
||||
|
||||
state:
|
||||
description:
|
||||
- Describes the desired state.
|
||||
required: false
|
||||
default: "present"
|
||||
choices: ["present", "absent"]
|
||||
|
||||
alias:
|
||||
description:
|
||||
- Name of the function alias. Mutually exclusive with C(version).
|
||||
|
||||
version:
|
||||
description:
|
||||
- Version of the Lambda function. Mutually exclusive with C(alias).
|
||||
|
||||
statement_id:
|
||||
description:
|
||||
- A unique statement identifier.
|
||||
required: true
|
||||
aliases: ['sid']
|
||||
|
||||
action:
|
||||
description:
|
||||
- "The AWS Lambda action you want to allow in this statement. Each Lambda action is a string starting with
|
||||
lambda: followed by the API name (see Operations). For example, lambda:CreateFunction. You can use a wildcard
|
||||
(lambda:*) to grant permission for all AWS Lambda actions."
|
||||
required: true
|
||||
|
||||
principal:
|
||||
description:
|
||||
- "The principal who is getting this permission. It can be Amazon S3 service Principal (s3.amazonaws.com ) if
|
||||
you want Amazon S3 to invoke the function, an AWS account ID if you are granting cross-account permission, or
|
||||
any valid AWS service principal such as sns.amazonaws.com. For example, you might want to allow a custom
|
||||
application in another AWS account to push events to AWS Lambda by invoking your function."
|
||||
required: true
|
||||
|
||||
source_arn:
|
||||
description:
|
||||
- This is optional; however, when granting Amazon S3 permission to invoke your function, you should specify this
|
||||
field with the bucket Amazon Resource Name (ARN) as its value. This ensures that only events generated from
|
||||
the specified bucket can invoke the function.
|
||||
|
||||
source_account:
|
||||
description:
|
||||
- The AWS account ID (without a hyphen) of the source owner. For example, if the SourceArn identifies a bucket,
|
||||
then this is the bucket owner's account ID. You can use this additional condition to ensure the bucket you
|
||||
specify is owned by a specific account (it is possible the bucket owner deleted the bucket and some other AWS
|
||||
account created the bucket). You can also use this condition to specify all sources (that is, you don't
|
||||
specify the SourceArn) owned by a specific account.
|
||||
|
||||
event_source_token:
|
||||
description:
|
||||
- Token string representing source ARN or account. Mutually exclusive with C(source_arn) or C(source_account).
|
||||
|
||||
requirements:
|
||||
- boto3
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
---
|
||||
- hosts: localhost
|
||||
gather_facts: no
|
||||
vars:
|
||||
state: present
|
||||
tasks:
|
||||
- name: Lambda S3 event notification
|
||||
lambda_policy:
|
||||
state: "{{ state | default('present') }}"
|
||||
function_name: functionName
|
||||
alias: Dev
|
||||
statement_id: lambda-s3-myBucket-create-data-log
|
||||
action: lambda:InvokeFunction
|
||||
principal: s3.amazonaws.com
|
||||
source_arn: arn:aws:s3:eu-central-1:123456789012:bucketName
|
||||
source_account: 123456789012
|
||||
|
||||
- name: show results
|
||||
debug: var=lambda_policy_action
|
||||
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
---
|
||||
lambda_policy_action:
|
||||
description: describes what action was taken
|
||||
returned: success
|
||||
type: string
|
||||
'''
|
||||
|
||||
import json
|
||||
import re
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.aws.core import AnsibleAWSModule
|
||||
from ansible.module_utils.ec2 import get_aws_connection_info, boto3_conn
|
||||
|
||||
try:
|
||||
from botocore.exceptions import ClientError
|
||||
except ImportError:
|
||||
pass # will be protected by AnsibleAWSModule
|
||||
|
||||
|
||||
def pc(key):
|
||||
"""
|
||||
Changes python key into Pascal case equivalent. For example, 'this_function_name' becomes 'ThisFunctionName'.
|
||||
|
||||
:param key:
|
||||
:return:
|
||||
"""
|
||||
|
||||
return "".join([token.capitalize() for token in key.split('_')])
|
||||
|
||||
|
||||
def policy_equal(module, current_statement):
|
||||
for param in ('action', 'principal', 'source_arn', 'source_account', 'event_source_token'):
|
||||
if module.params.get(param) != current_statement.get(param):
|
||||
return False
|
||||
|
||||
return True
|
||||
|
||||
|
||||
def set_api_params(module, module_params):
|
||||
"""
|
||||
Sets module parameters to those expected by the boto3 API.
|
||||
|
||||
:param module:
|
||||
:param module_params:
|
||||
:return:
|
||||
"""
|
||||
|
||||
api_params = dict()
|
||||
|
||||
for param in module_params:
|
||||
module_param = module.params.get(param)
|
||||
if module_param is not None:
|
||||
api_params[pc(param)] = module_param
|
||||
|
||||
return api_params
|
||||
|
||||
|
||||
def validate_params(module):
|
||||
"""
|
||||
Performs parameter validation beyond the module framework's validation.
|
||||
|
||||
:param module:
|
||||
:return:
|
||||
"""
|
||||
|
||||
function_name = module.params['function_name']
|
||||
|
||||
# validate function name
|
||||
if function_name.startswith('arn:'):
    if not re.search(r'^[\w\-:]+$', function_name):
        module.fail_json(
            msg='ARN {0} is invalid. ARNs must contain only alphanumeric characters, hyphens and colons.'.format(function_name)
        )
    if len(function_name) > 140:
        module.fail_json(msg='ARN "{0}" exceeds 140 character limit'.format(function_name))
else:
    if not re.search(r'^[\w\-]+$', function_name):
        module.fail_json(
            msg='Function name {0} is invalid. Names must contain only alphanumeric characters and hyphens.'.format(function_name)
        )
    if len(function_name) > 64:
        module.fail_json(msg='Function name "{0}" exceeds 64 character limit'.format(function_name))
|
||||
|
||||
|
||||
def get_qualifier(module):
|
||||
"""
|
||||
Returns the function qualifier as a version or alias or None.
|
||||
|
||||
:param module:
|
||||
:return:
|
||||
"""
|
||||
|
||||
if module.params.get('version') is not None:
|
||||
return to_native(module.params['version'])
|
||||
elif module.params['alias']:
|
||||
return to_native(module.params['alias'])
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def extract_statement(policy, sid):
|
||||
"""return flattened single policy statement from a policy
|
||||
|
||||
If a policy statement is present in the policy extract it and
|
||||
return it in a flattened form. Otherwise return an empty
|
||||
dictionary.
|
||||
"""
|
||||
if 'Statement' not in policy:
|
||||
return {}
|
||||
policy_statement = {}
|
||||
# Now that we have the policy, check if required permission statement is present and flatten to
|
||||
# simple dictionary if found.
|
||||
for statement in policy['Statement']:
|
||||
if statement['Sid'] == sid:
|
||||
policy_statement['action'] = statement['Action']
|
||||
policy_statement['principal'] = statement['Principal']['Service']
|
||||
try:
|
||||
policy_statement['source_arn'] = statement['Condition']['ArnLike']['AWS:SourceArn']
|
||||
except KeyError:
|
||||
pass
|
||||
try:
|
||||
policy_statement['source_account'] = statement['Condition']['StringEquals']['AWS:SourceAccount']
|
||||
except KeyError:
|
||||
pass
|
||||
try:
|
||||
policy_statement['event_source_token'] = statement['Condition']['StringEquals']['lambda:EventSourceToken']
|
||||
except KeyError:
|
||||
pass
|
||||
break
|
||||
|
||||
return policy_statement
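For reference, a small illustration (not part of the module; the Sid and values are invented) of the policy document shape extract_statement() consumes and the flattened dictionary it returns:

sample_policy = {
    "Statement": [{
        "Sid": "lambda-s3-myBucket-create-data-log",
        "Action": "lambda:InvokeFunction",
        "Principal": {"Service": "s3.amazonaws.com"},
        "Condition": {
            "ArnLike": {"AWS:SourceArn": "arn:aws:s3:::myBucket"},
            "StringEquals": {"AWS:SourceAccount": "123456789012"},
        },
    }]
}

print(extract_statement(sample_policy, "lambda-s3-myBucket-create-data-log"))
# {'action': 'lambda:InvokeFunction', 'principal': 's3.amazonaws.com',
#  'source_arn': 'arn:aws:s3:::myBucket', 'source_account': '123456789012'}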
|
||||
|
||||
|
||||
def get_policy_statement(module, client):
|
||||
"""Checks that policy exists and if so, that statement ID is present or absent.
|
||||
|
||||
:param module:
|
||||
:param client:
|
||||
:return:
|
||||
"""
|
||||
|
||||
policy = dict()
|
||||
sid = module.params['statement_id']
|
||||
|
||||
# set API parameters
|
||||
api_params = set_api_params(module, ('function_name', ))
|
||||
qualifier = get_qualifier(module)
|
||||
if qualifier:
|
||||
api_params.update(Qualifier=qualifier)
|
||||
|
||||
policy_results = None
|
||||
# check if function policy exists
|
||||
try:
|
||||
policy_results = client.get_policy(**api_params)
|
||||
except ClientError as e:
|
||||
try:
|
||||
if e.response['Error']['Code'] == 'ResourceNotFoundException':
|
||||
return {}
|
||||
except AttributeError: # catches ClientErrors without response, e.g. fail before connect
|
||||
pass
|
||||
module.fail_json_aws(e, msg="retrieving function policy")
|
||||
except Exception as e:
|
||||
module.fail_json_aws(e, msg="retrieving function policy")
|
||||
|
||||
# get_policy returns a JSON string so must convert to dict before reassigning to its key
|
||||
policy = json.loads(policy_results.get('Policy', '{}'))
|
||||
return extract_statement(policy, sid)
|
||||
|
||||
|
||||
def add_policy_permission(module, client):
|
||||
"""
|
||||
Adds a permission statement to the policy.
|
||||
|
||||
:param module:
|
||||
:param client:
|
||||
:return:
|
||||
"""
|
||||
|
||||
changed = False
|
||||
|
||||
# set API parameters
|
||||
params = (
|
||||
'function_name',
|
||||
'statement_id',
|
||||
'action',
|
||||
'principal',
|
||||
'source_arn',
|
||||
'source_account',
|
||||
'event_source_token')
|
||||
api_params = set_api_params(module, params)
|
||||
qualifier = get_qualifier(module)
|
||||
if qualifier:
|
||||
api_params.update(Qualifier=qualifier)
|
||||
|
||||
if not module.check_mode:
|
||||
try:
|
||||
client.add_permission(**api_params)
|
||||
except Exception as e:
|
||||
module.fail_json_aws(e, msg="adding permission to policy")
|
||||
changed = True
|
||||
|
||||
return changed
|
||||
|
||||
|
||||
def remove_policy_permission(module, client):
|
||||
"""
|
||||
Removes a permission statement from the policy.
|
||||
|
||||
:param module:
|
||||
:param client:
|
||||
:return:
|
||||
"""
|
||||
|
||||
changed = False
|
||||
|
||||
# set API parameters
|
||||
api_params = set_api_params(module, ('function_name', 'statement_id'))
|
||||
qualifier = get_qualifier(module)
|
||||
if qualifier:
|
||||
api_params.update(Qualifier=qualifier)
|
||||
|
||||
try:
|
||||
if not module.check_mode:
|
||||
client.remove_permission(**api_params)
|
||||
changed = True
|
||||
except Exception as e:
|
||||
module.fail_json_aws(e, msg="removing permission from policy")
|
||||
|
||||
return changed
|
||||
|
||||
|
||||
def manage_state(module, lambda_client):
|
||||
changed = False
|
||||
current_state = 'absent'
|
||||
state = module.params['state']
|
||||
action_taken = 'none'
|
||||
|
||||
# check if the policy exists
|
||||
current_policy_statement = get_policy_statement(module, lambda_client)
|
||||
if current_policy_statement:
|
||||
current_state = 'present'
|
||||
|
||||
if state == 'present':
|
||||
if current_state == 'present' and not policy_equal(module, current_policy_statement):
|
||||
remove_policy_permission(module, lambda_client)
|
||||
changed = add_policy_permission(module, lambda_client)
|
||||
action_taken = 'updated'
|
||||
if not current_state == 'present':
|
||||
changed = add_policy_permission(module, lambda_client)
|
||||
action_taken = 'added'
|
||||
elif current_state == 'present':
|
||||
# remove the policy statement
|
||||
changed = remove_policy_permission(module, lambda_client)
|
||||
action_taken = 'deleted'
|
||||
|
||||
return dict(changed=changed, ansible_facts=dict(lambda_policy_action=action_taken))
|
||||
|
||||
|
||||
def setup_client(module):
|
||||
region, ec2_url, aws_connect_params = get_aws_connection_info(module, boto3=True)
|
||||
if region:
|
||||
connection = boto3_conn(module, conn_type='client', resource='lambda', region=region, endpoint=ec2_url, **aws_connect_params)
|
||||
else:
|
||||
module.fail_json(msg="region must be specified")
|
||||
return connection
|
||||
|
||||
|
||||
def setup_module_object():
|
||||
argument_spec = dict(
|
||||
state=dict(default='present', choices=['present', 'absent']),
|
||||
function_name=dict(required=True, aliases=['lambda_function_arn', 'function_arn']),
|
||||
statement_id=dict(required=True, aliases=['sid']),
|
||||
alias=dict(),
|
||||
version=dict(type='int'),
|
||||
action=dict(required=True, ),
|
||||
principal=dict(required=True, ),
|
||||
source_arn=dict(),
|
||||
source_account=dict(),
|
||||
event_source_token=dict(),
|
||||
)
|
||||
|
||||
return AnsibleAWSModule(
|
||||
argument_spec=argument_spec,
|
||||
supports_check_mode=True,
|
||||
mutually_exclusive=[['alias', 'version'],
|
||||
['event_source_token', 'source_arn'],
|
||||
['event_source_token', 'source_account']],
|
||||
)
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
Main entry point.
|
||||
|
||||
:return dict: ansible facts
|
||||
"""
|
||||
|
||||
module = setup_module_object()
|
||||
client = setup_client(module)
|
||||
validate_params(module)
|
||||
results = manage_state(module, client)
|
||||
|
||||
module.exit_json(**results)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
test/integration/targets/aws_lambda/aliases (new file, 4 lines)
@@ -0,0 +1,4 @@
|
|||
cloud/aws
|
||||
posix/ci/cloud/aws
|
||||
execute_lambda
|
||||
lambda
|
test/integration/targets/aws_lambda/defaults/main.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
|
|||
---
|
||||
# defaults file for aws_lambda test
|
||||
lambda_function_name: '{{resource_prefix}}'
|
test/integration/targets/aws_lambda/files/mini_lambda.py (new file, 34 lines)
@@ -0,0 +1,34 @@
|
|||
from __future__ import print_function
|
||||
import json
|
||||
|
||||
|
||||
def handler(event, context):
|
||||
"""
|
||||
The handler function is the function which gets called each time
|
||||
the lambda is run.
|
||||
"""
|
||||
# printing goes to the cloudwatch log allowing us to simply debug the lambda if we can find
|
||||
# the log entry.
|
||||
print("got event:\n" + json.dumps(event))
|
||||
|
||||
# if the name parameter isn't present this can throw an exception
|
||||
# which will result in an Amazon-chosen failure from the Lambda,
|
||||
# which can be completely fine.
|
||||
|
||||
name = event["name"]
|
||||
|
||||
return {"message": "hello " + name}
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
This main function will normally never be called during normal
|
||||
lambda use. It is here for testing the lambda program only.
|
||||
"""
|
||||
event = {"name": "james"}
|
||||
context = None
|
||||
print(handler(event, context))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
test/integration/targets/aws_lambda/tasks/main.yml (new file, 409 lines)
@@ -0,0 +1,409 @@
|
|||
---
|
||||
#
|
||||
# Author: Michael De La Rue
|
||||
# based on ec2_key.yml + lambda.py
|
||||
|
||||
- block:
|
||||
|
||||
# ============================================================
|
||||
- name: test with no parameters
|
||||
lambda:
|
||||
register: result
|
||||
ignore_errors: true
|
||||
|
||||
- name: assert failure when called with no parameters
|
||||
assert:
|
||||
that:
|
||||
- 'result.failed'
|
||||
- 'result.msg.startswith("missing required arguments: name")'
|
||||
|
||||
# ============================================================
|
||||
- name: test with no parameters except state absent
|
||||
lambda:
|
||||
state=absent
|
||||
register: result
|
||||
ignore_errors: true
|
||||
|
||||
- name: assert failure when called with no parameters
|
||||
assert:
|
||||
that:
|
||||
- 'result.failed'
|
||||
- 'result.msg.startswith("missing required arguments: name")'
|
||||
|
||||
# ============================================================
|
||||
- name: test with no role or handler
|
||||
lambda:
|
||||
name=ansible-testing-fake-should-not-be-created
|
||||
runtime="python2.7"
|
||||
register: result
|
||||
ignore_errors: true
|
||||
|
||||
- name: assert failure when called with no parameters
|
||||
assert:
|
||||
that:
|
||||
- 'result.failed'
|
||||
- 'result.msg.startswith("state is present but the following are missing: handler")'
|
||||
|
||||
# ============================================================
|
||||
- name: test with all module required variables but no region
|
||||
lambda:
|
||||
name=ansible-testing-fake-should-not-be-created
|
||||
runtime="python2.7"
|
||||
handler="no-handler"
|
||||
role=arn:fake-role-doesnt-exist
|
||||
register: result
|
||||
ignore_errors: true
|
||||
|
||||
- name: assert failure when called with only 'name'
|
||||
assert:
|
||||
that:
|
||||
- 'result.failed'
|
||||
- 'result.msg == "region must be specified"'
|
||||
|
||||
# ============================================================
|
||||
- name: test with all module required variables, no region and all possible variables set to blank
|
||||
lambda:
|
||||
name: ansible-testing-fake-should-not-be-created
|
||||
state: present
|
||||
runtime: "python2.7"
|
||||
role: arn:fake-role-doesnt-exist
|
||||
handler:
|
||||
s3_bucket:
|
||||
s3_key:
|
||||
s3_object_version:
|
||||
description:
|
||||
vpc_subnet_ids:
|
||||
vpc_security_group_ids:
|
||||
environment_variables:
|
||||
dead_letter_arn:
|
||||
register: result
|
||||
ignore_errors: true
|
||||
|
||||
- name: assert failure when called with only 'name'
|
||||
assert:
|
||||
that:
|
||||
- 'result.failed'
|
||||
- 'result.msg == "region must be specified"'
|
||||
|
||||
# ============================================================
|
||||
# direct zip file upload
|
||||
- name: move lambda into place for archive module
|
||||
copy:
|
||||
src: "mini_lambda.py"
|
||||
dest: "{{output_dir}}/mini_lambda.py"
|
||||
|
||||
- name: bundle lambda into a zip
|
||||
archive:
|
||||
format: zip
|
||||
path: "{{output_dir}}/mini_lambda.py"
|
||||
dest: "{{output_dir}}/mini_lambda.zip"
|
||||
register: zip_res
|
||||
|
||||
- name: test state=present - upload the lambda
|
||||
lambda:
|
||||
name="{{lambda_function_name}}"
|
||||
runtime="python2.7"
|
||||
handler="mini_lambda.handler"
|
||||
role="ansible_lambda_role"
|
||||
ec2_region='{{ec2_region}}'
|
||||
ec2_access_key='{{ec2_access_key}}'
|
||||
ec2_secret_key='{{ec2_secret_key}}'
|
||||
security_token='{{security_token}}'
|
||||
zip_file="{{zip_res.dest}}"
|
||||
register: result
|
||||
|
||||
- name: assert lambda upload succeeded
|
||||
assert:
|
||||
that:
|
||||
- 'not result|failed'
|
||||
|
||||
- name: test lambda works
|
||||
execute_lambda:
|
||||
name: "{{lambda_function_name}}"
|
||||
payload:
|
||||
name: "Mr Ansible Tests"
|
||||
ec2_region: '{{ec2_region}}'
|
||||
ec2_access_key: '{{ec2_access_key}}'
|
||||
ec2_secret_key: '{{ec2_secret_key}}'
|
||||
security_token: '{{security_token}}'
|
||||
register: result
|
||||
|
||||
- name: assert lambda manages to respond as expected
|
||||
assert:
|
||||
that:
|
||||
- 'not result|failed'
|
||||
- 'result.result.output.message == "hello Mr Ansible Tests"'
|
||||
|
||||
# ============================================================
|
||||
- name: test state=present with security group but no vpc
|
||||
lambda:
|
||||
name: "{{lambda_function_name}}"
|
||||
runtime: "python2.7"
|
||||
role: "ansible_lambda_role"
|
||||
ec2_region: '{{ec2_region}}'
|
||||
ec2_access_key: '{{ec2_access_key}}'
|
||||
ec2_secret_key: '{{ec2_secret_key}}'
|
||||
security_token: '{{security_token}}'
|
||||
zip_file: "{{zip_res.dest}}"
|
||||
handler:
|
||||
description:
|
||||
vpc_subnet_ids:
|
||||
vpc_security_group_ids: sg-FA6E
|
||||
environment_variables:
|
||||
dead_letter_arn:
|
||||
register: result
|
||||
ignore_errors: true
|
||||
|
||||
- name: assert lambda fails with proper message
|
||||
assert:
|
||||
that:
|
||||
- 'result|failed'
|
||||
- 'result.msg != "MODULE FAILURE"'
|
||||
- 'result.changed == False'
|
||||
- '"requires at least one security group and one subnet" in result.msg'
|
||||
|
||||
# ============================================================
|
||||
- name: test state=present with all nullable variables explicitly set to null
|
||||
lambda:
|
||||
name: "{{lambda_function_name}}"
|
||||
runtime: "python2.7"
|
||||
role: "ansible_lambda_role"
|
||||
ec2_region: '{{ec2_region}}'
|
||||
ec2_access_key: '{{ec2_access_key}}'
|
||||
ec2_secret_key: '{{ec2_secret_key}}'
|
||||
security_token: '{{security_token}}'
|
||||
zip_file: "{{zip_res.dest}}"
|
||||
handler: "mini_lambda.handler"
|
||||
# These are not allowed because of mutually exclusive.
|
||||
# s3_bucket:
|
||||
# s3_key:
|
||||
# s3_object_version:
|
||||
description:
|
||||
vpc_subnet_ids:
|
||||
vpc_security_group_ids:
|
||||
environment_variables:
|
||||
dead_letter_arn:
|
||||
register: result
|
||||
|
||||
- name: assert lambda was updated as expected
|
||||
assert:
|
||||
that:
|
||||
- 'not result|failed'
|
||||
- 'result.changed == False'
|
||||
|
||||
# ============================================================
|
||||
- name: test state=present triggering a network exception due to bad url
|
||||
lambda:
|
||||
name: "{{lambda_function_name}}"
|
||||
runtime: "python2.7"
|
||||
role: "ansible_lambda_role"
|
||||
ec2_url: https://noexist.example.com
|
||||
ec2_region: '{{ec2_region}}'
|
||||
ec2_access_key: 'iamnotreallyanaccesskey'
|
||||
ec2_secret_key: 'thisisabadsecretkey'
|
||||
security_token: 'andthisisabadsecuritytoken'
|
||||
zip_file: "{{zip_res.dest}}"
|
||||
register: result
|
||||
ignore_errors: true
|
||||
|
||||
- name: assert lambda manages to respond as expected
|
||||
assert:
|
||||
that:
|
||||
- 'result|failed'
|
||||
- 'result.changed == False'
|
||||
|
||||
# ============================================================
|
||||
- name: test state=absent (expect changed=True)
|
||||
lambda:
|
||||
name="{{lambda_function_name}}"
|
||||
ec2_region='{{ec2_region}}'
|
||||
ec2_access_key='{{ec2_access_key}}'
|
||||
ec2_secret_key='{{ec2_secret_key}}'
|
||||
security_token='{{security_token}}'
|
||||
state=absent
|
||||
register: result
|
||||
|
||||
- name: assert state=absent
|
||||
assert:
|
||||
that:
|
||||
- 'not result|failed'
|
||||
- 'result.changed == True'
|
||||
|
||||
# ============================================================
|
||||
# parallel lambda creation
|
||||
|
||||
- name: parallel lambda creation 1/4
|
||||
lambda:
|
||||
name: "{{lambda_function_name}}_1"
|
||||
runtime: "python2.7"
|
||||
handler: "mini_lambda.handler"
|
||||
role: "ansible_lambda_role"
|
||||
ec2_region: '{{ec2_region}}'
|
||||
ec2_access_key: '{{ec2_access_key}}'
|
||||
ec2_secret_key: '{{ec2_secret_key}}'
|
||||
security_token: '{{security_token}}'
|
||||
zip_file: "{{zip_res.dest}}"
|
||||
async: 1000
|
||||
register: async_1
|
||||
|
||||
- name: parallel lambda creation 2/4
|
||||
lambda:
|
||||
name: "{{lambda_function_name}}_2"
|
||||
runtime: "python2.7"
|
||||
handler: "mini_lambda.handler"
|
||||
role: "ansible_lambda_role"
|
||||
ec2_region: '{{ec2_region}}'
|
||||
ec2_access_key: '{{ec2_access_key}}'
|
||||
ec2_secret_key: '{{ec2_secret_key}}'
|
||||
security_token: '{{security_token}}'
|
||||
zip_file: "{{zip_res.dest}}"
|
||||
async: 1000
|
||||
register: async_2
|
||||
|
||||
- name: parallel lambda creation 3/4
|
||||
lambda:
|
||||
name: "{{lambda_function_name}}_3"
|
||||
runtime: "python2.7"
|
||||
handler: "mini_lambda.handler"
|
||||
role: "ansible_lambda_role"
|
||||
ec2_region: '{{ec2_region}}'
|
||||
ec2_access_key: '{{ec2_access_key}}'
|
||||
ec2_secret_key: '{{ec2_secret_key}}'
|
||||
security_token: '{{security_token}}'
|
||||
zip_file: "{{zip_res.dest}}"
|
||||
async: 1000
|
||||
register: async_3
|
||||
|
||||
- name: parallel lambda creation 4/4
|
||||
lambda:
|
||||
name: "{{lambda_function_name}}_4"
|
||||
runtime: "python2.7"
|
||||
handler: "mini_lambda.handler"
|
||||
role: "ansible_lambda_role"
|
||||
ec2_region: '{{ec2_region}}'
|
||||
ec2_access_key: '{{ec2_access_key}}'
|
||||
ec2_secret_key: '{{ec2_secret_key}}'
|
||||
security_token: '{{security_token}}'
|
||||
zip_file: "{{zip_res.dest}}"
|
||||
register: result
|
||||
|
||||
- name: assert lambda manages to respond as expected
|
||||
assert:
|
||||
that:
|
||||
- 'not result|failed'
|
||||
|
||||
- name: wait for async job 1
|
||||
async_status: jid={{ async_1.ansible_job_id }}
|
||||
register: job_result
|
||||
until: job_result.finished
|
||||
retries: 30
|
||||
|
||||
- name: wait for async job 2
|
||||
async_status: jid={{ async_2.ansible_job_id }}
|
||||
register: job_result
|
||||
until: job_result.finished
|
||||
retries: 30
|
||||
|
||||
- name: wait for async job 3
|
||||
async_status: jid={{ async_3.ansible_job_id }}
|
||||
register: job_result
|
||||
until: job_result.finished
|
||||
retries: 30
|
||||
|
||||
|
||||
- name: parallel lambda deletion 1/4
|
||||
lambda:
|
||||
name: "{{lambda_function_name}}_1"
|
||||
state: absent
|
||||
ec2_region: '{{ec2_region}}'
|
||||
ec2_access_key: '{{ec2_access_key}}'
|
||||
ec2_secret_key: '{{ec2_secret_key}}'
|
||||
security_token: '{{security_token}}'
|
||||
zip_file: "{{zip_res.dest}}"
|
||||
async: 1000
|
||||
register: async_1
|
||||
|
||||
- name: parallel lambda deletion 2/4
|
||||
lambda:
|
||||
name: "{{lambda_function_name}}_2"
|
||||
state: absent
|
||||
ec2_region: '{{ec2_region}}'
|
||||
ec2_access_key: '{{ec2_access_key}}'
|
||||
ec2_secret_key: '{{ec2_secret_key}}'
|
||||
security_token: '{{security_token}}'
|
||||
zip_file: "{{zip_res.dest}}"
|
||||
async: 1000
|
||||
register: async_2
|
||||
|
||||
- name: parallel lambda deletion 3/4
|
||||
lambda:
|
||||
name: "{{lambda_function_name}}_3"
|
||||
state: absent
|
||||
ec2_region: '{{ec2_region}}'
|
||||
ec2_access_key: '{{ec2_access_key}}'
|
||||
ec2_secret_key: '{{ec2_secret_key}}'
|
||||
security_token: '{{security_token}}'
|
||||
zip_file: "{{zip_res.dest}}"
|
||||
async: 1000
|
||||
register: async_3
|
||||
|
||||
- name: parallel lambda deletion 4/4
|
||||
lambda:
|
||||
name: "{{lambda_function_name}}_4"
|
||||
state: absent
|
||||
ec2_region: '{{ec2_region}}'
|
||||
ec2_access_key: '{{ec2_access_key}}'
|
||||
ec2_secret_key: '{{ec2_secret_key}}'
|
||||
security_token: '{{security_token}}'
|
||||
zip_file: "{{zip_res.dest}}"
|
||||
register: result
|
||||
|
||||
- name: assert lambda deletion has succeeded
|
||||
assert:
|
||||
that:
|
||||
- 'not result|failed'
|
||||
|
||||
- name: wait for async job 1
|
||||
async_status: jid={{ async_1.ansible_job_id }}
|
||||
register: job_result
|
||||
until: job_result.finished
|
||||
retries: 30
|
||||
|
||||
- name: wait for async job 2
|
||||
async_status: jid={{ async_2.ansible_job_id }}
|
||||
register: job_result
|
||||
until: job_result.finished
|
||||
retries: 30
|
||||
|
||||
- name: wait for async job 3
|
||||
async_status: jid={{ async_3.ansible_job_id }}
|
||||
register: job_result
|
||||
until: job_result.finished
|
||||
retries: 30
|
||||
|
||||
|
||||
# ============================================================
|
||||
# upload via s3 bucket - multi function
|
||||
|
||||
# ============================================================
|
||||
# update already existing function
|
||||
|
||||
|
||||
always:
|
||||
|
||||
# ============================================================
|
||||
- name: test state=absent (expect changed=False)
|
||||
lambda:
|
||||
name="{{lambda_function_name}}"
|
||||
ec2_region='{{ec2_region}}'
|
||||
ec2_access_key='{{ec2_access_key}}'
|
||||
ec2_secret_key='{{ec2_secret_key}}'
|
||||
security_token='{{security_token}}'
|
||||
state=absent
|
||||
register: result
|
||||
|
||||
- name: assert state=absent
|
||||
assert:
|
||||
that:
|
||||
- 'not result|failed'
|
||||
- 'result.changed == False'
|
test/integration/targets/aws_lambda_policy/aliases (new file, 2 lines)
@@ -0,0 +1,2 @@
|
|||
cloud/aws
|
||||
posix/ci/cloud/aws
|
test/integration/targets/aws_lambda_policy/defaults/main.yml (new file, 3 lines)
@@ -0,0 +1,3 @@
|
|||
---
|
||||
# defaults file for aws_lambda_policy test
|
||||
lambda_function_name: '{{resource_prefix}}-api-endpoint'
|
test/integration/targets/aws_lambda_policy/files/mini_http_lambda.py (new file, 36 lines)
@@ -0,0 +1,36 @@
|
|||
from __future__ import print_function
|
||||
import json
|
||||
|
||||
|
||||
def handler(event, context):
|
||||
"""
|
||||
The handler function is the function which gets called each time
|
||||
the lambda is run.
|
||||
"""
|
||||
# printing goes to the cloudwatch log allowing us to simply debug the lambda if we can find
|
||||
# the log entry.
|
||||
print("got event:\n" + json.dumps(event))
|
||||
|
||||
# if the name parameter isn't present this can throw an exception
|
||||
# which will result in an Amazon-chosen failure from the Lambda,
|
||||
# which can be completely fine.
|
||||
|
||||
name = event["pathParameters"]["greet_name"]
|
||||
|
||||
return {"statusCode": 200,
|
||||
"body": 'hello: "' + name + '"',
|
||||
"headers": {}}
|
||||
|
||||
|
||||
def main():
|
||||
"""
|
||||
This main function will normally never be called during normal
|
||||
lambda use. It is here for testing the lambda program only.
|
||||
"""
|
||||
event = {"name": "james"}
|
||||
context = None
|
||||
print(handler(event, context))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
test/integration/targets/aws_lambda_policy/tasks/main.yml (new file, 218 lines)
@@ -0,0 +1,218 @@
|
|||
---
|
||||
#
|
||||
# Author: Michael De La Rue
|
||||
# based on ec2_key.yml + lambda.py
|
||||
|
||||
- block:
|
||||
|
||||
# ============================================================
|
||||
- name: test with no parameters
|
||||
lambda_policy:
|
||||
register: result
|
||||
ignore_errors: true
|
||||
|
||||
- name: assert failure when called with no parameters
|
||||
assert:
|
||||
that:
|
||||
- 'result.failed'
|
||||
- 'result.msg.startswith("missing required arguments: ")'
|
||||
|
||||
# ============================================================
|
||||
- name: test with all required dummy parameters but no region
|
||||
lambda_policy:
|
||||
statement_id: dummy
|
||||
principal: api_fakeway
|
||||
action: fake:do_something_fake
|
||||
function_name: dummy_fake_function
|
||||
ignore_errors: true
|
||||
register: result
|
||||
|
||||
- name: assert failure and appropriate message when called without region
|
||||
assert:
|
||||
that:
|
||||
- 'result.failed'
|
||||
- '"region must be specified" in result.msg'
|
||||
|
||||
# ============================================================
|
||||
- name: test with all required dummy parameters and region set to null
|
||||
lambda_policy:
|
||||
statement_id: dummy
|
||||
principal: api_fakeway
|
||||
action: fake:do_something_fake
|
||||
function_name: dummy_fake_function
|
||||
region: null
|
||||
ignore_errors: true
|
||||
register: result
|
||||
|
||||
- name: assert failure and appropriate message when called with a null region
|
||||
assert:
|
||||
that:
|
||||
- 'result.failed'
|
||||
- '"region must be specified" in result.msg'
|
||||
|
||||
# ============================================================
|
||||
- name: test exceptions generated by forcing bad ec2 url
|
||||
lambda_policy:
|
||||
function_name: "{{ lambda_function_name }}"
|
||||
region: "{{ec2_region}}"
|
||||
state: present
|
||||
statement_id: api-gateway-invoke-lambdas
|
||||
action: lambda:InvokeFunction
|
||||
principal: apigateway.amazonaws.com
|
||||
source_arn: "arn:aws:execute-api:no-north-0:1234567:*/*"
|
||||
ec2_url: https://noexist.example.com
|
||||
ec2_region: 'no-north-0'
|
||||
ec2_access_key: 'iamnotreallyanaccesskey'
|
||||
ec2_secret_key: 'thisisabadsecretkey'
|
||||
security_token: 'andthisisabadsecuritytoken'
|
||||
register: result
|
||||
ignore_errors: true
|
||||
|
||||
- name: assert lambda manages to respond as expected
|
||||
assert:
|
||||
that:
|
||||
- 'result|failed'
|
||||
- 'result.msg != "MODULE FAILURE"'
|
||||
- 'result.changed == False'
|
||||
|
||||
# ============================================================
|
||||
# direct zip file upload
|
||||
- name: move lambda into place for archive module
|
||||
copy:
|
||||
src: "mini_http_lambda.py"
|
||||
dest: "{{output_dir}}/mini_http_lambda.py"
|
||||
|
||||
- name: bundle lambda into a zip
|
||||
archive:
|
||||
format: zip
|
||||
path: "{{output_dir}}/mini_http_lambda.py"
|
||||
dest: "{{output_dir}}/mini_http_lambda.zip"
|
||||
register: zip_res
|
||||
|
||||
- name: test state=present - upload the lambda
|
||||
lambda:
|
||||
name="{{lambda_function_name}}"
|
||||
runtime="python2.7"
|
||||
handler="mini_http_lambda.handler"
|
||||
role="ansible_lambda_role"
|
||||
ec2_region='{{ec2_region}}'
|
||||
aws_access_key='{{aws_access_key}}'
|
||||
aws_secret_key='{{aws_secret_key}}'
|
||||
security_token='{{security_token}}'
|
||||
zip_file="{{zip_res.dest}}"
|
||||
register: lambda_result
|
||||
|
||||
- name: install aws cli - FIXME this is temporary and should be replaced with a lighter-weight solution
|
||||
command: pip install awscli
|
||||
register: result
|
||||
|
||||
- name: get the aws account ID for use in future commands
|
||||
command: aws sts get-caller-identity --output text --query 'Account'
|
||||
environment:
|
||||
AWS_ACCESS_KEY_ID: '{{aws_access_key}}'
|
||||
AWS_SECRET_ACCESS_KEY: '{{aws_secret_key}}'
|
||||
AWS_SESSION_TOKEN: '{{security_token}}'
|
||||
register: result
|
||||
|
||||
- name: register account id
|
||||
set_fact:
|
||||
aws_account_id: "{{ result.stdout | replace('\n', '') }}"
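The FIXME above asks for a lighter-weight replacement for installing the AWS CLI just to read the account ID. One possible boto3-based sketch (assuming boto3 is available where this would run; the helper name is invented):

import boto3

def current_account_id(access_key, secret_key, session_token):
    # Equivalent of `aws sts get-caller-identity --output text --query 'Account'`
    sts = boto3.client('sts',
                       aws_access_key_id=access_key,
                       aws_secret_access_key=secret_key,
                       aws_session_token=session_token)
    return sts.get_caller_identity()['Account']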
|
||||
|
||||
- name: register lambda uri for use in template
|
||||
set_fact:
|
||||
mini_lambda_uri: "arn:aws:apigateway:{{ec2_region}}:lambda:path/2015-03-31/functions/arn:aws:lambda:{{ec2_region}}:{{aws_account_id}}:function:{{ lambda_result.configuration.function_name }}/invocations"
|
||||
|
||||
- name: build API file
|
||||
template:
|
||||
src: endpoint-test-swagger-api.yml.j2
|
||||
dest: "{{output_dir}}/endpoint-test-swagger-api.yml.j2"
|
||||
|
||||
    - name: deploy new API
      aws_api_gateway:
        api_file: "{{output_dir}}/endpoint-test-swagger-api.yml.j2"
        stage: "lambdabased"
        region: '{{ec2_region}}'
        aws_access_key: '{{aws_access_key}}'
        aws_secret_key: '{{aws_secret_key}}'
        security_token: '{{security_token}}'
      register: create_result

    - name: register api id for later
      set_fact:
        api_id: "{{ create_result.api_id }}"
    - name: check API fails with permissions failure
      uri: url="https://{{create_result.api_id}}.execute-api.{{ec2_region}}.amazonaws.com/lambdabased/mini/Mr_Ansible_Tester"
      register: unauth_uri_result
      ignore_errors: true

    - name: assert internal server error due to permissions
      assert:
        that:
          - unauth_uri_result|failed
          - 'unauth_uri_result.status == 500'
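    # with no resource policy on the function, API Gateway cannot invoke it and
    # the proxy integration surfaces that as an HTTP 500; the lambda_policy
    # grant below is what makes the endpoint callable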
    - name: give api gateway execute permissions on lambda
      lambda_policy:
        function_name: "{{ lambda_function_name }}"
        region: "{{ec2_region}}"
        state: present
        statement_id: api-gateway-invoke-lambdas
        action: lambda:InvokeFunction
        principal: apigateway.amazonaws.com
        source_arn: "arn:aws:execute-api:{{ ec2_region }}:{{ aws_account_id }}:*/*"
        aws_access_key: '{{aws_access_key}}'
        aws_secret_key: '{{aws_secret_key}}'
        security_token: '{{security_token}}'
    - name: check API works with execute permissions
      uri: url="https://{{create_result.api_id}}.execute-api.{{ec2_region}}.amazonaws.com/lambdabased/mini/Mr_Ansible_Tester"
      register: uri_result
    - name: assert API call succeeds
      assert:
        that:
          - 'uri_result'
    - name: deploy new API
      aws_api_gateway:
        api_file: "{{output_dir}}/endpoint-test-swagger-api.yml.j2"
        stage: "lambdabased"
        region: '{{ec2_region}}'
        aws_access_key: '{{aws_access_key}}'
        aws_secret_key: '{{aws_secret_key}}'
        security_token: '{{security_token}}'
      register: create_result
      ignore_errors: true
  always:

    # ============================================================
    - name: destroy lambda for test cleanup if created
      lambda:
        name="{{lambda_function_name}}"
        ec2_region='{{ec2_region}}'
        ec2_access_key='{{ec2_access_key}}'
        ec2_secret_key='{{ec2_secret_key}}'
        security_token='{{security_token}}'
        state=absent
      register: result

    - name: destroy API for test cleanup if created
      aws_api_gateway:
        state: absent
        api_id: '{{api_id}}'
        region: '{{ec2_region}}'
        aws_access_key: '{{ec2_access_key}}'
        aws_secret_key: '{{ec2_secret_key}}'
        security_token: '{{security_token}}'
      register: destroy_result

    - name: assert destroy statements succeeded
      assert:
        that:
          - 'destroy_result.changed == True'
          - 'not result|failed'

@@ -0,0 +1,39 @@
---
swagger: "2.0"
info:
  version: "2017-05-11T12:14:59Z"
  title: "{{resource_prefix}}LambdaBased_API"
host: "fakeexample.execute-api.us-east-1.amazonaws.com"
basePath: "/lambdabased"
schemes:
- "https"
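# host and basePath above are placeholder values; {{mini_lambda_uri}} in the
# integration below is filled in by the template task with the invocation ARN
# of the uploaded mini_http_lambda function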
paths:
  /mini/{greet_name}:
    get:
      produces:
      - "application/json"
      parameters:
      - name: "greet_name"
        in: "path"
        required: true
        type: "string"
      responses:
        200:
          description: "200 response"
          schema:
            $ref: "#/definitions/Empty"
      x-amazon-apigateway-integration:
        responses:
          default:
            statusCode: "200"
        uri: "{{mini_lambda_uri}}"
        requestTemplates:
          application/json: "{\"statusCode\": 200}"
        passthroughBehavior: "when_no_match"
        httpMethod: "POST"
        contentHandling: "CONVERT_TO_TEXT"
        type: "aws_proxy"
definitions:
  Empty:
    type: "object"
    title: "Empty Schema"

@@ -51,8 +51,6 @@ lib/ansible/modules/cloud/amazon/elb_classic_lb.py
lib/ansible/modules/cloud/amazon/execute_lambda.py
lib/ansible/modules/cloud/amazon/iam.py
lib/ansible/modules/cloud/amazon/iam_policy.py
lib/ansible/modules/cloud/amazon/lambda.py
lib/ansible/modules/cloud/amazon/lambda_facts.py
lib/ansible/modules/cloud/amazon/rds_subnet_group.py
lib/ansible/modules/cloud/amazon/redshift.py
lib/ansible/modules/cloud/amazon/route53_health_check.py

@@ -39,6 +39,7 @@ def set_module_args(args):
    args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
    basic._ANSIBLE_ARGS = to_bytes(args)


base_lambda_config = {
    'FunctionName': 'lambda_name',
    'Role': 'arn:aws:iam::987654321012:role/lambda_basic_execution',

@@ -130,7 +131,8 @@ def test_create_lambda_if_not_exist():
        pass

    # guard against calling other than for a lambda connection (e.g. IAM)
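    # the single equality check is split into a lower and an upper bound so a
    # failure reports whether the boto connection was never made or made more
    # than once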
    assert(len(boto3_conn_double.mock_calls) == 1), "multiple boto connections used unexpectedly"
    assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
    assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
    assert(len(lambda_client_double.update_function_configuration.mock_calls) == 0), \
        "unexpectedly updated lambda configuration when should have only created"
    assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \

@@ -162,7 +164,8 @@ def test_update_lambda_if_code_changed():
        pass

    # guard against calling other than for a lambda connection (e.g. IAM)
    assert(len(boto3_conn_double.mock_calls) == 1), "multiple boto connections used unexpectedly"
    assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
    assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
    assert(len(lambda_client_double.update_function_configuration.mock_calls) == 0), \
        "unexpectedly updated lambda configuration when only code changed"
    assert(len(lambda_client_double.update_function_configuration.mock_calls) < 2), \

@@ -187,7 +190,8 @@ def test_update_lambda_if_config_changed():
        pass

    # guard against calling other than for a lambda connection (e.g. IAM)
    assert(len(boto3_conn_double.mock_calls) == 1), "multiple boto connections used unexpectedly"
    assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
    assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
    assert(len(lambda_client_double.update_function_configuration.mock_calls) > 0), \
        "failed to update lambda function when configuration changed"
    assert(len(lambda_client_double.update_function_configuration.mock_calls) < 2), \

@@ -208,7 +212,8 @@ def test_update_lambda_if_only_one_config_item_changed():
        pass

    # guard against calling other than for a lambda connection (e.g. IAM)
    assert(len(boto3_conn_double.mock_calls) == 1), "multiple boto connections used unexpectedly"
    assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
    assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
    assert(len(lambda_client_double.update_function_configuration.mock_calls) > 0), \
        "failed to update lambda function when configuration changed"
    assert(len(lambda_client_double.update_function_configuration.mock_calls) < 2), \

@@ -229,7 +234,8 @@ def test_update_lambda_if_added_environment_variable():
        pass

    # guard against calling other than for a lambda connection (e.g. IAM)
    assert(len(boto3_conn_double.mock_calls) == 1), "multiple boto connections used unexpectedly"
    assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
    assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
    assert(len(lambda_client_double.update_function_configuration.mock_calls) > 0), \
        "failed to update lambda function when configuration changed"
    assert(len(lambda_client_double.update_function_configuration.mock_calls) < 2), \

@@ -253,7 +259,8 @@ def test_dont_update_lambda_if_nothing_changed():
        pass

    # guard against calling other than for a lambda connection (e.g. IAM)
    assert(len(boto3_conn_double.mock_calls) == 1), "multiple boto connections used unexpectedly"
    assert(len(boto3_conn_double.mock_calls) > 0), "boto connections never used"
    assert(len(boto3_conn_double.mock_calls) < 2), "multiple boto connections used unexpectedly"
    assert(len(lambda_client_double.update_function_configuration.mock_calls) == 0), \
        "updated lambda function when no configuration changed"
    assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \

171 test/units/modules/cloud/amazon/test_lambda_policy.py Normal file

@@ -0,0 +1,171 @@
#
# (c) 2017 Michael De La Rue
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)

from nose.plugins.skip import SkipTest
import ansible.modules.cloud.amazon.lambda_policy as lambda_policy
from ansible.modules.cloud.amazon.lambda_policy import setup_module_object
from ansible.module_utils.aws.core import HAS_BOTO3
from ansible.module_utils import basic
from ansible.module_utils.basic import to_bytes
from ansible.compat.tests.mock import MagicMock
import json
import copy

from botocore.exceptions import ClientError
# try:
#     from botocore import ResourceNotFoundException
# except:
#     pass  # will be protected by HAS_BOTO3

if not HAS_BOTO3:
    raise SkipTest("test_lambda_policy.py requires the `boto3` and `botocore` modules")

base_module_args = {
    "region": "us-west-1",
    "function_name": "this_is_a_test_function",
    "state": "present",
    "statement_id": "test-allow-lambda",
    "principal": 123456,
    "action": "lambda:*"
}

def set_module_args(mod_args):
    args = json.dumps({'ANSIBLE_MODULE_ARGS': mod_args})
    basic._ANSIBLE_ARGS = to_bytes(args)

def test_module_is_created_sensibly():
    set_module_args(base_module_args)
    module = setup_module_object()
    assert module.params['function_name'] == 'this_is_a_test_function'

module_double = MagicMock()
module_double.fail_json_aws.side_effect = Exception("unexpected call to fail_json_aws")
module_double.check_mode = False
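# module_double stands in for the Ansible module object passed to
# manage_state(); making fail_json_aws raise turns any unexpected module
# failure inside the code under test into a visible test error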
fake_module_params_present = {
    "state": "present",
    "statement_id": "test-allow-lambda",
    "principal": "apigateway.amazonaws.com",
    "action": "lambda:InvokeFunction",
    "source_arn": u'arn:aws:execute-api:us-east-1:123456789:efghijklmn/authorizers/*',
    "version": 0,
    "alias": None
}
fake_module_params_different = copy.deepcopy(fake_module_params_present)
fake_module_params_different["action"] = "lambda:api-gateway"
fake_module_params_absent = copy.deepcopy(fake_module_params_present)
fake_module_params_absent["state"] = "absent"
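# three parameter fixtures drive the manage_state tests below: the statement
# exactly as it already exists in fake_policy_return, the same statement with
# a different action, and a request for the statement to be absent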
fake_policy_return = {
    u'Policy': (
        u'{"Version":"2012-10-17","Id":"default","Statement":[{"Sid":"1234567890abcdef1234567890abcdef",'
        u'"Effect":"Allow","Principal":{"Service":"apigateway.amazonaws.com"},"Action":"lambda:InvokeFunction",'
        u'"Resource":"arn:aws:lambda:us-east-1:123456789:function:test_authorizer",'
        u'"Condition":{"ArnLike":{"AWS:SourceArn":"arn:aws:execute-api:us-east-1:123456789:abcdefghij/authorizers/1a2b3c"}}},'
        u'{"Sid":"2234567890abcdef1234567890abcdef","Effect":"Allow","Principal":{"Service":"apigateway.amazonaws.com"},'
        u'"Action":"lambda:InvokeFunction","Resource":"arn:aws:lambda:us-east-1:123456789:function:test_authorizer",'
        u'"Condition":{"ArnLike":{"AWS:SourceArn":"arn:aws:execute-api:us-east-1:123456789:klmnopqrst/authorizers/4d5f6g"}}},'
        u'{"Sid":"1234567890abcdef1234567890abcdef","Effect":"Allow","Principal":{"Service":"apigateway.amazonaws.com"},'
        u'"Action":"lambda:InvokeFunction","Resource":"arn:aws:lambda:us-east-1:123456789:function:test_authorizer",'
        u'"Condition":{"ArnLike":{"AWS:SourceArn":"arn:aws:execute-api:eu-west-1:123456789:uvwxyzabcd/authorizers/7h8i9j"}}},'
        u'{"Sid":"test-allow-lambda","Effect":"Allow","Principal":{"Service":"apigateway.amazonaws.com"},'
        u'"Action":"lambda:InvokeFunction","Resource":"arn:aws:lambda:us-east-1:123456789:function:test_authorizer",'
        u'"Condition":{"ArnLike":{"AWS:SourceArn":"arn:aws:execute-api:us-east-1:123456789:efghijklmn/authorizers/*"}}},'
        u'{"Sid":"1234567890abcdef1234567890abcdef","Effect":"Allow","Principal":{"Service":"apigateway.amazonaws.com"},'
        u'"Action":"lambda:InvokeFunction","Resource":"arn:aws:lambda:us-east-1:123456789:function:test_authorizer",'
        u'"Condition":{"ArnLike":{"AWS:SourceArn":"arn:aws:execute-api:us-east-1:123456789:opqrstuvwx/authorizers/0k1l2m"}}}]}'),
    'ResponseMetadata': {
        'RetryAttempts': 0,
        'HTTPStatusCode': 200,
        'RequestId': 'abcdefgi-1234-a567-b890-123456789abc',
        'HTTPHeaders': {
            'date': 'Sun, 13 Aug 2017 10:54:17 GMT',
            'x-amzn-requestid': 'abcdefgi-1234-a567-b890-123456789abc',
            'content-length': '1878',
            'content-type': 'application/json',
            'connection': 'keep-alive'}}}
error_response = {'Error': {'Code': 'ResourceNotFoundException', 'Message': 'Fake Testing Error'}}
operation_name = 'FakeOperation'
resource_not_found_e = ClientError(error_response, operation_name)

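# a missing function policy is reported by get_policy as a
# ResourceNotFoundException ClientError, so the tests below use this exception
# as a side_effect to simulate "no policy present" and fake_policy_return to
# simulate an existing policy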
def test_manage_state_adds_missing_permissions():
    lambda_client_double = MagicMock()
    # Policy actually: not present   Requested State: present   Should: create
    lambda_client_double.get_policy.side_effect = resource_not_found_e
    fake_module_params = copy.deepcopy(fake_module_params_present)
    module_double.params = fake_module_params
    lambda_policy.manage_state(module_double, lambda_client_double)
    assert lambda_client_double.get_policy.call_count > 0
    assert lambda_client_double.add_permission.call_count > 0
    lambda_client_double.remove_permission.assert_not_called()

def test_manage_state_leaves_existing_permissions():
    lambda_client_double = MagicMock()
    # Policy actually: present   Requested State: present   Should: do nothing
    lambda_client_double.get_policy.return_value = fake_policy_return
    fake_module_params = copy.deepcopy(fake_module_params_present)
    module_double.params = fake_module_params
    lambda_policy.manage_state(module_double, lambda_client_double)
    assert lambda_client_double.get_policy.call_count > 0
    lambda_client_double.add_permission.assert_not_called()
    lambda_client_double.remove_permission.assert_not_called()

def test_manage_state_updates_nonmatching_permissions():
    lambda_client_double = MagicMock()
    # Policy actually: present but not matching   Requested State: present   Should: remove and re-add
    lambda_client_double.get_policy.return_value = fake_policy_return
    fake_module_params = copy.deepcopy(fake_module_params_different)
    module_double.params = fake_module_params
    lambda_policy.manage_state(module_double, lambda_client_double)
    assert lambda_client_double.get_policy.call_count > 0
    assert lambda_client_double.add_permission.call_count > 0
    assert lambda_client_double.remove_permission.call_count > 0

def test_manage_state_removes_unwanted_permissions():
    lambda_client_double = MagicMock()
    # Policy actually: present   Requested State: not present   Should: remove
    lambda_client_double.get_policy.return_value = fake_policy_return
    fake_module_params = copy.deepcopy(fake_module_params_absent)
    module_double.params = fake_module_params
    lambda_policy.manage_state(module_double, lambda_client_double)
    assert lambda_client_double.get_policy.call_count > 0
    lambda_client_double.add_permission.assert_not_called()
    assert lambda_client_double.remove_permission.call_count > 0

def test_manage_state_leaves_already_removed_permissions():
    lambda_client_double = MagicMock()
    # Policy actually: absent   Requested State: absent   Should: do nothing
    lambda_client_double.get_policy.side_effect = resource_not_found_e
    fake_module_params = copy.deepcopy(fake_module_params_absent)
    module_double.params = fake_module_params
    lambda_policy.manage_state(module_double, lambda_client_double)
    assert lambda_client_double.get_policy.call_count > 0
    lambda_client_double.add_permission.assert_not_called()
    lambda_client_double.remove_permission.assert_not_called()