Replace incidental tests with intentional argspec tests (#72370)
* Remove incidental_consul tests (#71811)
  * Add explicit integration tests for argspec functionality
  * Remove incidental_consul and incidental_setup_openssl
  (cherry picked from commit a99212464c)
* Remove incidental_nios_txt_record (#72009)
  * Add explicit coverage of argspec type=dict, including non-string mapping failures
  * Remove incidental_nios_txt_record and associated files, including the ignore.txt changes
  (cherry picked from commit 6f4aed5377)
* Remove incidental_vyos_static_route (#72024)
  * Add explicit tests for required_together suboptions
  * Add explicit coverage of suboptions required_if
  * Remove incidental_vyos_static_route and incidental_vyos_logging
  (cherry picked from commit 9081b22868)
* More explicit argspec tests (#72064)
  * Add more explicit coverage of argspec functionality, including fail_on_missing_params
  * Remove incidental_aws_step_functions_state_machine and incidental_cs_service_offering
  (cherry picked from commit ab2b339dd6)
* Add explicit coverage of required_together (#72107)
  * Remove incidental_hcloud_server and drop hcloud from the Shippable matrix
  (cherry picked from commit 460ba041c8)
* Add explicit coverage of suboptions=list without elements (#72108)
  * Remove incidental_vmware_guest_custom_attributes
  (cherry picked from commit 50c8c87fe2)
* Add explicit coverage of argspec choices with strings that shadow YAML bools (#72122)
  * Remove incidental_ufw
  (cherry picked from commit cfa41898c4)
* Add argspec tests for required, required_one_of and required_by (#72245)
  * Improve variable names
  (cherry picked from commit 1489bf9190)
* Remove incidentals without coverage (#71788)
  * Remove incidental_lookup_hashi_vault, incidental_connection_chroot, incidental_selinux and incidental_win_hosts
  (cherry picked from commit e6e9840717)

Co-authored-by: Matt Martz <matt@sivel.net>
Co-authored-by: Felix Fontein <felix@fontein.de>
Parent: dcae6d20d2
Commit: 522f167d27
109 changed files with 431 additions and 15567 deletions
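Of the argspec behaviours called out in the commit message, the "choices with strings that shadow YAML bools" case is the least obvious: unquoted values such as on and off are loaded by the YAML parser as booleans before the module ever sees them, and the tests added below assert that the str/choices validation still reports the string value. A minimal illustrative sketch, using the argspec test module and option name introduced by this commit:

    - argspec:
        # unquoted: the YAML loader turns this into boolean true; the
        # tests below assert the module still reports the string 'on'
        choices_with_strings_like_bools: on

    - argspec:
        # quoted: loaded as the literal string 'on'
        choices_with_strings_like_bools: 'on'

The integration tasks in test/integration/targets/argspec/tasks/main.yml register both spellings and check the returned value in each case.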
Shippable test matrix:

@@ -140,7 +140,6 @@ matrix:
    - env: T=i/cs//1
    - env: T=i/tower//1
    - env: T=i/cloud//1
-   - env: T=i/hcloud//1

branches:
  except:
test/integration/targets/argspec/aliases (new file)
@@ -0,0 +1 @@
shippable/posix/group5
test/integration/targets/argspec/library/argspec.py (new file)
@@ -0,0 +1,118 @@
#!/usr/bin/python
# Copyright: (c) 2020, Matt Martz <matt@sivel.net>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible.module_utils.basic import AnsibleModule


def main():
    module = AnsibleModule(
        {
            'required': {
                'required': True,
            },
            'required_one_of_one': {},
            'required_one_of_two': {},
            'required_by_one': {},
            'required_by_two': {},
            'required_by_three': {},
            'state': {
                'type': 'str',
                'choices': ['absent', 'present'],
            },
            'path': {},
            'content': {},
            'mapping': {
                'type': 'dict',
            },
            'required_one_of': {
                'required_one_of': [['thing', 'other']],
                'type': 'list',
                'elements': 'dict',
                'options': {
                    'thing': {},
                    'other': {},
                },
            },
            'required_by': {
                'required_by': {'thing': 'other'},
                'type': 'list',
                'elements': 'dict',
                'options': {
                    'thing': {},
                    'other': {},
                },
            },
            'required_together': {
                'required_together': [['thing', 'other']],
                'type': 'list',
                'elements': 'dict',
                'options': {
                    'thing': {},
                    'other': {},
                    'another': {},
                },
            },
            'required_if': {
                'required_if': (
                    ('thing', 'foo', ('other',), True),
                ),
                'type': 'list',
                'elements': 'dict',
                'options': {
                    'thing': {},
                    'other': {},
                    'another': {},
                },
            },
            'json': {
                'type': 'json',
            },
            'fail_on_missing_params': {
                'type': 'list',
                'default': [],
            },
            'needed_param': {},
            'required_together_one': {},
            'required_together_two': {},
            'suboptions_list_no_elements': {
                'type': 'list',
                'options': {
                    'thing': {},
                },
            },
            'choices_with_strings_like_bools': {
                'type': 'str',
                'choices': [
                    'on',
                    'off',
                ],
            },
        },
        required_if=(
            ('state', 'present', ('path', 'content'), True),
        ),
        mutually_exclusive=(
            ('path', 'content'),
        ),
        required_one_of=(
            ('required_one_of_one', 'required_one_of_two'),
        ),
        required_by={
            'required_by_one': ('required_by_two', 'required_by_three'),
        },
        required_together=(
            ('required_together_one', 'required_together_two'),
        ),
    )

    module.fail_on_missing_params(module.params['fail_on_missing_params'])

    module.exit_json(**module.params)


if __name__ == '__main__':
    main()
test/integration/targets/argspec/tasks/main.yml (new file)
@@ -0,0 +1,312 @@
- argspec:
    required: value
    required_one_of_one: value

- argspec:
    required_one_of_one: value
  register: argspec_required_fail
  ignore_errors: true

- argspec:
    required: value
    required_one_of_two: value

- argspec:
    required: value
  register: argspec_required_one_of_fail
  ignore_errors: true

- argspec:
    required: value
    required_one_of_two: value
    required_by_one: value
    required_by_two: value
    required_by_three: value

- argspec:
    required: value
    required_one_of_two: value
    required_by_one: value
    required_by_two: value
  register: argspec_required_by_fail
  ignore_errors: true

- argspec:
    state: absent
    required: value
    required_one_of_one: value

- argspec:
    state: present
    required: value
    required_one_of_one: value
  register: argspec_required_if_fail
  ignore_errors: true

- argspec:
    state: present
    path: foo
    required: value
    required_one_of_one: value

- argspec:
    state: present
    content: foo
    required: value
    required_one_of_one: value

- argspec:
    state: present
    content: foo
    path: foo
    required: value
    required_one_of_one: value
  register: argspec_mutually_exclusive_fail
  ignore_errors: true

- argspec:
    mapping:
      foo: bar
    required: value
    required_one_of_one: value
  register: argspec_good_mapping

- argspec:
    mapping: foo=bar
    required: value
    required_one_of_one: value
  register: argspec_good_mapping_kv

- argspec:
    mapping: !!str '{"foo": "bar"}'
    required: value
    required_one_of_one: value
  register: argspec_good_mapping_json

- argspec:
    mapping: foo
    required: value
    required_one_of_one: value
  register: argspec_bad_mapping_string
  ignore_errors: true

- argspec:
    mapping: 1
    required: value
    required_one_of_one: value
  register: argspec_bad_mapping_int
  ignore_errors: true

- argspec:
    mapping:
      - foo
      - bar
    required: value
    required_one_of_one: value
  register: argspec_bad_mapping_list
  ignore_errors: true

- argspec:
    required_together:
      - thing: foo
        other: bar
        another: baz
    required: value
    required_one_of_one: value

- argspec:
    required_together:
      - another: baz
    required: value
    required_one_of_one: value

- argspec:
    required_together:
      - thing: foo
    required: value
    required_one_of_one: value
  register: argspec_required_together_fail
  ignore_errors: true

- argspec:
    required_together:
      - thing: foo
        other: bar
    required: value
    required_one_of_one: value

- argspec:
    required_if:
      - thing: bar
    required: value
    required_one_of_one: value

- argspec:
    required_if:
      - thing: foo
        other: bar
    required: value
    required_one_of_one: value

- argspec:
    required_if:
      - thing: foo
    required: value
    required_one_of_one: value
  register: argspec_required_if_fail_2
  ignore_errors: true

- argspec:
    required_one_of:
      - thing: foo
        other: bar
    required: value
    required_one_of_one: value

- argspec:
    required_one_of:
      - {}
    required: value
    required_one_of_one: value
  register: argspec_required_one_of_fail_2
  ignore_errors: true

- argspec:
    required_by:
      - thing: foo
        other: bar
    required: value
    required_one_of_one: value

- argspec:
    required_by:
      - thing: foo
    required: value
    required_one_of_one: value
  register: argspec_required_by_fail_2
  ignore_errors: true

- argspec:
    json: !!str '{"foo": "bar"}'
    required: value
    required_one_of_one: value
  register: argspec_good_json_string

- argspec:
    json:
      foo: bar
    required: value
    required_one_of_one: value
  register: argspec_good_json_dict

- argspec:
    json: 1
    required: value
    required_one_of_one: value
  register: argspec_bad_json
  ignore_errors: true

- argspec:
    fail_on_missing_params:
      - needed_param
    needed_param: whatever
    required: value
    required_one_of_one: value

- argspec:
    fail_on_missing_params:
      - needed_param
    required: value
    required_one_of_one: value
  register: argspec_fail_on_missing_params_bad
  ignore_errors: true

- argspec:
    required_together_one: foo
    required_together_two: bar
    required: value
    required_one_of_one: value

- argspec:
    required_together_one: foo
    required: value
    required_one_of_one: value
  register: argspec_fail_required_together_2
  ignore_errors: true

- argspec:
    suboptions_list_no_elements:
      - thing: foo
    required: value
    required_one_of_one: value
  register: argspec_suboptions_list_no_elements

- argspec:
    choices_with_strings_like_bools: on
    required: value
    required_one_of_one: value
  register: argspec_choices_with_strings_like_bools_true

- argspec:
    choices_with_strings_like_bools: 'on'
    required: value
    required_one_of_one: value
  register: argspec_choices_with_strings_like_bools_true_bool

- argspec:
    choices_with_strings_like_bools: off
    required: value
    required_one_of_one: value
  register: argspec_choices_with_strings_like_bools_false

- assert:
    that:
      - argspec_required_fail is failed

      - argspec_required_one_of_fail is failed

      - argspec_required_by_fail is failed

      - argspec_required_if_fail is failed

      - argspec_mutually_exclusive_fail is failed

      - argspec_good_mapping is successful
      - >-
        argspec_good_mapping.mapping == {'foo': 'bar'}
      - argspec_good_mapping_json is successful
      - >-
        argspec_good_mapping_json.mapping == {'foo': 'bar'}
      - argspec_good_mapping_kv is successful
      - >-
        argspec_good_mapping_kv.mapping == {'foo': 'bar'}
      - argspec_bad_mapping_string is failed
      - argspec_bad_mapping_int is failed
      - argspec_bad_mapping_list is failed

      - argspec_required_together_fail is failed

      - argspec_required_if_fail_2 is failed

      - argspec_required_one_of_fail_2 is failed

      - argspec_required_by_fail_2 is failed

      - argspec_good_json_string is successful
      - >-
        argspec_good_json_string.json == '{"foo": "bar"}'
      - argspec_good_json_dict is successful
      - >-
        argspec_good_json_dict.json == '{"foo": "bar"}'
      - argspec_bad_json is failed

      - argspec_fail_on_missing_params_bad is failed

      - argspec_fail_required_together_2 is failed

      - >-
        argspec_suboptions_list_no_elements.suboptions_list_no_elements.0 == {'thing': 'foo'}

      - argspec_choices_with_strings_like_bools_true.choices_with_strings_like_bools == 'on'
      - argspec_choices_with_strings_like_bools_true_bool.choices_with_strings_like_bools == 'on'
      - argspec_choices_with_strings_like_bools_false.choices_with_strings_like_bools == 'off'
@ -1,2 +0,0 @@
|
|||
cloud/aws
|
||||
shippable/aws/incidental
|
|
@ -1,4 +0,0 @@
|
|||
# the random_num is generated in a set_fact task at the start of the testsuite
|
||||
state_machine_name: "{{ resource_prefix }}_step_functions_state_machine_ansible_test_{{ random_num }}"
|
||||
step_functions_role_name: "ansible-test-sts-{{ resource_prefix }}-step_functions-role"
|
||||
execution_name: "{{ resource_prefix }}_sfn_execution"
|
|
@ -1,15 +0,0 @@
|
|||
{
|
||||
"StartAt": "HelloWorld",
|
||||
"States": {
|
||||
"HelloWorld": {
|
||||
"Type": "Pass",
|
||||
"Result": "Some other result",
|
||||
"Next": "Wait"
|
||||
},
|
||||
"Wait": {
|
||||
"Type": "Wait",
|
||||
"Seconds": 30,
|
||||
"End": true
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,10 +0,0 @@
|
|||
{
|
||||
"StartAt": "HelloWorld",
|
||||
"States": {
|
||||
"HelloWorld": {
|
||||
"Type": "Pass",
|
||||
"Result": "Hello World!",
|
||||
"End": true
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,12 +0,0 @@
|
|||
{
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Principal": {
|
||||
"Service": "states.amazonaws.com"
|
||||
},
|
||||
"Action": "sts:AssumeRole"
|
||||
}
|
||||
]
|
||||
}
|
|
@ -1,296 +0,0 @@
|
|||
---
|
||||
|
||||
- name: Integration test for AWS Step Function state machine module
|
||||
module_defaults:
|
||||
iam_role:
|
||||
aws_access_key: "{{ aws_access_key }}"
|
||||
aws_secret_key: "{{ aws_secret_key }}"
|
||||
security_token: "{{ security_token | default(omit) }}"
|
||||
region: "{{ aws_region }}"
|
||||
aws_step_functions_state_machine:
|
||||
aws_access_key: "{{ aws_access_key }}"
|
||||
aws_secret_key: "{{ aws_secret_key }}"
|
||||
security_token: "{{ security_token | default(omit) }}"
|
||||
region: "{{ aws_region }}"
|
||||
aws_step_functions_state_machine_execution:
|
||||
aws_access_key: "{{ aws_access_key }}"
|
||||
aws_secret_key: "{{ aws_secret_key }}"
|
||||
security_token: "{{ security_token | default(omit) }}"
|
||||
region: "{{ aws_region }}"
|
||||
block:
|
||||
|
||||
# ==== Setup ==================================================
|
||||
|
||||
- name: Create IAM service role needed for Step Functions
|
||||
iam_role:
|
||||
name: "{{ step_functions_role_name }}"
|
||||
description: Role with permissions for AWS Step Functions actions.
|
||||
assume_role_policy_document: "{{ lookup('file', 'state_machines_iam_trust_policy.json') }}"
|
||||
state: present
|
||||
register: step_functions_role
|
||||
|
||||
- name: Pause a few seconds to ensure IAM role is available to next task
|
||||
pause:
|
||||
seconds: 10
|
||||
|
||||
# ==== Tests ===================================================
|
||||
|
||||
- name: Create a random component for state machine name
|
||||
set_fact:
|
||||
random_num: "{{ 999999999 | random }}"
|
||||
|
||||
- name: Create a new state machine -- check_mode
|
||||
aws_step_functions_state_machine:
|
||||
name: "{{ state_machine_name }}"
|
||||
definition: "{{ lookup('file','state_machine.json') }}"
|
||||
role_arn: "{{ step_functions_role.iam_role.arn }}"
|
||||
tags:
|
||||
project: helloWorld
|
||||
state: present
|
||||
register: creation_check
|
||||
check_mode: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- creation_check.changed == True
|
||||
- creation_check.output == 'State machine would be created.'
|
||||
|
||||
- name: Create a new state machine
|
||||
aws_step_functions_state_machine:
|
||||
name: "{{ state_machine_name }}"
|
||||
definition: "{{ lookup('file','state_machine.json') }}"
|
||||
role_arn: "{{ step_functions_role.iam_role.arn }}"
|
||||
tags:
|
||||
project: helloWorld
|
||||
state: present
|
||||
register: creation_output
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- creation_output.changed == True
|
||||
|
||||
- name: Pause a few seconds to ensure state machine role is available
|
||||
pause:
|
||||
seconds: 5
|
||||
|
||||
- name: Idempotent rerun of same state function -- check_mode
|
||||
aws_step_functions_state_machine:
|
||||
name: "{{ state_machine_name }}"
|
||||
definition: "{{ lookup('file','state_machine.json') }}"
|
||||
role_arn: "{{ step_functions_role.iam_role.arn }}"
|
||||
tags:
|
||||
project: helloWorld
|
||||
state: present
|
||||
register: result
|
||||
check_mode: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.changed == False
|
||||
- result.output == 'State is up-to-date.'
|
||||
|
||||
- name: Idempotent rerun of same state function
|
||||
aws_step_functions_state_machine:
|
||||
name: "{{ state_machine_name }}"
|
||||
definition: "{{ lookup('file','state_machine.json') }}"
|
||||
role_arn: "{{ step_functions_role.iam_role.arn }}"
|
||||
tags:
|
||||
project: helloWorld
|
||||
state: present
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.changed == False
|
||||
|
||||
- name: Update an existing state machine -- check_mode
|
||||
aws_step_functions_state_machine:
|
||||
name: "{{ state_machine_name }}"
|
||||
definition: "{{ lookup('file','alternative_state_machine.json') }}"
|
||||
role_arn: "{{ step_functions_role.iam_role.arn }}"
|
||||
tags:
|
||||
differentTag: different_tag
|
||||
state: present
|
||||
register: update_check
|
||||
check_mode: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- update_check.changed == True
|
||||
- "update_check.output == 'State machine would be updated: {{ creation_output.state_machine_arn }}'"
|
||||
|
||||
- name: Update an existing state machine
|
||||
aws_step_functions_state_machine:
|
||||
name: "{{ state_machine_name }}"
|
||||
definition: "{{ lookup('file','alternative_state_machine.json') }}"
|
||||
role_arn: "{{ step_functions_role.iam_role.arn }}"
|
||||
tags:
|
||||
differentTag: different_tag
|
||||
state: present
|
||||
register: update_output
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- update_output.changed == True
|
||||
- update_output.state_machine_arn == creation_output.state_machine_arn
|
||||
|
||||
- name: Start execution of state machine -- check_mode
|
||||
aws_step_functions_state_machine_execution:
|
||||
name: "{{ execution_name }}"
|
||||
execution_input: "{}"
|
||||
state_machine_arn: "{{ creation_output.state_machine_arn }}"
|
||||
register: start_execution_output
|
||||
check_mode: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- start_execution_output.changed == True
|
||||
- "start_execution_output.output == 'State machine execution would be started.'"
|
||||
|
||||
- name: Start execution of state machine
|
||||
aws_step_functions_state_machine_execution:
|
||||
name: "{{ execution_name }}"
|
||||
execution_input: "{}"
|
||||
state_machine_arn: "{{ creation_output.state_machine_arn }}"
|
||||
register: start_execution_output
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- start_execution_output.changed
|
||||
- "'execution_arn' in start_execution_output"
|
||||
- "'start_date' in start_execution_output"
|
||||
|
||||
- name: Start execution of state machine (check for idempotency) (check mode)
|
||||
aws_step_functions_state_machine_execution:
|
||||
name: "{{ execution_name }}"
|
||||
execution_input: "{}"
|
||||
state_machine_arn: "{{ creation_output.state_machine_arn }}"
|
||||
register: start_execution_output_idem_check
|
||||
check_mode: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- not start_execution_output_idem_check.changed
|
||||
- "start_execution_output_idem_check.output == 'State machine execution already exists.'"
|
||||
|
||||
- name: Start execution of state machine (check for idempotency)
|
||||
aws_step_functions_state_machine_execution:
|
||||
name: "{{ execution_name }}"
|
||||
execution_input: "{}"
|
||||
state_machine_arn: "{{ creation_output.state_machine_arn }}"
|
||||
register: start_execution_output_idem
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- not start_execution_output_idem.changed
|
||||
|
||||
- name: Stop execution of state machine -- check_mode
|
||||
aws_step_functions_state_machine_execution:
|
||||
action: stop
|
||||
execution_arn: "{{ start_execution_output.execution_arn }}"
|
||||
cause: "cause of the failure"
|
||||
error: "error code of the failure"
|
||||
register: stop_execution_output
|
||||
check_mode: yes
|
||||
|
||||
- name: Stop execution of state machine
|
||||
aws_step_functions_state_machine_execution:
|
||||
action: stop
|
||||
execution_arn: "{{ start_execution_output.execution_arn }}"
|
||||
cause: "cause of the failure"
|
||||
error: "error code of the failure"
|
||||
register: stop_execution_output
|
||||
|
||||
- name: Stop execution of state machine (check for idempotency)
|
||||
aws_step_functions_state_machine_execution:
|
||||
action: stop
|
||||
execution_arn: "{{ start_execution_output.execution_arn }}"
|
||||
cause: "cause of the failure"
|
||||
error: "error code of the failure"
|
||||
register: stop_execution_output
|
||||
|
||||
- name: Try stopping a non-running execution -- check_mode
|
||||
aws_step_functions_state_machine_execution:
|
||||
action: stop
|
||||
execution_arn: "{{ start_execution_output.execution_arn }}"
|
||||
cause: "cause of the failure"
|
||||
error: "error code of the failure"
|
||||
register: stop_execution_output
|
||||
check_mode: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- not stop_execution_output.changed
|
||||
- "stop_execution_output.output == 'State machine execution is not running.'"
|
||||
|
||||
- name: Try stopping a non-running execution
|
||||
aws_step_functions_state_machine_execution:
|
||||
action: stop
|
||||
execution_arn: "{{ start_execution_output.execution_arn }}"
|
||||
cause: "cause of the failure"
|
||||
error: "error code of the failure"
|
||||
register: stop_execution_output
|
||||
check_mode: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- not stop_execution_output.changed
|
||||
|
||||
- name: Start execution of state machine with the same execution name
|
||||
aws_step_functions_state_machine_execution:
|
||||
name: "{{ execution_name }}"
|
||||
state_machine_arn: "{{ creation_output.state_machine_arn }}"
|
||||
register: start_execution_output_again
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- not start_execution_output_again.changed
|
||||
|
||||
- name: Remove state machine -- check_mode
|
||||
aws_step_functions_state_machine:
|
||||
name: "{{ state_machine_name }}"
|
||||
state: absent
|
||||
register: deletion_check
|
||||
check_mode: yes
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- deletion_check.changed == True
|
||||
- "deletion_check.output == 'State machine would be deleted: {{ creation_output.state_machine_arn }}'"
|
||||
|
||||
- name: Remove state machine
|
||||
aws_step_functions_state_machine:
|
||||
name: "{{ state_machine_name }}"
|
||||
state: absent
|
||||
register: deletion_output
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- deletion_output.changed == True
|
||||
- deletion_output.state_machine_arn == creation_output.state_machine_arn
|
||||
|
||||
- name: Non-existent state machine is absent
|
||||
aws_step_functions_state_machine:
|
||||
name: "non_existing_state_machine"
|
||||
state: absent
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result.changed == False
|
||||
|
||||
# ==== Cleanup ====================================================
|
||||
|
||||
always:
|
||||
|
||||
- name: Cleanup - delete state machine
|
||||
aws_step_functions_state_machine:
|
||||
name: "{{ state_machine_name }}"
|
||||
state: absent
|
||||
ignore_errors: true
|
||||
|
||||
- name: Cleanup - delete IAM role needed for Step Functions test
|
||||
iam_role:
|
||||
name: "{{ step_functions_role_name }}"
|
||||
state: absent
|
||||
ignore_errors: true
|
|
@ -1,3 +0,0 @@
|
|||
needs/root
|
||||
shippable/posix/incidental
|
||||
needs/target/connection
|
|
@ -1,18 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
set -eux
|
||||
|
||||
# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir.
|
||||
# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix.
|
||||
|
||||
group=$(python -c \
|
||||
"from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('incidental_connection_', ''))")
|
||||
|
||||
cd ../connection
|
||||
|
||||
INVENTORY="../incidental_connection_${group}/test_connection.inventory" ./test.sh \
|
||||
-e target_hosts="${group}" \
|
||||
-e action_prefix= \
|
||||
-e local_tmp=/tmp/ansible-local \
|
||||
-e remote_tmp=/tmp/ansible-remote \
|
||||
"$@"
|
|
@ -1,7 +0,0 @@
|
|||
[chroot]
|
||||
chroot-pipelining ansible_ssh_pipelining=true
|
||||
chroot-no-pipelining ansible_ssh_pipelining=false
|
||||
[chroot:vars]
|
||||
ansible_host=/
|
||||
ansible_connection=chroot
|
||||
ansible_python_interpreter="{{ ansible_playbook_python }}"
|
|
@ -1,4 +0,0 @@
|
|||
shippable/posix/incidental
|
||||
destructive
|
||||
skip/aix
|
||||
skip/power/centos
|
|
@ -1,3 +0,0 @@
|
|||
---
|
||||
dependencies:
|
||||
- incidental_setup_openssl
|
|
@ -1,162 +0,0 @@
|
|||
- name: list sessions
|
||||
consul_session:
|
||||
state: list
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- "'sessions' in result"
|
||||
|
||||
- name: create a session
|
||||
consul_session:
|
||||
state: present
|
||||
name: testsession
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result['name'] == 'testsession'
|
||||
- "'session_id' in result"
|
||||
|
||||
- set_fact:
|
||||
session_id: "{{ result['session_id'] }}"
|
||||
|
||||
- name: list sessions after creation
|
||||
consul_session:
|
||||
state: list
|
||||
register: result
|
||||
|
||||
- set_fact:
|
||||
session_count: "{{ result['sessions'] | length }}"
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
# selectattr not available on Jinja 2.2 provided by CentOS 6
|
||||
# hence the two following tasks (set_fact/assert) are used
|
||||
# - (result['sessions'] | selectattr('ID', 'match', '^' ~ session_id ~ '$') | first)['Name'] == 'testsession'
|
||||
|
||||
- name: search created session
|
||||
set_fact:
|
||||
test_session_found: True
|
||||
loop: "{{ result['sessions'] }}"
|
||||
when: "item.get('ID') == session_id and item.get('Name') == 'testsession'"
|
||||
|
||||
- name: ensure session was created
|
||||
assert:
|
||||
that:
|
||||
- test_session_found|default(False)
|
||||
|
||||
- name: fetch info about a session
|
||||
consul_session:
|
||||
state: info
|
||||
id: '{{ session_id }}'
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
|
||||
- name: ensure 'id' parameter is required when state=info
|
||||
consul_session:
|
||||
state: info
|
||||
name: test
|
||||
register: result
|
||||
ignore_errors: True
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is failed
|
||||
|
||||
- name: ensure unknown scheme fails
|
||||
consul_session:
|
||||
state: info
|
||||
id: '{{ session_id }}'
|
||||
scheme: non_existent
|
||||
register: result
|
||||
ignore_errors: True
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is failed
|
||||
|
||||
- when: pyopenssl_version.stdout is version('0.15', '>=')
|
||||
block:
|
||||
- name: ensure SSL certificate is checked
|
||||
consul_session:
|
||||
state: info
|
||||
id: '{{ session_id }}'
|
||||
port: 8501
|
||||
scheme: https
|
||||
register: result
|
||||
ignore_errors: True
|
||||
|
||||
- name: previous task should fail since certificate is not known
|
||||
assert:
|
||||
that:
|
||||
- result is failed
|
||||
- "'certificate verify failed' in result.msg"
|
||||
|
||||
- name: ensure SSL certificate isn't checked when validate_certs is disabled
|
||||
consul_session:
|
||||
state: info
|
||||
id: '{{ session_id }}'
|
||||
port: 8501
|
||||
scheme: https
|
||||
validate_certs: False
|
||||
register: result
|
||||
|
||||
- name: previous task should succeed since certificate isn't checked
|
||||
assert:
|
||||
that:
|
||||
- result is changed
|
||||
|
||||
- name: ensure a secure connection is possible
|
||||
consul_session:
|
||||
state: info
|
||||
id: '{{ session_id }}'
|
||||
port: 8501
|
||||
scheme: https
|
||||
environment:
|
||||
REQUESTS_CA_BUNDLE: '{{ remote_dir }}/cert.pem'
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
|
||||
- name: delete a session
|
||||
consul_session:
|
||||
state: absent
|
||||
id: '{{ session_id }}'
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
|
||||
- name: list sessions after deletion
|
||||
consul_session:
|
||||
state: list
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- result is changed
|
||||
# selectattr and equalto not available on Jinja 2.2 provided by CentOS 6
|
||||
# hence the two following tasks (command/assert) are used
|
||||
# - (result['sessions'] | selectattr('ID', 'equalto', session_id) | list | length) == 0
|
||||
|
||||
- name: search deleted session
|
||||
command: echo 'session found'
|
||||
loop: "{{ result['sessions'] }}"
|
||||
when: "item.get('ID') == session_id and item.get('Name') == 'testsession'"
|
||||
register: search_deleted
|
||||
|
||||
- name: ensure session was deleted
|
||||
assert:
|
||||
that:
|
||||
- search_deleted is skipped # each iteration is skipped
|
||||
- search_deleted is not changed # and then unchanged
|
|
@ -1,97 +0,0 @@
|
|||
---
|
||||
- name: Install Consul and test
|
||||
|
||||
vars:
|
||||
consul_version: '1.5.0'
|
||||
consul_uri: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/consul/consul_{{ consul_version }}_{{ ansible_system | lower }}_{{ consul_arch }}.zip
|
||||
consul_cmd: '{{ output_dir }}/consul'
|
||||
|
||||
block:
|
||||
- name: register pyOpenSSL version
|
||||
command: "{{ ansible_python_interpreter }} -c 'import OpenSSL; print(OpenSSL.__version__)'"
|
||||
register: pyopenssl_version
|
||||
|
||||
- name: Install requests<2.20 (CentOS/RHEL 6)
|
||||
pip:
|
||||
name: requests<2.20
|
||||
register: result
|
||||
until: result is success
|
||||
when: ansible_distribution_file_variety|default() == 'RedHat' and ansible_distribution_major_version is version('6', '<=')
|
||||
|
||||
- name: Install python-consul
|
||||
pip:
|
||||
name: python-consul
|
||||
register: result
|
||||
until: result is success
|
||||
|
||||
- when: pyopenssl_version.stdout is version('0.15', '>=')
|
||||
block:
|
||||
- name: Generate privatekey
|
||||
openssl_privatekey:
|
||||
path: '{{ output_dir }}/privatekey.pem'
|
||||
|
||||
- name: Generate CSR
|
||||
openssl_csr:
|
||||
path: '{{ output_dir }}/csr.csr'
|
||||
privatekey_path: '{{ output_dir }}/privatekey.pem'
|
||||
subject:
|
||||
commonName: localhost
|
||||
|
||||
- name: Generate selfsigned certificate
|
||||
openssl_certificate:
|
||||
path: '{{ output_dir }}/cert.pem'
|
||||
csr_path: '{{ output_dir }}/csr.csr'
|
||||
privatekey_path: '{{ output_dir }}/privatekey.pem'
|
||||
provider: selfsigned
|
||||
selfsigned_digest: sha256
|
||||
register: selfsigned_certificate
|
||||
|
||||
- name: 'Install unzip'
|
||||
package:
|
||||
name: unzip
|
||||
register: result
|
||||
until: result is success
|
||||
when: ansible_distribution != "MacOSX" # unzip already installed
|
||||
|
||||
- assert:
|
||||
# Linux: x86_64, FreeBSD: amd64
|
||||
that: ansible_architecture in ['i386', 'x86_64', 'amd64']
|
||||
- set_fact:
|
||||
consul_arch: '386'
|
||||
when: ansible_architecture == 'i386'
|
||||
- set_fact:
|
||||
consul_arch: amd64
|
||||
when: ansible_architecture in ['x86_64', 'amd64']
|
||||
|
||||
- name: 'Download consul binary'
|
||||
unarchive:
|
||||
src: '{{ consul_uri }}'
|
||||
dest: '{{ output_dir }}'
|
||||
remote_src: true
|
||||
register: result
|
||||
until: result is success
|
||||
|
||||
- vars:
|
||||
remote_dir: '{{ echo_output_dir.stdout }}'
|
||||
block:
|
||||
- command: 'echo {{ output_dir }}'
|
||||
register: echo_output_dir
|
||||
|
||||
- name: 'Create configuration file'
|
||||
template:
|
||||
src: consul_config.hcl.j2
|
||||
dest: '{{ output_dir }}/consul_config.hcl'
|
||||
|
||||
- name: 'Start Consul (dev mode enabled)'
|
||||
shell: 'nohup {{ consul_cmd }} agent -dev -config-file {{ output_dir }}/consul_config.hcl </dev/null >/dev/null 2>&1 &'
|
||||
|
||||
- name: 'Create some data'
|
||||
command: '{{ consul_cmd }} kv put data/value{{ item }} foo{{ item }}'
|
||||
loop: [1, 2, 3]
|
||||
|
||||
- import_tasks: consul_session.yml
|
||||
|
||||
always:
|
||||
- name: 'Kill consul process'
|
||||
shell: "kill $(cat {{ output_dir }}/consul.pid)"
|
||||
ignore_errors: true
|
|
@ -1,13 +0,0 @@
|
|||
# {{ ansible_managed }}
|
||||
server = true
|
||||
pid_file = "{{ remote_dir }}/consul.pid"
|
||||
ports {
|
||||
http = 8500
|
||||
{% if pyopenssl_version.stdout is version('0.15', '>=') %}
|
||||
https = 8501
|
||||
{% endif %}
|
||||
}
|
||||
{% if pyopenssl_version.stdout is version('0.15', '>=') %}
|
||||
key_file = "{{ remote_dir }}/privatekey.pem"
|
||||
cert_file = "{{ remote_dir }}/cert.pem"
|
||||
{% endif %}
|
|
@ -1,2 +0,0 @@
|
|||
cloud/cs
|
||||
shippable/cs/incidental
|
|
@ -1,3 +0,0 @@
|
|||
---
|
||||
dependencies:
|
||||
- incidental_cs_common
|
|
@ -1,223 +0,0 @@
|
|||
---
|
||||
- name: setup service offering
|
||||
cs_service_offering:
|
||||
name: Micro
|
||||
state: absent
|
||||
register: so
|
||||
- name: verify setup service offering
|
||||
assert:
|
||||
that:
|
||||
- so is successful
|
||||
|
||||
- name: create service offering in check mode
|
||||
cs_service_offering:
|
||||
name: Micro
|
||||
display_text: Micro 512mb 1cpu
|
||||
cpu_number: 1
|
||||
cpu_speed: 2198
|
||||
memory: 512
|
||||
host_tags: eco
|
||||
storage_tags:
|
||||
- eco
|
||||
- backup
|
||||
storage_type: local
|
||||
register: so
|
||||
check_mode: true
|
||||
- name: verify create service offering in check mode
|
||||
assert:
|
||||
that:
|
||||
- so is changed
|
||||
|
||||
- name: create service offering
|
||||
cs_service_offering:
|
||||
name: Micro
|
||||
display_text: Micro 512mb 1cpu
|
||||
cpu_number: 1
|
||||
cpu_speed: 2198
|
||||
memory: 512
|
||||
host_tags: eco
|
||||
storage_tags:
|
||||
- eco
|
||||
- backup
|
||||
storage_type: local
|
||||
register: so
|
||||
- name: verify create service offering
|
||||
assert:
|
||||
that:
|
||||
- so is changed
|
||||
- so.name == "Micro"
|
||||
- so.display_text == "Micro 512mb 1cpu"
|
||||
- so.cpu_number == 1
|
||||
- so.cpu_speed == 2198
|
||||
- so.memory == 512
|
||||
- so.host_tags == ['eco']
|
||||
- so.storage_tags == ['eco', 'backup']
|
||||
- so.storage_type == "local"
|
||||
|
||||
- name: create service offering idempotence
|
||||
cs_service_offering:
|
||||
name: Micro
|
||||
display_text: Micro 512mb 1cpu
|
||||
cpu_number: 1
|
||||
cpu_speed: 2198
|
||||
memory: 512
|
||||
host_tags: eco
|
||||
storage_tags:
|
||||
- eco
|
||||
- backup
|
||||
storage_type: local
|
||||
register: so
|
||||
- name: verify create service offering idempotence
|
||||
assert:
|
||||
that:
|
||||
- so is not changed
|
||||
- so.name == "Micro"
|
||||
- so.display_text == "Micro 512mb 1cpu"
|
||||
- so.cpu_number == 1
|
||||
- so.cpu_speed == 2198
|
||||
- so.memory == 512
|
||||
- so.host_tags == ['eco']
|
||||
- so.storage_tags == ['eco', 'backup']
|
||||
- so.storage_type == "local"
|
||||
|
||||
- name: update service offering in check mode
|
||||
cs_service_offering:
|
||||
name: Micro
|
||||
display_text: Micro RAM 512MB 1vCPU
|
||||
register: so
|
||||
check_mode: true
|
||||
- name: verify create update offering in check mode
|
||||
assert:
|
||||
that:
|
||||
- so is changed
|
||||
- so.name == "Micro"
|
||||
- so.display_text == "Micro 512mb 1cpu"
|
||||
- so.cpu_number == 1
|
||||
- so.cpu_speed == 2198
|
||||
- so.memory == 512
|
||||
- so.host_tags == ['eco']
|
||||
- so.storage_tags == ['eco', 'backup']
|
||||
- so.storage_type == "local"
|
||||
|
||||
- name: update service offering
|
||||
cs_service_offering:
|
||||
name: Micro
|
||||
display_text: Micro RAM 512MB 1vCPU
|
||||
register: so
|
||||
- name: verify update service offering
|
||||
assert:
|
||||
that:
|
||||
- so is changed
|
||||
- so.name == "Micro"
|
||||
- so.display_text == "Micro RAM 512MB 1vCPU"
|
||||
- so.cpu_number == 1
|
||||
- so.cpu_speed == 2198
|
||||
- so.memory == 512
|
||||
- so.host_tags == ['eco']
|
||||
- so.storage_tags == ['eco', 'backup']
|
||||
- so.storage_type == "local"
|
||||
|
||||
- name: update service offering idempotence
|
||||
cs_service_offering:
|
||||
name: Micro
|
||||
display_text: Micro RAM 512MB 1vCPU
|
||||
register: so
|
||||
- name: verify update service offering idempotence
|
||||
assert:
|
||||
that:
|
||||
- so is not changed
|
||||
- so.name == "Micro"
|
||||
- so.display_text == "Micro RAM 512MB 1vCPU"
|
||||
- so.cpu_number == 1
|
||||
- so.cpu_speed == 2198
|
||||
- so.memory == 512
|
||||
- so.host_tags == ['eco']
|
||||
- so.storage_tags == ['eco', 'backup']
|
||||
- so.storage_type == "local"
|
||||
|
||||
- name: remove service offering in check mode
|
||||
cs_service_offering:
|
||||
name: Micro
|
||||
state: absent
|
||||
check_mode: true
|
||||
register: so
|
||||
- name: verify remove service offering in check mode
|
||||
assert:
|
||||
that:
|
||||
- so is changed
|
||||
- so.name == "Micro"
|
||||
- so.display_text == "Micro RAM 512MB 1vCPU"
|
||||
- so.cpu_number == 1
|
||||
- so.cpu_speed == 2198
|
||||
- so.memory == 512
|
||||
- so.host_tags == ['eco']
|
||||
- so.storage_tags == ['eco', 'backup']
|
||||
- so.storage_type == "local"
|
||||
|
||||
- name: remove service offering
|
||||
cs_service_offering:
|
||||
name: Micro
|
||||
state: absent
|
||||
register: so
|
||||
- name: verify remove service offering
|
||||
assert:
|
||||
that:
|
||||
- so is changed
|
||||
- so.name == "Micro"
|
||||
- so.display_text == "Micro RAM 512MB 1vCPU"
|
||||
- so.cpu_number == 1
|
||||
- so.cpu_speed == 2198
|
||||
- so.memory == 512
|
||||
- so.host_tags == ['eco']
|
||||
- so.storage_tags == ['eco', 'backup']
|
||||
- so.storage_type == "local"
|
||||
|
||||
- name: remove service offering idempotence
|
||||
cs_service_offering:
|
||||
name: Micro
|
||||
state: absent
|
||||
register: so
|
||||
- name: verify remove service offering idempotence
|
||||
assert:
|
||||
that:
|
||||
- so is not changed
|
||||
|
||||
- name: create custom service offering
|
||||
cs_service_offering:
|
||||
name: custom
|
||||
display_text: custom offer
|
||||
is_customized: yes
|
||||
host_tags: eco
|
||||
storage_tags:
|
||||
- eco
|
||||
- backup
|
||||
storage_type: local
|
||||
register: so
|
||||
- name: verify create custom service offering
|
||||
assert:
|
||||
that:
|
||||
- so is changed
|
||||
- so.name == "custom"
|
||||
- so.display_text == "custom offer"
|
||||
- so.is_customized == True
|
||||
- so.cpu_number is not defined
|
||||
- so.cpu_speed is not defined
|
||||
- so.memory is not defined
|
||||
- so.host_tags == ['eco']
|
||||
- so.storage_tags == ['eco', 'backup']
|
||||
- so.storage_type == "local"
|
||||
|
||||
- name: remove custom service offering
|
||||
cs_service_offering:
|
||||
name: custom
|
||||
state: absent
|
||||
register: so
|
||||
- name: verify remove service offering
|
||||
assert:
|
||||
that:
|
||||
- so is changed
|
||||
- so.name == "custom"
|
||||
- so.display_text == "custom offer"
|
||||
- so.host_tags == ['eco']
|
||||
- so.storage_tags == ['eco', 'backup']
|
||||
- so.storage_type == "local"
|
|
@ -1,3 +0,0 @@
|
|||
---
|
||||
- import_tasks: guest_vm_service_offering.yml
|
||||
- import_tasks: system_vm_service_offering.yml
|
|
@ -1,151 +0,0 @@
|
|||
---
|
||||
- name: setup system offering
|
||||
cs_service_offering:
|
||||
name: System Offering for Ansible
|
||||
is_system: true
|
||||
state: absent
|
||||
register: so
|
||||
- name: verify setup system offering
|
||||
assert:
|
||||
that:
|
||||
- so is successful
|
||||
|
||||
- name: fail missing storage type and is_system
|
||||
cs_service_offering:
|
||||
name: System Offering for Ansible
|
||||
cpu_number: 1
|
||||
cpu_speed: 500
|
||||
memory: 512
|
||||
host_tag: perf
|
||||
storage_tag: perf
|
||||
storage_type: shared
|
||||
offer_ha: true
|
||||
limit_cpu_usage: false
|
||||
is_system: true
|
||||
register: so
|
||||
ignore_errors: true
|
||||
- name: verify create system service offering in check mode
|
||||
assert:
|
||||
that:
|
||||
- so is failed
|
||||
- so.msg.startswith('missing required arguments:')
|
||||
|
||||
- name: create system service offering in check mode
|
||||
cs_service_offering:
|
||||
name: System Offering for Ansible
|
||||
cpu_number: 1
|
||||
cpu_speed: 500
|
||||
memory: 512
|
||||
host_tag: perf
|
||||
storage_tag: perf
|
||||
storage_type: shared
|
||||
offer_ha: true
|
||||
limit_cpu_usage: false
|
||||
system_vm_type: domainrouter
|
||||
is_system: true
|
||||
register: so
|
||||
check_mode: true
|
||||
- name: verify create system service offering in check mode
|
||||
assert:
|
||||
that:
|
||||
- so is changed
|
||||
|
||||
- name: create system service offering
|
||||
cs_service_offering:
|
||||
name: System Offering for Ansible
|
||||
cpu_number: 1
|
||||
cpu_speed: 500
|
||||
memory: 512
|
||||
host_tag: perf
|
||||
storage_tag: perf
|
||||
storage_type: shared
|
||||
offer_ha: true
|
||||
limit_cpu_usage: false
|
||||
system_vm_type: domainrouter
|
||||
is_system: true
|
||||
register: so
|
||||
- name: verify create system service offering
|
||||
assert:
|
||||
that:
|
||||
- so is changed
|
||||
- so.name == "System Offering for Ansible"
|
||||
- so.display_text == "System Offering for Ansible"
|
||||
- so.cpu_number == 1
|
||||
- so.cpu_speed == 500
|
||||
- so.memory == 512
|
||||
- so.host_tags == ['perf']
|
||||
- so.storage_tags == ['perf']
|
||||
- so.storage_type == "shared"
|
||||
- so.offer_ha == true
|
||||
- so.limit_cpu_usage == false
|
||||
- so.system_vm_type == "domainrouter"
|
||||
- so.is_system == true
|
||||
|
||||
- name: create system service offering idempotence
|
||||
cs_service_offering:
|
||||
name: System Offering for Ansible
|
||||
cpu_number: 1
|
||||
cpu_speed: 500
|
||||
memory: 512
|
||||
host_tag: perf
|
||||
storage_tag: perf
|
||||
storage_type: shared
|
||||
offer_ha: true
|
||||
limit_cpu_usage: false
|
||||
system_vm_type: domainrouter
|
||||
is_system: true
|
||||
register: so
|
||||
- name: verify create system service offering idempotence
|
||||
assert:
|
||||
that:
|
||||
- so is not changed
|
||||
- so.name == "System Offering for Ansible"
|
||||
- so.display_text == "System Offering for Ansible"
|
||||
- so.cpu_number == 1
|
||||
- so.cpu_speed == 500
|
||||
- so.memory == 512
|
||||
- so.host_tags == ['perf']
|
||||
- so.storage_tags == ['perf']
|
||||
- so.storage_type == "shared"
|
||||
- so.offer_ha == true
|
||||
- so.limit_cpu_usage == false
|
||||
- so.system_vm_type == "domainrouter"
|
||||
- so.is_system == true
|
||||
|
||||
- name: remove system service offering in check mode
|
||||
cs_service_offering:
|
||||
name: System Offering for Ansible
|
||||
is_system: true
|
||||
state: absent
|
||||
check_mode: true
|
||||
register: so
|
||||
- name: verify remove system service offering in check mode
|
||||
assert:
|
||||
that:
|
||||
- so is changed
|
||||
- so.name == "System Offering for Ansible"
|
||||
- so.is_system == true
|
||||
|
||||
- name: remove system service offering
|
||||
cs_service_offering:
|
||||
name: System Offering for Ansible
|
||||
is_system: true
|
||||
state: absent
|
||||
register: so
|
||||
- name: verify remove system service offering
|
||||
assert:
|
||||
that:
|
||||
- so is changed
|
||||
- so.name == "System Offering for Ansible"
|
||||
- so.is_system == true
|
||||
|
||||
- name: remove system service offering idempotence
|
||||
cs_service_offering:
|
||||
name: System Offering for Ansible
|
||||
is_system: true
|
||||
state: absent
|
||||
register: so
|
||||
- name: verify remove system service offering idempotence
|
||||
assert:
|
||||
that:
|
||||
- so is not changed
|
|
@ -1,2 +0,0 @@
|
|||
cloud/hcloud
|
||||
shippable/hcloud/incidental
|
|
@ -1,5 +0,0 @@
|
|||
# Copyright: (c) 2019, Hetzner Cloud GmbH <info@hetzner-cloud.de>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
---
|
||||
hcloud_prefix: "tests"
|
||||
hcloud_server_name: "{{hcloud_prefix}}-integration"
|
|
@ -1,517 +0,0 @@
|
|||
# Copyright: (c) 2019, Hetzner Cloud GmbH <info@hetzner-cloud.de>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
---
|
||||
- name: setup
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
state: absent
|
||||
register: result
|
||||
- name: verify setup
|
||||
assert:
|
||||
that:
|
||||
- result is success
|
||||
- name: test missing required parameters on create server
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
register: result
|
||||
ignore_errors: yes
|
||||
- name: verify fail test missing required parameters on create server
|
||||
assert:
|
||||
that:
|
||||
- result is failed
|
||||
- 'result.msg == "missing required arguments: server_type, image"'
|
||||
|
||||
- name: test create server with check mode
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
server_type: cx11
|
||||
image: ubuntu-18.04
|
||||
state: present
|
||||
register: result
|
||||
check_mode: yes
|
||||
- name: verify create server with check mode
|
||||
assert:
|
||||
that:
|
||||
- result is changed
|
||||
|
||||
- name: test create server
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name}}"
|
||||
server_type: cx11
|
||||
image: ubuntu-18.04
|
||||
state: started
|
||||
register: main_server
|
||||
- name: verify create server
|
||||
assert:
|
||||
that:
|
||||
- main_server is changed
|
||||
- main_server.hcloud_server.name == "{{ hcloud_server_name }}"
|
||||
- main_server.hcloud_server.server_type == "cx11"
|
||||
- main_server.hcloud_server.status == "running"
|
||||
- main_server.root_password != ""
|
||||
|
||||
- name: test create server idempotence
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
state: started
|
||||
register: result
|
||||
- name: verify create server idempotence
|
||||
assert:
|
||||
that:
|
||||
- result is not changed
|
||||
|
||||
- name: test stop server with check mode
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
state: stopped
|
||||
register: result
|
||||
check_mode: yes
|
||||
- name: verify stop server with check mode
|
||||
assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.hcloud_server.status == "running"
|
||||
|
||||
- name: test stop server
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
state: stopped
|
||||
register: result
|
||||
- name: verify stop server
|
||||
assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.hcloud_server.status == "off"
|
||||
|
||||
- name: test start server with check mode
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
state: started
|
||||
register: result
|
||||
check_mode: true
|
||||
- name: verify start server with check mode
|
||||
assert:
|
||||
that:
|
||||
- result is changed
|
||||
|
||||
- name: test start server
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
state: started
|
||||
register: result
|
||||
- name: verify start server
|
||||
assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.hcloud_server.status == "running"
|
||||
|
||||
- name: test start server idempotence
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
state: started
|
||||
register: result
|
||||
- name: verify start server idempotence
|
||||
assert:
|
||||
that:
|
||||
- result is not changed
|
||||
- result.hcloud_server.status == "running"
|
||||
|
||||
- name: test stop server by its id
|
||||
hcloud_server:
|
||||
id: "{{ main_server.hcloud_server.id }}"
|
||||
state: stopped
|
||||
register: result
|
||||
- name: verify stop server by its id
|
||||
assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.hcloud_server.status == "off"
|
||||
|
||||
- name: test resize server running without force
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
server_type: "cx21"
|
||||
state: present
|
||||
register: result
|
||||
check_mode: true
|
||||
- name: verify test resize server running without force
|
||||
assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.hcloud_server.server_type == "cx11"
|
||||
|
||||
- name: test resize server with check mode
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
server_type: "cx21"
|
||||
state: stopped
|
||||
register: result
|
||||
check_mode: true
|
||||
- name: verify resize server with check mode
|
||||
assert:
|
||||
that:
|
||||
- result is changed
|
||||
|
||||
- name: test enable backups with check mode
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
backups: true
|
||||
state: stopped
|
||||
register: result
|
||||
check_mode: true
|
||||
- name: verify enable backups with check mode
|
||||
assert:
|
||||
that:
|
||||
- result is changed
|
||||
|
||||
- name: test enable backups
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
backups: true
|
||||
state: stopped
|
||||
register: result
|
||||
- name: verify enable backups
|
||||
assert:
|
||||
that:
|
||||
- result is changed
|
||||
- result.hcloud_server.backup_window != ""
|
||||
|
||||
- name: test enable backups idempotence
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
backups: true
|
||||
state: stopped
|
||||
register: result
|
||||
- name: verify enable backups idempotence
|
||||
assert:
|
||||
that:
|
||||
- result is not changed
|
||||
- result.hcloud_server.backup_window != ""
|
||||
|
||||
- name: test rebuild server
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
image: ubuntu-18.04
|
||||
state: rebuild
|
||||
register: result_after_test
|
||||
- name: verify rebuild server
|
||||
assert:
|
||||
that:
|
||||
- result_after_test is changed
|
||||
- result.hcloud_server.id == result_after_test.hcloud_server.id
|
||||
|
||||
- name: test rebuild server with check mode
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
image: ubuntu-18.04
|
||||
state: rebuild
|
||||
register: result_after_test
|
||||
check_mode: true
|
||||
- name: verify rebuild server with check mode
|
||||
assert:
|
||||
that:
|
||||
- result_after_test is changed
|
||||
|
||||
- name: test update server protection, both protection arguments are required
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
delete_protection: true
|
||||
state: present
|
||||
register: result_after_test
|
||||
ignore_errors: true
|
||||
- name: verify update server protection, both protection arguments are required
|
||||
assert:
|
||||
that:
|
||||
- result_after_test is failed
|
||||
- 'result_after_test.msg == "parameters are required together: delete_protection, rebuild_protection"'
|
||||
|
||||
- name: test update server protection fails if they are not the same
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
delete_protection: true
|
||||
rebuild_protection: false
|
||||
state: present
|
||||
register: result_after_test
|
||||
ignore_errors: true
|
||||
- name: verify update server protection fails if they are not the same
|
||||
assert:
|
||||
that:
|
||||
- result_after_test is failed
|
||||
|
||||
- name: test update server protection
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
delete_protection: true
|
||||
rebuild_protection: true
|
||||
state: present
|
||||
register: result_after_test
|
||||
ignore_errors: true
|
||||
- name: verify update server protection
|
||||
assert:
|
||||
that:
|
||||
- result_after_test is changed
|
||||
- result_after_test.hcloud_server.delete_protection is sameas true
|
||||
- result_after_test.hcloud_server.rebuild_protection is sameas true
|
||||
|
||||
- name: test server without protection set to be idempotent
|
||||
hcloud_server:
|
||||
name: "{{hcloud_server_name}}"
|
||||
register: result_after_test
|
||||
- name: verify test server without protection set to be idempotent
|
||||
assert:
|
||||
that:
|
||||
- result_after_test is not changed
|
||||
- result_after_test.hcloud_server.delete_protection is sameas true
|
||||
- result_after_test.hcloud_server.rebuild_protection is sameas true
|
||||
|
||||
- name: test delete server fails if it is protected
|
||||
hcloud_server:
|
||||
name: "{{hcloud_server_name}}"
|
||||
state: absent
|
||||
ignore_errors: yes
|
||||
register: result
|
||||
- name: verify delete server fails if it is protected
|
||||
assert:
|
||||
that:
|
||||
- result is failed
|
||||
- 'result.msg == "server deletion is protected"'
|
||||
|
||||
- name: test rebuild server fails if it is protected
|
||||
hcloud_server:
|
||||
name: "{{hcloud_server_name}}"
|
||||
image: ubuntu-18.04
|
||||
state: rebuild
|
||||
ignore_errors: yes
|
||||
register: result
|
||||
- name: verify rebuild server fails if it is protected
|
||||
assert:
|
||||
that:
|
||||
- result is failed
|
||||
- 'result.msg == "server rebuild is protected"'
|
||||
|
||||
- name: test remove server protection
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
delete_protection: false
|
||||
rebuild_protection: false
|
||||
state: present
|
||||
register: result_after_test
|
||||
ignore_errors: true
|
||||
- name: verify remove server protection
|
||||
assert:
|
||||
that:
|
||||
- result_after_test is changed
|
||||
- result_after_test.hcloud_server.delete_protection is sameas false
|
||||
- result_after_test.hcloud_server.rebuild_protection is sameas false
|
||||
|
||||
- name: absent server
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
state: absent
|
||||
register: result
|
||||
- name: verify absent server
|
||||
assert:
|
||||
that:
|
||||
- result is success
|
||||
|
||||
- name: test create server with ssh key
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name}}"
|
||||
server_type: cx11
|
||||
image: "ubuntu-18.04"
|
||||
ssh_keys:
|
||||
- ci@ansible.hetzner.cloud
|
||||
state: started
|
||||
register: main_server
|
||||
- name: verify create server with ssh key
|
||||
assert:
|
||||
that:
|
||||
- main_server is changed
|
||||
- main_server.hcloud_server.name == "{{ hcloud_server_name }}"
|
||||
- main_server.hcloud_server.server_type == "cx11"
|
||||
- main_server.hcloud_server.status == "running"
|
||||
- main_server.root_password != ""
|
||||
|
||||
- name: absent server
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
state: absent
|
||||
register: result
|
||||
- name: verify absent server
|
||||
assert:
|
||||
that:
|
||||
- result is success
|
||||
|
||||
- name: test create server with rescue_mode
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name}}"
|
||||
server_type: cx11
|
||||
image: "ubuntu-18.04"
|
||||
ssh_keys:
|
||||
- ci@ansible.hetzner.cloud
|
||||
rescue_mode: "linux64"
|
||||
state: started
|
||||
register: main_server
|
||||
- name: verify create server with rescue_mode
|
||||
assert:
|
||||
that:
|
||||
- main_server is changed
|
||||
- main_server.hcloud_server.name == "{{ hcloud_server_name }}"
|
||||
- main_server.hcloud_server.server_type == "cx11"
|
||||
- main_server.hcloud_server.status == "running"
|
||||
- main_server.root_password != ""
|
||||
- main_server.hcloud_server.rescue_enabled is sameas true
|
||||
|
||||
- name: absent server
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
state: absent
|
||||
register: result
|
||||
- name: verify absent server
|
||||
assert:
|
||||
that:
|
||||
- result is success
|
||||
|
||||
- name: setup server
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name}}"
|
||||
server_type: cx11
|
||||
image: ubuntu-18.04
|
||||
state: started
|
||||
register: main_server
|
||||
- name: verify setup server
|
||||
assert:
|
||||
that:
|
||||
- main_server is changed
|
||||
- main_server.hcloud_server.name == "{{ hcloud_server_name }}"
|
||||
- main_server.hcloud_server.server_type == "cx11"
|
||||
- main_server.hcloud_server.status == "running"
|
||||
- main_server.root_password != ""
|
||||
|
||||
- name: test activate rescue mode with check_mode
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
rescue_mode: "linux64"
|
||||
ssh_keys:
|
||||
- ci@ansible.hetzner.cloud
|
||||
state: present
|
||||
register: main_server
|
||||
check_mode: true
|
||||
- name: verify activate rescue mode
|
||||
assert:
|
||||
that:
|
||||
- main_server is changed
|
||||
|
||||
- name: test activate rescue mode
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
rescue_mode: "linux64"
|
||||
ssh_keys:
|
||||
- ci@ansible.hetzner.cloud
|
||||
state: present
|
||||
register: main_server
|
||||
- name: verify activate rescue mode
|
||||
assert:
|
||||
that:
|
||||
- main_server is changed
|
||||
- main_server.hcloud_server.rescue_enabled is sameas true
|
||||
|
||||
- name: test disable rescue mode
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
ssh_keys:
|
||||
- ci@ansible.hetzner.cloud
|
||||
state: present
|
||||
register: main_server
|
||||
- name: verify activate rescue mode
|
||||
assert:
|
||||
that:
|
||||
- main_server is changed
|
||||
- main_server.hcloud_server.rescue_enabled is sameas false
|
||||
|
||||
- name: test activate rescue mode without ssh keys
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
rescue_mode: "linux64"
|
||||
state: present
|
||||
register: main_server
|
||||
- name: verify activate rescue mode without ssh keys
|
||||
assert:
|
||||
that:
|
||||
- main_server is changed
|
||||
- main_server.hcloud_server.rescue_enabled is sameas true
|
||||
|
||||
- name: cleanup
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
state: absent
|
||||
register: result
|
||||
- name: verify cleanup
|
||||
assert:
|
||||
that:
|
||||
- result is success
|
||||
|
||||
- name: test create server with labels
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name}}"
|
||||
server_type: cx11
|
||||
image: "ubuntu-18.04"
|
||||
ssh_keys:
|
||||
- ci@ansible.hetzner.cloud
|
||||
labels:
|
||||
key: value
|
||||
mylabel: "val123"
|
||||
state: started
|
||||
register: main_server
|
||||
- name: verify create server with labels
|
||||
assert:
|
||||
that:
|
||||
- main_server is changed
|
||||
- main_server.hcloud_server.labels.key == "value"
|
||||
- main_server.hcloud_server.labels.mylabel == "val123"
|
||||
|
||||
- name: test update server with labels
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name}}"
|
||||
server_type: cx11
|
||||
image: "ubuntu-18.04"
|
||||
ssh_keys:
|
||||
- ci@ansible.hetzner.cloud
|
||||
labels:
|
||||
key: other
|
||||
mylabel: "val123"
|
||||
state: started
|
||||
register: main_server
|
||||
- name: verify update server with labels
|
||||
assert:
|
||||
that:
|
||||
- main_server is changed
|
||||
- main_server.hcloud_server.labels.key == "other"
|
||||
- main_server.hcloud_server.labels.mylabel == "val123"
|
||||
|
||||
- name: test update server with labels in other order
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name}}"
|
||||
server_type: cx11
|
||||
image: "ubuntu-18.04"
|
||||
ssh_keys:
|
||||
- ci@ansible.hetzner.cloud
|
||||
labels:
|
||||
mylabel: "val123"
|
||||
key: other
|
||||
state: started
|
||||
register: main_server
|
||||
- name: verify update server with labels in other order
|
||||
assert:
|
||||
that:
|
||||
- main_server is not changed
|
||||
|
||||
- name: cleanup with labels
|
||||
hcloud_server:
|
||||
name: "{{ hcloud_server_name }}"
|
||||
state: absent
|
||||
register: result
|
||||
- name: verify cleanup
|
||||
assert:
|
||||
that:
|
||||
- result is success
|
|
@@ -1,7 +0,0 @@
|
|||
shippable/posix/incidental
|
||||
destructive
|
||||
needs/target/incidental_setup_openssl
|
||||
needs/file/test/lib/ansible_test/_data/requirements/constraints.txt
|
||||
skip/aix
|
||||
skip/power/centos
|
||||
skip/python2.6
|
|
@@ -1,4 +0,0 @@
|
|||
---
|
||||
vault_gen_path: 'gen/testproject'
|
||||
vault_kv1_path: 'kv1/testproject'
|
||||
vault_kv2_path: 'kv2/data/testproject'
|
|
@@ -1,21 +0,0 @@
|
|||
- name: 'Create an approle policy'
|
||||
shell: "echo '{{ policy }}' | {{ vault_cmd }} policy write approle-policy -"
|
||||
vars:
|
||||
policy: |
|
||||
path "auth/approle/login" {
|
||||
capabilities = [ "create", "read" ]
|
||||
}
|
||||
|
||||
- name: 'Enable the AppRole auth method'
|
||||
command: '{{ vault_cmd }} auth enable approle'
|
||||
|
||||
- name: 'Create a named role'
|
||||
command: '{{ vault_cmd }} write auth/approle/role/test-role policies="test-policy,approle-policy"'
|
||||
|
||||
- name: 'Fetch the RoleID of the AppRole'
|
||||
command: '{{ vault_cmd }} read -field=role_id auth/approle/role/test-role/role-id'
|
||||
register: role_id_cmd
|
||||
|
||||
- name: 'Get a SecretID issued against the AppRole'
|
||||
command: '{{ vault_cmd }} write -field=secret_id -f auth/approle/role/test-role/secret-id'
|
||||
register: secret_id_cmd
|
|
@@ -1,45 +0,0 @@
|
|||
- vars:
|
||||
role_id: '{{ role_id_cmd.stdout }}'
|
||||
secret_id: '{{ secret_id_cmd.stdout }}'
|
||||
block:
|
||||
- name: 'Fetch secrets using "hashi_vault" lookup'
|
||||
set_fact:
|
||||
secret1: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret1 auth_method=approle secret_id=' ~ secret_id ~ ' role_id=' ~ role_id) }}"
|
||||
secret2: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret2 auth_method=approle secret_id=' ~ secret_id ~ ' role_id=' ~ role_id) }}"
|
||||
|
||||
- name: 'Check secret values'
|
||||
fail:
|
||||
msg: 'unexpected secret values'
|
||||
when: secret1['value'] != 'foo1' or secret2['value'] != 'foo2'
|
||||
|
||||
- name: 'Failure expected when erroneous credentials are used'
|
||||
vars:
|
||||
secret_wrong_cred: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret2 auth_method=approle secret_id=toto role_id=' ~ role_id) }}"
|
||||
debug:
|
||||
msg: 'Failure is expected ({{ secret_wrong_cred }})'
|
||||
register: test_wrong_cred
|
||||
ignore_errors: true
|
||||
|
||||
- name: 'Failure expected when unauthorized secret is read'
|
||||
vars:
|
||||
secret_unauthorized: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret3 auth_method=approle secret_id=' ~ secret_id ~ ' role_id=' ~ role_id) }}"
|
||||
debug:
|
||||
msg: 'Failure is expected ({{ secret_unauthorized }})'
|
||||
register: test_unauthorized
|
||||
ignore_errors: true
|
||||
|
||||
- name: 'Failure expected when nonexistent secret is read'
|
||||
vars:
|
||||
secret_inexistent: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret4 auth_method=approle secret_id=' ~ secret_id ~ ' role_id=' ~ role_id) }}"
|
||||
debug:
|
||||
msg: 'Failure is expected ({{ secret_inexistent }})'
|
||||
register: test_inexistent
|
||||
ignore_errors: true
|
||||
|
||||
- name: 'Check expected failures'
|
||||
assert:
|
||||
msg: "an expected failure didn't occur"
|
||||
that:
|
||||
- test_wrong_cred is failed
|
||||
- test_unauthorized is failed
|
||||
- test_inexistent is failed
|
|
@@ -1,155 +0,0 @@
|
|||
---
|
||||
- name: Install Hashi Vault on controlled node and test
|
||||
|
||||
vars:
|
||||
vault_version: '0.11.0'
|
||||
vault_uri: 'https://ansible-ci-files.s3.amazonaws.com/test/integration/targets/lookup_hashi_vault/vault_{{ vault_version }}_{{ ansible_system | lower }}_{{ vault_arch }}.zip'
|
||||
vault_cmd: '{{ local_temp_dir }}/vault'
|
||||
|
||||
block:
|
||||
- name: Create a local temporary directory
|
||||
tempfile:
|
||||
state: directory
|
||||
register: tempfile_result
|
||||
|
||||
- set_fact:
|
||||
local_temp_dir: '{{ tempfile_result.path }}'
|
||||
|
||||
- when: pyopenssl_version.stdout is version('0.15', '>=')
|
||||
block:
|
||||
- name: Generate privatekey
|
||||
openssl_privatekey:
|
||||
path: '{{ local_temp_dir }}/privatekey.pem'
|
||||
|
||||
- name: Generate CSR
|
||||
openssl_csr:
|
||||
path: '{{ local_temp_dir }}/csr.csr'
|
||||
privatekey_path: '{{ local_temp_dir }}/privatekey.pem'
|
||||
subject:
|
||||
commonName: localhost
|
||||
|
||||
- name: Generate selfsigned certificate
|
||||
openssl_certificate:
|
||||
path: '{{ local_temp_dir }}/cert.pem'
|
||||
csr_path: '{{ local_temp_dir }}/csr.csr'
|
||||
privatekey_path: '{{ local_temp_dir }}/privatekey.pem'
|
||||
provider: selfsigned
|
||||
selfsigned_digest: sha256
|
||||
register: selfsigned_certificate
|
||||
|
||||
- name: 'Install unzip'
|
||||
package:
|
||||
name: unzip
|
||||
when: ansible_distribution != "MacOSX" # unzip already installed
|
||||
|
||||
- assert:
|
||||
# Linux: x86_64, FreeBSD: amd64
|
||||
that: ansible_architecture in ['i386', 'x86_64', 'amd64']
|
||||
- set_fact:
|
||||
vault_arch: '386'
|
||||
when: ansible_architecture == 'i386'
|
||||
- set_fact:
|
||||
vault_arch: amd64
|
||||
when: ansible_architecture in ['x86_64', 'amd64']
|
||||
|
||||
- name: 'Download vault binary'
|
||||
unarchive:
|
||||
src: '{{ vault_uri }}'
|
||||
dest: '{{ local_temp_dir }}'
|
||||
remote_src: true
|
||||
|
||||
- environment:
|
||||
# used by vault command
|
||||
VAULT_DEV_ROOT_TOKEN_ID: '47542cbc-6bf8-4fba-8eda-02e0a0d29a0a'
|
||||
block:
|
||||
- name: 'Create configuration file'
|
||||
template:
|
||||
src: vault_config.hcl.j2
|
||||
dest: '{{ local_temp_dir }}/vault_config.hcl'
|
||||
|
||||
- name: 'Start vault service'
|
||||
environment:
|
||||
VAULT_ADDR: 'http://localhost:8200'
|
||||
block:
|
||||
- name: 'Start vault server (dev mode enabled)'
|
||||
shell: 'nohup {{ vault_cmd }} server -dev -config {{ local_temp_dir }}/vault_config.hcl </dev/null >/dev/null 2>&1 &'
|
||||
|
||||
- name: 'Create generic secrets engine'
|
||||
command: '{{ vault_cmd }} secrets enable -path=gen generic'
|
||||
|
||||
- name: 'Create KV v1 secrets engine'
|
||||
command: '{{ vault_cmd }} secrets enable -path=kv1 -version=1 kv'
|
||||
|
||||
- name: 'Create KV v2 secrets engine'
|
||||
command: '{{ vault_cmd }} secrets enable -path=kv2 -version=2 kv'
|
||||
|
||||
- name: 'Create a test policy'
|
||||
shell: "echo '{{ policy }}' | {{ vault_cmd }} policy write test-policy -"
|
||||
vars:
|
||||
policy: |
|
||||
path "{{ vault_gen_path }}/secret1" {
|
||||
capabilities = ["read"]
|
||||
}
|
||||
path "{{ vault_gen_path }}/secret2" {
|
||||
capabilities = ["read", "update"]
|
||||
}
|
||||
path "{{ vault_gen_path }}/secret3" {
|
||||
capabilities = ["deny"]
|
||||
}
|
||||
path "{{ vault_kv1_path }}/secret1" {
|
||||
capabilities = ["read"]
|
||||
}
|
||||
path "{{ vault_kv1_path }}/secret2" {
|
||||
capabilities = ["read", "update"]
|
||||
}
|
||||
path "{{ vault_kv1_path }}/secret3" {
|
||||
capabilities = ["deny"]
|
||||
}
|
||||
path "{{ vault_kv2_path }}/secret1" {
|
||||
capabilities = ["read"]
|
||||
}
|
||||
path "{{ vault_kv2_path }}/secret2" {
|
||||
capabilities = ["read", "update"]
|
||||
}
|
||||
path "{{ vault_kv2_path }}/secret3" {
|
||||
capabilities = ["deny"]
|
||||
}
|
||||
|
||||
- name: 'Create generic secrets'
|
||||
command: '{{ vault_cmd }} write {{ vault_gen_path }}/secret{{ item }} value=foo{{ item }}'
|
||||
loop: [1, 2, 3]
|
||||
|
||||
- name: 'Create KV v1 secrets'
|
||||
command: '{{ vault_cmd }} kv put {{ vault_kv1_path }}/secret{{ item }} value=foo{{ item }}'
|
||||
loop: [1, 2, 3]
|
||||
|
||||
- name: 'Create KV v2 secrets'
|
||||
command: '{{ vault_cmd }} kv put {{ vault_kv2_path | regex_replace("/data") }}/secret{{ item }} value=foo{{ item }}'
|
||||
loop: [1, 2, 3]
|
||||
|
||||
- name: setup approle auth
|
||||
import_tasks: approle_setup.yml
|
||||
when: ansible_distribution != 'RedHat' or ansible_distribution_major_version is version('7', '>')
|
||||
|
||||
- name: setup token auth
|
||||
import_tasks: token_setup.yml
|
||||
|
||||
- import_tasks: tests.yml
|
||||
vars:
|
||||
auth_type: approle
|
||||
when: ansible_distribution != 'RedHat' or ansible_distribution_major_version is version('7', '>')
|
||||
|
||||
- import_tasks: tests.yml
|
||||
vars:
|
||||
auth_type: token
|
||||
|
||||
always:
|
||||
- name: 'Kill vault process'
|
||||
shell: "kill $(cat {{ local_temp_dir }}/vault.pid)"
|
||||
ignore_errors: true
|
||||
|
||||
always:
|
||||
- name: 'Delete temp dir'
|
||||
file:
|
||||
path: '{{ local_temp_dir }}'
|
||||
state: absent
|
|
@@ -1,35 +0,0 @@
|
|||
- name: 'test {{ auth_type }} auth without SSL (lookup parameters)'
|
||||
include_tasks: '{{ auth_type }}_test.yml'
|
||||
vars:
|
||||
conn_params: 'url=http://localhost:8200 '
|
||||
|
||||
- name: 'test {{ auth_type }} auth without SSL (environment variable)'
|
||||
include_tasks: '{{ auth_type }}_test.yml'
|
||||
args:
|
||||
apply:
|
||||
vars:
|
||||
conn_params: ''
|
||||
environment:
|
||||
VAULT_ADDR: 'http://localhost:8200'
|
||||
|
||||
- when: pyopenssl_version.stdout is version('0.15', '>=')
|
||||
block:
|
||||
- name: 'test {{ auth_type }} auth with certs (validation enabled, lookup parameters)'
|
||||
include_tasks: '{{ auth_type }}_test.yml'
|
||||
vars:
|
||||
conn_params: 'url=https://localhost:8201 ca_cert={{ local_temp_dir }}/cert.pem validate_certs=True '
|
||||
|
||||
- name: 'test {{ auth_type }} auth with certs (validation enabled, environment variables)'
|
||||
include_tasks: '{{ auth_type }}_test.yml'
|
||||
args:
|
||||
apply:
|
||||
vars:
|
||||
conn_params: ''
|
||||
environment:
|
||||
VAULT_ADDR: 'https://localhost:8201'
|
||||
VAULT_CACERT: '{{ local_temp_dir }}/cert.pem'
|
||||
|
||||
- name: 'test {{ auth_type }} auth with certs (validation disabled, lookup parameters)'
|
||||
include_tasks: '{{ auth_type }}_test.yml'
|
||||
vars:
|
||||
conn_params: 'url=https://localhost:8201 validate_certs=False '
|
|
@@ -1,3 +0,0 @@
|
|||
- name: 'Create test credentials (token)'
|
||||
command: '{{ vault_cmd }} token create -policy test-policy -field token'
|
||||
register: user_token_cmd
|
|
@@ -1,58 +0,0 @@
|
|||
- vars:
|
||||
user_token: '{{ user_token_cmd.stdout }}'
|
||||
block:
|
||||
- name: 'Fetch secrets using "hashi_vault" lookup'
|
||||
set_fact:
|
||||
gen_secret1: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_gen_path ~ '/secret1 auth_method=token token=' ~ user_token) }}"
|
||||
gen_secret2: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_gen_path ~ '/secret2 token=' ~ user_token) }}"
|
||||
kv1_secret1: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv1_path ~ '/secret1 auth_method=token token=' ~ user_token) }}"
|
||||
kv1_secret2: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv1_path ~ '/secret2 token=' ~ user_token) }}"
|
||||
kv2_secret1: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret1 auth_method=token token=' ~ user_token) }}"
|
||||
kv2_secret2: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret2 token=' ~ user_token) }}"
|
||||
|
||||
- name: 'Check secret generic values'
|
||||
fail:
|
||||
msg: 'unexpected secret values'
|
||||
when: gen_secret1['value'] != 'foo1' or gen_secret2['value'] != 'foo2'
|
||||
|
||||
- name: 'Check secret kv1 values'
|
||||
fail:
|
||||
msg: 'unexpected secret values'
|
||||
when: kv1_secret1['value'] != 'foo1' or kv1_secret2['value'] != 'foo2'
|
||||
|
||||
- name: 'Check secret kv2 values'
|
||||
fail:
|
||||
msg: 'unexpected secret values'
|
||||
when: kv2_secret1['value'] != 'foo1' or kv2_secret2['value'] != 'foo2'
|
||||
|
||||
- name: 'Failure expected when erroneous credentials are used'
|
||||
vars:
|
||||
secret_wrong_cred: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret2 auth_method=token token=wrong_token') }}"
|
||||
debug:
|
||||
msg: 'Failure is expected ({{ secret_wrong_cred }})'
|
||||
register: test_wrong_cred
|
||||
ignore_errors: true
|
||||
|
||||
- name: 'Failure expected when unauthorized secret is read'
|
||||
vars:
|
||||
secret_unauthorized: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret3 token=' ~ user_token) }}"
|
||||
debug:
|
||||
msg: 'Failure is expected ({{ secret_unauthorized }})'
|
||||
register: test_unauthorized
|
||||
ignore_errors: true
|
||||
|
||||
- name: 'Failure expected when nonexistent secret is read'
|
||||
vars:
|
||||
secret_inexistent: "{{ lookup('hashi_vault', conn_params ~ 'secret=' ~ vault_kv2_path ~ '/secret4 token=' ~ user_token) }}"
|
||||
debug:
|
||||
msg: 'Failure is expected ({{ secret_inexistent }})'
|
||||
register: test_inexistent
|
||||
ignore_errors: true
|
||||
|
||||
- name: 'Check expected failures'
|
||||
assert:
|
||||
msg: "an expected failure didn't occur"
|
||||
that:
|
||||
- test_wrong_cred is failed
|
||||
- test_unauthorized is failed
|
||||
- test_inexistent is failed
|
|
@@ -1,10 +0,0 @@
|
|||
# {{ ansible_managed }}
|
||||
pid_file = "{{ local_temp_dir }}/vault.pid"
|
||||
{% if pyopenssl_version.stdout is version('0.15', '>=') %}
|
||||
listener "tcp" {
|
||||
tls_key_file = "{{ local_temp_dir }}/privatekey.pem"
|
||||
tls_cert_file = "{{ local_temp_dir }}/cert.pem"
|
||||
tls_disable = false
|
||||
address = "localhost:8201"
|
||||
}
|
||||
{% endif %}
|
|
@@ -1,19 +0,0 @@
|
|||
- hosts: localhost
|
||||
tasks:
|
||||
- name: Install openssl
|
||||
import_role:
|
||||
name: incidental_setup_openssl
|
||||
|
||||
- name: "RedHat <= 7, select last version compatible with request 2.6.0 (this version doesn't support approle auth)"
|
||||
set_fact:
|
||||
hvac_package: 'hvac==0.2.5'
|
||||
when: ansible_distribution == 'RedHat' and ansible_distribution_major_version is version('7', '<=')
|
||||
|
||||
- name: 'CentOS < 7, select last version compatible with Python 2.6'
|
||||
set_fact:
|
||||
hvac_package: 'hvac==0.5.0'
|
||||
when: ansible_distribution == 'CentOS' and ansible_distribution_major_version is version('7', '<')
|
||||
|
||||
- name: 'Install hvac Python package'
|
||||
pip:
|
||||
name: "{{ hvac_package|default('hvac') }}"
|
|
@@ -1,9 +0,0 @@
|
|||
- hosts: localhost
|
||||
tasks:
|
||||
- name: register pyOpenSSL version
|
||||
command: "{{ ansible_python.executable }} -c 'import OpenSSL; print(OpenSSL.__version__)'"
|
||||
register: pyopenssl_version
|
||||
|
||||
- name: Test lookup hashi_vault
|
||||
import_role:
|
||||
name: incidental_lookup_hashi_vault/lookup_hashi_vault
|
|
@@ -1,23 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
set -eux
|
||||
|
||||
# First install pyOpenSSL, then test lookup in a second playbook in order to
|
||||
# work around this error, which occurs on OS X 10.11 only:
|
||||
#
|
||||
# TASK [lookup_hashi_vault : test token auth with certs (validation enabled, lookup parameters)] ***
|
||||
# included: lookup_hashi_vault/tasks/token_test.yml for testhost
|
||||
#
|
||||
# TASK [lookup_hashi_vault : Fetch secrets using "hashi_vault" lookup] ***
|
||||
# From cffi callback <function _verify_callback at 0x106f995f0>:
|
||||
# Traceback (most recent call last):
|
||||
# File "/usr/local/lib/python2.7/site-packages/OpenSSL/SSL.py", line 309, in wrapper
|
||||
# _lib.X509_up_ref(x509)
|
||||
# AttributeError: 'module' object has no attribute 'X509_up_ref'
|
||||
# fatal: [testhost]: FAILED! => { "msg": "An unhandled exception occurred while running the lookup plugin 'hashi_vault'. Error was a <class 'requests.exceptions.SSLError'>, original message: HTTPSConnectionPool(host='localhost', port=8201): Max retries exceeded with url: /v1/auth/token/lookup-self (Caused by SSLError(SSLError(\"bad handshake: Error([('SSL routines', 'ssl3_get_server_certificate', 'certificate verify failed')],)\",),))"}
|
||||
|
||||
ANSIBLE_ROLES_PATH=../ \
|
||||
ansible-playbook playbooks/install_dependencies.yml -v "$@"
|
||||
|
||||
ANSIBLE_ROLES_PATH=../ \
|
||||
ansible-playbook playbooks/test_lookup_hashi_vault.yml -v "$@"
|
|
@@ -1 +0,0 @@
|
|||
hidden
|
|
@@ -1,3 +0,0 @@
|
|||
shippable/cloud/incidental
|
||||
cloud/nios
|
||||
destructive
|
|
@@ -1,3 +0,0 @@
|
|||
---
|
||||
testcase: "*"
|
||||
test_items: []
|
|
@@ -1,2 +0,0 @@
|
|||
dependencies:
|
||||
- incidental_nios_prepare_tests
|
|
@@ -1 +0,0 @@
|
|||
- include: nios_txt_record_idempotence.yml
|
|
@@ -1,80 +0,0 @@
|
|||
- name: cleanup the parent object
|
||||
nios_zone:
|
||||
name: ansible.com
|
||||
state: absent
|
||||
provider: "{{ nios_provider }}"
|
||||
|
||||
- name: create the parent object
|
||||
nios_zone:
|
||||
name: ansible.com
|
||||
state: present
|
||||
provider: "{{ nios_provider }}"
|
||||
|
||||
- name: cleanup txt record
|
||||
nios_txt_record:
|
||||
name: txt.ansible.com
|
||||
text: mytext
|
||||
state: absent
|
||||
provider: "{{ nios_provider }}"
|
||||
|
||||
- name: create txt record
|
||||
nios_txt_record:
|
||||
name: txt.ansible.com
|
||||
text: mytext
|
||||
state: present
|
||||
provider: "{{ nios_provider }}"
|
||||
register: txt_create1
|
||||
|
||||
- name: create txt record
|
||||
nios_txt_record:
|
||||
name: txt.ansible.com
|
||||
text: mytext
|
||||
state: present
|
||||
provider: "{{ nios_provider }}"
|
||||
register: txt_create2
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "txt_create1.changed"
|
||||
- "not txt_create2.changed"
|
||||
|
||||
- name: add a comment to an existing txt record
|
||||
nios_txt_record:
|
||||
name: txt.ansible.com
|
||||
text: mytext
|
||||
state: present
|
||||
comment: mycomment
|
||||
provider: "{{ nios_provider }}"
|
||||
register: txt_update1
|
||||
|
||||
- name: add a comment to an existing txt record
|
||||
nios_txt_record:
|
||||
name: txt.ansible.com
|
||||
text: mytext
|
||||
state: present
|
||||
comment: mycomment
|
||||
provider: "{{ nios_provider }}"
|
||||
register: txt_update2
|
||||
|
||||
- name: remove a txt record from the system
|
||||
nios_txt_record:
|
||||
name: txt.ansible.com
|
||||
state: absent
|
||||
provider: "{{ nios_provider }}"
|
||||
register: txt_delete1
|
||||
|
||||
- name: remove a txt record from the system
|
||||
nios_txt_record:
|
||||
name: txt.ansible.com
|
||||
state: absent
|
||||
provider: "{{ nios_provider }}"
|
||||
register: txt_delete2
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "txt_create1.changed"
|
||||
- "not txt_create2.changed"
|
||||
- "txt_update1.changed"
|
||||
- "not txt_update2.changed"
|
||||
- "txt_delete1.changed"
|
||||
- "not txt_delete2.changed"
|
|
@@ -1,3 +0,0 @@
|
|||
needs/root
|
||||
shippable/posix/incidental
|
||||
skip/aix
|
|
@@ -1,36 +0,0 @@
|
|||
# (c) 2017, Sam Doran <sdoran@redhat.com>
|
||||
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
- debug:
|
||||
msg: SELinux is disabled
|
||||
when: ansible_selinux is defined and ansible_selinux == False
|
||||
|
||||
- debug:
|
||||
msg: SELinux is {{ ansible_selinux.status }}
|
||||
when: ansible_selinux is defined and ansible_selinux != False
|
||||
|
||||
- include: selinux.yml
|
||||
when:
|
||||
- ansible_selinux is defined
|
||||
- ansible_selinux != False
|
||||
- ansible_selinux.status == 'enabled'
|
||||
|
||||
- include: selogin.yml
|
||||
when:
|
||||
- ansible_selinux is defined
|
||||
- ansible_selinux != False
|
||||
- ansible_selinux.status == 'enabled'
|
|
@@ -1,364 +0,0 @@
|
|||
# (c) 2017, Sam Doran <sdoran@redhat.com>
|
||||
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
# First Test
|
||||
# ##############################################################################
|
||||
# Test changing the state, which requires a reboot
|
||||
|
||||
- name: TEST 1 | Get current SELinux config file contents
|
||||
set_fact:
|
||||
selinux_config_original: "{{ lookup('file', '/etc/sysconfig/selinux').split('\n') }}"
|
||||
before_test_sestatus: "{{ ansible_selinux }}"
|
||||
|
||||
- debug:
|
||||
var: "{{ item }}"
|
||||
verbosity: 1
|
||||
with_items:
|
||||
- selinux_config_original
|
||||
- before_test_sestatus
|
||||
- ansible_selinux
|
||||
|
||||
- name: TEST 1 | Setup SELinux configuration for tests
|
||||
selinux:
|
||||
state: enforcing
|
||||
policy: targeted
|
||||
|
||||
- name: TEST 1 | Disable SELinux
|
||||
selinux:
|
||||
state: disabled
|
||||
policy: targeted
|
||||
register: _disable_test1
|
||||
|
||||
- debug:
|
||||
var: _disable_test1
|
||||
verbosity: 1
|
||||
|
||||
- name: TEST 1 | Re-gather facts
|
||||
setup:
|
||||
|
||||
- name: TEST 1 | Assert that status was changed, reboot_required is True, a warning was displayed, and SELinux is configured properly
|
||||
assert:
|
||||
that:
|
||||
- _disable_test1 is changed
|
||||
- _disable_test1.reboot_required
|
||||
- (_disable_test1.warnings | length ) >= 1
|
||||
- ansible_selinux.config_mode == 'disabled'
|
||||
- ansible_selinux.type == 'targeted'
|
||||
|
||||
- debug:
|
||||
var: ansible_selinux
|
||||
verbosity: 1
|
||||
|
||||
- name: TEST 1 | Disable SELinux again
|
||||
selinux:
|
||||
state: disabled
|
||||
policy: targeted
|
||||
register: _disable_test2
|
||||
|
||||
- debug:
|
||||
var: _disable_test2
|
||||
verbosity: 1
|
||||
|
||||
- name: TEST 1 | Assert that no change is reported, a warning was displayed, and reboot_required is True
|
||||
assert:
|
||||
that:
|
||||
- _disable_test2 is not changed
|
||||
- (_disable_test1.warnings | length ) >= 1
|
||||
- _disable_test2.reboot_required
|
||||
|
||||
- name: TEST 1 | Get modified config file
|
||||
set_fact:
|
||||
selinux_config_after: "{{ lookup('file', '/etc/sysconfig/selinux').split('\n') }}"
|
||||
|
||||
- debug:
|
||||
var: selinux_config_after
|
||||
verbosity: 1
|
||||
|
||||
- name: TEST 1 | Ensure SELinux config file is properly formatted
|
||||
assert:
|
||||
that:
|
||||
- selinux_config_original | length == selinux_config_after | length
|
||||
- selinux_config_after[selinux_config_after.index('SELINUX=disabled')] is search("^SELINUX=\w+$")
|
||||
- selinux_config_after[selinux_config_after.index('SELINUXTYPE=targeted')] is search("^SELINUXTYPE=\w+$")
|
||||
|
||||
- name: TEST 1 | Reset SELinux configuration for next test
|
||||
selinux:
|
||||
state: enforcing
|
||||
policy: targeted
|
||||
|
||||
|
||||
# Second Test
|
||||
# ##############################################################################
|
||||
# Test changing only the policy, which does not require a reboot
|
||||
|
||||
- name: TEST 2 | Make sure the policy is present
|
||||
package:
|
||||
name: selinux-policy-mls
|
||||
state: present
|
||||
|
||||
- name: TEST 2 | Set SELinux policy
|
||||
selinux:
|
||||
state: enforcing
|
||||
policy: mls
|
||||
register: _state_test1
|
||||
|
||||
- debug:
|
||||
var: _state_test1
|
||||
verbosity: 1
|
||||
|
||||
- name: TEST 2 | Re-gather facts
|
||||
setup:
|
||||
|
||||
- debug:
|
||||
var: ansible_selinux
|
||||
tags: debug
|
||||
|
||||
- name: TEST 2 | Assert that status was changed, reboot_required is False, no warnings were displayed, and SELinux is configured properly
|
||||
assert:
|
||||
that:
|
||||
- _state_test1 is changed
|
||||
- not _state_test1.reboot_required
|
||||
- _state_test1.warnings is not defined
|
||||
- ansible_selinux.config_mode == 'enforcing'
|
||||
- ansible_selinux.type == 'mls'
|
||||
|
||||
- name: TEST 2 | Set SELinux policy again
|
||||
selinux:
|
||||
state: enforcing
|
||||
policy: mls
|
||||
register: _state_test2
|
||||
|
||||
- debug:
|
||||
var: _state_test2
|
||||
verbosity: 1
|
||||
|
||||
- name: TEST 2 | Assert that no change was reported, no warnings were displayed, and reboot_required is False
|
||||
assert:
|
||||
that:
|
||||
- _state_test2 is not changed
|
||||
- _state_test2.warnings is not defined
|
||||
- not _state_test2.reboot_required
|
||||
|
||||
- name: TEST 2 | Get modified config file
|
||||
set_fact:
|
||||
selinux_config_after: "{{ lookup('file', '/etc/sysconfig/selinux').split('\n') }}"
|
||||
|
||||
- debug:
|
||||
var: selinux_config_after
|
||||
verbosity: 1
|
||||
|
||||
- name: TEST 2 | Ensure SELinux config file is properly formatted
|
||||
assert:
|
||||
that:
|
||||
- selinux_config_original | length == selinux_config_after | length
|
||||
- selinux_config_after[selinux_config_after.index('SELINUX=enforcing')] is search("^SELINUX=\w+$")
|
||||
- selinux_config_after[selinux_config_after.index('SELINUXTYPE=mls')] is search("^SELINUXTYPE=\w+$")
|
||||
|
||||
- name: TEST 2 | Reset SELinux configuration for next test
|
||||
selinux:
|
||||
state: enforcing
|
||||
policy: targeted
|
||||
|
||||
|
||||
# Third Test
|
||||
# ##############################################################################
|
||||
# Test changing non-existing policy
|
||||
|
||||
- name: TEST 3 | Set SELinux policy
|
||||
selinux:
|
||||
state: enforcing
|
||||
policy: non-existing-selinux-policy
|
||||
register: _state_test1
|
||||
ignore_errors: yes
|
||||
|
||||
- debug:
|
||||
var: _state_test1
|
||||
verbosity: 1
|
||||
|
||||
- name: TEST 3 | Re-gather facts
|
||||
setup:
|
||||
|
||||
- debug:
|
||||
var: ansible_selinux
|
||||
tags: debug
|
||||
|
||||
- name: TEST 3 | Assert that status was not changed, the task failed, the msg contains proper information and SELinux was not changed
|
||||
assert:
|
||||
that:
|
||||
- _state_test1 is not changed
|
||||
- _state_test1 is failed
|
||||
- _state_test1.msg == 'Policy non-existing-selinux-policy does not exist in /etc/selinux/'
|
||||
- ansible_selinux.config_mode == 'enforcing'
|
||||
- ansible_selinux.type == 'targeted'
|
||||
|
||||
|
||||
# Fourth Test
|
||||
# ##############################################################################
|
||||
# Test if check mode returns correct changed values and
|
||||
# doesn't make any changes
|
||||
|
||||
|
||||
- name: TEST 4 | Set SELinux to enforcing
|
||||
selinux:
|
||||
state: enforcing
|
||||
policy: targeted
|
||||
register: _check_mode_test1
|
||||
|
||||
- debug:
|
||||
var: _check_mode_test1
|
||||
verbosity: 1
|
||||
|
||||
- name: TEST 4 | Set SELinux to enforcing in check mode
|
||||
selinux:
|
||||
state: enforcing
|
||||
policy: targeted
|
||||
register: _check_mode_test1
|
||||
check_mode: yes
|
||||
|
||||
- name: TEST 4 | Re-gather facts
|
||||
setup:
|
||||
|
||||
- debug:
|
||||
var: ansible_selinux
|
||||
verbosity: 1
|
||||
tags: debug
|
||||
|
||||
- name: TEST 4 | Assert that check mode is idempotent
|
||||
assert:
|
||||
that:
|
||||
- _check_mode_test1 is success
|
||||
- not _check_mode_test1.reboot_required
|
||||
- ansible_selinux.config_mode == 'enforcing'
|
||||
- ansible_selinux.type == 'targeted'
|
||||
|
||||
- name: TEST 4 | Set SELinux to permissive in check mode
|
||||
selinux:
|
||||
state: permissive
|
||||
policy: targeted
|
||||
register: _check_mode_test2
|
||||
check_mode: yes
|
||||
|
||||
- name: TEST 4 | Re-gather facts
|
||||
setup:
|
||||
|
||||
- debug:
|
||||
var: ansible_selinux
|
||||
verbosity: 1
|
||||
tags: debug
|
||||
|
||||
- name: TEST 4 | Assert that check mode doesn't set state permissive and returns changed
|
||||
assert:
|
||||
that:
|
||||
- _check_mode_test2 is changed
|
||||
- not _check_mode_test2.reboot_required
|
||||
- ansible_selinux.config_mode == 'enforcing'
|
||||
- ansible_selinux.type == 'targeted'
|
||||
|
||||
- name: TEST 4 | Disable SELinux in check mode
|
||||
selinux:
|
||||
state: disabled
|
||||
register: _check_mode_test3
|
||||
check_mode: yes
|
||||
|
||||
- name: TEST 4 | Re-gather facts
|
||||
setup:
|
||||
|
||||
- debug:
|
||||
var: ansible_selinux
|
||||
verbosity: 1
|
||||
tags: debug
|
||||
|
||||
- name: TEST 4 | Assert that check mode didn't change anything, status is changed, reboot_required is True, a warning was displayed
|
||||
assert:
|
||||
that:
|
||||
- _check_mode_test3 is changed
|
||||
- _check_mode_test3.reboot_required
|
||||
- (_check_mode_test3.warnings | length ) >= 1
|
||||
- ansible_selinux.config_mode == 'enforcing'
|
||||
- ansible_selinux.type == 'targeted'
|
||||
|
||||
- name: TEST 4 | Set SELinux to permissive
|
||||
selinux:
|
||||
state: permissive
|
||||
policy: targeted
|
||||
register: _check_mode_test4
|
||||
|
||||
- debug:
|
||||
var: _check_mode_test4
|
||||
verbosity: 1
|
||||
|
||||
- name: TEST 4 | Disable SELinux in check mode
|
||||
selinux:
|
||||
state: disabled
|
||||
register: _check_mode_test4
|
||||
check_mode: yes
|
||||
|
||||
- name: TEST 4 | Re-gather facts
|
||||
setup:
|
||||
|
||||
- debug:
|
||||
var: ansible_selinux
|
||||
verbosity: 1
|
||||
tags: debug
|
||||
|
||||
- name: TEST 4 | Assert that check mode didn't change anything, status is changed, reboot_required is True, a warning was displayed
|
||||
assert:
|
||||
that:
|
||||
- _check_mode_test4 is changed
|
||||
- _check_mode_test4.reboot_required
|
||||
- (_check_mode_test3.warnings | length ) >= 1
|
||||
- ansible_selinux.config_mode == 'permissive'
|
||||
- ansible_selinux.type == 'targeted'
|
||||
|
||||
- name: TEST 4 | Set SELinux to enforcing
|
||||
selinux:
|
||||
state: enforcing
|
||||
policy: targeted
|
||||
register: _check_mode_test5
|
||||
|
||||
- debug:
|
||||
var: _check_mode_test5
|
||||
verbosity: 1
|
||||
|
||||
- name: TEST 4 | Disable SELinux
|
||||
selinux:
|
||||
state: disabled
|
||||
register: _check_mode_test5
|
||||
|
||||
- name: TEST 4 | Disable SELinux in check mode
|
||||
selinux:
|
||||
state: disabled
|
||||
register: _check_mode_test5
|
||||
check_mode: yes
|
||||
|
||||
- name: TEST 4 | Re-gather facts
|
||||
setup:
|
||||
|
||||
- debug:
|
||||
var: ansible_selinux
|
||||
verbosity: 1
|
||||
tags: debug
|
||||
|
||||
- name: TEST 4 | Assert that in check mode status was not changed, reboot_required is True, a warning was displayed, and SELinux is configured properly
|
||||
assert:
|
||||
that:
|
||||
- _check_mode_test5 is success
|
||||
- _check_mode_test5.reboot_required
|
||||
- (_check_mode_test5.warnings | length ) >= 1
|
||||
- ansible_selinux.config_mode == 'disabled'
|
||||
- ansible_selinux.type == 'targeted'
|
|
@@ -1,81 +0,0 @@
|
|||
---
|
||||
|
||||
- name: create user for testing
|
||||
user:
|
||||
name: seuser
|
||||
|
||||
- name: attempt to add mapping without 'seuser'
|
||||
selogin:
|
||||
login: seuser
|
||||
register: selogin_error
|
||||
ignore_errors: yes
|
||||
|
||||
- name: verify failure
|
||||
assert:
|
||||
that:
|
||||
- selogin_error is failed
|
||||
|
||||
- name: map login to SELinux user
|
||||
selogin:
|
||||
login: seuser
|
||||
seuser: staff_u
|
||||
register: selogin_new_mapping
|
||||
check_mode: "{{ item }}"
|
||||
with_items:
|
||||
- yes
|
||||
- no
|
||||
- yes
|
||||
- no
|
||||
|
||||
- name: new mapping - verify functionality and check_mode
|
||||
assert:
|
||||
that:
|
||||
- selogin_new_mapping.results[0] is changed
|
||||
- selogin_new_mapping.results[1] is changed
|
||||
- selogin_new_mapping.results[2] is not changed
|
||||
- selogin_new_mapping.results[3] is not changed
|
||||
|
||||
- name: change SELinux user login mapping
|
||||
selogin:
|
||||
login: seuser
|
||||
seuser: user_u
|
||||
register: selogin_mod_mapping
|
||||
check_mode: "{{ item }}"
|
||||
with_items:
|
||||
- yes
|
||||
- no
|
||||
- yes
|
||||
- no
|
||||
|
||||
- name: changed mapping - verify functionality and check_mode
|
||||
assert:
|
||||
that:
|
||||
- selogin_mod_mapping.results[0] is changed
|
||||
- selogin_mod_mapping.results[1] is changed
|
||||
- selogin_mod_mapping.results[2] is not changed
|
||||
- selogin_mod_mapping.results[3] is not changed
|
||||
|
||||
- name: remove SELinux user mapping
|
||||
selogin:
|
||||
login: seuser
|
||||
state: absent
|
||||
register: selogin_del_mapping
|
||||
check_mode: "{{ item }}"
|
||||
with_items:
|
||||
- yes
|
||||
- no
|
||||
- yes
|
||||
- no
|
||||
|
||||
- name: delete mapping - verify functionality and check_mode
|
||||
assert:
|
||||
that:
|
||||
- selogin_del_mapping.results[0] is changed
|
||||
- selogin_del_mapping.results[1] is changed
|
||||
- selogin_del_mapping.results[2] is not changed
|
||||
- selogin_del_mapping.results[3] is not changed
|
||||
|
||||
- name: remove test user
|
||||
user:
|
||||
name: seuser
|
||||
state: absent
|
|
@@ -1,2 +0,0 @@
|
|||
hidden
|
||||
|
|
@@ -1,48 +0,0 @@
|
|||
---
|
||||
- name: Include OS-specific variables
|
||||
include_vars: '{{ ansible_os_family }}.yml'
|
||||
when: not ansible_os_family == "Darwin"
|
||||
|
||||
- name: Install OpenSSL
|
||||
become: True
|
||||
package:
|
||||
name: '{{ openssl_package_name }}'
|
||||
when: not ansible_os_family == 'Darwin'
|
||||
|
||||
- name: Install pyOpenSSL (Python 3)
|
||||
become: True
|
||||
package:
|
||||
name: '{{ pyopenssl_package_name_python3 }}'
|
||||
when: not ansible_os_family == 'Darwin' and ansible_python_version is version('3.0', '>=')
|
||||
|
||||
- name: Install pyOpenSSL (Python 2)
|
||||
become: True
|
||||
package:
|
||||
name: '{{ pyopenssl_package_name }}'
|
||||
when: not ansible_os_family == 'Darwin' and ansible_python_version is version('3.0', '<')
|
||||
|
||||
- name: Install pyOpenSSL (Darwin)
|
||||
become: True
|
||||
pip:
|
||||
name:
|
||||
- pyOpenSSL==19.1.0
|
||||
# dependencies for pyOpenSSL
|
||||
- cffi==1.14.2
|
||||
- cryptography==3.1
|
||||
- enum34==1.1.10
|
||||
- ipaddress==1.0.23
|
||||
- pycparser==2.20
|
||||
- six==1.15.0
|
||||
when: ansible_os_family == 'Darwin'
|
||||
|
||||
- name: register pyOpenSSL version
|
||||
command: "{{ ansible_python.executable }} -c 'import OpenSSL; print(OpenSSL.__version__)'"
|
||||
register: pyopenssl_version
|
||||
|
||||
- name: register openssl version
|
||||
shell: "openssl version | cut -d' ' -f2"
|
||||
register: openssl_version
|
||||
|
||||
- name: register cryptography version
|
||||
command: "{{ ansible_python.executable }} -c 'import cryptography; print(cryptography.__version__)'"
|
||||
register: cryptography_version
|
|
@@ -1,3 +0,0 @@
|
|||
pyopenssl_package_name: python-openssl
|
||||
pyopenssl_package_name_python3: python3-openssl
|
||||
openssl_package_name: openssl
|
|
@@ -1,3 +0,0 @@
|
|||
pyopenssl_package_name: py27-openssl
|
||||
pyopenssl_package_name_python3: py36-openssl
|
||||
openssl_package_name: openssl
|
|
@@ -1,3 +0,0 @@
|
|||
pyopenssl_package_name: pyOpenSSL
|
||||
pyopenssl_package_name_python3: python3-pyOpenSSL
|
||||
openssl_package_name: openssl
|
|
@@ -1,3 +0,0 @@
|
|||
pyopenssl_package_name: python-pyOpenSSL
|
||||
pyopenssl_package_name_python3: python3-pyOpenSSL
|
||||
openssl_package_name: openssl
|
|
@@ -1,13 +0,0 @@
|
|||
shippable/posix/incidental
|
||||
skip/aix
|
||||
skip/power/centos
|
||||
skip/osx
|
||||
skip/macos
|
||||
skip/freebsd
|
||||
skip/rhel8.0
|
||||
skip/rhel8.0b
|
||||
skip/rhel8.1b
|
||||
skip/docker
|
||||
needs/root
|
||||
destructive
|
||||
needs/target/setup_epel
|
|
@@ -1,34 +0,0 @@
|
|||
---
|
||||
# Make sure ufw is installed
|
||||
- name: Install EPEL repository (RHEL only)
|
||||
include_role:
|
||||
name: setup_epel
|
||||
when: ansible_distribution == 'RedHat'
|
||||
- name: Install iptables (SuSE only)
|
||||
package:
|
||||
name: iptables
|
||||
become: yes
|
||||
when: ansible_os_family == 'Suse'
|
||||
- name: Install ufw
|
||||
become: yes
|
||||
package:
|
||||
name: ufw
|
||||
|
||||
# Run the tests
|
||||
- block:
|
||||
- include_tasks: run-test.yml
|
||||
with_fileglob:
|
||||
- "tests/*.yml"
|
||||
become: yes
|
||||
|
||||
# Cleanup
|
||||
always:
|
||||
- pause:
|
||||
# ufw creates backups of the rule files with a timestamp; if reset is called
|
||||
# twice in a row fast enough (so that both timestamps are taken in the same second),
|
||||
# the second call will notice that the backup files are already there and fail.
|
||||
# Waiting one second fixes this problem.
|
||||
seconds: 1
|
||||
- name: Reset ufw to factory defaults and disable
|
||||
ufw:
|
||||
state: reset
|
|
@@ -1,21 +0,0 @@
|
|||
---
|
||||
- pause:
|
||||
# ufw creates backups of the rule files with a timestamp; if reset is called
|
||||
# twice in a row fast enough (so that both timestamps are taken in the same second),
|
||||
# the second call will notice that the backup files are already there and fail.
|
||||
# Waiting one second fixes this problem.
|
||||
seconds: 1
|
||||
- name: Reset ufw to factory defaults
|
||||
ufw:
|
||||
state: reset
|
||||
- name: Disable ufw
|
||||
ufw:
|
||||
# Some versions of ufw have a bug where reset does not disable the firewall.
|
||||
# That's why we explicitly deactivate here. See
|
||||
# https://bugs.launchpad.net/ufw/+bug/1810082
|
||||
state: disabled
|
||||
- name: "Loading tasks from {{ item }}"
|
||||
include_tasks: "{{ item }}"
|
||||
- name: Reset to factory defaults
|
||||
ufw:
|
||||
state: reset
|
|
@@ -1,402 +0,0 @@
|
|||
---
|
||||
# ############################################
|
||||
- name: Make sure it is off
|
||||
ufw:
|
||||
state: disabled
|
||||
- name: Enable (check mode)
|
||||
ufw:
|
||||
state: enabled
|
||||
check_mode: yes
|
||||
register: enable_check
|
||||
- name: Enable
|
||||
ufw:
|
||||
state: enabled
|
||||
register: enable
|
||||
- name: Enable (idempotency)
|
||||
ufw:
|
||||
state: enabled
|
||||
register: enable_idem
|
||||
- name: Enable (idempotency, check mode)
|
||||
ufw:
|
||||
state: enabled
|
||||
check_mode: yes
|
||||
register: enable_idem_check
|
||||
- assert:
|
||||
that:
|
||||
- enable_check is changed
|
||||
- enable is changed
|
||||
- enable_idem is not changed
|
||||
- enable_idem_check is not changed
|
||||
|
||||
# ############################################
|
||||
- name: ipv4 allow (check mode)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: 0.0.0.0
|
||||
check_mode: yes
|
||||
register: ipv4_allow_check
|
||||
- name: ipv4 allow
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: 0.0.0.0
|
||||
register: ipv4_allow
|
||||
- name: ipv4 allow (idempotency)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: 0.0.0.0
|
||||
register: ipv4_allow_idem
|
||||
- name: ipv4 allow (idempotency, check mode)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: 0.0.0.0
|
||||
check_mode: yes
|
||||
register: ipv4_allow_idem_check
|
||||
- assert:
|
||||
that:
|
||||
- ipv4_allow_check is changed
|
||||
- ipv4_allow is changed
|
||||
- ipv4_allow_idem is not changed
|
||||
- ipv4_allow_idem_check is not changed
|
||||
|
||||
# ############################################
|
||||
- name: delete ipv4 allow (check mode)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: 0.0.0.0
|
||||
delete: yes
|
||||
check_mode: yes
|
||||
register: delete_ipv4_allow_check
|
||||
- name: delete ipv4 allow
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: 0.0.0.0
|
||||
delete: yes
|
||||
register: delete_ipv4_allow
|
||||
- name: delete ipv4 allow (idempotency)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: 0.0.0.0
|
||||
delete: yes
|
||||
register: delete_ipv4_allow_idem
|
||||
- name: delete ipv4 allow (idempotency, check mode)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: 0.0.0.0
|
||||
delete: yes
|
||||
check_mode: yes
|
||||
register: delete_ipv4_allow_idem_check
|
||||
- assert:
|
||||
that:
|
||||
- delete_ipv4_allow_check is changed
|
||||
- delete_ipv4_allow is changed
|
||||
- delete_ipv4_allow_idem is not changed
|
||||
- delete_ipv4_allow_idem_check is not changed
|
||||
|
||||
# ############################################
|
||||
- name: ipv6 allow (check mode)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: "::"
|
||||
check_mode: yes
|
||||
register: ipv6_allow_check
|
||||
- name: ipv6 allow
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: "::"
|
||||
register: ipv6_allow
|
||||
- name: ipv6 allow (idempotency)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: "::"
|
||||
register: ipv6_allow_idem
|
||||
- name: ipv6 allow (idempotency, check mode)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: "::"
|
||||
check_mode: yes
|
||||
register: ipv6_allow_idem_check
|
||||
- assert:
|
||||
that:
|
||||
- ipv6_allow_check is changed
|
||||
- ipv6_allow is changed
|
||||
- ipv6_allow_idem is not changed
|
||||
- ipv6_allow_idem_check is not changed
|
||||
|
||||
# ############################################
|
||||
- name: delete ipv6 allow (check mode)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: "::"
|
||||
delete: yes
|
||||
check_mode: yes
|
||||
register: delete_ipv6_allow_check
|
||||
- name: delete ipv6 allow
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: "::"
|
||||
delete: yes
|
||||
register: delete_ipv6_allow
|
||||
- name: delete ipv6 allow (idempotency)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: "::"
|
||||
delete: yes
|
||||
register: delete_ipv6_allow_idem
|
||||
- name: delete ipv6 allow (idempotency, check mode)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: "::"
|
||||
delete: yes
|
||||
check_mode: yes
|
||||
register: delete_ipv6_allow_idem_check
|
||||
- assert:
|
||||
that:
|
||||
- delete_ipv6_allow_check is changed
|
||||
- delete_ipv6_allow is changed
|
||||
- delete_ipv6_allow_idem is not changed
|
||||
- delete_ipv6_allow_idem_check is not changed
|
||||
|
||||
|
||||
# ############################################
|
||||
- name: ipv4 allow (check mode)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: 0.0.0.0
|
||||
check_mode: yes
|
||||
register: ipv4_allow_check
|
||||
- name: ipv4 allow
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: 0.0.0.0
|
||||
register: ipv4_allow
|
||||
- name: ipv4 allow (idempotency)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: 0.0.0.0
|
||||
register: ipv4_allow_idem
|
||||
- name: ipv4 allow (idempotency, check mode)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: 0.0.0.0
|
||||
check_mode: yes
|
||||
register: ipv4_allow_idem_check
|
||||
- assert:
|
||||
that:
|
||||
- ipv4_allow_check is changed
|
||||
- ipv4_allow is changed
|
||||
- ipv4_allow_idem is not changed
|
||||
- ipv4_allow_idem_check is not changed
|
||||
|
||||
# ############################################
|
||||
- name: delete ipv4 allow (check mode)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: 0.0.0.0
|
||||
delete: yes
|
||||
check_mode: yes
|
||||
register: delete_ipv4_allow_check
|
||||
- name: delete ipv4 allow
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: 0.0.0.0
|
||||
delete: yes
|
||||
register: delete_ipv4_allow
|
||||
- name: delete ipv4 allow (idempotency)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: 0.0.0.0
|
||||
delete: yes
|
||||
register: delete_ipv4_allow_idem
|
||||
- name: delete ipv4 allow (idempotency, check mode)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: 0.0.0.0
|
||||
delete: yes
|
||||
check_mode: yes
|
||||
register: delete_ipv4_allow_idem_check
|
||||
- assert:
|
||||
that:
|
||||
- delete_ipv4_allow_check is changed
|
||||
- delete_ipv4_allow is changed
|
||||
- delete_ipv4_allow_idem is not changed
|
||||
- delete_ipv4_allow_idem_check is not changed
|
||||
|
||||
# ############################################
|
||||
- name: ipv6 allow (check mode)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: "::"
|
||||
check_mode: yes
|
||||
register: ipv6_allow_check
|
||||
- name: ipv6 allow
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: "::"
|
||||
register: ipv6_allow
|
||||
- name: ipv6 allow (idempotency)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: "::"
|
||||
register: ipv6_allow_idem
|
||||
- name: ipv6 allow (idempotency, check mode)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: "::"
|
||||
check_mode: yes
|
||||
register: ipv6_allow_idem_check
|
||||
- assert:
|
||||
that:
|
||||
- ipv6_allow_check is changed
|
||||
- ipv6_allow is changed
|
||||
- ipv6_allow_idem is not changed
|
||||
- ipv6_allow_idem_check is not changed
|
||||
|
||||
# ############################################
|
||||
- name: delete ipv6 allow (check mode)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: "::"
|
||||
delete: yes
|
||||
check_mode: yes
|
||||
register: delete_ipv6_allow_check
|
||||
- name: delete ipv6 allow
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: "::"
|
||||
delete: yes
|
||||
register: delete_ipv6_allow
|
||||
- name: delete ipv6 allow (idempotency)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: "::"
|
||||
delete: yes
|
||||
register: delete_ipv6_allow_idem
|
||||
- name: delete ipv6 allow (idempotency, check mode)
|
||||
ufw:
|
||||
rule: allow
|
||||
port: 23
|
||||
to_ip: "::"
|
||||
delete: yes
|
||||
check_mode: yes
|
||||
register: delete_ipv6_allow_idem_check
|
||||
- assert:
|
||||
that:
|
||||
- delete_ipv6_allow_check is changed
|
||||
- delete_ipv6_allow is changed
|
||||
- delete_ipv6_allow_idem is not changed
|
||||
- delete_ipv6_allow_idem_check is not changed
|
||||
|
||||
# ############################################
|
||||
- name: Reload ufw
|
||||
ufw:
|
||||
state: reloaded
|
||||
register: reload
|
||||
- name: Reload ufw (check mode)
|
||||
ufw:
|
||||
state: reloaded
|
||||
check_mode: yes
|
||||
register: reload_check
|
||||
- assert:
|
||||
that:
|
||||
- reload is changed
|
||||
- reload_check is changed
|
||||
|
||||
# ############################################
|
||||
- name: Disable (check mode)
|
||||
ufw:
|
||||
state: disabled
|
||||
check_mode: yes
|
||||
register: disable_check
|
||||
- name: Disable
|
||||
ufw:
|
||||
state: disabled
|
||||
register: disable
|
||||
- name: Disable (idempotency)
|
||||
ufw:
|
||||
state: disabled
|
||||
register: disable_idem
|
||||
- name: Disable (idempotency, check mode)
|
||||
ufw:
|
||||
state: disabled
|
||||
check_mode: yes
|
||||
register: disable_idem_check
|
||||
- assert:
|
||||
that:
|
||||
- disable_check is changed
|
||||
- disable is changed
|
||||
- disable_idem is not changed
|
||||
- disable_idem_check is not changed
|
||||
|
||||
# ############################################
|
||||
- name: Re-enable
|
||||
ufw:
|
||||
state: enabled
|
||||
- name: Reset (check mode)
|
||||
ufw:
|
||||
state: reset
|
||||
check_mode: yes
|
||||
register: reset_check
|
||||
- pause:
|
||||
# Should not be needed, but since ufw is ignoring --dry-run for reset
|
||||
# (https://bugs.launchpad.net/ufw/+bug/1810082) we have to wait here as well.
|
||||
seconds: 1
|
||||
- name: Reset
|
||||
ufw:
|
||||
state: reset
|
||||
register: reset
|
||||
- pause:
|
||||
# ufw creates backups of the rule files with a timestamp; if reset is called
|
||||
# twice in a row fast enough (so that both timestamps are taken in the same second),
|
||||
# the second call will notice that the backup files are already there and fail.
|
||||
# Waiting one second fixes this problem.
|
||||
seconds: 1
|
||||
- name: Reset (idempotency)
|
||||
ufw:
|
||||
state: reset
|
||||
register: reset_idem
|
||||
- pause:
|
||||
# Should not be needed, but since ufw is ignoring --dry-run for reset
|
||||
# (https://bugs.launchpad.net/ufw/+bug/1810082) we have to wait here as well.
|
||||
seconds: 1
|
||||
- name: Reset (idempotency, check mode)
|
||||
ufw:
|
||||
state: reset
|
||||
check_mode: yes
|
||||
register: reset_idem_check
|
||||
- assert:
|
||||
that:
|
||||
- reset_check is changed
|
||||
- reset is changed
|
||||
- reset_idem is changed
|
||||
- reset_idem_check is changed
|
|
@@ -1,150 +0,0 @@
|
|||
---
|
||||
- name: Enable ufw
|
||||
ufw:
|
||||
state: enabled
|
||||
|
||||
# ############################################
|
||||
- name: Make sure logging is off
|
||||
ufw:
|
||||
logging: no
|
||||
- name: Logging (check mode)
|
||||
ufw:
|
||||
logging: yes
|
||||
check_mode: yes
|
||||
register: logging_check
|
||||
- name: Logging
|
||||
ufw:
|
||||
logging: yes
|
||||
register: logging
|
||||
- name: Get logging
|
||||
shell: |
|
||||
ufw status verbose | grep "^Logging:"
|
||||
register: ufw_logging
|
||||
environment:
|
||||
LC_ALL: C
|
||||
- name: Logging (idempotency)
|
||||
ufw:
|
||||
logging: yes
|
||||
register: logging_idem
|
||||
- name: Logging (idempotency, check mode)
|
||||
ufw:
|
||||
logging: yes
|
||||
check_mode: yes
|
||||
register: logging_idem_check
|
||||
- name: Logging (change, check mode)
|
||||
ufw:
|
||||
logging: full
|
||||
check_mode: yes
|
||||
register: logging_change_check
|
||||
- name: Logging (change)
|
||||
ufw:
|
||||
logging: full
|
||||
register: logging_change
|
||||
- name: Get logging
|
||||
shell: |
|
||||
ufw status verbose | grep "^Logging:"
|
||||
register: ufw_logging_change
|
||||
environment:
|
||||
LC_ALL: C
|
||||
- assert:
|
||||
that:
|
||||
- logging_check is changed
|
||||
- logging is changed
|
||||
- "ufw_logging.stdout == 'Logging: on (low)'"
|
||||
- logging_idem is not changed
|
||||
- logging_idem_check is not changed
|
||||
- "ufw_logging_change.stdout == 'Logging: on (full)'"
|
||||
- logging_change is changed
|
||||
- logging_change_check is changed
|
||||
|
||||
# ############################################
|
||||
- name: Default (check mode)
|
||||
ufw:
|
||||
default: reject
|
||||
direction: incoming
|
||||
check_mode: yes
|
||||
register: default_check
|
||||
- name: Default
|
||||
ufw:
|
||||
default: reject
|
||||
direction: incoming
|
||||
register: default
|
||||
- name: Get defaults
|
||||
shell: |
|
||||
ufw status verbose | grep "^Default:"
|
||||
register: ufw_defaults
|
||||
environment:
|
||||
LC_ALL: C
|
||||
- name: Default (idempotency)
|
||||
ufw:
|
||||
default: reject
|
||||
direction: incoming
|
||||
register: default_idem
|
||||
- name: Default (idempotency, check mode)
|
||||
ufw:
|
||||
default: reject
|
||||
direction: incoming
|
||||
check_mode: yes
|
||||
register: default_idem_check
|
||||
- name: Default (change, check mode)
|
||||
ufw:
|
||||
default: allow
|
||||
direction: incoming
|
||||
check_mode: yes
|
||||
register: default_change_check
|
||||
- name: Default (change)
|
||||
ufw:
|
||||
default: allow
|
||||
direction: incoming
|
||||
register: default_change
|
||||
- name: Get defaults
|
||||
shell: |
|
||||
ufw status verbose | grep "^Default:"
|
||||
register: ufw_defaults_change
|
||||
environment:
|
||||
LC_ALL: C
|
||||
- name: Default (change again)
|
||||
ufw:
|
||||
default: deny
|
||||
direction: incoming
|
||||
register: default_change_2
|
||||
- name: Default (change incoming implicitly, check mode)
|
||||
ufw:
|
||||
default: allow
|
||||
check_mode: yes
|
||||
register: default_change_implicit_check
|
||||
- name: Default (change incoming implicitly)
|
||||
ufw:
|
||||
default: allow
|
||||
register: default_change_implicit
|
||||
- name: Get defaults
|
||||
shell: |
|
||||
ufw status verbose | grep "^Default:"
|
||||
register: ufw_defaults_change_implicit
|
||||
environment:
|
||||
LC_ALL: C
|
||||
- name: Default (change incoming implicitly, idempotent, check mode)
|
||||
ufw:
|
||||
default: allow
|
||||
check_mode: yes
|
||||
register: default_change_implicit_idem_check
|
||||
- name: Default (change incoming implicitly, idempotent)
|
||||
ufw:
|
||||
default: allow
|
||||
register: default_change_implicit_idem
|
||||
- assert:
|
||||
that:
|
||||
- default_check is changed
|
||||
- default is changed
|
||||
- "'reject (incoming)' in ufw_defaults.stdout"
|
||||
- default_idem is not changed
|
||||
- default_idem_check is not changed
|
||||
- default_change_check is changed
|
||||
- default_change is changed
|
||||
- "'allow (incoming)' in ufw_defaults_change.stdout"
|
||||
- default_change_2 is changed
|
||||
- default_change_implicit_check is changed
|
||||
- default_change_implicit is changed
|
||||
- default_change_implicit_idem_check is not changed
|
||||
- default_change_implicit_idem is not changed
|
||||
- "'allow (incoming)' in ufw_defaults_change_implicit.stdout"
|
|
@@ -1,80 +0,0 @@
|
|||
---
|
||||
- name: Enable
|
||||
ufw:
|
||||
state: enabled
|
||||
register: enable
|
||||
|
||||
# ## CREATE RULES ############################
|
||||
- name: ipv4
|
||||
ufw:
|
||||
rule: deny
|
||||
port: 22
|
||||
to_ip: 0.0.0.0
|
||||
- name: ipv4
|
||||
ufw:
|
||||
rule: deny
|
||||
port: 23
|
||||
to_ip: 0.0.0.0
|
||||
|
||||
- name: ipv6
|
||||
ufw:
|
||||
rule: deny
|
||||
port: 122
|
||||
to_ip: "::"
|
||||
- name: ipv6
|
||||
ufw:
|
||||
rule: deny
|
||||
port: 123
|
||||
to_ip: "::"
|
||||
|
||||
- name: first-ipv4
|
||||
ufw:
|
||||
rule: deny
|
||||
port: 10
|
||||
to_ip: 0.0.0.0
|
||||
insert: 0
|
||||
insert_relative_to: first-ipv4
|
||||
- name: last-ipv4
|
||||
ufw:
|
||||
rule: deny
|
||||
port: 11
|
||||
to_ip: 0.0.0.0
|
||||
insert: 0
|
||||
insert_relative_to: last-ipv4
|
||||
|
||||
- name: first-ipv6
|
||||
ufw:
|
||||
rule: deny
|
||||
port: 110
|
||||
to_ip: "::"
|
||||
insert: 0
|
||||
insert_relative_to: first-ipv6
|
||||
- name: last-ipv6
|
||||
ufw:
|
||||
rule: deny
|
||||
port: 111
|
||||
to_ip: "::"
|
||||
insert: 0
|
||||
insert_relative_to: last-ipv6
|
||||
|
||||
# ## CHECK RESULT ############################
|
||||
- name: Get rules
|
||||
shell: |
|
||||
ufw status | grep DENY | cut -f 1-2 -d ' ' | grep -E "^(0\.0\.0\.0|::) [123]+"
|
||||
# Note that there was also a rule "ff02::fb mDNS" on at least one CI run;
|
||||
# to ignore these, the extra filtering (grepping for DENY and the regex) makes
|
||||
# sure to remove all rules not added here.
|
||||
register: ufw_status
|
||||
- assert:
|
||||
that:
|
||||
- ufw_status.stdout_lines == expected_stdout
|
||||
vars:
|
||||
expected_stdout:
|
||||
- "0.0.0.0 10"
|
||||
- "0.0.0.0 22"
|
||||
- "0.0.0.0 11"
|
||||
- "0.0.0.0 23"
|
||||
- ":: 110"
|
||||
- ":: 122"
|
||||
- ":: 111"
|
||||
- ":: 123"
|
|
@@ -1,81 +0,0 @@
|
|||
- name: Enable
|
||||
ufw:
|
||||
state: enabled
|
||||
|
||||
- name: Route with interface in and out
|
||||
ufw:
|
||||
rule: allow
|
||||
route: yes
|
||||
interface_in: foo
|
||||
interface_out: bar
|
||||
proto: tcp
|
||||
from_ip: 1.1.1.1
|
||||
to_ip: 8.8.8.8
|
||||
from_port: 1111
|
||||
to_port: 2222
|
||||
|
||||
- name: Route with interface in
|
||||
ufw:
|
||||
rule: allow
|
||||
route: yes
|
||||
interface_in: foo
|
||||
proto: tcp
|
||||
from_ip: 1.1.1.1
|
||||
from_port: 1111
|
||||
|
||||
- name: Route with interface out
|
||||
ufw:
|
||||
rule: allow
|
||||
route: yes
|
||||
interface_out: bar
|
||||
proto: tcp
|
||||
from_ip: 1.1.1.1
|
||||
from_port: 1111
|
||||
|
||||
- name: Non-route with interface in
|
||||
ufw:
|
||||
rule: allow
|
||||
interface_in: foo
|
||||
proto: tcp
|
||||
from_ip: 1.1.1.1
|
||||
from_port: 3333
|
||||
|
||||
- name: Non-route with interface out
|
||||
ufw:
|
||||
rule: allow
|
||||
interface_out: bar
|
||||
proto: tcp
|
||||
from_ip: 1.1.1.1
|
||||
from_port: 4444
|
||||
|
||||
- name: Check result
|
||||
shell: ufw status |grep -E '(ALLOW|DENY|REJECT|LIMIT)' |sed -E 's/[ \t]+/ /g'
|
||||
register: ufw_status
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- '"8.8.8.8 2222/tcp on bar ALLOW FWD 1.1.1.1 1111/tcp on foo " in stdout'
|
||||
- '"Anywhere ALLOW FWD 1.1.1.1 1111/tcp on foo " in stdout'
|
||||
- '"Anywhere on bar ALLOW FWD 1.1.1.1 1111/tcp " in stdout'
|
||||
- '"Anywhere on foo ALLOW 1.1.1.1 3333/tcp " in stdout'
|
||||
- '"Anywhere ALLOW OUT 1.1.1.1 4444/tcp on bar " in stdout'
|
||||
vars:
|
||||
stdout: '{{ ufw_status.stdout_lines }}'
|
||||
|
||||
- name: Non-route with interface_in and interface_out
|
||||
ufw:
|
||||
rule: allow
|
||||
interface_in: foo
|
||||
interface_out: bar
|
||||
proto: tcp
|
||||
from_ip: 1.1.1.1
|
||||
from_port: 1111
|
||||
to_ip: 8.8.8.8
|
||||
to_port: 2222
|
||||
ignore_errors: yes
|
||||
register: ufw_non_route_iface
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- ufw_non_route_iface is failed
|
||||
- '"Only route rules" in ufw_non_route_iface.msg'
|
|
@@ -1,3 +0,0 @@
cloud/vcenter
shippable/vcenter/incidental
needs/target/incidental_vmware_prepare_tests
@@ -1,110 +0,0 @@
|
|||
# Test code for the vmware_guest_custom_attributes module.
|
||||
# Copyright: (c) 2018, Abhijeet Kasurde <akasurde@redhat.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
# TODO: Current pinned version of vcsim does not support custom fields
|
||||
# commenting testcase below
|
||||
- import_role:
|
||||
name: incidental_vmware_prepare_tests
|
||||
vars:
|
||||
setup_attach_host: true
|
||||
setup_datastore: true
|
||||
setup_virtualmachines: true
|
||||
- name: Add custom attribute to the given virtual machine
|
||||
vmware_guest_custom_attributes:
|
||||
validate_certs: False
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter: "{{ dc1 }}"
|
||||
name: "{{ virtual_machines[0].name }}"
|
||||
folder: "{{ virtual_machines[0].folder }}"
|
||||
state: present
|
||||
attributes:
|
||||
- name: 'sample_1'
|
||||
value: 'sample_1_value'
|
||||
- name: 'sample_2'
|
||||
value: 'sample_2_value'
|
||||
- name: 'sample_3'
|
||||
value: 'sample_3_value'
|
||||
register: guest_info_0001
|
||||
|
||||
- debug: var=guest_info_0001
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- guest_info_0001 is changed
|
||||
|
||||
- name: Add custom attribute to the given virtual machine again
|
||||
vmware_guest_custom_attributes:
|
||||
validate_certs: False
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter: "{{ dc1 }}"
|
||||
name: "{{ virtual_machines[0].name }}"
|
||||
folder: "{{ virtual_machines[0].folder }}"
|
||||
state: present
|
||||
attributes:
|
||||
- name: 'sample_1'
|
||||
value: 'sample_1_value'
|
||||
- name: 'sample_2'
|
||||
value: 'sample_2_value'
|
||||
- name: 'sample_3'
|
||||
value: 'sample_3_value'
|
||||
register: guest_info_0002
|
||||
|
||||
- debug: var=guest_info_0002
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- not (guest_info_0002 is changed)
|
||||
|
||||
- name: Remove custom attribute to the given virtual machine
|
||||
vmware_guest_custom_attributes:
|
||||
validate_certs: False
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter: "{{ dc1 }}"
|
||||
name: "{{ virtual_machines[0].name }}"
|
||||
folder: "{{ virtual_machines[0].folder }}"
|
||||
state: absent
|
||||
attributes:
|
||||
- name: 'sample_1'
|
||||
- name: 'sample_2'
|
||||
- name: 'sample_3'
|
||||
register: guest_info_0004
|
||||
|
||||
- debug: msg="{{ guest_info_0004 }}"
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "guest_info_0004.changed"
|
||||
|
||||
# TODO: vcsim returns duplicate values so removing custom attributes
|
||||
# results in change. vCenter show correct behavior. Commenting this
|
||||
# till this is supported by vcsim.
|
||||
- when: vcsim is not defined
|
||||
block:
|
||||
- name: Remove custom attribute to the given virtual machine again
|
||||
vmware_guest_custom_attributes:
|
||||
validate_certs: False
|
||||
hostname: '{{ vcenter_hostname }}'
|
||||
username: '{{ vcenter_username }}'
|
||||
password: '{{ vcenter_password }}'
|
||||
datacenter: "{{ dc1 }}"
|
||||
name: "{{ virtual_machines[0].name }}"
|
||||
folder: "{{ virtual_machines[0].folder }}"
|
||||
state: absent
|
||||
attributes:
|
||||
- name: 'sample_1'
|
||||
- name: 'sample_2'
|
||||
- name: 'sample_3'
|
||||
register: guest_info_0005
|
||||
|
||||
- debug: var=guest_info_0005
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- not (guest_info_0005 is changed)
|
|
@@ -1,2 +0,0 @@
shippable/vyos/incidental
network/vyos
@@ -1,3 +0,0 @@
---
testcase: "*"
test_items: []
@@ -1,22 +0,0 @@
---
- name: collect all cli test cases
  find:
    paths: "{{ role_path }}/tests/cli"
    patterns: "{{ testcase }}.yaml"
  register: test_cases
  delegate_to: localhost

- name: set test_items
  set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"

- name: run test case (connection=ansible.netcommon.network_cli)
  include: "{{ test_case_to_run }} ansible_connection=ansible.netcommon.network_cli"
  with_items: "{{ test_items }}"
  loop_control:
    loop_var: test_case_to_run

- name: run test case (connection=local)
  include: "{{ test_case_to_run }} ansible_connection=local"
  with_first_found: "{{ test_items }}"
  loop_control:
    loop_var: test_case_to_run
@@ -1,2 +0,0 @@
---
- {include: cli.yaml, tags: ['cli']}
@@ -1,126 +0,0 @@
|
|||
---
|
||||
- debug: msg="START cli/basic.yaml on connection={{ ansible_connection }}"
|
||||
|
||||
- name: set-up logging
|
||||
vyos.vyos.vyos_logging:
|
||||
dest: console
|
||||
facility: all
|
||||
level: info
|
||||
state: present
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- 'result.changed == true'
|
||||
- '"set system syslog console facility all level info" in result.commands'
|
||||
|
||||
- name: set-up logging again (idempotent)
|
||||
vyos.vyos.vyos_logging:
|
||||
dest: console
|
||||
facility: all
|
||||
level: info
|
||||
state: present
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- 'result.changed == false'
|
||||
|
||||
- name: file logging
|
||||
vyos.vyos.vyos_logging:
|
||||
dest: file
|
||||
name: test
|
||||
facility: all
|
||||
level: notice
|
||||
state: present
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- 'result.changed == true'
|
||||
- '"set system syslog file test facility all level notice" in result.commands'
|
||||
|
||||
- name: file logging again (idempotent)
|
||||
vyos.vyos.vyos_logging:
|
||||
dest: file
|
||||
name: test
|
||||
facility: all
|
||||
level: notice
|
||||
state: present
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- 'result.changed == false'
|
||||
|
||||
- name: delete logging
|
||||
vyos.vyos.vyos_logging:
|
||||
dest: file
|
||||
name: test
|
||||
facility: all
|
||||
level: notice
|
||||
state: absent
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- 'result.changed == true'
|
||||
- '"delete system syslog file test facility all level notice" in result.commands'
|
||||
|
||||
- name: delete logging again (idempotent)
|
||||
vyos.vyos.vyos_logging:
|
||||
dest: file
|
||||
name: test
|
||||
facility: all
|
||||
level: notice
|
||||
state: absent
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- 'result.changed == false'
|
||||
|
||||
- name: Add logging collections
|
||||
vyos.vyos.vyos_logging:
|
||||
aggregate:
|
||||
- {dest: file, name: test1, facility: all, level: info}
|
||||
- {dest: file, name: test2, facility: news, level: debug}
|
||||
state: present
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- 'result.changed == true'
|
||||
- '"set system syslog file test1 facility all level info" in result.commands'
|
||||
- '"set system syslog file test2 facility news level debug" in result.commands'
|
||||
|
||||
- name: Add and remove logging collections with overrides
|
||||
vyos.vyos.vyos_logging:
|
||||
aggregate:
|
||||
- {dest: console, facility: all, level: info}
|
||||
- {dest: file, name: test1, facility: all, level: info, state: absent}
|
||||
- {dest: console, facility: daemon, level: warning}
|
||||
state: present
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- 'result.changed == true'
|
||||
- '"delete system syslog file test1 facility all level info" in result.commands'
|
||||
- '"set system syslog console facility daemon level warning" in result.commands'
|
||||
|
||||
- name: Remove logging collections
|
||||
vyos.vyos.vyos_logging:
|
||||
aggregate:
|
||||
- {dest: console, facility: all, level: info}
|
||||
- {dest: console, facility: daemon, level: warning}
|
||||
- {dest: file, name: test2, facility: news, level: debug}
|
||||
state: absent
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- 'result.changed == true'
|
||||
- '"delete system syslog console facility all level info" in result.commands'
|
||||
- '"delete system syslog console facility daemon level warning" in result.commands'
|
||||
- '"delete system syslog file test2 facility news level debug" in result.commands'
|
|
@@ -1,39 +0,0 @@
---
- debug: msg="START vyos cli/net_logging.yaml on connection={{ ansible_connection }}"

# Add minimal testcase to check args are passed correctly to
# implementation module and module run is successful.

- name: delete logging - setup
  ansible.netcommon.net_logging:
    dest: file
    name: test
    facility: all
    level: notice
    state: absent
  register: result

- name: file logging using platform agnostic module
  ansible.netcommon.net_logging:
    dest: file
    name: test
    facility: all
    level: notice
    state: present
  register: result

- assert:
    that:
      - 'result.changed == true'
      - '"set system syslog file test facility all level notice" in result.commands'

- name: delete logging - teardown
  ansible.netcommon.net_logging:
    dest: file
    name: test
    facility: all
    level: notice
    state: absent
  register: result

- debug: msg="END vyos cli/net_logging.yaml on connection={{ ansible_connection }}"
@@ -1,2 +0,0 @@
shippable/vyos/incidental
network/vyos
@@ -1,3 +0,0 @@
---
testcase: "*"
test_items: []
@@ -1,22 +0,0 @@
---
- name: collect all cli test cases
  find:
    paths: "{{ role_path }}/tests/cli"
    patterns: "{{ testcase }}.yaml"
  register: test_cases
  delegate_to: localhost

- name: set test_items
  set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"

- name: run test case (connection=ansible.netcommon.network_cli)
  include: "{{ test_case_to_run }} ansible_connection=ansible.netcommon.network_cli"
  with_items: "{{ test_items }}"
  loop_control:
    loop_var: test_case_to_run

- name: run test case (connection=local)
  include: "{{ test_case_to_run }} ansible_connection=local"
  with_first_found: "{{ test_items }}"
  loop_control:
    loop_var: test_case_to_run
@@ -1,2 +0,0 @@
---
- {include: cli.yaml, tags: ['cli']}
@@ -1,120 +0,0 @@
|
|||
---
|
||||
- debug: msg="START cli/basic.yaml on connection={{ ansible_connection }}"
|
||||
|
||||
- name: create static route
|
||||
vyos.vyos.vyos_static_route:
|
||||
prefix: 172.24.0.0/24
|
||||
next_hop: 192.168.42.64
|
||||
state: present
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- 'result.changed == true'
|
||||
- '"set protocols static route 172.24.0.0/24 next-hop 192.168.42.64" in result.commands'
|
||||
|
||||
- name: create static route again (idempotent)
|
||||
vyos.vyos.vyos_static_route:
|
||||
prefix: 172.24.0.0
|
||||
mask: 24
|
||||
next_hop: 192.168.42.64
|
||||
state: present
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- 'result.changed == false'
|
||||
|
||||
- name: modify admin distance of static route
|
||||
vyos.vyos.vyos_static_route:
|
||||
prefix: 172.24.0.0/24
|
||||
next_hop: 192.168.42.64
|
||||
admin_distance: 1
|
||||
state: present
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- 'result.changed == true'
|
||||
- '"set protocols static route 172.24.0.0/24 next-hop 192.168.42.64 distance 1" in result.commands'
|
||||
|
||||
- name: modify admin distance of static route again (idempotent)
|
||||
vyos.vyos.vyos_static_route:
|
||||
prefix: 172.24.0.0
|
||||
mask: 24
|
||||
next_hop: 192.168.42.64
|
||||
admin_distance: 1
|
||||
state: present
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- 'result.changed == false'
|
||||
|
||||
- name: delete static route
|
||||
vyos.vyos.vyos_static_route:
|
||||
prefix: 172.24.0.0/24
|
||||
next_hop: 192.168.42.64
|
||||
admin_distance: 1
|
||||
state: absent
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- 'result.changed == true'
|
||||
- '"delete protocols static route 172.24.0.0/24" in result.commands'
|
||||
|
||||
- name: delete static route again (idempotent)
|
||||
vyos.vyos.vyos_static_route:
|
||||
prefix: 172.24.0.0/24
|
||||
next_hop: 192.168.42.64
|
||||
admin_distance: 1
|
||||
state: absent
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- 'result.changed == false'
|
||||
|
||||
- name: Add static route collections
|
||||
vyos.vyos.vyos_static_route:
|
||||
aggregate:
|
||||
- {prefix: 172.24.1.0/24, next_hop: 192.168.42.64}
|
||||
- {prefix: 172.24.2.0, mask: 24, next_hop: 192.168.42.64}
|
||||
state: present
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- 'result.changed == true'
|
||||
- '"set protocols static route 172.24.1.0/24 next-hop 192.168.42.64" in result.commands'
|
||||
- '"set protocols static route 172.24.2.0/24 next-hop 192.168.42.64" in result.commands'
|
||||
|
||||
- name: Add and remove static route collections with overrides
|
||||
vyos.vyos.vyos_static_route:
|
||||
aggregate:
|
||||
- {prefix: 172.24.1.0/24, next_hop: 192.168.42.64}
|
||||
- {prefix: 172.24.2.0/24, next_hop: 192.168.42.64, state: absent}
|
||||
- {prefix: 172.24.3.0/24, next_hop: 192.168.42.64}
|
||||
state: present
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- 'result.changed == true'
|
||||
- '"delete protocols static route 172.24.2.0/24" in result.commands'
|
||||
- '"set protocols static route 172.24.3.0/24 next-hop 192.168.42.64" in result.commands'
|
||||
|
||||
- name: Remove static route collections
|
||||
vyos.vyos.vyos_static_route:
|
||||
aggregate:
|
||||
- {prefix: 172.24.1.0/24, next_hop: 192.168.42.64}
|
||||
- {prefix: 172.24.3.0/24, next_hop: 192.168.42.64}
|
||||
state: absent
|
||||
register: result
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- 'result.changed == true'
|
||||
- '"delete protocols static route 172.24.1.0/24" in result.commands'
|
||||
- '"delete protocols static route 172.24.3.0/24" in result.commands'
|
|
@@ -1,33 +0,0 @@
---
- debug: msg="START vyos cli/net_static_route.yaml on connection={{ ansible_connection }}"

# Add minimal testcase to check args are passed correctly to
# implementation module and module run is successful.

- name: delete static route - setup
  ansible.netcommon.net_static_route:
    prefix: 172.24.0.0/24
    next_hop: 192.168.42.64
    state: absent
  register: result

- name: create static route using platform agnostic module
  ansible.netcommon.net_static_route:
    prefix: 172.24.0.0/24
    next_hop: 192.168.42.64
    state: present
  register: result

- assert:
    that:
      - 'result.changed == true'
      - '"set protocols static route 172.24.0.0/24 next-hop 192.168.42.64" in result.commands'

- name: delete static route - teardown
  ansible.netcommon.net_static_route:
    prefix: 172.24.0.0/24
    next_hop: 192.168.42.64
    state: absent
  register: result

- debug: msg="END vyos cli/net_static_route.yaml on connection={{ ansible_connection }}"
@@ -1,2 +0,0 @@
shippable/windows/incidental
windows
@@ -1,13 +0,0 @@
---
test_win_hosts_cname: testhost
test_win_hosts_ip: 192.168.168.1

test_win_hosts_aliases_set:
  - alias1
  - alias2
  - alias3
  - alias4

test_win_hosts_aliases_remove:
  - alias3
  - alias4
@@ -1,2 +0,0 @@
dependencies:
  - setup_remote_tmp_dir
@@ -1,17 +0,0 @@
---
- name: take a copy of the original hosts file
  win_copy:
    src: C:\Windows\System32\drivers\etc\hosts
    dest: '{{ remote_tmp_dir }}\hosts'
    remote_src: yes

- block:
  - name: run tests
    include_tasks: tests.yml

  always:
  - name: restore hosts file
    win_copy:
      src: '{{ remote_tmp_dir }}\hosts'
      dest: C:\Windows\System32\drivers\etc\hosts
      remote_src: yes
@@ -1,189 +0,0 @@
|
|||
---
|
||||
|
||||
- name: add a simple host with address
|
||||
win_hosts:
|
||||
state: present
|
||||
ip_address: "{{ test_win_hosts_ip }}"
|
||||
canonical_name: "{{ test_win_hosts_cname }}"
|
||||
register: add_ip
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "add_ip.changed == true"
|
||||
|
||||
- name: get actual dns result
|
||||
win_shell: "try{ [array]$t = [Net.DNS]::GetHostEntry('{{ test_win_hosts_cname }}') } catch { return 'false' } if ($t[0].HostName -eq '{{ test_win_hosts_cname }}' -and $t[0].AddressList[0].toString() -eq '{{ test_win_hosts_ip }}'){ return 'true' } else { return 'false' }"
|
||||
register: add_ip_actual
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "add_ip_actual.stdout_lines[0]|lower == 'true'"
|
||||
|
||||
- name: add a simple host with ipv4 address (idempotent)
|
||||
win_hosts:
|
||||
state: present
|
||||
ip_address: "{{ test_win_hosts_ip }}"
|
||||
canonical_name: "{{ test_win_hosts_cname }}"
|
||||
register: add_ip
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "add_ip.changed == false"
|
||||
|
||||
- name: remove simple host
|
||||
win_hosts:
|
||||
state: absent
|
||||
ip_address: "{{ test_win_hosts_ip }}"
|
||||
canonical_name: "{{ test_win_hosts_cname }}"
|
||||
register: remove_ip
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "remove_ip.changed == true"
|
||||
|
||||
- name: get actual dns result
|
||||
win_shell: "try{ [array]$t = [Net.DNS]::GetHostEntry('{{ test_win_hosts_cname}}') } catch { return 'false' } if ($t[0].HostName -eq '{{ test_win_hosts_cname }}' -and $t[0].AddressList[0].toString() -eq '{{ test_win_hosts_ip }}'){ return 'true' } else { return 'false' }"
|
||||
register: remove_ip_actual
|
||||
failed_when: "remove_ip_actual.rc == 0"
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "remove_ip_actual.stdout_lines[0]|lower == 'false'"
|
||||
|
||||
- name: remove simple host (idempotent)
|
||||
win_hosts:
|
||||
state: absent
|
||||
ip_address: "{{ test_win_hosts_ip }}"
|
||||
canonical_name: "{{ test_win_hosts_cname }}"
|
||||
register: remove_ip
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "remove_ip.changed == false"
|
||||
|
||||
- name: add host and set aliases
|
||||
win_hosts:
|
||||
state: present
|
||||
ip_address: "{{ test_win_hosts_ip }}"
|
||||
canonical_name: "{{ test_win_hosts_cname }}"
|
||||
aliases: "{{ test_win_hosts_aliases_set | union(test_win_hosts_aliases_remove) }}"
|
||||
action: set
|
||||
register: set_aliases
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "set_aliases.changed == true"
|
||||
|
||||
- name: get actual dns result for host
|
||||
win_shell: "try{ [array]$t = [Net.DNS]::GetHostEntry('{{ test_win_hosts_cname }}') } catch { return 'false' } if ($t[0].HostName -eq '{{ test_win_hosts_cname }}' -and $t[0].AddressList[0].toString() -eq '{{ test_win_hosts_ip }}'){ return 'true' } else { return 'false' }"
|
||||
register: set_aliases_actual_host
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "set_aliases_actual_host.stdout_lines[0]|lower == 'true'"
|
||||
|
||||
- name: get actual dns results for aliases
|
||||
win_shell: "try{ [array]$t = [Net.DNS]::GetHostEntry('{{ item }}') } catch { return 'false' } if ($t[0].HostName -eq '{{ test_win_hosts_cname }}' -and $t[0].AddressList[0].toString() -eq '{{ test_win_hosts_ip }}'){ return 'true' } else { return 'false' }"
|
||||
register: set_aliases_actual
|
||||
with_items: "{{ test_win_hosts_aliases_set | union(test_win_hosts_aliases_remove) }}"
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "item.stdout_lines[0]|lower == 'true'"
|
||||
with_items: "{{ set_aliases_actual.results }}"
|
||||
|
||||
- name: add host and set aliases (idempotent)
|
||||
win_hosts:
|
||||
state: present
|
||||
ip_address: "{{ test_win_hosts_ip }}"
|
||||
canonical_name: "{{ test_win_hosts_cname }}"
|
||||
aliases: "{{ test_win_hosts_aliases_set | union(test_win_hosts_aliases_remove) }}"
|
||||
action: set
|
||||
register: set_aliases
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "set_aliases.changed == false"
|
||||
|
||||
- name: remove aliases from the list
|
||||
win_hosts:
|
||||
state: present
|
||||
ip_address: "{{ test_win_hosts_ip }}"
|
||||
canonical_name: "{{ test_win_hosts_cname }}"
|
||||
aliases: "{{ test_win_hosts_aliases_remove }}"
|
||||
action: remove
|
||||
register: remove_aliases
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "remove_aliases.changed == true"
|
||||
|
||||
- name: get actual dns result for removed aliases
|
||||
win_shell: "try{ [array]$t = [Net.DNS]::GetHostEntry('{{ item }}') } catch { return 'false' } if ($t[0].HostName -eq '{{ test_win_hosts_cname }}' -and $t[0].AddressList[0].toString() -eq '{{ test_win_hosts_ip }}'){ return 'true' } else { return 'false' }"
|
||||
register: remove_aliases_removed_actual
|
||||
failed_when: "remove_aliases_removed_actual.rc == 0"
|
||||
with_items: "{{ test_win_hosts_aliases_remove }}"
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "item.stdout_lines[0]|lower == 'false'"
|
||||
with_items: "{{ remove_aliases_removed_actual.results }}"
|
||||
|
||||
- name: get actual dns result for remaining aliases
|
||||
win_shell: "try{ [array]$t = [Net.DNS]::GetHostEntry('{{ item }}') } catch { return 'false' } if ($t[0].HostName -eq '{{ test_win_hosts_cname }}' -and $t[0].AddressList[0].toString() -eq '{{ test_win_hosts_ip }}'){ return 'true' } else { return 'false' }"
|
||||
register: remove_aliases_remain_actual
|
||||
with_items: "{{ test_win_hosts_aliases_set | difference(test_win_hosts_aliases_remove) }}"
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "item.stdout_lines[0]|lower == 'true'"
|
||||
with_items: "{{ remove_aliases_remain_actual.results }}"
|
||||
|
||||
- name: remove aliases from the list (idempotent)
|
||||
win_hosts:
|
||||
state: present
|
||||
ip_address: "{{ test_win_hosts_ip }}"
|
||||
canonical_name: "{{ test_win_hosts_cname }}"
|
||||
aliases: "{{ test_win_hosts_aliases_remove }}"
|
||||
action: remove
|
||||
register: remove_aliases
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "remove_aliases.changed == false"
|
||||
|
||||
- name: add aliases back
|
||||
win_hosts:
|
||||
state: present
|
||||
ip_address: "{{ test_win_hosts_ip }}"
|
||||
canonical_name: "{{ test_win_hosts_cname }}"
|
||||
aliases: "{{ test_win_hosts_aliases_remove }}"
|
||||
action: add
|
||||
register: add_aliases
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "add_aliases.changed == true"
|
||||
|
||||
- name: get actual dns results for aliases
|
||||
win_shell: "try{ [array]$t = [Net.DNS]::GetHostEntry('{{ item }}') } catch { return 'false' } if ($t[0].HostName -eq '{{ test_win_hosts_cname }}' -and $t[0].AddressList[0].toString() -eq '{{ test_win_hosts_ip }}'){ return 'true' } else { return 'false' }"
|
||||
register: add_aliases_actual
|
||||
with_items: "{{ test_win_hosts_aliases_set | union(test_win_hosts_aliases_remove) }}"
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "item.stdout_lines[0]|lower == 'true'"
|
||||
with_items: "{{ add_aliases_actual.results }}"
|
||||
|
||||
- name: add aliases back (idempotent)
|
||||
win_hosts:
|
||||
state: present
|
||||
ip_address: "{{ test_win_hosts_ip }}"
|
||||
canonical_name: "{{ test_win_hosts_cname }}"
|
||||
aliases: "{{ test_win_hosts_aliases_remove }}"
|
||||
action: add
|
||||
register: add_aliases
|
||||
|
||||
- assert:
|
||||
that:
|
||||
- "add_aliases.changed == false"
|
|
@@ -339,8 +339,6 @@ test/support/integration/plugins/module_utils/k8s/common.py metaclass-boilerplate
test/support/integration/plugins/module_utils/k8s/raw.py metaclass-boilerplate
test/support/integration/plugins/module_utils/mysql.py future-import-boilerplate
test/support/integration/plugins/module_utils/mysql.py metaclass-boilerplate
test/support/integration/plugins/module_utils/net_tools/nios/api.py future-import-boilerplate
test/support/integration/plugins/module_utils/net_tools/nios/api.py metaclass-boilerplate
test/support/integration/plugins/module_utils/network/common/utils.py future-import-boilerplate
test/support/integration/plugins/module_utils/network/common/utils.py metaclass-boilerplate
test/support/integration/plugins/module_utils/postgres.py future-import-boilerplate
@@ -395,10 +393,6 @@ test/support/network-integration/collections/ansible_collections/vyos/vyos/plugi
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_config.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_facts.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_facts.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_logging.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_logging.py metaclass-boilerplate
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_static_route.py future-import-boilerplate
test/support/network-integration/collections/ansible_collections/vyos/vyos/plugins/modules/vyos_static_route.py metaclass-boilerplate
test/support/windows-integration/plugins/modules/async_status.ps1 pslint!skip
test/support/windows-integration/plugins/modules/setup.ps1 pslint!skip
test/support/windows-integration/plugins/modules/win_copy.ps1 pslint!skip
@@ -1,208 +0,0 @@
|
|||
# Based on local.py (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
|
||||
#
|
||||
# (c) 2013, Maykel Moya <mmoya@speedyrails.com>
|
||||
# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
|
||||
# Copyright (c) 2017 Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = """
|
||||
author: Maykel Moya <mmoya@speedyrails.com>
|
||||
connection: chroot
|
||||
short_description: Interact with local chroot
|
||||
description:
|
||||
- Run commands or put/fetch files to an existing chroot on the Ansible controller.
|
||||
version_added: "1.1"
|
||||
options:
|
||||
remote_addr:
|
||||
description:
|
||||
- The path of the chroot you want to access.
|
||||
default: inventory_hostname
|
||||
vars:
|
||||
- name: ansible_host
|
||||
executable:
|
||||
description:
|
||||
- User specified executable shell
|
||||
ini:
|
||||
- section: defaults
|
||||
key: executable
|
||||
env:
|
||||
- name: ANSIBLE_EXECUTABLE
|
||||
vars:
|
||||
- name: ansible_executable
|
||||
default: /bin/sh
|
||||
chroot_exe:
|
||||
version_added: '2.8'
|
||||
description:
|
||||
- User specified chroot binary
|
||||
ini:
|
||||
- section: chroot_connection
|
||||
key: exe
|
||||
env:
|
||||
- name: ANSIBLE_CHROOT_EXE
|
||||
vars:
|
||||
- name: ansible_chroot_exe
|
||||
default: chroot
|
||||
"""
|
||||
|
||||
import os
|
||||
import os.path
|
||||
import subprocess
|
||||
import traceback
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils.basic import is_executable
|
||||
from ansible.module_utils.common.process import get_bin_path
|
||||
from ansible.module_utils.six.moves import shlex_quote
|
||||
from ansible.module_utils._text import to_bytes, to_native
|
||||
from ansible.plugins.connection import ConnectionBase, BUFSIZE
|
||||
from ansible.utils.display import Display
|
||||
|
||||
display = Display()
|
||||
|
||||
|
||||
class Connection(ConnectionBase):
|
||||
''' Local chroot based connections '''
|
||||
|
||||
transport = 'chroot'
|
||||
has_pipelining = True
|
||||
# su currently has an undiagnosed issue with calculating the file
|
||||
# checksums (so copy, for instance, doesn't work right)
|
||||
# Have to look into that before re-enabling this
|
||||
has_tty = False
|
||||
|
||||
default_user = 'root'
|
||||
|
||||
def __init__(self, play_context, new_stdin, *args, **kwargs):
|
||||
super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
|
||||
|
||||
self.chroot = self._play_context.remote_addr
|
||||
|
||||
if os.geteuid() != 0:
|
||||
raise AnsibleError("chroot connection requires running as root")
|
||||
|
||||
# we're running as root on the local system so do some
|
||||
# trivial checks for ensuring 'host' is actually a chroot'able dir
|
||||
if not os.path.isdir(self.chroot):
|
||||
raise AnsibleError("%s is not a directory" % self.chroot)
|
||||
|
||||
chrootsh = os.path.join(self.chroot, 'bin/sh')
|
||||
# Want to check for a usable bourne shell inside the chroot.
|
||||
# is_executable() == True is sufficient. For symlinks it
|
||||
# gets really complicated really fast. So we punt on finding that
|
||||
# out. As long as it's a symlink we assume that it will work
|
||||
if not (is_executable(chrootsh) or (os.path.lexists(chrootsh) and os.path.islink(chrootsh))):
|
||||
raise AnsibleError("%s does not look like a chrootable dir (/bin/sh missing)" % self.chroot)
|
||||
|
||||
def _connect(self):
|
||||
''' connect to the chroot '''
|
||||
if os.path.isabs(self.get_option('chroot_exe')):
|
||||
self.chroot_cmd = self.get_option('chroot_exe')
|
||||
else:
|
||||
try:
|
||||
self.chroot_cmd = get_bin_path(self.get_option('chroot_exe'))
|
||||
except ValueError as e:
|
||||
raise AnsibleError(to_native(e))
|
||||
|
||||
super(Connection, self)._connect()
|
||||
if not self._connected:
|
||||
display.vvv("THIS IS A LOCAL CHROOT DIR", host=self.chroot)
|
||||
self._connected = True
|
||||
|
||||
def _buffered_exec_command(self, cmd, stdin=subprocess.PIPE):
|
||||
''' run a command on the chroot. This is only needed for implementing
|
||||
put_file() get_file() so that we don't have to read the whole file
|
||||
into memory.
|
||||
|
||||
compared to exec_command() it looses some niceties like being able to
|
||||
return the process's exit code immediately.
|
||||
'''
|
||||
executable = self.get_option('executable')
|
||||
local_cmd = [self.chroot_cmd, self.chroot, executable, '-c', cmd]
|
||||
|
||||
display.vvv("EXEC %s" % (local_cmd), host=self.chroot)
|
||||
local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
|
||||
p = subprocess.Popen(local_cmd, shell=False, stdin=stdin,
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
|
||||
return p
|
||||
|
||||
def exec_command(self, cmd, in_data=None, sudoable=False):
|
||||
''' run a command on the chroot '''
|
||||
super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
|
||||
|
||||
p = self._buffered_exec_command(cmd)
|
||||
|
||||
stdout, stderr = p.communicate(in_data)
|
||||
return (p.returncode, stdout, stderr)
|
||||
|
||||
def _prefix_login_path(self, remote_path):
|
||||
''' Make sure that we put files into a standard path
|
||||
|
||||
If a path is relative, then we need to choose where to put it.
|
||||
ssh chooses $HOME but we aren't guaranteed that a home dir will
|
||||
exist in any given chroot. So for now we're choosing "/" instead.
|
||||
This also happens to be the former default.
|
||||
|
||||
Can revisit using $HOME instead if it's a problem
|
||||
'''
|
||||
if not remote_path.startswith(os.path.sep):
|
||||
remote_path = os.path.join(os.path.sep, remote_path)
|
||||
return os.path.normpath(remote_path)
|
||||
|
||||
def put_file(self, in_path, out_path):
|
||||
''' transfer a file from local to chroot '''
|
||||
super(Connection, self).put_file(in_path, out_path)
|
||||
display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.chroot)
|
||||
|
||||
out_path = shlex_quote(self._prefix_login_path(out_path))
|
||||
try:
|
||||
with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
|
||||
if not os.fstat(in_file.fileno()).st_size:
|
||||
count = ' count=0'
|
||||
else:
|
||||
count = ''
|
||||
try:
|
||||
p = self._buffered_exec_command('dd of=%s bs=%s%s' % (out_path, BUFSIZE, count), stdin=in_file)
|
||||
except OSError:
|
||||
raise AnsibleError("chroot connection requires dd command in the chroot")
|
||||
try:
|
||||
stdout, stderr = p.communicate()
|
||||
except Exception:
|
||||
traceback.print_exc()
|
||||
raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
|
||||
if p.returncode != 0:
|
||||
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
|
||||
except IOError:
|
||||
raise AnsibleError("file or module does not exist at: %s" % in_path)
|
||||
|
||||
def fetch_file(self, in_path, out_path):
|
||||
''' fetch a file from chroot to local '''
|
||||
super(Connection, self).fetch_file(in_path, out_path)
|
||||
display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.chroot)
|
||||
|
||||
in_path = shlex_quote(self._prefix_login_path(in_path))
|
||||
try:
|
||||
p = self._buffered_exec_command('dd if=%s bs=%s' % (in_path, BUFSIZE))
|
||||
except OSError:
|
||||
raise AnsibleError("chroot connection requires dd command in the chroot")
|
||||
|
||||
with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb+') as out_file:
|
||||
try:
|
||||
chunk = p.stdout.read(BUFSIZE)
|
||||
while chunk:
|
||||
out_file.write(chunk)
|
||||
chunk = p.stdout.read(BUFSIZE)
|
||||
except Exception:
|
||||
traceback.print_exc()
|
||||
raise AnsibleError("failed to transfer file %s to %s" % (in_path, out_path))
|
||||
stdout, stderr = p.communicate()
|
||||
if p.returncode != 0:
|
||||
raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
|
||||
|
||||
def close(self):
|
||||
''' terminate the connection; nothing to do here '''
|
||||
super(Connection, self).close()
|
||||
self._connected = False
|
|
@@ -1,302 +0,0 @@
|
|||
# (c) 2015, Jonathan Davila <jonathan(at)davila.io>
|
||||
# (c) 2017 Ansible Project
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
DOCUMENTATION = """
|
||||
lookup: hashi_vault
|
||||
author: Jonathan Davila <jdavila(at)ansible.com>
|
||||
version_added: "2.0"
|
||||
short_description: retrieve secrets from HashiCorp's vault
|
||||
requirements:
|
||||
- hvac (python library)
|
||||
description:
|
||||
- retrieve secrets from HashiCorp's vault
|
||||
notes:
|
||||
- Due to a current limitation in the HVAC library there won't necessarily be an error if a bad endpoint is specified.
|
||||
- As of Ansible 2.10, only the latest secret is returned when specifying a KV v2 path.
|
||||
options:
|
||||
secret:
|
||||
description: query you are making.
|
||||
required: True
|
||||
token:
|
||||
description: vault token.
|
||||
env:
|
||||
- name: VAULT_TOKEN
|
||||
url:
|
||||
description: URL to vault service.
|
||||
env:
|
||||
- name: VAULT_ADDR
|
||||
default: 'http://127.0.0.1:8200'
|
||||
username:
|
||||
description: Authentication user name.
|
||||
password:
|
||||
description: Authentication password.
|
||||
role_id:
|
||||
description: Role id for a vault AppRole auth.
|
||||
env:
|
||||
- name: VAULT_ROLE_ID
|
||||
secret_id:
|
||||
description: Secret id for a vault AppRole auth.
|
||||
env:
|
||||
- name: VAULT_SECRET_ID
|
||||
auth_method:
|
||||
description:
|
||||
- Authentication method to be used.
|
||||
- C(userpass) is added in version 2.8.
|
||||
env:
|
||||
- name: VAULT_AUTH_METHOD
|
||||
choices:
|
||||
- userpass
|
||||
- ldap
|
||||
- approle
|
||||
mount_point:
|
||||
description: vault mount point, only required if you have a custom mount point.
|
||||
default: ldap
|
||||
ca_cert:
|
||||
description: path to certificate to use for authentication.
|
||||
aliases: [ cacert ]
|
||||
validate_certs:
|
||||
description: controls verification and validation of SSL certificates, mostly you only want to turn off with self signed ones.
|
||||
type: boolean
|
||||
default: True
|
||||
namespace:
|
||||
version_added: "2.8"
|
||||
description: namespace where secrets reside. requires HVAC 0.7.0+ and Vault 0.11+.
|
||||
"""
|
||||
|
||||
EXAMPLES = """
|
||||
- debug:
|
||||
msg: "{{ lookup('hashi_vault', 'secret=secret/hello:value token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200')}}"
|
||||
|
||||
- name: Return all secrets from a path
|
||||
debug:
|
||||
msg: "{{ lookup('hashi_vault', 'secret=secret/hello token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200')}}"
|
||||
|
||||
- name: Vault that requires authentication via LDAP
|
||||
debug:
|
||||
msg: "{{ lookup('hashi_vault', 'secret=secret/hello:value auth_method=ldap mount_point=ldap username=myuser password=mypas url=http://myvault:8200')}}"
|
||||
|
||||
- name: Vault that requires authentication via username and password
|
||||
debug:
|
||||
msg: "{{ lookup('hashi_vault', 'secret=secret/hello:value auth_method=userpass username=myuser password=mypas url=http://myvault:8200')}}"
|
||||
|
||||
- name: Using an ssl vault
|
||||
debug:
|
||||
msg: "{{ lookup('hashi_vault', 'secret=secret/hola:value token=c975b780-d1be-8016-866b-01d0f9b688a5 url=https://myvault:8200 validate_certs=False')}}"
|
||||
|
||||
- name: using certificate auth
|
||||
debug:
|
||||
msg: "{{ lookup('hashi_vault', 'secret=secret/hi:value token=xxxx-xxx-xxx url=https://myvault:8200 validate_certs=True cacert=/cacert/path/ca.pem')}}"
|
||||
|
||||
- name: authenticate with a Vault app role
|
||||
debug:
|
||||
msg: "{{ lookup('hashi_vault', 'secret=secret/hello:value auth_method=approle role_id=myroleid secret_id=mysecretid url=http://myvault:8200')}}"
|
||||
|
||||
- name: Return all secrets from a path in a namespace
|
||||
debug:
|
||||
msg: "{{ lookup('hashi_vault', 'secret=secret/hello token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200 namespace=teama/admins')}}"
|
||||
|
||||
# When using KV v2 the PATH should include "data" between the secret engine mount and path (e.g. "secret/data/:path")
|
||||
# see: https://www.vaultproject.io/api/secret/kv/kv-v2.html#read-secret-version
|
||||
- name: Return latest KV v2 secret from path
|
||||
debug:
|
||||
msg: "{{ lookup('hashi_vault', 'secret=secret/data/hello token=my_vault_token url=http://myvault_url:8200') }}"
|
||||
|
||||
|
||||
"""
|
||||
|
||||
RETURN = """
|
||||
_raw:
|
||||
description:
|
||||
- secrets(s) requested
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
from ansible.errors import AnsibleError
|
||||
from ansible.module_utils.parsing.convert_bool import boolean
|
||||
from ansible.plugins.lookup import LookupBase
|
||||
|
||||
HAS_HVAC = False
|
||||
try:
|
||||
import hvac
|
||||
HAS_HVAC = True
|
||||
except ImportError:
|
||||
HAS_HVAC = False
|
||||
|
||||
|
||||
ANSIBLE_HASHI_VAULT_ADDR = 'http://127.0.0.1:8200'
|
||||
|
||||
if os.getenv('VAULT_ADDR') is not None:
|
||||
ANSIBLE_HASHI_VAULT_ADDR = os.environ['VAULT_ADDR']
|
||||
|
||||
|
||||
class HashiVault:
|
||||
def __init__(self, **kwargs):
|
||||
|
||||
self.url = kwargs.get('url', ANSIBLE_HASHI_VAULT_ADDR)
|
||||
self.namespace = kwargs.get('namespace', None)
|
||||
self.avail_auth_method = ['approle', 'userpass', 'ldap']
|
||||
|
||||
# split secret arg, which has format 'secret/hello:value' into secret='secret/hello' and secret_field='value'
|
||||
s = kwargs.get('secret')
|
||||
if s is None:
|
||||
raise AnsibleError("No secret specified for hashi_vault lookup")
|
||||
|
||||
s_f = s.rsplit(':', 1)
|
||||
self.secret = s_f[0]
|
||||
if len(s_f) >= 2:
|
||||
self.secret_field = s_f[1]
|
||||
else:
|
||||
self.secret_field = ''
|
||||
|
||||
self.verify = self.boolean_or_cacert(kwargs.get('validate_certs', True), kwargs.get('cacert', ''))
|
||||
|
||||
# If a particular backend is asked for (and its method exists) we call it, otherwise drop through to using
|
||||
# token auth. This means if a particular auth backend is requested and a token is also given, then we
|
||||
# ignore the token and attempt authentication against the specified backend.
|
||||
#
|
||||
# to enable a new auth backend, simply add a new 'def auth_<type>' method below.
|
||||
#
|
||||
self.auth_method = kwargs.get('auth_method', os.environ.get('VAULT_AUTH_METHOD'))
|
||||
self.verify = self.boolean_or_cacert(kwargs.get('validate_certs', True), kwargs.get('cacert', ''))
|
||||
if self.auth_method and self.auth_method != 'token':
|
||||
try:
|
||||
if self.namespace is not None:
|
||||
self.client = hvac.Client(url=self.url, verify=self.verify, namespace=self.namespace)
|
||||
else:
|
||||
self.client = hvac.Client(url=self.url, verify=self.verify)
|
||||
# prefixing with auth_ to limit which methods can be accessed
|
||||
getattr(self, 'auth_' + self.auth_method)(**kwargs)
|
||||
except AttributeError:
|
||||
raise AnsibleError("Authentication method '%s' not supported."
|
||||
" Available options are %r" % (self.auth_method, self.avail_auth_method))
|
||||
else:
|
||||
self.token = kwargs.get('token', os.environ.get('VAULT_TOKEN', None))
|
||||
if self.token is None and os.environ.get('HOME'):
|
||||
token_filename = os.path.join(
|
||||
os.environ.get('HOME'),
|
||||
'.vault-token'
|
||||
)
|
||||
if os.path.exists(token_filename):
|
||||
with open(token_filename) as token_file:
|
||||
self.token = token_file.read().strip()
|
||||
|
||||
if self.token is None:
|
||||
raise AnsibleError("No Vault Token specified")
|
||||
|
||||
if self.namespace is not None:
|
||||
self.client = hvac.Client(url=self.url, token=self.token, verify=self.verify, namespace=self.namespace)
|
||||
else:
|
||||
self.client = hvac.Client(url=self.url, token=self.token, verify=self.verify)
|
||||
|
||||
if not self.client.is_authenticated():
|
||||
raise AnsibleError("Invalid Hashicorp Vault Token Specified for hashi_vault lookup")
|
||||
|
||||
def get(self):
|
||||
data = self.client.read(self.secret)
|
||||
|
||||
# Check response for KV v2 fields and flatten nested secret data.
|
||||
#
|
||||
# https://vaultproject.io/api/secret/kv/kv-v2.html#sample-response-1
|
||||
try:
|
||||
# sentinel field checks
|
||||
check_dd = data['data']['data']
|
||||
check_md = data['data']['metadata']
|
||||
# unwrap nested data
|
||||
data = data['data']
|
||||
except KeyError:
|
||||
pass
|
||||
|
||||
if data is None:
|
||||
raise AnsibleError("The secret %s doesn't seem to exist for hashi_vault lookup" % self.secret)
|
||||
|
||||
if self.secret_field == '':
|
||||
return data['data']
|
||||
|
||||
if self.secret_field not in data['data']:
|
||||
raise AnsibleError("The secret %s does not contain the field '%s'. for hashi_vault lookup" % (self.secret, self.secret_field))
|
||||
|
||||
return data['data'][self.secret_field]
|
||||
|
||||
def check_params(self, **kwargs):
|
||||
username = kwargs.get('username')
|
||||
if username is None:
|
||||
raise AnsibleError("Authentication method %s requires a username" % self.auth_method)
|
||||
|
||||
password = kwargs.get('password')
|
||||
if password is None:
|
||||
raise AnsibleError("Authentication method %s requires a password" % self.auth_method)
|
||||
|
||||
mount_point = kwargs.get('mount_point')
|
||||
|
||||
return username, password, mount_point
|
||||
|
||||
def auth_userpass(self, **kwargs):
|
||||
username, password, mount_point = self.check_params(**kwargs)
|
||||
if mount_point is None:
|
||||
mount_point = 'userpass'
|
||||
|
||||
self.client.auth_userpass(username, password, mount_point=mount_point)
|
||||
|
||||
def auth_ldap(self, **kwargs):
|
||||
username, password, mount_point = self.check_params(**kwargs)
|
||||
if mount_point is None:
|
||||
mount_point = 'ldap'
|
||||
|
||||
self.client.auth.ldap.login(username, password, mount_point=mount_point)
|
||||
|
||||
def boolean_or_cacert(self, validate_certs, cacert):
|
||||
validate_certs = boolean(validate_certs, strict=False)
|
||||
'''' return a bool or cacert '''
|
||||
if validate_certs is True:
|
||||
if cacert != '':
|
||||
return cacert
|
||||
else:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
|
||||
def auth_approle(self, **kwargs):
|
||||
role_id = kwargs.get('role_id', os.environ.get('VAULT_ROLE_ID', None))
|
||||
if role_id is None:
|
||||
raise AnsibleError("Authentication method app role requires a role_id")
|
||||
|
||||
secret_id = kwargs.get('secret_id', os.environ.get('VAULT_SECRET_ID', None))
|
||||
if secret_id is None:
|
||||
raise AnsibleError("Authentication method app role requires a secret_id")
|
||||
|
||||
self.client.auth_approle(role_id, secret_id)
|
||||
|
||||
|
||||
class LookupModule(LookupBase):
|
||||
def run(self, terms, variables=None, **kwargs):
|
||||
if not HAS_HVAC:
|
||||
raise AnsibleError("Please pip install hvac to use the hashi_vault lookup module.")
|
||||
|
||||
vault_args = terms[0].split()
|
||||
vault_dict = {}
|
||||
ret = []
|
||||
|
||||
for param in vault_args:
|
||||
try:
|
||||
key, value = param.split('=')
|
||||
except ValueError:
|
||||
raise AnsibleError("hashi_vault lookup plugin needs key=value pairs, but received %s" % terms)
|
||||
vault_dict[key] = value
|
||||
|
||||
if 'ca_cert' in vault_dict.keys():
|
||||
vault_dict['cacert'] = vault_dict['ca_cert']
|
||||
vault_dict.pop('ca_cert', None)
|
||||
|
||||
vault_conn = HashiVault(**vault_dict)
|
||||
|
||||
for term in terms:
|
||||
key = term.split()[0]
|
||||
value = vault_conn.get()
|
||||
ret.append(value)
|
||||
|
||||
return ret
|
|
@@ -1,63 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Hetzner Cloud GmbH <info@hetzner-cloud.de>

# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)

from __future__ import absolute_import, division, print_function

__metaclass__ = type

from ansible.module_utils.ansible_release import __version__
from ansible.module_utils.basic import env_fallback, missing_required_lib

try:
    import hcloud

    HAS_HCLOUD = True
except ImportError:
    HAS_HCLOUD = False


class Hcloud(object):
    def __init__(self, module, represent):
        self.module = module
        self.represent = represent
        self.result = {"changed": False, self.represent: None}
        if not HAS_HCLOUD:
            module.fail_json(msg=missing_required_lib("hcloud-python"))
        self._build_client()

    def _build_client(self):
        self.client = hcloud.Client(
            token=self.module.params["api_token"],
            api_endpoint=self.module.params["endpoint"],
            application_name="ansible-module",
            application_version=__version__,
        )

    def _mark_as_changed(self):
        self.result["changed"] = True

    @staticmethod
    def base_module_arguments():
        return {
            "api_token": {
                "type": "str",
                "required": True,
                "fallback": (env_fallback, ["HCLOUD_TOKEN"]),
                "no_log": True,
            },
            "endpoint": {"type": "str", "default": "https://api.hetzner.cloud/v1"},
        }

    def _prepare_result(self):
        """Prepare the result for every module

        :return: dict
        """
        return {}

    def get_result(self):
        if getattr(self, self.represent) is not None:
            self.result[self.represent] = self._prepare_result()
        return self.result
@@ -1,601 +0,0 @@
|
|||
# This code is part of Ansible, but is an independent component.
|
||||
# This particular file snippet, and this file snippet only, is BSD licensed.
|
||||
# Modules you write using this snippet, which is embedded dynamically by Ansible
|
||||
# still belong to the author of the module, and may assign their own license
|
||||
# to the complete work.
|
||||
#
|
||||
# (c) 2018 Red Hat Inc.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without modification,
|
||||
# are permitted provided that the following conditions are met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright
|
||||
# notice, this list of conditions and the following disclaimer.
|
||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
||||
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
||||
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
||||
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
|
||||
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
|
||||
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
#
|
||||
|
||||
import os
|
||||
from functools import partial
|
||||
from ansible.module_utils._text import to_native
|
||||
from ansible.module_utils.six import iteritems
|
||||
from ansible.module_utils._text import to_text
|
||||
from ansible.module_utils.basic import env_fallback
|
||||
|
||||
try:
|
||||
from infoblox_client.connector import Connector
|
||||
from infoblox_client.exceptions import InfobloxException
|
||||
HAS_INFOBLOX_CLIENT = True
|
||||
except ImportError:
|
||||
HAS_INFOBLOX_CLIENT = False
|
||||
|
||||
# defining nios constants
|
||||
NIOS_DNS_VIEW = 'view'
|
||||
NIOS_NETWORK_VIEW = 'networkview'
|
||||
NIOS_HOST_RECORD = 'record:host'
|
||||
NIOS_IPV4_NETWORK = 'network'
|
||||
NIOS_IPV6_NETWORK = 'ipv6network'
|
||||
NIOS_ZONE = 'zone_auth'
|
||||
NIOS_PTR_RECORD = 'record:ptr'
|
||||
NIOS_A_RECORD = 'record:a'
|
||||
NIOS_AAAA_RECORD = 'record:aaaa'
|
||||
NIOS_CNAME_RECORD = 'record:cname'
|
||||
NIOS_MX_RECORD = 'record:mx'
|
||||
NIOS_SRV_RECORD = 'record:srv'
|
||||
NIOS_NAPTR_RECORD = 'record:naptr'
|
||||
NIOS_TXT_RECORD = 'record:txt'
|
||||
NIOS_NSGROUP = 'nsgroup'
|
||||
NIOS_IPV4_FIXED_ADDRESS = 'fixedaddress'
|
||||
NIOS_IPV6_FIXED_ADDRESS = 'ipv6fixedaddress'
|
||||
NIOS_NEXT_AVAILABLE_IP = 'func:nextavailableip'
|
||||
NIOS_IPV4_NETWORK_CONTAINER = 'networkcontainer'
|
||||
NIOS_IPV6_NETWORK_CONTAINER = 'ipv6networkcontainer'
|
||||
NIOS_MEMBER = 'member'
|
||||
|
||||
NIOS_PROVIDER_SPEC = {
|
||||
'host': dict(fallback=(env_fallback, ['INFOBLOX_HOST'])),
|
||||
'username': dict(fallback=(env_fallback, ['INFOBLOX_USERNAME'])),
|
||||
'password': dict(fallback=(env_fallback, ['INFOBLOX_PASSWORD']), no_log=True),
|
||||
'validate_certs': dict(type='bool', default=False, fallback=(env_fallback, ['INFOBLOX_SSL_VERIFY']), aliases=['ssl_verify']),
|
||||
'silent_ssl_warnings': dict(type='bool', default=True),
|
||||
'http_request_timeout': dict(type='int', default=10, fallback=(env_fallback, ['INFOBLOX_HTTP_REQUEST_TIMEOUT'])),
|
||||
'http_pool_connections': dict(type='int', default=10),
|
||||
'http_pool_maxsize': dict(type='int', default=10),
|
||||
'max_retries': dict(type='int', default=3, fallback=(env_fallback, ['INFOBLOX_MAX_RETRIES'])),
|
||||
'wapi_version': dict(default='2.1', fallback=(env_fallback, ['INFOBLOX_WAP_VERSION'])),
|
||||
'max_results': dict(type='int', default=1000, fallback=(env_fallback, ['INFOBLOX_MAX_RESULTS']))
|
||||
}
|
||||
|
||||
|
||||
def get_connector(*args, **kwargs):
|
||||
''' Returns an instance of infoblox_client.connector.Connector
|
||||
:params args: positional arguments are silently ignored
|
||||
:params kwargs: dict that is passed to Connector init
|
||||
:returns: Connector
|
||||
'''
|
||||
if not HAS_INFOBLOX_CLIENT:
|
||||
raise Exception('infoblox-client is required but does not appear '
|
||||
'to be installed. It can be installed using the '
|
||||
'command `pip install infoblox-client`')
|
||||
|
||||
if not set(kwargs.keys()).issubset(list(NIOS_PROVIDER_SPEC.keys()) + ['ssl_verify']):
|
||||
raise Exception('invalid or unsupported keyword argument for connector')
|
||||
for key, value in iteritems(NIOS_PROVIDER_SPEC):
|
||||
if key not in kwargs:
|
||||
# apply default values from NIOS_PROVIDER_SPEC since we cannot just
|
||||
# assume the provider values are coming from AnsibleModule
|
||||
if 'default' in value:
|
||||
kwargs[key] = value['default']
|
||||
|
||||
# override any values with env variables unless they were
|
||||
# explicitly set
|
||||
env = ('INFOBLOX_%s' % key).upper()
|
||||
if env in os.environ:
|
||||
kwargs[key] = os.environ.get(env)
|
||||
|
||||
if 'validate_certs' in kwargs.keys():
|
||||
kwargs['ssl_verify'] = kwargs['validate_certs']
|
||||
kwargs.pop('validate_certs', None)
|
||||
|
||||
return Connector(kwargs)
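
# Illustrative usage sketch, not part of the original file. get_connector
# applies NIOS_PROVIDER_SPEC defaults and INFOBLOX_* environment overrides
# before instantiating the client; the host and credentials below are
# placeholders:
#
#     conn = get_connector(host='nios.example.com', username='admin',
#                          password='secret', validate_certs=False)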
|
||||
|
||||
|
||||
def normalize_extattrs(value):
|
||||
''' Normalize extattrs field to expected format
|
||||
The module accepts extattrs as key/value pairs. This method will
|
||||
transform the key/value pairs into a structure suitable for
|
||||
sending across WAPI in the format of:
|
||||
extattrs: {
|
||||
key: {
|
||||
value: <value>
|
||||
}
|
||||
}
|
||||
'''
|
||||
return dict([(k, {'value': v}) for k, v in iteritems(value)])
|
||||
|
||||
|
||||
def flatten_extattrs(value):
|
||||
''' Flatten the key/value struct for extattrs
|
||||
WAPI returns extattrs field as a dict in form of:
|
||||
extattrs: {
|
||||
key: {
|
||||
value: <value>
|
||||
}
|
||||
}
|
||||
This method will flatten the structure to:
|
||||
extattrs: {
|
||||
key: value
|
||||
}
|
||||
'''
|
||||
return dict([(k, v['value']) for k, v in iteritems(value)])
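
# Illustrative sketch, not part of the original file. The two helpers are
# inverses of each other:
#
#     normalize_extattrs({'Site': 'HQ'})           # -> {'Site': {'value': 'HQ'}}
#     flatten_extattrs({'Site': {'value': 'HQ'}})  # -> {'Site': 'HQ'}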
|
||||
|
||||
|
||||
def member_normalize(member_spec):
|
||||
''' Transforms the member module arguments into a valid WAPI struct
|
||||
This function will transform the arguments into a structure that
|
||||
is a valid WAPI structure in the format of:
|
||||
{
|
||||
key: <value>,
|
||||
}
|
||||
It will remove any arguments that are set to None since WAPI will error on
|
||||
that condition.
|
||||
The remainder of the value validation is performed by WAPI
|
||||
Some parameters in ib_spec are passed as a list in order to pass the validation for elements.
|
||||
In this function, they are converted to dictionary.
|
||||
'''
|
||||
member_elements = ['vip_setting', 'ipv6_setting', 'lan2_port_setting', 'mgmt_port_setting',
|
||||
'pre_provisioning', 'network_setting', 'v6_network_setting',
|
||||
'ha_port_setting', 'lan_port_setting', 'lan2_physical_setting',
|
||||
'lan_ha_port_setting', 'mgmt_network_setting', 'v6_mgmt_network_setting']
|
||||
for key in member_spec.keys():
|
||||
if key in member_elements and member_spec[key] is not None:
|
||||
member_spec[key] = member_spec[key][0]
|
||||
if isinstance(member_spec[key], dict):
|
||||
member_spec[key] = member_normalize(member_spec[key])
|
||||
elif isinstance(member_spec[key], list):
|
||||
for x in member_spec[key]:
|
||||
if isinstance(x, dict):
|
||||
x = member_normalize(x)
|
||||
elif member_spec[key] is None:
|
||||
del member_spec[key]
|
||||
return member_spec
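
# Illustrative sketch, not part of the original file. Keys listed in
# member_elements are unwrapped from their single-item lists, and None
# values are dropped:
#
#     member_normalize({'vip_setting': [{'address': '10.0.0.1'}], 'comment': None})
#     # -> {'vip_setting': {'address': '10.0.0.1'}}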
|
||||
|
||||
|
||||
class WapiBase(object):
|
||||
''' Base class for implementing Infoblox WAPI API '''
|
||||
provider_spec = {'provider': dict(type='dict', options=NIOS_PROVIDER_SPEC)}
|
||||
|
||||
def __init__(self, provider):
|
||||
self.connector = get_connector(**provider)
|
||||
|
||||
def __getattr__(self, name):
|
||||
try:
|
||||
return self.__dict__[name]
|
||||
except KeyError:
|
||||
if name.startswith('_'):
|
||||
raise AttributeError("'%s' object has no attribute '%s'" % (self.__class__.__name__, name))
|
||||
return partial(self._invoke_method, name)
|
||||
|
||||
def _invoke_method(self, name, *args, **kwargs):
|
||||
try:
|
||||
method = getattr(self.connector, name)
|
||||
return method(*args, **kwargs)
|
||||
except InfobloxException as exc:
|
||||
if hasattr(self, 'handle_exception'):
|
||||
self.handle_exception(name, exc)
|
||||
else:
|
||||
raise
|
||||
|
||||
|
||||
class WapiLookup(WapiBase):
|
||||
''' Implements WapiBase for lookup plugins '''
|
||||
def handle_exception(self, method_name, exc):
|
||||
if ('text' in exc.response):
|
||||
raise Exception(exc.response['text'])
|
||||
else:
|
||||
raise Exception(exc)
|
||||
|
||||
|
||||
class WapiInventory(WapiBase):
|
||||
''' Implements WapiBase for dynamic inventory script '''
|
||||
pass
|
||||
|
||||
|
||||
class WapiModule(WapiBase):
|
||||
''' Implements WapiBase for executing a NIOS module '''
|
||||
def __init__(self, module):
|
||||
self.module = module
|
||||
provider = module.params['provider']
|
||||
try:
|
||||
super(WapiModule, self).__init__(provider)
|
||||
except Exception as exc:
|
||||
self.module.fail_json(msg=to_text(exc))
|
||||
|
||||
def handle_exception(self, method_name, exc):
|
||||
''' Handles any exceptions raised
|
||||
This method will be called if an InfobloxException is raised for
|
||||
any call to the instance of Connector, and also in the case of a generic
|
||||
exception. This method will then gracefully fail the module.
|
||||
:args exc: instance of InfobloxException
|
||||
'''
|
||||
if ('text' in exc.response):
|
||||
self.module.fail_json(
|
||||
msg=exc.response['text'],
|
||||
type=exc.response['Error'].split(':')[0],
|
||||
code=exc.response.get('code'),
|
||||
operation=method_name
|
||||
)
|
||||
else:
|
||||
self.module.fail_json(msg=to_native(exc))
|
||||
|
||||
def run(self, ib_obj_type, ib_spec):
|
||||
''' Runs the module and performs configuration tasks
|
||||
:args ib_obj_type: the WAPI object type to operate against
|
||||
:args ib_spec: the specification for the WAPI object as a dict
|
||||
:returns: a results dict
|
||||
'''
|
||||
|
||||
update = new_name = None
|
||||
state = self.module.params['state']
|
||||
if state not in ('present', 'absent'):
|
||||
self.module.fail_json(msg='state must be one of `present`, `absent`, got `%s`' % state)
|
||||
|
||||
result = {'changed': False}
|
||||
|
||||
obj_filter = dict([(k, self.module.params[k]) for k, v in iteritems(ib_spec) if v.get('ib_req')])
|
||||
|
||||
# get object reference
|
||||
ib_obj_ref, update, new_name = self.get_object_ref(self.module, ib_obj_type, obj_filter, ib_spec)
|
||||
proposed_object = {}
|
||||
for key, value in iteritems(ib_spec):
|
||||
if self.module.params[key] is not None:
|
||||
if 'transform' in value:
|
||||
proposed_object[key] = value['transform'](self.module)
|
||||
else:
|
||||
proposed_object[key] = self.module.params[key]
|
||||
|
||||
# If configure_for_dns is set to False, delete the default DNS view set in the params; otherwise fail
|
||||
if not proposed_object.get('configure_for_dns') and proposed_object.get('view') == 'default'\
|
||||
and ib_obj_type == NIOS_HOST_RECORD:
|
||||
del proposed_object['view']
|
||||
elif not proposed_object.get('configure_for_dns') and proposed_object.get('view') != 'default'\
|
||||
and ib_obj_type == NIOS_HOST_RECORD:
|
||||
self.module.fail_json(msg='DNS Bypass is not allowed if DNS view is set other than \'default\'')
|
||||
|
||||
if ib_obj_ref:
|
||||
if len(ib_obj_ref) > 1:
|
||||
for each in ib_obj_ref:
|
||||
# To check for existing A_record with same name with input A_record by IP
|
||||
if each.get('ipv4addr') and each.get('ipv4addr') == proposed_object.get('ipv4addr'):
|
||||
current_object = each
|
||||
# To check for existing Host_record with same name with input Host_record by IP
|
||||
elif each.get('ipv4addrs')[0].get('ipv4addr') and each.get('ipv4addrs')[0].get('ipv4addr')\
|
||||
== proposed_object.get('ipv4addrs')[0].get('ipv4addr'):
|
||||
current_object = each
|
||||
# Else set the current_object with input value
|
||||
else:
|
||||
current_object = obj_filter
|
||||
ref = None
|
||||
else:
|
||||
current_object = ib_obj_ref[0]
|
||||
if 'extattrs' in current_object:
|
||||
current_object['extattrs'] = flatten_extattrs(current_object['extattrs'])
|
||||
if current_object.get('_ref'):
|
||||
ref = current_object.pop('_ref')
|
||||
else:
|
||||
current_object = obj_filter
|
||||
ref = None
|
||||
# checks if the object type is member to normalize the attributes being passed
|
||||
if (ib_obj_type == NIOS_MEMBER):
|
||||
proposed_object = member_normalize(proposed_object)
|
||||
|
||||
# checks if the name's field has been updated
|
||||
if update and new_name:
|
||||
proposed_object['name'] = new_name
|
||||
|
||||
check_remove = []
|
||||
if (ib_obj_type == NIOS_HOST_RECORD):
|
||||
# this check is for idempotency, as if the same ip address shall be passed
|
||||
# add param will be removed, and same exists true for remove case as well.
|
||||
if 'ipv4addrs' in current_object and 'ipv4addrs' in proposed_object:
|
||||
for each in current_object['ipv4addrs']:
|
||||
if each['ipv4addr'] == proposed_object['ipv4addrs'][0]['ipv4addr']:
|
||||
if 'add' in proposed_object['ipv4addrs'][0]:
|
||||
del proposed_object['ipv4addrs'][0]['add']
|
||||
break
|
||||
check_remove += each.values()
|
||||
if proposed_object['ipv4addrs'][0]['ipv4addr'] not in check_remove:
|
||||
if 'remove' in proposed_object['ipv4addrs'][0]:
|
||||
del proposed_object['ipv4addrs'][0]['remove']
|
||||
|
||||
res = None
|
||||
modified = not self.compare_objects(current_object, proposed_object)
|
||||
if 'extattrs' in proposed_object:
|
||||
proposed_object['extattrs'] = normalize_extattrs(proposed_object['extattrs'])
|
||||
|
||||
# Checks if nios_next_ip param is passed in ipv4addrs/ipv4addr args
|
||||
proposed_object = self.check_if_nios_next_ip_exists(proposed_object)
|
||||
|
||||
if state == 'present':
|
||||
if ref is None:
|
||||
if not self.module.check_mode:
|
||||
self.create_object(ib_obj_type, proposed_object)
|
||||
result['changed'] = True
|
||||
# Check if NIOS_MEMBER and the flag to call function create_token is set
|
||||
elif (ib_obj_type == NIOS_MEMBER) and (proposed_object['create_token']):
|
||||
proposed_object = None
|
||||
# the function creates a token that can be used by a pre-provisioned member to join the grid
|
||||
result['api_results'] = self.call_func('create_token', ref, proposed_object)
|
||||
result['changed'] = True
|
||||
elif modified:
|
||||
if 'ipv4addrs' in proposed_object:
|
||||
if ('add' not in proposed_object['ipv4addrs'][0]) and ('remove' not in proposed_object['ipv4addrs'][0]):
|
||||
self.check_if_recordname_exists(obj_filter, ib_obj_ref, ib_obj_type, current_object, proposed_object)
|
||||
|
||||
if (ib_obj_type in (NIOS_HOST_RECORD, NIOS_NETWORK_VIEW, NIOS_DNS_VIEW)):
|
||||
run_update = True
|
||||
proposed_object = self.on_update(proposed_object, ib_spec)
|
||||
if 'ipv4addrs' in proposed_object:
|
||||
if 'add' in proposed_object['ipv4addrs'][0] or 'remove' in proposed_object['ipv4addrs'][0]:
|
||||
run_update, proposed_object = self.check_if_add_remove_ip_arg_exists(proposed_object)
|
||||
if run_update:
|
||||
res = self.update_object(ref, proposed_object)
|
||||
result['changed'] = True
|
||||
else:
|
||||
res = ref
|
||||
if (ib_obj_type in (NIOS_A_RECORD, NIOS_AAAA_RECORD, NIOS_PTR_RECORD, NIOS_SRV_RECORD)):
|
||||
# popping 'view' key as update of 'view' is not supported with respect to a:record/aaaa:record/srv:record/ptr:record
|
||||
proposed_object = self.on_update(proposed_object, ib_spec)
|
||||
del proposed_object['view']
|
||||
if not self.module.check_mode:
|
||||
res = self.update_object(ref, proposed_object)
|
||||
result['changed'] = True
|
||||
elif 'network_view' in proposed_object:
|
||||
proposed_object.pop('network_view')
|
||||
result['changed'] = True
|
||||
if not self.module.check_mode and res is None:
|
||||
proposed_object = self.on_update(proposed_object, ib_spec)
|
||||
self.update_object(ref, proposed_object)
|
||||
result['changed'] = True
|
||||
|
||||
elif state == 'absent':
|
||||
if ref is not None:
|
||||
if 'ipv4addrs' in proposed_object:
|
||||
if 'remove' in proposed_object['ipv4addrs'][0]:
|
||||
self.check_if_add_remove_ip_arg_exists(proposed_object)
|
||||
self.update_object(ref, proposed_object)
|
||||
result['changed'] = True
|
||||
elif not self.module.check_mode:
|
||||
self.delete_object(ref)
|
||||
result['changed'] = True
|
||||
|
||||
return result
|
||||
|
||||
def check_if_recordname_exists(self, obj_filter, ib_obj_ref, ib_obj_type, current_object, proposed_object):
|
||||
''' Send POST request if host record input name and retrieved ref name is same,
|
||||
but input IP and retrieved IP is different'''
|
||||
|
||||
if 'name' in obj_filter and 'name' in ib_obj_ref[0] and ib_obj_type == NIOS_HOST_RECORD:
|
||||
obj_host_name = obj_filter['name']
|
||||
ref_host_name = ib_obj_ref[0]['name']
|
||||
if 'ipv4addrs' in current_object and 'ipv4addrs' in proposed_object:
|
||||
current_ip_addr = current_object['ipv4addrs'][0]['ipv4addr']
|
||||
proposed_ip_addr = proposed_object['ipv4addrs'][0]['ipv4addr']
|
||||
elif 'ipv6addrs' in current_object and 'ipv6addrs' in proposed_object:
|
||||
current_ip_addr = current_object['ipv6addrs'][0]['ipv6addr']
|
||||
proposed_ip_addr = proposed_object['ipv6addrs'][0]['ipv6addr']
|
||||
|
||||
if obj_host_name == ref_host_name and current_ip_addr != proposed_ip_addr:
|
||||
self.create_object(ib_obj_type, proposed_object)
|
||||
|
||||
def check_if_nios_next_ip_exists(self, proposed_object):
|
||||
''' Check if nios_next_ip argument is passed in ipaddr while creating
|
||||
host record, if yes then format proposed object ipv4addrs and pass
|
||||
func:nextavailableip and ipaddr range to create hostrecord with next
|
||||
available ip in one call to avoid any race condition '''
|
||||
|
||||
if 'ipv4addrs' in proposed_object:
|
||||
if 'nios_next_ip' in proposed_object['ipv4addrs'][0]['ipv4addr']:
|
||||
ip_range = self.module._check_type_dict(proposed_object['ipv4addrs'][0]['ipv4addr'])['nios_next_ip']
|
||||
proposed_object['ipv4addrs'][0]['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range
|
||||
elif 'ipv4addr' in proposed_object:
|
||||
if 'nios_next_ip' in proposed_object['ipv4addr']:
|
||||
ip_range = self.module._check_type_dict(proposed_object['ipv4addr'])['nios_next_ip']
|
||||
proposed_object['ipv4addr'] = NIOS_NEXT_AVAILABLE_IP + ':' + ip_range
|
||||
|
||||
return proposed_object
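
# Illustrative sketch, not part of the original file. Given
# ipv4addr = {'nios_next_ip': '192.168.10.0/24'}, the value is rewritten to
# 'func:nextavailableip:192.168.10.0/24', so WAPI allocates the next free IP
# in the same call that creates the record.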
|
||||
|
||||
def check_if_add_remove_ip_arg_exists(self, proposed_object):
|
||||
'''
|
||||
This function checks whether the add/remove param is set to true in the
passed args; if so, the proposed dictionary is updated so the IP is
added to/removed from the existing host_record. If the user passes the
param with a false value, nothing is done.
|
||||
:returns: True if param is changed based on add/remove, and also the
|
||||
changed proposed_object.
|
||||
'''
|
||||
update = False
|
||||
if 'add' in proposed_object['ipv4addrs'][0]:
|
||||
if proposed_object['ipv4addrs'][0]['add']:
|
||||
proposed_object['ipv4addrs+'] = proposed_object['ipv4addrs']
|
||||
del proposed_object['ipv4addrs']
|
||||
del proposed_object['ipv4addrs+'][0]['add']
|
||||
update = True
|
||||
else:
|
||||
del proposed_object['ipv4addrs'][0]['add']
|
||||
elif 'remove' in proposed_object['ipv4addrs'][0]:
|
||||
if proposed_object['ipv4addrs'][0]['remove']:
|
||||
proposed_object['ipv4addrs-'] = proposed_object['ipv4addrs']
|
||||
del proposed_object['ipv4addrs']
|
||||
del proposed_object['ipv4addrs-'][0]['remove']
|
||||
update = True
|
||||
else:
|
||||
del proposed_object['ipv4addrs'][0]['remove']
|
||||
return update, proposed_object
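
# Illustrative sketch, not part of the original file. When add=True, the
# payload key is renamed so WAPI treats it as an append:
#
#     check_if_add_remove_ip_arg_exists(
#         {'ipv4addrs': [{'ipv4addr': '10.0.0.5', 'add': True}]})
#     # -> (True, {'ipv4addrs+': [{'ipv4addr': '10.0.0.5'}]})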
|
||||
|
||||
def issubset(self, item, objects):
|
||||
''' Checks if item is a subset of objects
|
||||
:args item: the subset item to validate
|
||||
:args objects: superset list of objects to validate against
|
||||
:returns: True if item is a subset of one entry in objects otherwise
|
||||
this method will return None
|
||||
'''
|
||||
for obj in objects:
|
||||
if isinstance(item, dict):
|
||||
if all(entry in obj.items() for entry in item.items()):
|
||||
return True
|
||||
else:
|
||||
if item in obj:
|
||||
return True
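
# Illustrative sketch, not part of the original file:
#
#     issubset({'a': 1}, [{'a': 1, 'b': 2}])  # -> True (all entries contained)
#     issubset('x', [['x', 'y']])             # -> True (plain membership)
#     issubset({'a': 2}, [{'a': 1}])          # -> None (falls through, falsy)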
|
||||
|
||||
def compare_objects(self, current_object, proposed_object):
|
||||
for key, proposed_item in iteritems(proposed_object):
|
||||
current_item = current_object.get(key)
|
||||
|
||||
# if proposed has a key that current doesn't then the objects are
|
||||
# not equal and False will be immediately returned
|
||||
if current_item is None:
|
||||
return False
|
||||
|
||||
elif isinstance(proposed_item, list):
|
||||
for subitem in proposed_item:
|
||||
if not self.issubset(subitem, current_item):
|
||||
return False
|
||||
|
||||
elif isinstance(proposed_item, dict):
|
||||
return self.compare_objects(current_item, proposed_item)
|
||||
|
||||
else:
|
||||
if current_item != proposed_item:
|
||||
return False
|
||||
|
||||
return True
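
# Illustrative sketch, not part of the original file. The comparison is
# deliberately asymmetric: keys present only in current_object are ignored,
# since WAPI returns many fields the module does not manage:
#
#     compare_objects({'name': 'a', '_type': 'record:a'}, {'name': 'a'})  # -> True
#     compare_objects({'name': 'a'}, {'name': 'b'})                       # -> False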
|
||||
|
||||
def get_object_ref(self, module, ib_obj_type, obj_filter, ib_spec):
|
||||
''' this function gets the reference object of pre-existing nios objects '''
|
||||
|
||||
update = False
|
||||
old_name = new_name = None
|
||||
if ('name' in obj_filter):
|
||||
# gets and returns the current object based on name/old_name passed
|
||||
try:
|
||||
name_obj = self.module._check_type_dict(obj_filter['name'])
|
||||
old_name = name_obj['old_name']
|
||||
new_name = name_obj['new_name']
|
||||
except TypeError:
|
||||
name = obj_filter['name']
|
||||
|
||||
if old_name and new_name:
|
||||
if (ib_obj_type == NIOS_HOST_RECORD):
|
||||
test_obj_filter = dict([('name', old_name), ('view', obj_filter['view'])])
|
||||
elif (ib_obj_type in (NIOS_AAAA_RECORD, NIOS_A_RECORD)):
|
||||
test_obj_filter = obj_filter
|
||||
else:
|
||||
test_obj_filter = dict([('name', old_name)])
|
||||
# get the object reference
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys())
|
||||
if ib_obj:
|
||||
obj_filter['name'] = new_name
|
||||
else:
|
||||
test_obj_filter['name'] = new_name
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter, return_fields=ib_spec.keys())
|
||||
update = True
|
||||
return ib_obj, update, new_name
|
||||
if (ib_obj_type == NIOS_HOST_RECORD):
|
||||
# to check only by name if dns bypassing is set
|
||||
if not obj_filter['configure_for_dns']:
|
||||
test_obj_filter = dict([('name', name)])
|
||||
else:
|
||||
test_obj_filter = dict([('name', name), ('view', obj_filter['view'])])
|
||||
elif (ib_obj_type == NIOS_IPV4_FIXED_ADDRESS or ib_obj_type == NIOS_IPV6_FIXED_ADDRESS and 'mac' in obj_filter):
|
||||
test_obj_filter = dict([['mac', obj_filter['mac']]])
|
||||
elif (ib_obj_type == NIOS_A_RECORD):
|
||||
# resolves issue where a_record with uppercase name was returning null and was failing
|
||||
test_obj_filter = obj_filter
|
||||
test_obj_filter['name'] = test_obj_filter['name'].lower()
|
||||
# resolves issue where multiple a_records with same name and different IP address
|
||||
try:
|
||||
ipaddr_obj = self.module._check_type_dict(obj_filter['ipv4addr'])
|
||||
ipaddr = ipaddr_obj['old_ipv4addr']
|
||||
except TypeError:
|
||||
ipaddr = obj_filter['ipv4addr']
|
||||
test_obj_filter['ipv4addr'] = ipaddr
|
||||
elif (ib_obj_type == NIOS_TXT_RECORD):
|
||||
# resolves issue where multiple txt_records with same name and different text
|
||||
test_obj_filter = obj_filter
|
||||
try:
|
||||
text_obj = self.module._check_type_dict(obj_filter['text'])
|
||||
txt = text_obj['old_text']
|
||||
except TypeError:
|
||||
txt = obj_filter['text']
|
||||
test_obj_filter['text'] = txt
|
||||
# if test_obj_filter was not populated above, fall back to the passed obj_filter
|
||||
else:
|
||||
test_obj_filter = obj_filter
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
|
||||
elif (ib_obj_type == NIOS_A_RECORD):
|
||||
# resolves issue where multiple a_records with same name and different IP address
|
||||
test_obj_filter = obj_filter
|
||||
try:
|
||||
ipaddr_obj = self.module._check_type_dict(obj_filter['ipv4addr'])
|
||||
ipaddr = ipaddr_obj['old_ipv4addr']
|
||||
except TypeError:
|
||||
ipaddr = obj_filter['ipv4addr']
|
||||
test_obj_filter['ipv4addr'] = ipaddr
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
|
||||
elif (ib_obj_type == NIOS_TXT_RECORD):
|
||||
# resolves issue where multiple txt_records with same name and different text
|
||||
test_obj_filter = obj_filter
|
||||
try:
|
||||
text_obj = self.module._check_type_dict(obj_filter['text'])
|
||||
txt = text_obj['old_text']
|
||||
except TypeError:
|
||||
txt = obj_filter['text']
|
||||
test_obj_filter['text'] = txt
|
||||
ib_obj = self.get_object(ib_obj_type, test_obj_filter.copy(), return_fields=ib_spec.keys())
|
||||
elif (ib_obj_type == NIOS_ZONE):
|
||||
# del key 'restart_if_needed' as nios_zone get_object fails with the key present
|
||||
temp = ib_spec['restart_if_needed']
|
||||
del ib_spec['restart_if_needed']
|
||||
ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
|
||||
# reinstate restart_if_needed if ib_obj is None, meaning there is no existing nios_zone ref
|
||||
if not ib_obj:
|
||||
ib_spec['restart_if_needed'] = temp
|
||||
elif (ib_obj_type == NIOS_MEMBER):
|
||||
# del key 'create_token' as nios_member get_object fails with the key present
|
||||
temp = ib_spec['create_token']
|
||||
del ib_spec['create_token']
|
||||
ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
|
||||
if temp:
|
||||
# reinstate 'create_token' key
|
||||
ib_spec['create_token'] = temp
|
||||
else:
|
||||
ib_obj = self.get_object(ib_obj_type, obj_filter.copy(), return_fields=ib_spec.keys())
|
||||
return ib_obj, update, new_name
|
||||
|
||||
def on_update(self, proposed_object, ib_spec):
|
||||
''' Event called before the update is sent to the API endpoint
|
||||
This method will allow the final proposed object to be changed
|
||||
and/or keys filtered before it is sent to the API endpoint to
|
||||
be processed.
|
||||
:args proposed_object: A dict item that will be encoded and sent
|
||||
to the API endpoint with the updated data structure
|
||||
:returns: updated object to be sent to API endpoint
|
||||
'''
|
||||
keys = set()
|
||||
for key, value in iteritems(proposed_object):
|
||||
update = ib_spec[key].get('update', True)
|
||||
if not update:
|
||||
keys.add(key)
|
||||
return dict([(k, v) for k, v in iteritems(proposed_object) if k not in keys])
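
# Illustrative sketch, not part of the original file. A spec entry with
# update=False is stripped from the outgoing payload: given
# proposed_object = {'name': 'a', 'network_view': 'default'} and
# ib_spec = {'name': {}, 'network_view': {'update': False}},
# on_update returns {'name': 'a'}.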
@@ -1,232 +0,0 @@
#!/usr/bin/python
|
||||
# Copyright (c) 2019, Tom De Keyser (@tdekeyser)
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: aws_step_functions_state_machine
|
||||
|
||||
short_description: Manage AWS Step Functions state machines
|
||||
|
||||
version_added: "2.10"
|
||||
|
||||
description:
|
||||
- Create, update and delete state machines in AWS Step Functions.
|
||||
- Calling the module in C(state=present) for an existing AWS Step Functions state machine
|
||||
will attempt to update the state machine definition, IAM Role, or tags with the provided data.
|
||||
|
||||
options:
|
||||
name:
|
||||
description:
|
||||
- Name of the state machine
|
||||
required: true
|
||||
type: str
|
||||
definition:
|
||||
description:
|
||||
- The Amazon States Language definition of the state machine. See
|
||||
U(https://docs.aws.amazon.com/step-functions/latest/dg/concepts-amazon-states-language.html) for more
|
||||
information on the Amazon States Language.
|
||||
- "This parameter is required when C(state=present)."
|
||||
type: json
|
||||
role_arn:
|
||||
description:
|
||||
- The ARN of the IAM Role that will be used by the state machine for its executions.
|
||||
- "This parameter is required when C(state=present)."
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Desired state for the state machine
|
||||
default: present
|
||||
choices: [ present, absent ]
|
||||
type: str
|
||||
tags:
|
||||
description:
|
||||
- A hash/dictionary of tags to add to the new state machine or to add/remove from an existing one.
|
||||
type: dict
|
||||
purge_tags:
|
||||
description:
|
||||
- If yes, existing tags will be purged from the resource to match exactly what is defined by the I(tags) parameter.
|
||||
If the I(tags) parameter is not set then tags will not be modified.
|
||||
default: yes
|
||||
type: bool
|
||||
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
|
||||
author:
|
||||
- Tom De Keyser (@tdekeyser)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
# Create a new AWS Step Functions state machine
|
||||
- name: Setup HelloWorld state machine
|
||||
aws_step_functions_state_machine:
|
||||
name: "HelloWorldStateMachine"
|
||||
definition: "{{ lookup('file','state_machine.json') }}"
|
||||
role_arn: arn:aws:iam::987654321012:role/service-role/invokeLambdaStepFunctionsRole
|
||||
tags:
|
||||
project: helloWorld
|
||||
|
||||
# Update an existing state machine
|
||||
- name: Change IAM Role and tags of HelloWorld state machine
|
||||
aws_step_functions_state_machine:
|
||||
name: HelloWorldStateMachine
|
||||
definition: "{{ lookup('file','state_machine.json') }}"
|
||||
role_arn: arn:aws:iam::987654321012:role/service-role/anotherStepFunctionsRole
|
||||
tags:
|
||||
otherTag: aDifferentTag
|
||||
|
||||
# Remove the AWS Step Functions state machine
|
||||
- name: Delete HelloWorld state machine
|
||||
aws_step_functions_state_machine:
|
||||
name: HelloWorldStateMachine
|
||||
state: absent
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
state_machine_arn:
|
||||
description: ARN of the AWS Step Functions state machine
|
||||
type: str
|
||||
returned: always
|
||||
'''
|
||||
|
||||
from ansible.module_utils.aws.core import AnsibleAWSModule
|
||||
from ansible.module_utils.ec2 import ansible_dict_to_boto3_tag_list, AWSRetry, compare_aws_tags, boto3_tag_list_to_ansible_dict
|
||||
|
||||
try:
|
||||
from botocore.exceptions import ClientError, BotoCoreError
|
||||
except ImportError:
|
||||
pass # caught by AnsibleAWSModule
|
||||
|
||||
|
||||
def manage_state_machine(state, sfn_client, module):
|
||||
state_machine_arn = get_state_machine_arn(sfn_client, module)
|
||||
|
||||
if state == 'present':
|
||||
if state_machine_arn is None:
|
||||
create(sfn_client, module)
|
||||
else:
|
||||
update(state_machine_arn, sfn_client, module)
|
||||
elif state == 'absent':
|
||||
if state_machine_arn is not None:
|
||||
remove(state_machine_arn, sfn_client, module)
|
||||
|
||||
check_mode(module, msg='State is up-to-date.')
|
||||
module.exit_json(changed=False)
|
||||
|
||||
|
||||
def create(sfn_client, module):
|
||||
check_mode(module, msg='State machine would be created.', changed=True)
|
||||
|
||||
tags = module.params.get('tags')
|
||||
sfn_tags = ansible_dict_to_boto3_tag_list(tags, tag_name_key_name='key', tag_value_key_name='value') if tags else []
|
||||
|
||||
state_machine = sfn_client.create_state_machine(
|
||||
name=module.params.get('name'),
|
||||
definition=module.params.get('definition'),
|
||||
roleArn=module.params.get('role_arn'),
|
||||
tags=sfn_tags
|
||||
)
|
||||
module.exit_json(changed=True, state_machine_arn=state_machine.get('stateMachineArn'))
|
||||
|
||||
|
||||
def remove(state_machine_arn, sfn_client, module):
|
||||
check_mode(module, msg='State machine would be deleted: {0}'.format(state_machine_arn), changed=True)
|
||||
|
||||
sfn_client.delete_state_machine(stateMachineArn=state_machine_arn)
|
||||
module.exit_json(changed=True, state_machine_arn=state_machine_arn)
|
||||
|
||||
|
||||
def update(state_machine_arn, sfn_client, module):
|
||||
tags_to_add, tags_to_remove = compare_tags(state_machine_arn, sfn_client, module)
|
||||
|
||||
if params_changed(state_machine_arn, sfn_client, module) or tags_to_add or tags_to_remove:
|
||||
check_mode(module, msg='State machine would be updated: {0}'.format(state_machine_arn), changed=True)
|
||||
|
||||
sfn_client.update_state_machine(
|
||||
stateMachineArn=state_machine_arn,
|
||||
definition=module.params.get('definition'),
|
||||
roleArn=module.params.get('role_arn')
|
||||
)
|
||||
sfn_client.untag_resource(
|
||||
resourceArn=state_machine_arn,
|
||||
tagKeys=tags_to_remove
|
||||
)
|
||||
sfn_client.tag_resource(
|
||||
resourceArn=state_machine_arn,
|
||||
tags=ansible_dict_to_boto3_tag_list(tags_to_add, tag_name_key_name='key', tag_value_key_name='value')
|
||||
)
|
||||
|
||||
module.exit_json(changed=True, state_machine_arn=state_machine_arn)
|
||||
|
||||
|
||||
def compare_tags(state_machine_arn, sfn_client, module):
|
||||
new_tags = module.params.get('tags')
|
||||
current_tags = sfn_client.list_tags_for_resource(resourceArn=state_machine_arn).get('tags')
|
||||
return compare_aws_tags(boto3_tag_list_to_ansible_dict(current_tags), new_tags if new_tags else {}, module.params.get('purge_tags'))
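
# Illustrative note; the return shape below is an assumption about
# compare_aws_tags, not stated in this file. It yields a pair of
# (tags_to_add_or_change, tag_keys_to_remove), e.g. comparing current
# {'env': 'dev'} against desired {'env': 'prod'} with purge_tags=True
# gives ({'env': 'prod'}, []).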
|
||||
|
||||
|
||||
def params_changed(state_machine_arn, sfn_client, module):
|
||||
"""
|
||||
Check whether the state machine definition or IAM Role ARN is different
|
||||
from the existing state machine parameters.
|
||||
"""
|
||||
current = sfn_client.describe_state_machine(stateMachineArn=state_machine_arn)
|
||||
return current.get('definition') != module.params.get('definition') or current.get('roleArn') != module.params.get('role_arn')
|
||||
|
||||
|
||||
def get_state_machine_arn(sfn_client, module):
|
||||
"""
|
||||
Finds the state machine ARN based on the name parameter. Returns None if
|
||||
there is no state machine with this name.
|
||||
"""
|
||||
target_name = module.params.get('name')
|
||||
all_state_machines = sfn_client.list_state_machines(aws_retry=True).get('stateMachines')
|
||||
|
||||
for state_machine in all_state_machines:
|
||||
if state_machine.get('name') == target_name:
|
||||
return state_machine.get('stateMachineArn')
|
||||
|
||||
|
||||
def check_mode(module, msg='', changed=False):
|
||||
if module.check_mode:
|
||||
module.exit_json(changed=changed, output=msg)
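
# Minimal sketch of the check-mode flow, not part of the original file:
# each mutating helper above calls check_mode() before touching AWS, so
# with --check the module exits with a would-be message and the boto3
# calls that follow the guard are never reached.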
|
||||
|
||||
|
||||
def main():
|
||||
module_args = dict(
|
||||
name=dict(type='str', required=True),
|
||||
definition=dict(type='json'),
|
||||
role_arn=dict(type='str'),
|
||||
state=dict(choices=['present', 'absent'], default='present'),
|
||||
tags=dict(default=None, type='dict'),
|
||||
purge_tags=dict(default=True, type='bool'),
|
||||
)
|
||||
module = AnsibleAWSModule(
|
||||
argument_spec=module_args,
|
||||
required_if=[('state', 'present', ['role_arn']), ('state', 'present', ['definition'])],
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
sfn_client = module.client('stepfunctions', retry_decorator=AWSRetry.jittered_backoff(retries=5))
|
||||
state = module.params.get('state')
|
||||
|
||||
try:
|
||||
manage_state_machine(state, sfn_client, module)
|
||||
except (BotoCoreError, ClientError) as e:
|
||||
module.fail_json_aws(e, msg='Failed to manage state machine')
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
@@ -1,197 +0,0 @@
#!/usr/bin/python
|
||||
# Copyright (c) 2019, Prasad Katti (@prasadkatti)
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {
|
||||
'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'
|
||||
}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: aws_step_functions_state_machine_execution
|
||||
|
||||
short_description: Start or stop execution of an AWS Step Functions state machine.
|
||||
|
||||
version_added: "2.10"
|
||||
|
||||
description:
|
||||
- Start or stop execution of a state machine in AWS Step Functions.
|
||||
|
||||
options:
|
||||
action:
|
||||
description: Desired action (start or stop) for a state machine execution.
|
||||
default: start
|
||||
choices: [ start, stop ]
|
||||
type: str
|
||||
name:
|
||||
description: Name of the execution.
|
||||
type: str
|
||||
execution_input:
|
||||
description: The JSON input data for the execution.
|
||||
type: json
|
||||
default: {}
|
||||
state_machine_arn:
|
||||
description: The ARN of the state machine that will be executed.
|
||||
type: str
|
||||
execution_arn:
|
||||
description: The ARN of the execution you wish to stop.
|
||||
type: str
|
||||
cause:
|
||||
description: A detailed explanation of the cause for stopping the execution.
|
||||
type: str
|
||||
default: ''
|
||||
error:
|
||||
description: The error code of the failure to pass in when stopping the execution.
|
||||
type: str
|
||||
default: ''
|
||||
|
||||
extends_documentation_fragment:
|
||||
- aws
|
||||
- ec2
|
||||
|
||||
author:
|
||||
- Prasad Katti (@prasadkatti)
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: Start an execution of a state machine
|
||||
aws_step_functions_state_machine_execution:
|
||||
name: an_execution_name
|
||||
execution_input: '{ "IsHelloWorldExample": true }'
|
||||
state_machine_arn: "arn:aws:states:us-west-2:682285639423:stateMachine:HelloWorldStateMachine"
|
||||
|
||||
- name: Stop an execution of a state machine
|
||||
aws_step_functions_state_machine_execution:
|
||||
action: stop
|
||||
execution_arn: "arn:aws:states:us-west-2:682285639423:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8"
|
||||
cause: "cause of task failure"
|
||||
error: "error code of the failure"
|
||||
'''
|
||||
|
||||
RETURN = '''
|
||||
execution_arn:
|
||||
description: ARN of the AWS Step Functions state machine execution.
|
||||
type: str
|
||||
returned: if action == start and changed == True
|
||||
sample: "arn:aws:states:us-west-2:682285639423:execution:HelloWorldStateMachineCopy:a1e8e2b5-5dfe-d40e-d9e3-6201061047c8"
|
||||
start_date:
|
||||
description: The date the execution is started.
|
||||
type: str
|
||||
returned: if action == start and changed == True
|
||||
sample: "2019-11-02T22:39:49.071000-07:00"
|
||||
stop_date:
|
||||
description: The date the execution is stopped.
|
||||
type: str
|
||||
returned: if action == stop
|
||||
sample: "2019-11-02T22:39:49.071000-07:00"
|
||||
'''
|
||||
|
||||
|
||||
from ansible.module_utils.aws.core import AnsibleAWSModule
|
||||
from ansible.module_utils.ec2 import camel_dict_to_snake_dict
|
||||
|
||||
try:
|
||||
from botocore.exceptions import ClientError, BotoCoreError
|
||||
except ImportError:
|
||||
pass # caught by AnsibleAWSModule
|
||||
|
||||
|
||||
def start_execution(module, sfn_client):
|
||||
'''
|
||||
start_execution uses execution name to determine if a previous execution already exists.
|
||||
If an execution with the provided name already exists, client.start_execution is not called.
|
||||
'''
|
||||
|
||||
state_machine_arn = module.params.get('state_machine_arn')
|
||||
name = module.params.get('name')
|
||||
execution_input = module.params.get('execution_input')
|
||||
|
||||
try:
|
||||
# list_executions is eventually consistent
|
||||
page_iterators = sfn_client.get_paginator('list_executions').paginate(stateMachineArn=state_machine_arn)
|
||||
|
||||
for execution in page_iterators.build_full_result()['executions']:
|
||||
if name == execution['name']:
|
||||
check_mode(module, msg='State machine execution already exists.', changed=False)
|
||||
module.exit_json(changed=False)
|
||||
|
||||
check_mode(module, msg='State machine execution would be started.', changed=True)
|
||||
res_execution = sfn_client.start_execution(
|
||||
stateMachineArn=state_machine_arn,
|
||||
name=name,
|
||||
input=execution_input
|
||||
)
|
||||
except (ClientError, BotoCoreError) as e:
|
||||
if e.response['Error']['Code'] == 'ExecutionAlreadyExists':
|
||||
# this branch should be unreachable, since existing executions are detected above
|
||||
module.exit_json(changed=False)
|
||||
module.fail_json_aws(e, msg="Failed to start execution.")
|
||||
|
||||
module.exit_json(changed=True, **camel_dict_to_snake_dict(res_execution))
|
||||
|
||||
|
||||
def stop_execution(module, sfn_client):
|
||||
|
||||
cause = module.params.get('cause')
|
||||
error = module.params.get('error')
|
||||
execution_arn = module.params.get('execution_arn')
|
||||
|
||||
try:
|
||||
# describe_execution is eventually consistent
|
||||
execution_status = sfn_client.describe_execution(executionArn=execution_arn)['status']
|
||||
if execution_status != 'RUNNING':
|
||||
check_mode(module, msg='State machine execution is not running.', changed=False)
|
||||
module.exit_json(changed=False)
|
||||
|
||||
check_mode(module, msg='State machine execution would be stopped.', changed=True)
|
||||
res = sfn_client.stop_execution(
|
||||
executionArn=execution_arn,
|
||||
cause=cause,
|
||||
error=error
|
||||
)
|
||||
except (ClientError, BotoCoreError) as e:
|
||||
module.fail_json_aws(e, msg="Failed to stop execution.")
|
||||
|
||||
module.exit_json(changed=True, **camel_dict_to_snake_dict(res))
|
||||
|
||||
|
||||
def check_mode(module, msg='', changed=False):
|
||||
if module.check_mode:
|
||||
module.exit_json(changed=changed, output=msg)
|
||||
|
||||
|
||||
def main():
|
||||
module_args = dict(
|
||||
action=dict(choices=['start', 'stop'], default='start'),
|
||||
name=dict(type='str'),
|
||||
execution_input=dict(type='json', default={}),
|
||||
state_machine_arn=dict(type='str'),
|
||||
cause=dict(type='str', default=''),
|
||||
error=dict(type='str', default=''),
|
||||
execution_arn=dict(type='str')
|
||||
)
|
||||
module = AnsibleAWSModule(
|
||||
argument_spec=module_args,
|
||||
required_if=[('action', 'start', ['name', 'state_machine_arn']),
|
||||
('action', 'stop', ['execution_arn']),
|
||||
],
|
||||
supports_check_mode=True
|
||||
)
|
||||
|
||||
sfn_client = module.client('stepfunctions')
|
||||
|
||||
action = module.params.get('action')
|
||||
if action == "start":
|
||||
start_execution(module, sfn_client)
|
||||
else:
|
||||
stop_execution(module, sfn_client)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
@@ -1,284 +0,0 @@
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
# Copyright: (c) 2015, Steve Gargan <steve.gargan@gmail.com>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import absolute_import, division, print_function
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = """
|
||||
module: consul_session
|
||||
short_description: Manipulate consul sessions
|
||||
description:
|
||||
- Allows the addition, modification and deletion of sessions in a consul
|
||||
cluster. These sessions can then be used in conjunction with key value pairs
|
||||
to implement distributed locks. In depth documentation for working with
|
||||
sessions can be found at http://www.consul.io/docs/internals/sessions.html
|
||||
requirements:
|
||||
- python-consul
|
||||
- requests
|
||||
version_added: "2.0"
|
||||
author:
|
||||
- Steve Gargan (@sgargan)
|
||||
options:
|
||||
id:
|
||||
description:
|
||||
- ID of the session, required when I(state) is either C(info) or
|
||||
C(absent).
|
||||
type: str
|
||||
state:
|
||||
description:
|
||||
- Whether the session should be present i.e. created if it doesn't
|
||||
exist, or absent, removed if present. If created, the I(id) for the
|
||||
session is returned in the output. If C(absent), I(id) is
|
||||
required to remove the session. Info for a single session, all the
|
||||
sessions for a node or all available sessions can be retrieved by
|
||||
specifying C(info), C(node) or C(list) for the I(state); for C(node)
|
||||
or C(info), the node I(name) or session I(id) is required as parameter.
|
||||
choices: [ absent, info, list, node, present ]
|
||||
type: str
|
||||
default: present
|
||||
name:
|
||||
description:
|
||||
- The name that should be associated with the session. Required when
|
||||
I(state=node) is used.
|
||||
type: str
|
||||
delay:
|
||||
description:
|
||||
- The optional lock delay that can be attached to the session when it
|
||||
is created. Locks for invalidated sessions are blocked from being
|
||||
acquired until this delay has expired. Durations are in seconds.
|
||||
type: int
|
||||
default: 15
|
||||
node:
|
||||
description:
|
||||
- The name of the node with which the session will be associated;
|
||||
by default this is the name of the agent.
|
||||
type: str
|
||||
datacenter:
|
||||
description:
|
||||
- The name of the datacenter in which the session exists or should be
|
||||
created.
|
||||
type: str
|
||||
checks:
|
||||
description:
|
||||
- Checks that will be used to verify the session health. If
|
||||
all the checks fail, the session will be invalidated and any locks
|
||||
associated with the session will be released and can be acquired once
|
||||
the associated lock delay has expired.
|
||||
type: list
|
||||
host:
|
||||
description:
|
||||
- The host of the consul agent; defaults to localhost.
|
||||
type: str
|
||||
default: localhost
|
||||
port:
|
||||
description:
|
||||
- The port on which the consul agent is running.
|
||||
type: int
|
||||
default: 8500
|
||||
scheme:
|
||||
description:
|
||||
- The protocol scheme on which the consul agent is running.
|
||||
type: str
|
||||
default: http
|
||||
version_added: "2.1"
|
||||
validate_certs:
|
||||
description:
|
||||
- Whether to verify the TLS certificate of the consul agent.
|
||||
type: bool
|
||||
default: True
|
||||
version_added: "2.1"
|
||||
behavior:
|
||||
description:
|
||||
- The optional behavior that can be attached to the session when it
|
||||
is created. This controls the behavior when a session is invalidated.
|
||||
choices: [ delete, release ]
|
||||
type: str
|
||||
default: release
|
||||
version_added: "2.2"
|
||||
"""
|
||||
|
||||
EXAMPLES = '''
|
||||
- name: register basic session with consul
|
||||
consul_session:
|
||||
name: session1
|
||||
|
||||
- name: register a session with an existing check
|
||||
consul_session:
|
||||
name: session_with_check
|
||||
checks:
|
||||
- existing_check_name
|
||||
|
||||
- name: register a session with lock_delay
|
||||
consul_session:
|
||||
name: session_with_delay
|
||||
delay: 20s
|
||||
|
||||
- name: retrieve info about session by id
|
||||
consul_session:
|
||||
id: session_id
|
||||
state: info
|
||||
|
||||
- name: retrieve active sessions
|
||||
consul_session:
|
||||
state: list
|
||||
'''
|
||||
|
||||
try:
|
||||
import consul
|
||||
from requests.exceptions import ConnectionError
|
||||
python_consul_installed = True
|
||||
except ImportError:
|
||||
python_consul_installed = False
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
|
||||
|
||||
def execute(module):
|
||||
|
||||
state = module.params.get('state')
|
||||
|
||||
if state in ['info', 'list', 'node']:
|
||||
lookup_sessions(module)
|
||||
elif state == 'present':
|
||||
update_session(module)
|
||||
else:
|
||||
remove_session(module)
|
||||
|
||||
|
||||
def lookup_sessions(module):
|
||||
|
||||
datacenter = module.params.get('datacenter')
|
||||
|
||||
state = module.params.get('state')
|
||||
consul_client = get_consul_api(module)
|
||||
try:
|
||||
if state == 'list':
|
||||
sessions_list = consul_client.session.list(dc=datacenter)
|
||||
# Ditch the index, this can be grabbed from the results
|
||||
if sessions_list and len(sessions_list) >= 2:
|
||||
sessions_list = sessions_list[1]
|
||||
module.exit_json(changed=True,
|
||||
sessions=sessions_list)
|
||||
elif state == 'node':
|
||||
node = module.params.get('node')
|
||||
sessions = consul_client.session.node(node, dc=datacenter)
|
||||
module.exit_json(changed=True,
|
||||
node=node,
|
||||
sessions=sessions)
|
||||
elif state == 'info':
|
||||
session_id = module.params.get('id')
|
||||
|
||||
session_by_id = consul_client.session.info(session_id, dc=datacenter)
|
||||
module.exit_json(changed=True,
|
||||
session_id=session_id,
|
||||
sessions=session_by_id)
|
||||
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Could not retrieve session info %s" % e)
|
||||
|
||||
|
||||
def update_session(module):
|
||||
|
||||
name = module.params.get('name')
|
||||
delay = module.params.get('delay')
|
||||
checks = module.params.get('checks')
|
||||
datacenter = module.params.get('datacenter')
|
||||
node = module.params.get('node')
|
||||
behavior = module.params.get('behavior')
|
||||
|
||||
consul_client = get_consul_api(module)
|
||||
|
||||
try:
|
||||
session = consul_client.session.create(
|
||||
name=name,
|
||||
behavior=behavior,
|
||||
node=node,
|
||||
lock_delay=delay,
|
||||
dc=datacenter,
|
||||
checks=checks
|
||||
)
|
||||
module.exit_json(changed=True,
|
||||
session_id=session,
|
||||
name=name,
|
||||
behavior=behavior,
|
||||
delay=delay,
|
||||
checks=checks,
|
||||
node=node)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Could not create/update session %s" % e)
|
||||
|
||||
|
||||
def remove_session(module):
|
||||
session_id = module.params.get('id')
|
||||
|
||||
consul_client = get_consul_api(module)
|
||||
|
||||
try:
|
||||
consul_client.session.destroy(session_id)
|
||||
|
||||
module.exit_json(changed=True,
|
||||
session_id=session_id)
|
||||
except Exception as e:
|
||||
module.fail_json(msg="Could not remove session with id '%s' %s" % (
|
||||
session_id, e))
|
||||
|
||||
|
||||
def get_consul_api(module):
|
||||
return consul.Consul(host=module.params.get('host'),
|
||||
port=module.params.get('port'),
|
||||
scheme=module.params.get('scheme'),
|
||||
verify=module.params.get('validate_certs'))
|
||||
|
||||
|
||||
def test_dependencies(module):
|
||||
if not python_consul_installed:
|
||||
module.fail_json(msg="python-consul required for this module. "
|
||||
"see https://python-consul.readthedocs.io/en/latest/#installation")
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = dict(
|
||||
checks=dict(type='list'),
|
||||
delay=dict(type='int', default='15'),
|
||||
behavior=dict(type='str', default='release', choices=['release', 'delete']),
|
||||
host=dict(type='str', default='localhost'),
|
||||
port=dict(type='int', default=8500),
|
||||
scheme=dict(type='str', default='http'),
|
||||
validate_certs=dict(type='bool', default=True),
|
||||
id=dict(type='str'),
|
||||
name=dict(type='str'),
|
||||
node=dict(type='str'),
|
||||
state=dict(type='str', default='present', choices=['absent', 'info', 'list', 'node', 'present']),
|
||||
datacenter=dict(type='str'),
|
||||
)
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=argument_spec,
|
||||
required_if=[
|
||||
('state', 'node', ['name']),
|
||||
('state', 'info', ['id']),
|
||||
('state', 'absent', ['id']),
|
||||
],
|
||||
supports_check_mode=False
|
||||
)
|
||||
|
||||
test_dependencies(module)
|
||||
|
||||
try:
|
||||
execute(module)
|
||||
except ConnectionError as e:
|
||||
module.fail_json(msg='Could not connect to consul agent at %s:%s, error was %s' % (
|
||||
module.params.get('host'), module.params.get('port'), e))
|
||||
except Exception as e:
|
||||
module.fail_json(msg=str(e))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
@@ -1,583 +0,0 @@
#!/usr/bin/python
|
||||
# -*- coding: utf-8 -*-
|
||||
#
|
||||
# (c) 2017, René Moser <mail@renemoser.net>
|
||||
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
|
||||
|
||||
from __future__ import (absolute_import, division, print_function)
|
||||
__metaclass__ = type
|
||||
|
||||
ANSIBLE_METADATA = {'metadata_version': '1.1',
|
||||
'status': ['preview'],
|
||||
'supported_by': 'community'}
|
||||
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: cs_service_offering
|
||||
description:
|
||||
- Create and delete service offerings for guest and system VMs.
|
||||
- Update display_text of existing service offering.
|
||||
short_description: Manages service offerings on Apache CloudStack based clouds.
|
||||
version_added: '2.5'
|
||||
author: René Moser (@resmo)
|
||||
options:
|
||||
disk_bytes_read_rate:
|
||||
description:
|
||||
- Bytes read rate of the disk offering.
|
||||
type: int
|
||||
aliases: [ bytes_read_rate ]
|
||||
disk_bytes_write_rate:
|
||||
description:
|
||||
- Bytes write rate of the disk offering.
|
||||
type: int
|
||||
aliases: [ bytes_write_rate ]
|
||||
cpu_number:
|
||||
description:
|
||||
- The number of CPUs of the service offering.
|
||||
type: int
|
||||
cpu_speed:
|
||||
description:
|
||||
- The CPU speed of the service offering in MHz.
|
||||
type: int
|
||||
limit_cpu_usage:
|
||||
description:
|
||||
- Restrict the CPU usage to committed service offering.
|
||||
type: bool
|
||||
deployment_planner:
|
||||
description:
|
||||
- The deployment planner heuristics used to deploy a VM of this offering.
|
||||
- If not set, the value of global config I(vm.deployment.planner) is used.
|
||||
type: str
|
||||
display_text:
|
||||
description:
|
||||
- Display text of the service offering.
|
||||
- If not set, I(name) will be used as I(display_text) while creating.
|
||||
type: str
|
||||
domain:
|
||||
description:
|
||||
- Domain the service offering is related to.
|
||||
- Public for all domains and subdomains if not set.
|
||||
type: str
|
||||
host_tags:
|
||||
description:
|
||||
- The host tags for this service offering.
|
||||
type: list
|
||||
aliases:
|
||||
- host_tag
|
||||
hypervisor_snapshot_reserve:
|
||||
description:
|
||||
- Hypervisor snapshot reserve space as a percent of a volume.
|
||||
- Only for managed storage using Xen or VMware.
|
||||
type: int
|
||||
is_iops_customized:
|
||||
description:
|
||||
- Whether compute offering iops is custom or not.
|
||||
type: bool
|
||||
aliases: [ disk_iops_customized ]
|
||||
disk_iops_read_rate:
|
||||
description:
|
||||
- IO requests read rate of the disk offering.
|
||||
type: int
|
||||
disk_iops_write_rate:
|
||||
description:
|
||||
- IO requests write rate of the disk offering.
|
||||
type: int
|
||||
disk_iops_max:
|
||||
description:
|
||||
- Max. iops of the compute offering.
|
||||
type: int
|
||||
disk_iops_min:
|
||||
description:
|
||||
- Min. iops of the compute offering.
|
||||
type: int
|
||||
is_system:
|
||||
description:
|
||||
- Whether it is a system VM offering or not.
|
||||
type: bool
|
||||
default: no
|
||||
is_volatile:
|
||||
description:
|
||||
- Whether the virtual machine needs to be volatile or not.
|
||||
- On every reboot of the VM, the root disk is detached and destroyed, and a fresh root disk is created and attached to the VM.
|
||||
type: bool
|
||||
memory:
|
||||
description:
|
||||
- The total memory of the service offering in MB.
|
||||
type: int
|
||||
name:
|
||||
description:
|
||||
- Name of the service offering.
|
||||
type: str
|
||||
required: true
|
||||
network_rate:
|
||||
description:
|
||||
- Data transfer rate in Mb/s allowed.
|
||||
- Supported only for non-system offering and system offerings having I(system_vm_type=domainrouter).
|
||||
type: int
|
||||
offer_ha:
|
||||
description:
|
||||
- Whether HA is set for the service offering.
|
||||
type: bool
|
||||
default: no
|
||||
provisioning_type:
|
||||
description:
|
||||
- Provisioning type used to create volumes.
|
||||
type: str
|
||||
choices:
|
||||
- thin
|
||||
- sparse
|
||||
- fat
|
||||
service_offering_details:
|
||||
description:
|
||||
- Details for planner, used to store specific parameters.
|
||||
- A list of dictionaries having keys C(key) and C(value).
|
||||
type: list
|
||||
state:
|
||||
description:
|
||||
- State of the service offering.
|
||||
type: str
|
||||
choices:
|
||||
- present
|
||||
- absent
|
||||
default: present
|
||||
storage_type:
|
||||
description:
|
||||
- The storage type of the service offering.
|
||||
type: str
|
||||
choices:
|
||||
- local
|
||||
      - shared
  system_vm_type:
    description:
      - The system VM type.
      - Required if I(is_system=yes).
    type: str
    choices:
      - domainrouter
      - consoleproxy
      - secondarystoragevm
  storage_tags:
    description:
      - The storage tags for this service offering.
    type: list
    aliases:
      - storage_tag
  is_customized:
    description:
      - Whether the offering is customizable or not.
    type: bool
    version_added: '2.8'
extends_documentation_fragment: cloudstack
'''

EXAMPLES = '''
- name: Create a non-volatile compute service offering with local storage
  cs_service_offering:
    name: Micro
    display_text: Micro 512mb 1cpu
    cpu_number: 1
    cpu_speed: 2198
    memory: 512
    host_tags: eco
    storage_type: local
  delegate_to: localhost

- name: Create a volatile compute service offering with shared storage
  cs_service_offering:
    name: Tiny
    display_text: Tiny 1gb 1cpu
    cpu_number: 1
    cpu_speed: 2198
    memory: 1024
    storage_type: shared
    is_volatile: yes
    host_tags: eco
    storage_tags: eco
  delegate_to: localhost

- name: Create or update a volatile compute service offering with shared storage
  cs_service_offering:
    name: Tiny
    display_text: Tiny 1gb 1cpu
    cpu_number: 1
    cpu_speed: 2198
    memory: 1024
    storage_type: shared
    is_volatile: yes
    host_tags: eco
    storage_tags: eco
  delegate_to: localhost

- name: Create or update a custom compute service offering
  cs_service_offering:
    name: custom
    display_text: custom compute offer
    is_customized: yes
    storage_type: shared
    host_tags: eco
    storage_tags: eco
  delegate_to: localhost

- name: Remove a compute service offering
  cs_service_offering:
    name: Tiny
    state: absent
  delegate_to: localhost

- name: Create or update a system offering for the console proxy
  cs_service_offering:
    name: System Offering for Console Proxy 2GB
    display_text: System Offering for Console Proxy 2GB RAM
    is_system: yes
    system_vm_type: consoleproxy
    cpu_number: 1
    cpu_speed: 2198
    memory: 2048
    storage_type: shared
    storage_tags: perf
  delegate_to: localhost

- name: Remove a system offering
  cs_service_offering:
    name: System Offering for Console Proxy 2GB
    is_system: yes
    state: absent
  delegate_to: localhost
'''

RETURN = '''
---
id:
  description: UUID of the service offering
  returned: success
  type: str
  sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
cpu_number:
  description: Number of CPUs in the service offering
  returned: success
  type: int
  sample: 4
cpu_speed:
  description: Speed of CPUs in MHz in the service offering
  returned: success
  type: int
  sample: 2198
disk_iops_max:
  description: Max IOPS of the disk offering
  returned: success
  type: int
  sample: 1000
disk_iops_min:
  description: Min IOPS of the disk offering
  returned: success
  type: int
  sample: 500
disk_bytes_read_rate:
  description: Bytes read rate of the service offering
  returned: success
  type: int
  sample: 1000
disk_bytes_write_rate:
  description: Bytes write rate of the service offering
  returned: success
  type: int
  sample: 1000
disk_iops_read_rate:
  description: IO requests per second read rate of the service offering
  returned: success
  type: int
  sample: 1000
disk_iops_write_rate:
  description: IO requests per second write rate of the service offering
  returned: success
  type: int
  sample: 1000
created:
  description: Date the offering was created
  returned: success
  type: str
  sample: 2017-11-19T10:48:59+0000
display_text:
  description: Display text of the offering
  returned: success
  type: str
  sample: Micro 512mb 1cpu
domain:
  description: Domain the offering is in
  returned: success
  type: str
  sample: ROOT
host_tags:
  description: List of host tags
  returned: success
  type: list
  sample: [ 'eco' ]
storage_tags:
  description: List of storage tags
  returned: success
  type: list
  sample: [ 'eco' ]
is_system:
  description: Whether the offering is for system VMs or not
  returned: success
  type: bool
  sample: false
is_iops_customized:
  description: Whether the offering uses custom IOPS or not
  returned: success
  type: bool
  sample: false
is_volatile:
  description: Whether the offering is volatile or not
  returned: success
  type: bool
  sample: false
limit_cpu_usage:
  description: Whether the CPU usage is restricted to the committed service offering
  returned: success
  type: bool
  sample: false
memory:
  description: Memory of the system offering
  returned: success
  type: int
  sample: 512
name:
  description: Name of the system offering
  returned: success
  type: str
  sample: Micro
offer_ha:
  description: Whether HA support is enabled in the offering or not
  returned: success
  type: bool
  sample: false
provisioning_type:
  description: Provisioning type used to create volumes
  returned: success
  type: str
  sample: thin
storage_type:
  description: Storage type used to create volumes
  returned: success
  type: str
  sample: shared
system_vm_type:
  description: System VM type of this offering
  returned: success
  type: str
  sample: consoleproxy
service_offering_details:
  description: Additional service offering details
  returned: success
  type: dict
  sample: "{'vgpuType': 'GRID K180Q','pciDevice':'Group of NVIDIA Corporation GK107GL [GRID K1] GPUs'}"
network_rate:
  description: Data transfer rate in megabits per second allowed
  returned: success
  type: int
  sample: 1000
is_customized:
  description: Whether the offering is customizable or not
  returned: success
  type: bool
  sample: false
  version_added: '2.8'
'''

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.cloudstack import (
    AnsibleCloudStack,
    cs_argument_spec,
    cs_required_together,
)


class AnsibleCloudStackServiceOffering(AnsibleCloudStack):

    def __init__(self, module):
        super(AnsibleCloudStackServiceOffering, self).__init__(module)
        self.returns = {
            'cpunumber': 'cpu_number',
            'cpuspeed': 'cpu_speed',
            'deploymentplanner': 'deployment_planner',
            'diskBytesReadRate': 'disk_bytes_read_rate',
            'diskBytesWriteRate': 'disk_bytes_write_rate',
            'diskIopsReadRate': 'disk_iops_read_rate',
            'diskIopsWriteRate': 'disk_iops_write_rate',
            'maxiops': 'disk_iops_max',
            'miniops': 'disk_iops_min',
            'hypervisorsnapshotreserve': 'hypervisor_snapshot_reserve',
            'iscustomized': 'is_customized',
            'iscustomizediops': 'is_iops_customized',
            'issystem': 'is_system',
            'isvolatile': 'is_volatile',
            'limitcpuuse': 'limit_cpu_usage',
            'memory': 'memory',
            'networkrate': 'network_rate',
            'offerha': 'offer_ha',
            'provisioningtype': 'provisioning_type',
            'serviceofferingdetails': 'service_offering_details',
            'storagetype': 'storage_type',
            'systemvmtype': 'system_vm_type',
            'tags': 'storage_tags',
        }

    def get_service_offering(self):
        args = {
            'name': self.module.params.get('name'),
            'domainid': self.get_domain(key='id'),
            'issystem': self.module.params.get('is_system'),
            'systemvmtype': self.module.params.get('system_vm_type'),
        }
        service_offerings = self.query_api('listServiceOfferings', **args)
        if service_offerings:
            return service_offerings['serviceoffering'][0]

    def present_service_offering(self):
        service_offering = self.get_service_offering()
        if not service_offering:
            service_offering = self._create_offering(service_offering)
        else:
            service_offering = self._update_offering(service_offering)

        return service_offering

    def absent_service_offering(self):
        service_offering = self.get_service_offering()
        if service_offering:
            self.result['changed'] = True
            if not self.module.check_mode:
                args = {
                    'id': service_offering['id'],
                }
                self.query_api('deleteServiceOffering', **args)
        return service_offering

    def _create_offering(self, service_offering):
        self.result['changed'] = True

        system_vm_type = self.module.params.get('system_vm_type')
        is_system = self.module.params.get('is_system')

        required_params = []
        if is_system and not system_vm_type:
            required_params.append('system_vm_type')
        self.module.fail_on_missing_params(required_params=required_params)

        args = {
            'name': self.module.params.get('name'),
            'displaytext': self.get_or_fallback('display_text', 'name'),
            'bytesreadrate': self.module.params.get('disk_bytes_read_rate'),
            'byteswriterate': self.module.params.get('disk_bytes_write_rate'),
            'cpunumber': self.module.params.get('cpu_number'),
            'cpuspeed': self.module.params.get('cpu_speed'),
            'customizediops': self.module.params.get('is_iops_customized'),
            'deploymentplanner': self.module.params.get('deployment_planner'),
            'domainid': self.get_domain(key='id'),
            'hosttags': self.module.params.get('host_tags'),
            'hypervisorsnapshotreserve': self.module.params.get('hypervisor_snapshot_reserve'),
            'iopsreadrate': self.module.params.get('disk_iops_read_rate'),
            'iopswriterate': self.module.params.get('disk_iops_write_rate'),
            'maxiops': self.module.params.get('disk_iops_max'),
            'miniops': self.module.params.get('disk_iops_min'),
            'issystem': is_system,
            'isvolatile': self.module.params.get('is_volatile'),
            'memory': self.module.params.get('memory'),
            'networkrate': self.module.params.get('network_rate'),
            'offerha': self.module.params.get('offer_ha'),
            'provisioningtype': self.module.params.get('provisioning_type'),
            'serviceofferingdetails': self.module.params.get('service_offering_details'),
            'storagetype': self.module.params.get('storage_type'),
            'systemvmtype': system_vm_type,
            'tags': self.module.params.get('storage_tags'),
            'limitcpuuse': self.module.params.get('limit_cpu_usage'),
            'customized': self.module.params.get('is_customized')
        }
        if not self.module.check_mode:
            res = self.query_api('createServiceOffering', **args)
            service_offering = res['serviceoffering']
        return service_offering

    def _update_offering(self, service_offering):
        args = {
            'id': service_offering['id'],
            'name': self.module.params.get('name'),
            'displaytext': self.get_or_fallback('display_text', 'name'),
        }
        if self.has_changed(args, service_offering):
            self.result['changed'] = True

            if not self.module.check_mode:
                res = self.query_api('updateServiceOffering', **args)
                service_offering = res['serviceoffering']
        return service_offering

    def get_result(self, service_offering):
        super(AnsibleCloudStackServiceOffering, self).get_result(service_offering)
        if service_offering:
            if 'hosttags' in service_offering:
                self.result['host_tags'] = service_offering['hosttags'].split(',')

            # Prevent confusion, the api returns a tags key for storage tags.
            if 'tags' in service_offering:
                self.result['storage_tags'] = service_offering['tags'].split(',')
            if 'tags' in self.result:
                del self.result['tags']

        return self.result


def main():
    argument_spec = cs_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True),
        display_text=dict(),
        cpu_number=dict(type='int'),
        cpu_speed=dict(type='int'),
        limit_cpu_usage=dict(type='bool'),
        deployment_planner=dict(),
        domain=dict(),
        host_tags=dict(type='list', aliases=['host_tag']),
        hypervisor_snapshot_reserve=dict(type='int'),
        disk_bytes_read_rate=dict(type='int', aliases=['bytes_read_rate']),
        disk_bytes_write_rate=dict(type='int', aliases=['bytes_write_rate']),
        disk_iops_read_rate=dict(type='int'),
        disk_iops_write_rate=dict(type='int'),
        disk_iops_max=dict(type='int'),
        disk_iops_min=dict(type='int'),
        is_system=dict(type='bool', default=False),
        is_volatile=dict(type='bool'),
        is_iops_customized=dict(type='bool', aliases=['disk_iops_customized']),
        memory=dict(type='int'),
        network_rate=dict(type='int'),
        offer_ha=dict(type='bool'),
        provisioning_type=dict(choices=['thin', 'sparse', 'fat']),
        service_offering_details=dict(type='list'),
        storage_type=dict(choices=['local', 'shared']),
        system_vm_type=dict(choices=['domainrouter', 'consoleproxy', 'secondarystoragevm']),
        storage_tags=dict(type='list', aliases=['storage_tag']),
        state=dict(choices=['present', 'absent'], default='present'),
        is_customized=dict(type='bool'),
    ))

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )

    acs_so = AnsibleCloudStackServiceOffering(module)

    state = module.params.get('state')
    if state == "absent":
        service_offering = acs_so.absent_service_offering()
    else:
        service_offering = acs_so.present_service_offering()

    result = acs_so.get_result(service_offering)
    module.exit_json(**result)


if __name__ == '__main__':
    main()
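
The conditional requirement above, where system_vm_type only becomes mandatory once is_system=yes, is enforced imperatively with fail_on_missing_params() rather than in the argument spec itself. A minimal, hypothetical standalone module showing just that pattern in isolation (not part of this commit):

#!/usr/bin/python
from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible.module_utils.basic import AnsibleModule


def main():
    module = AnsibleModule(
        argument_spec=dict(
            is_system=dict(type='bool', default=False),
            system_vm_type=dict(choices=['domainrouter', 'consoleproxy', 'secondarystoragevm']),
        ),
    )
    required_params = []
    if module.params['is_system'] and not module.params['system_vm_type']:
        required_params.append('system_vm_type')
    # Fails with "missing required arguments: ..." for every listed parameter
    # the user did not supply; a no-op when the list is empty.
    module.fail_on_missing_params(required_params=required_params)
    module.exit_json(changed=False)


if __name__ == '__main__':
    main()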
@@ -1,555 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2019, Hetzner Cloud GmbH <info@hetzner-cloud.de>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function

__metaclass__ = type

ANSIBLE_METADATA = {
    "metadata_version": "1.1",
    "status": ["preview"],
    "supported_by": "community",
}

DOCUMENTATION = """
---
module: hcloud_server

short_description: Create and manage cloud servers on the Hetzner Cloud.

version_added: "2.8"

description:
    - Create, update and manage cloud servers on the Hetzner Cloud.

author:
    - Lukas Kaemmerling (@LKaemmerling)

options:
    id:
        description:
            - The ID of the Hetzner Cloud server to manage.
            - Only required if no server I(name) is given.
        type: int
    name:
        description:
            - The name of the Hetzner Cloud server to manage.
            - Only required if no server I(id) is given or the server does not exist.
        type: str
    server_type:
        description:
            - The server type of the Hetzner Cloud server to manage.
            - Required if the server does not exist.
        type: str
    ssh_keys:
        description:
            - List of SSH key names.
            - The key names correspond to the SSH keys configured for your
              Hetzner Cloud account access.
        type: list
    volumes:
        description:
            - List of volume IDs that should be attached to the server on server creation.
        type: list
    image:
        description:
            - Image the server should be created from.
            - Required if the server does not exist.
        type: str
    location:
        description:
            - Location of the server.
            - Required if no I(datacenter) is given and the server does not exist.
        type: str
    datacenter:
        description:
            - Datacenter of the server.
            - Required if no I(location) is given and the server does not exist.
        type: str
    backups:
        description:
            - Enable or disable backups for the given server.
        type: bool
        default: no
    upgrade_disk:
        description:
            - Resize the disk size when resizing a server.
            - If you want to downgrade the server later, this value should be False.
        type: bool
        default: no
    force_upgrade:
        description:
            - Force the upgrade of the server.
            - Power off the server if it is running on upgrade.
        type: bool
        default: no
    user_data:
        description:
            - User data to be passed to the server on creation.
            - Only used if the server does not exist.
        type: str
    rescue_mode:
        description:
            - Add the Hetzner rescue system type you want the server to be booted into.
        type: str
        version_added: 2.9
    labels:
        description:
            - User-defined labels (key-value pairs).
        type: dict
    delete_protection:
        description:
            - Protect the server from deletion.
            - Needs to be the same as I(rebuild_protection).
        type: bool
        version_added: "2.10"
    rebuild_protection:
        description:
            - Protect the server from rebuild.
            - Needs to be the same as I(delete_protection).
        type: bool
        version_added: "2.10"
    state:
        description:
            - State of the server.
        default: present
        choices: [ absent, present, restarted, started, stopped, rebuild ]
        type: str
extends_documentation_fragment: hcloud
"""

EXAMPLES = """
- name: Create a basic server
  hcloud_server:
    name: my-server
    server_type: cx11
    image: ubuntu-18.04
    state: present

- name: Create a basic server with ssh key
  hcloud_server:
    name: my-server
    server_type: cx11
    image: ubuntu-18.04
    location: fsn1
    ssh_keys:
      - me@myorganisation
    state: present

- name: Resize an existing server
  hcloud_server:
    name: my-server
    server_type: cx21
    upgrade_disk: yes
    state: present

- name: Ensure the server is absent (remove if needed)
  hcloud_server:
    name: my-server
    state: absent

- name: Ensure the server is started
  hcloud_server:
    name: my-server
    state: started

- name: Ensure the server is stopped
  hcloud_server:
    name: my-server
    state: stopped

- name: Ensure the server is restarted
  hcloud_server:
    name: my-server
    state: restarted

- name: Ensure the server will be booted into rescue mode and therefore restarted
  hcloud_server:
    name: my-server
    rescue_mode: linux64
    state: restarted

- name: Ensure the server is rebuilt
  hcloud_server:
    name: my-server
    image: ubuntu-18.04
    state: rebuild
"""

RETURN = """
hcloud_server:
    description: The server instance
    returned: always
    type: complex
    contains:
        id:
            description: Numeric identifier of the server
            returned: always
            type: int
            sample: 1937415
        name:
            description: Name of the server
            returned: always
            type: str
            sample: my-server
        status:
            description: Status of the server
            returned: always
            type: str
            sample: running
        server_type:
            description: Name of the server type of the server
            returned: always
            type: str
            sample: cx11
        ipv4_address:
            description: Public IPv4 address of the server
            returned: always
            type: str
            sample: 116.203.104.109
        ipv6:
            description: IPv6 network of the server
            returned: always
            type: str
            sample: 2a01:4f8:1c1c:c140::/64
        location:
            description: Name of the location of the server
            returned: always
            type: str
            sample: fsn1
        datacenter:
            description: Name of the datacenter of the server
            returned: always
            type: str
            sample: fsn1-dc14
        rescue_enabled:
            description: True if rescue mode is enabled; the server will then boot into the rescue system on next reboot
            returned: always
            type: bool
            sample: false
        backup_window:
            description: Time window (UTC) in which the backup will run, or null if backups are not enabled
            returned: always
            type: str
            sample: 22-02
        labels:
            description: User-defined labels (key-value pairs)
            returned: always
            type: dict
        delete_protection:
            description: True if the server is protected from deletion
            type: bool
            returned: always
            sample: false
            version_added: "2.10"
        rebuild_protection:
            description: True if the server is protected from rebuild
            type: bool
            returned: always
            sample: false
            version_added: "2.10"
"""

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils._text import to_native
from ansible.module_utils.hcloud import Hcloud

try:
    from hcloud.volumes.domain import Volume
    from hcloud.ssh_keys.domain import SSHKey
    from hcloud.servers.domain import Server
    from hcloud import APIException
except ImportError:
    pass


class AnsibleHcloudServer(Hcloud):
    def __init__(self, module):
        Hcloud.__init__(self, module, "hcloud_server")
        self.hcloud_server = None

    def _prepare_result(self):
        image = None if self.hcloud_server.image is None else to_native(self.hcloud_server.image.name)
        return {
            "id": to_native(self.hcloud_server.id),
            "name": to_native(self.hcloud_server.name),
            "ipv4_address": to_native(self.hcloud_server.public_net.ipv4.ip),
            "ipv6": to_native(self.hcloud_server.public_net.ipv6.ip),
            "image": image,
            "server_type": to_native(self.hcloud_server.server_type.name),
            "datacenter": to_native(self.hcloud_server.datacenter.name),
            "location": to_native(self.hcloud_server.datacenter.location.name),
            "rescue_enabled": self.hcloud_server.rescue_enabled,
            "backup_window": to_native(self.hcloud_server.backup_window),
            "labels": self.hcloud_server.labels,
            "delete_protection": self.hcloud_server.protection["delete"],
            "rebuild_protection": self.hcloud_server.protection["rebuild"],
            "status": to_native(self.hcloud_server.status),
        }

    def _get_server(self):
        try:
            if self.module.params.get("id") is not None:
                self.hcloud_server = self.client.servers.get_by_id(
                    self.module.params.get("id")
                )
            else:
                self.hcloud_server = self.client.servers.get_by_name(
                    self.module.params.get("name")
                )
        except APIException as e:
            self.module.fail_json(msg=e.message)

    def _create_server(self):

        self.module.fail_on_missing_params(
            required_params=["name", "server_type", "image"]
        )
        params = {
            "name": self.module.params.get("name"),
            "server_type": self.client.server_types.get_by_name(
                self.module.params.get("server_type")
            ),
            "user_data": self.module.params.get("user_data"),
            "labels": self.module.params.get("labels"),
        }
        if self.client.images.get_by_name(self.module.params.get("image")) is not None:
            # When the image name is not available, look it up by id instead.
            params["image"] = self.client.images.get_by_name(self.module.params.get("image"))
        else:
            params["image"] = self.client.images.get_by_id(self.module.params.get("image"))

        if self.module.params.get("ssh_keys") is not None:
            params["ssh_keys"] = [
                SSHKey(name=ssh_key_name)
                for ssh_key_name in self.module.params.get("ssh_keys")
            ]

        if self.module.params.get("volumes") is not None:
            params["volumes"] = [
                Volume(id=volume_id) for volume_id in self.module.params.get("volumes")
            ]

        if self.module.params.get("location") is None and self.module.params.get("datacenter") is None:
            # When not given, the API will choose the location.
            params["location"] = None
            params["datacenter"] = None
        elif self.module.params.get("location") is not None and self.module.params.get("datacenter") is None:
            params["location"] = self.client.locations.get_by_name(
                self.module.params.get("location")
            )
        elif self.module.params.get("location") is None and self.module.params.get("datacenter") is not None:
            params["datacenter"] = self.client.datacenters.get_by_name(
                self.module.params.get("datacenter")
            )

        if not self.module.check_mode:
            resp = self.client.servers.create(**params)
            self.result["root_password"] = resp.root_password
            resp.action.wait_until_finished(max_retries=1000)
            [action.wait_until_finished() for action in resp.next_actions]

            rescue_mode = self.module.params.get("rescue_mode")
            if rescue_mode:
                self._get_server()
                self._set_rescue_mode(rescue_mode)

        self._mark_as_changed()
        self._get_server()

    def _update_server(self):
        try:
            rescue_mode = self.module.params.get("rescue_mode")
            if rescue_mode and self.hcloud_server.rescue_enabled is False:
                if not self.module.check_mode:
                    self._set_rescue_mode(rescue_mode)
                self._mark_as_changed()
            elif not rescue_mode and self.hcloud_server.rescue_enabled is True:
                if not self.module.check_mode:
                    self.hcloud_server.disable_rescue().wait_until_finished()
                self._mark_as_changed()

            if self.module.params.get("backups") and self.hcloud_server.backup_window is None:
                if not self.module.check_mode:
                    self.hcloud_server.enable_backup().wait_until_finished()
                self._mark_as_changed()
            elif not self.module.params.get("backups") and self.hcloud_server.backup_window is not None:
                if not self.module.check_mode:
                    self.hcloud_server.disable_backup().wait_until_finished()
                self._mark_as_changed()

            labels = self.module.params.get("labels")
            if labels is not None and labels != self.hcloud_server.labels:
                if not self.module.check_mode:
                    self.hcloud_server.update(labels=labels)
                self._mark_as_changed()

            server_type = self.module.params.get("server_type")
            if server_type is not None and self.hcloud_server.server_type.name != server_type:
                previous_server_status = self.hcloud_server.status
                state = self.module.params.get("state")
                if previous_server_status == Server.STATUS_RUNNING:
                    if not self.module.check_mode:
                        if self.module.params.get("force_upgrade") or state == "stopped":
                            self.stop_server()  # Only a stopped server can be upgraded.
                        else:
                            self.module.warn(
                                "You cannot upgrade a running instance %s. You need to stop the instance or use force_upgrade=yes."
                                % self.hcloud_server.name
                            )
                timeout = 100
                if self.module.params.get("upgrade_disk"):
                    # When the disk is upgraded too, the resize takes some more time.
                    timeout = 1000
                if not self.module.check_mode:
                    self.hcloud_server.change_type(
                        server_type=self.client.server_types.get_by_name(server_type),
                        upgrade_disk=self.module.params.get("upgrade_disk"),
                    ).wait_until_finished(timeout)
                    if state == "present" and previous_server_status == Server.STATUS_RUNNING or state == "started":
                        self.start_server()

                self._mark_as_changed()

            delete_protection = self.module.params.get("delete_protection")
            rebuild_protection = self.module.params.get("rebuild_protection")
            if (delete_protection is not None and rebuild_protection is not None) and (
                    delete_protection != self.hcloud_server.protection["delete"] or rebuild_protection !=
                    self.hcloud_server.protection["rebuild"]):
                if not self.module.check_mode:
                    self.hcloud_server.change_protection(delete=delete_protection,
                                                         rebuild=rebuild_protection).wait_until_finished()
                self._mark_as_changed()
            self._get_server()
        except APIException as e:
            self.module.fail_json(msg=e.message)

    def _set_rescue_mode(self, rescue_mode):
        if self.module.params.get("ssh_keys"):
            resp = self.hcloud_server.enable_rescue(type=rescue_mode,
                                                    ssh_keys=[self.client.ssh_keys.get_by_name(ssh_key_name).id
                                                              for ssh_key_name in
                                                              self.module.params.get("ssh_keys")])
        else:
            resp = self.hcloud_server.enable_rescue(type=rescue_mode)
        resp.action.wait_until_finished()
        self.result["root_password"] = resp.root_password

    def start_server(self):
        try:
            if self.hcloud_server.status != Server.STATUS_RUNNING:
                if not self.module.check_mode:
                    self.client.servers.power_on(self.hcloud_server).wait_until_finished()
                self._mark_as_changed()
            self._get_server()
        except APIException as e:
            self.module.fail_json(msg=e.message)

    def stop_server(self):
        try:
            if self.hcloud_server.status != Server.STATUS_OFF:
                if not self.module.check_mode:
                    self.client.servers.power_off(self.hcloud_server).wait_until_finished()
                self._mark_as_changed()
            self._get_server()
        except APIException as e:
            self.module.fail_json(msg=e.message)

    def rebuild_server(self):
        self.module.fail_on_missing_params(
            required_params=["image"]
        )
        try:
            if not self.module.check_mode:
                self.client.servers.rebuild(self.hcloud_server, self.client.images.get_by_name(
                    self.module.params.get("image"))).wait_until_finished()
            self._mark_as_changed()

            self._get_server()
        except APIException as e:
            self.module.fail_json(msg=e.message)

    def present_server(self):
        self._get_server()
        if self.hcloud_server is None:
            self._create_server()
        else:
            self._update_server()

    def delete_server(self):
        try:
            self._get_server()
            if self.hcloud_server is not None:
                if not self.module.check_mode:
                    self.client.servers.delete(self.hcloud_server).wait_until_finished()
                self._mark_as_changed()
            self.hcloud_server = None
        except APIException as e:
            self.module.fail_json(msg=e.message)

    @staticmethod
    def define_module():
        return AnsibleModule(
            argument_spec=dict(
                id={"type": "int"},
                name={"type": "str"},
                image={"type": "str"},
                server_type={"type": "str"},
                location={"type": "str"},
                datacenter={"type": "str"},
                user_data={"type": "str"},
                ssh_keys={"type": "list"},
                volumes={"type": "list"},
                labels={"type": "dict"},
                backups={"type": "bool", "default": False},
                upgrade_disk={"type": "bool", "default": False},
                force_upgrade={"type": "bool", "default": False},
                rescue_mode={"type": "str"},
                delete_protection={"type": "bool"},
                rebuild_protection={"type": "bool"},
                state={
                    "choices": ["absent", "present", "restarted", "started", "stopped", "rebuild"],
                    "default": "present",
                },
                **Hcloud.base_module_arguments()
            ),
            required_one_of=[['id', 'name']],
            mutually_exclusive=[["location", "datacenter"]],
            required_together=[["delete_protection", "rebuild_protection"]],
            supports_check_mode=True,
        )


def main():
    module = AnsibleHcloudServer.define_module()

    hcloud = AnsibleHcloudServer(module)
    state = module.params.get("state")
    if state == "absent":
        hcloud.delete_server()
    elif state == "present":
        hcloud.present_server()
    elif state == "started":
        hcloud.present_server()
        hcloud.start_server()
    elif state == "stopped":
        hcloud.present_server()
        hcloud.stop_server()
    elif state == "restarted":
        hcloud.present_server()
        hcloud.stop_server()
        hcloud.start_server()
    elif state == "rebuild":
        hcloud.present_server()
        hcloud.rebuild_server()

    module.exit_json(**hcloud.get_result())


if __name__ == "__main__":
    main()
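
define_module() above layers three cross-parameter checks on top of the per-option spec. A hedged sketch isolating them in a hypothetical standalone module (the option names are borrowed from the module above purely for illustration):

#!/usr/bin/python
from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible.module_utils.basic import AnsibleModule


def main():
    module = AnsibleModule(
        argument_spec=dict(
            id=dict(type='int'),
            name=dict(type='str'),
            location=dict(type='str'),
            datacenter=dict(type='str'),
            delete_protection=dict(type='bool'),
            rebuild_protection=dict(type='bool'),
        ),
        required_one_of=[['id', 'name']],  # at least one of the two must be given
        mutually_exclusive=[['location', 'datacenter']],  # never both at once
        required_together=[['delete_protection', 'rebuild_protection']],  # both or neither
    )
    module.exit_json(changed=False)


if __name__ == '__main__':
    main()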
@@ -1,134 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2018 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'certified'}


DOCUMENTATION = '''
---
module: nios_txt_record
version_added: "2.7"
author: "Corey Wanless (@coreywan)"
short_description: Configure Infoblox NIOS txt records
description:
  - Adds and/or removes instances of txt record objects from
    Infoblox NIOS servers. This module manages NIOS C(record:txt) objects
    using the Infoblox WAPI interface over REST.
requirements:
  - infoblox_client
extends_documentation_fragment: nios
options:
  name:
    description:
      - Specifies the fully qualified hostname to add or remove from
        the system
    required: true
  view:
    description:
      - Sets the DNS view to associate this txt record with. The DNS
        view must already be configured on the system
    required: true
    default: default
    aliases:
      - dns_view
  text:
    description:
      - Text associated with the record. It can contain up to 255 bytes
        per substring, up to a total of 512 bytes. To enter leading,
        trailing, or embedded spaces in the text, add quotes around the
        text to preserve the spaces.
    required: true
  ttl:
    description:
      - Configures the TTL to be associated with this txt record
  extattrs:
    description:
      - Allows for the configuration of Extensible Attributes on the
        instance of the object. This argument accepts a set of key / value
        pairs for configuration.
  comment:
    description:
      - Configures a text string comment to be associated with the instance
        of this object. The provided text string will be configured on the
        object instance.
  state:
    description:
      - Configures the intended state of the instance of the object on
        the NIOS server. When this value is set to C(present), the object
        is configured on the device and when this value is set to C(absent)
        the value is removed (if necessary) from the device.
    default: present
    choices:
      - present
      - absent
'''

EXAMPLES = '''
- name: Ensure a TXT record exists
  nios_txt_record:
    name: fqdn.txt.record.com
    text: mytext
    state: present
    view: External
    provider:
      host: "{{ inventory_hostname_short }}"
      username: admin
      password: admin

- name: Ensure a TXT record does not exist
  nios_txt_record:
    name: fqdn.txt.record.com
    text: mytext
    state: absent
    view: External
    provider:
      host: "{{ inventory_hostname_short }}"
      username: admin
      password: admin
'''

RETURN = ''' # '''

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.net_tools.nios.api import WapiModule


def main():
    ''' Main entry point for module execution
    '''

    ib_spec = dict(
        name=dict(required=True, ib_req=True),
        view=dict(default='default', aliases=['dns_view'], ib_req=True),
        text=dict(ib_req=True),
        ttl=dict(type='int'),
        extattrs=dict(type='dict'),
        comment=dict(),
    )

    argument_spec = dict(
        provider=dict(required=True),
        state=dict(default='present', choices=['present', 'absent'])
    )

    argument_spec.update(ib_spec)
    argument_spec.update(WapiModule.provider_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    wapi = WapiModule(module)
    result = wapi.run('record:txt', ib_spec)

    module.exit_json(**result)


if __name__ == '__main__':
    main()
@@ -1,228 +0,0 @@
#!/usr/bin/python
# Copyright (c) 2018 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'certified'}


DOCUMENTATION = '''
---
module: nios_zone
version_added: "2.5"
author: "Peter Sprygada (@privateip)"
short_description: Configure Infoblox NIOS DNS zones
description:
  - Adds and/or removes instances of DNS zone objects from
    Infoblox NIOS servers. This module manages NIOS C(zone_auth) objects
    using the Infoblox WAPI interface over REST.
requirements:
  - infoblox-client
extends_documentation_fragment: nios
options:
  fqdn:
    description:
      - Specifies the qualified domain name to either add or remove from
        the NIOS instance based on the configured C(state) value.
    required: true
    aliases:
      - name
  view:
    description:
      - Configures the DNS view name for the configured resource. The
        specified DNS zone must already exist on the running NIOS instance
        prior to configuring zones.
    required: true
    default: default
    aliases:
      - dns_view
  grid_primary:
    description:
      - Configures the grid primary servers for this zone.
    suboptions:
      name:
        description:
          - The name of the grid primary server
  grid_secondaries:
    description:
      - Configures the grid secondary servers for this zone.
    suboptions:
      name:
        description:
          - The name of the grid secondary server
  ns_group:
    version_added: "2.6"
    description:
      - Configures the name server group for this zone. Name server group is
        mutually exclusive with grid primary and grid secondaries.
  restart_if_needed:
    version_added: "2.6"
    description:
      - If set to true, causes the NIOS DNS service to restart and load the
        new zone configuration
    type: bool
  zone_format:
    version_added: "2.7"
    description:
      - Create an authoritative reverse-mapping zone, which is an area of network
        space for which one or more name servers (primary and secondary) have the
        responsibility to respond to address-to-name queries. It supports
        reverse-mapping zones for both IPv4 and IPv6 addresses.
    default: FORWARD
  extattrs:
    description:
      - Allows for the configuration of Extensible Attributes on the
        instance of the object. This argument accepts a set of key / value
        pairs for configuration.
  comment:
    description:
      - Configures a text string comment to be associated with the instance
        of this object. The provided text string will be configured on the
        object instance.
  state:
    description:
      - Configures the intended state of the instance of the object on
        the NIOS server. When this value is set to C(present), the object
        is configured on the device and when this value is set to C(absent)
        the value is removed (if necessary) from the device.
    default: present
    choices:
      - present
      - absent
'''

EXAMPLES = '''
- name: configure a zone on the system using grid primary and secondaries
  nios_zone:
    name: ansible.com
    grid_primary:
      - name: gridprimary.grid.com
    grid_secondaries:
      - name: gridsecondary1.grid.com
      - name: gridsecondary2.grid.com
    restart_if_needed: true
    state: present
    provider:
      host: "{{ inventory_hostname_short }}"
      username: admin
      password: admin
  connection: local
- name: configure a zone on the system using a name server group
  nios_zone:
    name: ansible.com
    ns_group: examplensg
    restart_if_needed: true
    state: present
    provider:
      host: "{{ inventory_hostname_short }}"
      username: admin
      password: admin
  connection: local
- name: configure a reverse mapping zone on the system using IPV4 zone format
  nios_zone:
    name: 10.10.10.0/24
    zone_format: IPV4
    state: present
    provider:
      host: "{{ inventory_hostname_short }}"
      username: admin
      password: admin
  connection: local
- name: configure a reverse mapping zone on the system using IPV6 zone format
  nios_zone:
    name: 100::1/128
    zone_format: IPV6
    state: present
    provider:
      host: "{{ inventory_hostname_short }}"
      username: admin
      password: admin
  connection: local
- name: update the comment and ext attributes for an existing zone
  nios_zone:
    name: ansible.com
    comment: this is an example comment
    extattrs:
      Site: west-dc
    state: present
    provider:
      host: "{{ inventory_hostname_short }}"
      username: admin
      password: admin
  connection: local
- name: remove the dns zone
  nios_zone:
    name: ansible.com
    state: absent
    provider:
      host: "{{ inventory_hostname_short }}"
      username: admin
      password: admin
  connection: local
- name: remove the reverse mapping dns zone from the system with IPV4 zone format
  nios_zone:
    name: 10.10.10.0/24
    zone_format: IPV4
    state: absent
    provider:
      host: "{{ inventory_hostname_short }}"
      username: admin
      password: admin
  connection: local
'''

RETURN = ''' # '''

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.net_tools.nios.api import WapiModule
from ansible.module_utils.net_tools.nios.api import NIOS_ZONE


def main():
    ''' Main entry point for module execution
    '''
    grid_spec = dict(
        name=dict(required=True),
    )

    ib_spec = dict(
        fqdn=dict(required=True, aliases=['name'], ib_req=True, update=False),
        zone_format=dict(default='FORWARD', ib_req=False),
        view=dict(default='default', aliases=['dns_view'], ib_req=True),

        grid_primary=dict(type='list', elements='dict', options=grid_spec),
        grid_secondaries=dict(type='list', elements='dict', options=grid_spec),
        ns_group=dict(),
        restart_if_needed=dict(type='bool'),

        extattrs=dict(type='dict'),
        comment=dict()
    )

    argument_spec = dict(
        provider=dict(required=True),
        state=dict(default='present', choices=['present', 'absent'])
    )

    argument_spec.update(ib_spec)
    argument_spec.update(WapiModule.provider_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True,
                           mutually_exclusive=[
                               ['ns_group', 'grid_primary'],
                               ['ns_group', 'grid_secondaries']
                           ])

    wapi = WapiModule(module)
    result = wapi.run(NIOS_ZONE, ib_spec)

    module.exit_json(**result)


if __name__ == '__main__':
    main()
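
grid_primary and grid_secondaries above are validated as lists of dicts via options=grid_spec, so each list item is checked against a sub-spec. A minimal sketch of that suboption validation on its own (a hypothetical module, not part of this commit):

#!/usr/bin/python
from __future__ import absolute_import, division, print_function
__metaclass__ = type

from ansible.module_utils.basic import AnsibleModule


def main():
    module = AnsibleModule(
        argument_spec=dict(
            grid_primary=dict(
                type='list',
                elements='dict',  # each list item must be a mapping...
                options=dict(     # ...and is validated against this sub-spec
                    name=dict(required=True),
                ),
            ),
        ),
    )
    module.exit_json(changed=False, grid_primary=module.params['grid_primary'])


if __name__ == '__main__':
    main()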
File diff suppressed because it is too large
@@ -1,864 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2016-2017, Yanis Guenane <yanis+ansible@guenane.org>
# Copyright: (c) 2017, Markus Teufelberger <mteufelberger+ansible@mgit.at>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = r'''
---
module: openssl_certificate_info
version_added: '2.8'
short_description: Provide information of OpenSSL X.509 certificates
description:
    - This module allows one to query information on OpenSSL certificates.
    - It uses the pyOpenSSL or cryptography python library to interact with OpenSSL. If both the
      cryptography and PyOpenSSL libraries are available (and meet the minimum version requirements)
      cryptography will be preferred as a backend over PyOpenSSL (unless the backend is forced with
      C(select_crypto_backend)). Please note that the PyOpenSSL backend was deprecated in Ansible 2.9
      and will be removed in Ansible 2.13.
requirements:
    - PyOpenSSL >= 0.15 or cryptography >= 1.6
author:
  - Felix Fontein (@felixfontein)
  - Yanis Guenane (@Spredzy)
  - Markus Teufelberger (@MarkusTeufelberger)
options:
    path:
        description:
            - Remote absolute path where the certificate file is loaded from.
            - Either I(path) or I(content) must be specified, but not both.
        type: path
    content:
        description:
            - Content of the X.509 certificate in PEM format.
            - Either I(path) or I(content) must be specified, but not both.
        type: str
        version_added: "2.10"
    valid_at:
        description:
            - A dict of names mapping to time specifications. Every time specified here
              will be checked whether the certificate is valid at this point. See the
              C(valid_at) return value for information on the result.
            - Time can be specified either as relative time or as absolute timestamp.
            - Time will always be interpreted as UTC.
            - Valid format is C([+-]timespec | ASN.1 TIME), where timespec is an integer
              followed by one of C([w | d | h | m | s]) (e.g. C(+32w1d2h)), and ASN.1 TIME
              follows the pattern C(YYYYMMDDHHMMSSZ). Note that all timestamps will be
              treated as being in UTC.
        type: dict
    select_crypto_backend:
        description:
            - Determines which crypto backend to use.
            - The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
            - If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
            - If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
            - Please note that the C(pyopenssl) backend has been deprecated in Ansible 2.9, and will be removed in Ansible 2.13.
              From that point on, only the C(cryptography) backend will be available.
        type: str
        default: auto
        choices: [ auto, cryptography, pyopenssl ]

notes:
    - All timestamp values are provided in ASN.1 TIME format, i.e. following the C(YYYYMMDDHHMMSSZ) pattern.
      They are all in UTC.
seealso:
- module: openssl_certificate
'''

EXAMPLES = r'''
- name: Generate a Self Signed OpenSSL certificate
  openssl_certificate:
    path: /etc/ssl/crt/ansible.com.crt
    privatekey_path: /etc/ssl/private/ansible.com.pem
    csr_path: /etc/ssl/csr/ansible.com.csr
    provider: selfsigned


# Get information on the certificate

- name: Get information on generated certificate
  openssl_certificate_info:
    path: /etc/ssl/crt/ansible.com.crt
  register: result

- name: Dump information
  debug:
    var: result


# Check whether the certificate is valid or not valid at certain times, fail
# if this is not the case. The first task (openssl_certificate_info) collects
# the information, and the second task (assert) validates the result and
# makes the playbook fail in case something is not as expected.

- name: Test whether that certificate is valid tomorrow and/or in three weeks
  openssl_certificate_info:
    path: /etc/ssl/crt/ansible.com.crt
    valid_at:
      point_1: "+1d"
      point_2: "+3w"
  register: result

- name: Validate that certificate is valid tomorrow, but not in three weeks
  assert:
    that:
      - result.valid_at.point_1  # valid in one day
      - not result.valid_at.point_2  # not valid in three weeks
'''

RETURN = r'''
expired:
    description: Whether the certificate is expired (i.e. C(notAfter) is in the past)
    returned: success
    type: bool
basic_constraints:
    description: Entries in the C(basic_constraints) extension, or C(none) if extension is not present.
    returned: success
    type: list
    elements: str
    sample: "[CA:TRUE, pathlen:1]"
basic_constraints_critical:
    description: Whether the C(basic_constraints) extension is critical.
    returned: success
    type: bool
extended_key_usage:
    description: Entries in the C(extended_key_usage) extension, or C(none) if extension is not present.
    returned: success
    type: list
    elements: str
    sample: "[Biometric Info, DVCS, Time Stamping]"
extended_key_usage_critical:
    description: Whether the C(extended_key_usage) extension is critical.
    returned: success
    type: bool
extensions_by_oid:
    description: Returns a dictionary for every extension OID
    returned: success
    type: dict
    contains:
        critical:
            description: Whether the extension is critical.
            returned: success
            type: bool
        value:
            description: The Base64 encoded value (in DER format) of the extension
            returned: success
            type: str
            sample: "MAMCAQU="
    sample: '{"1.3.6.1.5.5.7.1.24": { "critical": false, "value": "MAMCAQU="}}'
key_usage:
    description: Entries in the C(key_usage) extension, or C(none) if extension is not present.
    returned: success
    type: str
    sample: "[Key Agreement, Data Encipherment]"
key_usage_critical:
    description: Whether the C(key_usage) extension is critical.
    returned: success
    type: bool
subject_alt_name:
    description: Entries in the C(subject_alt_name) extension, or C(none) if extension is not present.
    returned: success
    type: list
    elements: str
    sample: "[DNS:www.ansible.com, IP:1.2.3.4]"
subject_alt_name_critical:
    description: Whether the C(subject_alt_name) extension is critical.
    returned: success
    type: bool
ocsp_must_staple:
    description: C(yes) if the OCSP Must Staple extension is present, C(none) otherwise.
    returned: success
    type: bool
ocsp_must_staple_critical:
    description: Whether the C(ocsp_must_staple) extension is critical.
    returned: success
    type: bool
issuer:
    description:
        - The certificate's issuer.
        - Note that for repeated values, only the last one will be returned.
    returned: success
    type: dict
    sample: '{"organizationName": "Ansible", "commonName": "ca.example.com"}'
issuer_ordered:
    description: The certificate's issuer as an ordered list of tuples.
    returned: success
    type: list
    elements: list
    sample: '[["organizationName", "Ansible"], ["commonName", "ca.example.com"]]'
    version_added: "2.9"
subject:
    description:
        - The certificate's subject as a dictionary.
        - Note that for repeated values, only the last one will be returned.
    returned: success
    type: dict
    sample: '{"commonName": "www.example.com", "emailAddress": "test@example.com"}'
subject_ordered:
    description: The certificate's subject as an ordered list of tuples.
    returned: success
    type: list
    elements: list
    sample: '[["commonName", "www.example.com"], ["emailAddress", "test@example.com"]]'
    version_added: "2.9"
not_after:
    description: C(notAfter) date as ASN.1 TIME
    returned: success
    type: str
    sample: 20190413202428Z
not_before:
    description: C(notBefore) date as ASN.1 TIME
    returned: success
    type: str
    sample: 20190331202428Z
public_key:
    description: Certificate's public key in PEM format
    returned: success
    type: str
    sample: "-----BEGIN PUBLIC KEY-----\nMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A..."
public_key_fingerprints:
    description:
        - Fingerprints of certificate's public key.
        - For every hash algorithm available, the fingerprint is computed.
    returned: success
    type: dict
    sample: "{'sha256': 'd4:b3:aa:6d:c8:04:ce:4e:ba:f6:29:4d:92:a3:94:b0:c2:ff:bd:bf:33:63:11:43:34:0f:51:b0:95:09:2f:63',
              'sha512': 'f7:07:4a:f0:b0:f0:e6:8b:95:5f:f9:e6:61:0a:32:68:f1..."
signature_algorithm:
    description: The signature algorithm used to sign the certificate.
    returned: success
    type: str
    sample: sha256WithRSAEncryption
serial_number:
    description: The certificate's serial number.
    returned: success
    type: int
    sample: 1234
version:
    description: The certificate version.
    returned: success
    type: int
    sample: 3
valid_at:
    description: For every time stamp provided in the I(valid_at) option, a
                 boolean whether the certificate is valid at that point in time
                 or not.
    returned: success
    type: dict
subject_key_identifier:
    description:
        - The certificate's subject key identifier.
        - The identifier is returned in hexadecimal, with C(:) used to separate bytes.
        - Is C(none) if the C(SubjectKeyIdentifier) extension is not present.
    returned: success and if the pyOpenSSL backend is I(not) used
    type: str
    sample: '00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff:00:11:22:33'
    version_added: "2.9"
authority_key_identifier:
    description:
        - The certificate's authority key identifier.
        - The identifier is returned in hexadecimal, with C(:) used to separate bytes.
        - Is C(none) if the C(AuthorityKeyIdentifier) extension is not present.
    returned: success and if the pyOpenSSL backend is I(not) used
    type: str
    sample: '00:11:22:33:44:55:66:77:88:99:aa:bb:cc:dd:ee:ff:00:11:22:33'
    version_added: "2.9"
authority_cert_issuer:
    description:
        - The certificate's authority cert issuer as a list of general names.
        - Is C(none) if the C(AuthorityKeyIdentifier) extension is not present.
    returned: success and if the pyOpenSSL backend is I(not) used
    type: list
    elements: str
    sample: "[DNS:www.ansible.com, IP:1.2.3.4]"
    version_added: "2.9"
authority_cert_serial_number:
    description:
        - The certificate's authority cert serial number.
        - Is C(none) if the C(AuthorityKeyIdentifier) extension is not present.
    returned: success and if the pyOpenSSL backend is I(not) used
    type: int
    sample: 12345
    version_added: "2.9"
ocsp_uri:
    description: The OCSP responder URI, if included in the certificate. Will be
                 C(none) if no OCSP responder URI is included.
    returned: success
    type: str
    version_added: "2.9"
'''
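
# Illustrative sketch (not part of the original module): the relative form of
# the I(valid_at) values documented above ("+32w1d2h" style) can be turned into
# an absolute UTC datetime along these lines; the helper name is hypothetical.
#
#     import re
#     from datetime import datetime, timedelta
#
#     UNITS = {'w': 'weeks', 'd': 'days', 'h': 'hours', 'm': 'minutes', 's': 'seconds'}
#
#     def parse_relative_timespec(spec, now=None):
#         """Turn e.g. '+32w1d2h' into an absolute UTC datetime."""
#         now = now or datetime.utcnow()
#         sign = -1 if spec.startswith('-') else 1
#         delta = timedelta()
#         for value, unit in re.findall(r'(\d+)([wdhms])', spec):
#             delta += timedelta(**{UNITS[unit]: int(value)})
#         return now + sign * delta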
|
||||
|
||||
|
||||
import abc
import binascii
import datetime
import os
import re
import traceback
from distutils.version import LooseVersion

from ansible.module_utils import crypto as crypto_utils
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_native, to_text, to_bytes
from ansible.module_utils.compat import ipaddress as compat_ipaddress

MINIMAL_CRYPTOGRAPHY_VERSION = '1.6'
MINIMAL_PYOPENSSL_VERSION = '0.15'

PYOPENSSL_IMP_ERR = None
try:
    import OpenSSL
    from OpenSSL import crypto
    PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
    if OpenSSL.SSL.OPENSSL_VERSION_NUMBER >= 0x10100000:
        # OpenSSL 1.1.0 or newer
        OPENSSL_MUST_STAPLE_NAME = b"tlsfeature"
        OPENSSL_MUST_STAPLE_VALUE = b"status_request"
    else:
        # OpenSSL 1.0.x or older
        OPENSSL_MUST_STAPLE_NAME = b"1.3.6.1.5.5.7.1.24"
        OPENSSL_MUST_STAPLE_VALUE = b"DER:30:03:02:01:05"
except ImportError:
    PYOPENSSL_IMP_ERR = traceback.format_exc()
    PYOPENSSL_FOUND = False
else:
    PYOPENSSL_FOUND = True

CRYPTOGRAPHY_IMP_ERR = None
try:
    import cryptography
    from cryptography import x509
    from cryptography.hazmat.primitives import serialization
    CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
    CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
    CRYPTOGRAPHY_FOUND = False
else:
    CRYPTOGRAPHY_FOUND = True


TIMESTAMP_FORMAT = "%Y%m%d%H%M%SZ"


class CertificateInfo(crypto_utils.OpenSSLObject):
    def __init__(self, module, backend):
        super(CertificateInfo, self).__init__(
            module.params['path'] or '',
            'present',
            False,
            module.check_mode,
        )
        self.backend = backend
        self.module = module
        self.content = module.params['content']
        if self.content is not None:
            self.content = self.content.encode('utf-8')

        self.valid_at = module.params['valid_at']
        if self.valid_at:
            for k, v in self.valid_at.items():
                if not isinstance(v, string_types):
                    self.module.fail_json(
                        msg='The value for valid_at.{0} must be of type string (got {1})'.format(k, type(v))
                    )
                self.valid_at[k] = crypto_utils.get_relative_time_option(v, 'valid_at.{0}'.format(k))

    def generate(self):
        # Empty method because crypto_utils.OpenSSLObject wants this
        pass

    def dump(self):
        # Empty method because crypto_utils.OpenSSLObject wants this
        pass

    @abc.abstractmethod
    def _get_signature_algorithm(self):
        pass

    @abc.abstractmethod
    def _get_subject_ordered(self):
        pass

    @abc.abstractmethod
    def _get_issuer_ordered(self):
        pass

    @abc.abstractmethod
    def _get_version(self):
        pass

    @abc.abstractmethod
    def _get_key_usage(self):
        pass

    @abc.abstractmethod
    def _get_extended_key_usage(self):
        pass

    @abc.abstractmethod
    def _get_basic_constraints(self):
        pass

    @abc.abstractmethod
    def _get_ocsp_must_staple(self):
        pass

    @abc.abstractmethod
    def _get_subject_alt_name(self):
        pass

    @abc.abstractmethod
    def _get_not_before(self):
        pass

    @abc.abstractmethod
    def _get_not_after(self):
        pass

    @abc.abstractmethod
    def _get_public_key(self, binary):
        pass

    @abc.abstractmethod
    def _get_subject_key_identifier(self):
        pass

    @abc.abstractmethod
    def _get_authority_key_identifier(self):
        pass

    @abc.abstractmethod
    def _get_serial_number(self):
        pass

    @abc.abstractmethod
    def _get_all_extensions(self):
        pass

    @abc.abstractmethod
    def _get_ocsp_uri(self):
        pass

    def get_info(self):
        result = dict()
        self.cert = crypto_utils.load_certificate(self.path, content=self.content, backend=self.backend)

        result['signature_algorithm'] = self._get_signature_algorithm()
        subject = self._get_subject_ordered()
        issuer = self._get_issuer_ordered()
        result['subject'] = dict()
        for k, v in subject:
            result['subject'][k] = v
        result['subject_ordered'] = subject
        result['issuer'] = dict()
        for k, v in issuer:
            result['issuer'][k] = v
        result['issuer_ordered'] = issuer
        result['version'] = self._get_version()
        result['key_usage'], result['key_usage_critical'] = self._get_key_usage()
        result['extended_key_usage'], result['extended_key_usage_critical'] = self._get_extended_key_usage()
        result['basic_constraints'], result['basic_constraints_critical'] = self._get_basic_constraints()
        result['ocsp_must_staple'], result['ocsp_must_staple_critical'] = self._get_ocsp_must_staple()
        result['subject_alt_name'], result['subject_alt_name_critical'] = self._get_subject_alt_name()

        not_before = self._get_not_before()
        not_after = self._get_not_after()
        result['not_before'] = not_before.strftime(TIMESTAMP_FORMAT)
        result['not_after'] = not_after.strftime(TIMESTAMP_FORMAT)
        result['expired'] = not_after < datetime.datetime.utcnow()

        result['valid_at'] = dict()
        if self.valid_at:
            for k, v in self.valid_at.items():
                result['valid_at'][k] = not_before <= v <= not_after

        result['public_key'] = self._get_public_key(binary=False)
        pk = self._get_public_key(binary=True)
        result['public_key_fingerprints'] = crypto_utils.get_fingerprint_of_bytes(pk) if pk is not None else dict()

        if self.backend != 'pyopenssl':
            ski = self._get_subject_key_identifier()
            if ski is not None:
                ski = to_native(binascii.hexlify(ski))
                ski = ':'.join([ski[i:i + 2] for i in range(0, len(ski), 2)])
            result['subject_key_identifier'] = ski

            aki, aci, acsn = self._get_authority_key_identifier()
            if aki is not None:
                aki = to_native(binascii.hexlify(aki))
                aki = ':'.join([aki[i:i + 2] for i in range(0, len(aki), 2)])
            result['authority_key_identifier'] = aki
            result['authority_cert_issuer'] = aci
            result['authority_cert_serial_number'] = acsn

        result['serial_number'] = self._get_serial_number()
        result['extensions_by_oid'] = self._get_all_extensions()
        result['ocsp_uri'] = self._get_ocsp_uri()

        return result


class CertificateInfoCryptography(CertificateInfo):
    """Validate the supplied cert, using the cryptography backend"""
    def __init__(self, module):
        super(CertificateInfoCryptography, self).__init__(module, 'cryptography')

    def _get_signature_algorithm(self):
        return crypto_utils.cryptography_oid_to_name(self.cert.signature_algorithm_oid)

    def _get_subject_ordered(self):
        result = []
        for attribute in self.cert.subject:
            result.append([crypto_utils.cryptography_oid_to_name(attribute.oid), attribute.value])
        return result

    def _get_issuer_ordered(self):
        result = []
        for attribute in self.cert.issuer:
            result.append([crypto_utils.cryptography_oid_to_name(attribute.oid), attribute.value])
        return result

    def _get_version(self):
        if self.cert.version == x509.Version.v1:
            return 1
        if self.cert.version == x509.Version.v3:
            return 3
        return "unknown"

    def _get_key_usage(self):
        try:
            current_key_ext = self.cert.extensions.get_extension_for_class(x509.KeyUsage)
            current_key_usage = current_key_ext.value
            key_usage = dict(
                digital_signature=current_key_usage.digital_signature,
                content_commitment=current_key_usage.content_commitment,
                key_encipherment=current_key_usage.key_encipherment,
                data_encipherment=current_key_usage.data_encipherment,
                key_agreement=current_key_usage.key_agreement,
                key_cert_sign=current_key_usage.key_cert_sign,
                crl_sign=current_key_usage.crl_sign,
                encipher_only=False,
                decipher_only=False,
            )
            if key_usage['key_agreement']:
                key_usage.update(dict(
                    encipher_only=current_key_usage.encipher_only,
                    decipher_only=current_key_usage.decipher_only
                ))

            key_usage_names = dict(
                digital_signature='Digital Signature',
                content_commitment='Non Repudiation',
                key_encipherment='Key Encipherment',
                data_encipherment='Data Encipherment',
                key_agreement='Key Agreement',
                key_cert_sign='Certificate Sign',
                crl_sign='CRL Sign',
                encipher_only='Encipher Only',
                decipher_only='Decipher Only',
            )
            return sorted([
                key_usage_names[name] for name, value in key_usage.items() if value
            ]), current_key_ext.critical
        except cryptography.x509.ExtensionNotFound:
            return None, False

    def _get_extended_key_usage(self):
        try:
            ext_keyusage_ext = self.cert.extensions.get_extension_for_class(x509.ExtendedKeyUsage)
            return sorted([
                crypto_utils.cryptography_oid_to_name(eku) for eku in ext_keyusage_ext.value
            ]), ext_keyusage_ext.critical
        except cryptography.x509.ExtensionNotFound:
            return None, False

    def _get_basic_constraints(self):
        try:
            ext_keyusage_ext = self.cert.extensions.get_extension_for_class(x509.BasicConstraints)
            result = []
            result.append('CA:{0}'.format('TRUE' if ext_keyusage_ext.value.ca else 'FALSE'))
            if ext_keyusage_ext.value.path_length is not None:
                result.append('pathlen:{0}'.format(ext_keyusage_ext.value.path_length))
            return sorted(result), ext_keyusage_ext.critical
        except cryptography.x509.ExtensionNotFound:
            return None, False

    def _get_ocsp_must_staple(self):
        try:
            try:
                # This only works with cryptography >= 2.1
                tlsfeature_ext = self.cert.extensions.get_extension_for_class(x509.TLSFeature)
                value = cryptography.x509.TLSFeatureType.status_request in tlsfeature_ext.value
            except AttributeError as dummy:
                # Fallback for cryptography < 2.1
                oid = x509.oid.ObjectIdentifier("1.3.6.1.5.5.7.1.24")
                tlsfeature_ext = self.cert.extensions.get_extension_for_oid(oid)
                value = tlsfeature_ext.value.value == b"\x30\x03\x02\x01\x05"
            return value, tlsfeature_ext.critical
        except cryptography.x509.ExtensionNotFound:
            return None, False

    def _get_subject_alt_name(self):
        try:
            san_ext = self.cert.extensions.get_extension_for_class(x509.SubjectAlternativeName)
            result = [crypto_utils.cryptography_decode_name(san) for san in san_ext.value]
            return result, san_ext.critical
        except cryptography.x509.ExtensionNotFound:
            return None, False

    def _get_not_before(self):
        return self.cert.not_valid_before

    def _get_not_after(self):
        return self.cert.not_valid_after

    def _get_public_key(self, binary):
        return self.cert.public_key().public_bytes(
            serialization.Encoding.DER if binary else serialization.Encoding.PEM,
            serialization.PublicFormat.SubjectPublicKeyInfo
        )

    def _get_subject_key_identifier(self):
        try:
            ext = self.cert.extensions.get_extension_for_class(x509.SubjectKeyIdentifier)
            return ext.value.digest
        except cryptography.x509.ExtensionNotFound:
            return None

    def _get_authority_key_identifier(self):
        try:
            ext = self.cert.extensions.get_extension_for_class(x509.AuthorityKeyIdentifier)
            issuer = None
            if ext.value.authority_cert_issuer is not None:
                issuer = [crypto_utils.cryptography_decode_name(san) for san in ext.value.authority_cert_issuer]
            return ext.value.key_identifier, issuer, ext.value.authority_cert_serial_number
        except cryptography.x509.ExtensionNotFound:
            return None, None, None

    def _get_serial_number(self):
        return self.cert.serial_number

    def _get_all_extensions(self):
        return crypto_utils.cryptography_get_extensions_from_cert(self.cert)

    def _get_ocsp_uri(self):
        try:
            ext = self.cert.extensions.get_extension_for_class(x509.AuthorityInformationAccess)
            for desc in ext.value:
                if desc.access_method == x509.oid.AuthorityInformationAccessOID.OCSP:
                    if isinstance(desc.access_location, x509.UniformResourceIdentifier):
                        return desc.access_location.value
        except x509.ExtensionNotFound as dummy:
            pass
        return None


class CertificateInfoPyOpenSSL(CertificateInfo):
    """Validate the supplied certificate, using the pyOpenSSL backend."""

    def __init__(self, module):
        super(CertificateInfoPyOpenSSL, self).__init__(module, 'pyopenssl')

    def _get_signature_algorithm(self):
        return to_text(self.cert.get_signature_algorithm())

    def __get_name(self, name):
        result = []
        for sub in name.get_components():
            result.append([crypto_utils.pyopenssl_normalize_name(sub[0]), to_text(sub[1])])
        return result

    def _get_subject_ordered(self):
        return self.__get_name(self.cert.get_subject())

    def _get_issuer_ordered(self):
        return self.__get_name(self.cert.get_issuer())

    def _get_version(self):
        # Version numbers in certs are off by one:
        # v1: 0, v2: 1, v3: 2 ...
        return self.cert.get_version() + 1

    def _get_extension(self, short_name):
        for extension_idx in range(0, self.cert.get_extension_count()):
            extension = self.cert.get_extension(extension_idx)
            if extension.get_short_name() == short_name:
                result = [
                    crypto_utils.pyopenssl_normalize_name(usage.strip()) for usage in to_text(extension, errors='surrogate_or_strict').split(',')
                ]
                return sorted(result), bool(extension.get_critical())
        return None, False

    def _get_key_usage(self):
        return self._get_extension(b'keyUsage')

    def _get_extended_key_usage(self):
        return self._get_extension(b'extendedKeyUsage')

    def _get_basic_constraints(self):
        return self._get_extension(b'basicConstraints')

    def _get_ocsp_must_staple(self):
        extensions = [self.cert.get_extension(i) for i in range(0, self.cert.get_extension_count())]
        oms_ext = [
            ext for ext in extensions
            if to_bytes(ext.get_short_name()) == OPENSSL_MUST_STAPLE_NAME and to_bytes(ext) == OPENSSL_MUST_STAPLE_VALUE
        ]
        if OpenSSL.SSL.OPENSSL_VERSION_NUMBER < 0x10100000:
            # Older versions of libssl don't know about OCSP Must Staple
            oms_ext.extend([ext for ext in extensions if ext.get_short_name() == b'UNDEF' and ext.get_data() == b'\x30\x03\x02\x01\x05'])
        if oms_ext:
            return True, bool(oms_ext[0].get_critical())
        else:
            return None, False

    def _normalize_san(self, san):
        if san.startswith('IP Address:'):
            san = 'IP:' + san[len('IP Address:'):]
        if san.startswith('IP:'):
            ip = compat_ipaddress.ip_address(san[3:])
            san = 'IP:{0}'.format(ip.compressed)
        return san

    def _get_subject_alt_name(self):
        for extension_idx in range(0, self.cert.get_extension_count()):
            extension = self.cert.get_extension(extension_idx)
            if extension.get_short_name() == b'subjectAltName':
                result = [self._normalize_san(altname.strip()) for altname in
                          to_text(extension, errors='surrogate_or_strict').split(', ')]
                return result, bool(extension.get_critical())
        return None, False

    def _get_not_before(self):
        time_string = to_native(self.cert.get_notBefore())
        return datetime.datetime.strptime(time_string, "%Y%m%d%H%M%SZ")

    def _get_not_after(self):
        time_string = to_native(self.cert.get_notAfter())
        return datetime.datetime.strptime(time_string, "%Y%m%d%H%M%SZ")

    def _get_public_key(self, binary):
        try:
            return crypto.dump_publickey(
                crypto.FILETYPE_ASN1 if binary else crypto.FILETYPE_PEM,
                self.cert.get_pubkey()
            )
        except AttributeError:
            try:
                # pyOpenSSL < 16.0:
                bio = crypto._new_mem_buf()
                if binary:
                    rc = crypto._lib.i2d_PUBKEY_bio(bio, self.cert.get_pubkey()._pkey)
                else:
                    rc = crypto._lib.PEM_write_bio_PUBKEY(bio, self.cert.get_pubkey()._pkey)
                if rc != 1:
                    crypto._raise_current_error()
                return crypto._bio_to_string(bio)
            except AttributeError:
                self.module.warn('Your pyOpenSSL version does not support dumping public keys. '
                                 'Please upgrade to version 16.0 or newer, or use the cryptography backend.')

    def _get_subject_key_identifier(self):
        # Won't be implemented
        return None

    def _get_authority_key_identifier(self):
        # Won't be implemented
        return None, None, None

    def _get_serial_number(self):
        return self.cert.get_serial_number()

    def _get_all_extensions(self):
        return crypto_utils.pyopenssl_get_extensions_from_cert(self.cert)

    def _get_ocsp_uri(self):
        for i in range(self.cert.get_extension_count()):
            ext = self.cert.get_extension(i)
            if ext.get_short_name() == b'authorityInfoAccess':
                v = str(ext)
                m = re.search('^OCSP - URI:(.*)$', v, flags=re.MULTILINE)
                if m:
                    return m.group(1)
        return None


def main():
    module = AnsibleModule(
        argument_spec=dict(
            path=dict(type='path'),
            content=dict(type='str'),
            valid_at=dict(type='dict'),
            select_crypto_backend=dict(type='str', default='auto', choices=['auto', 'cryptography', 'pyopenssl']),
        ),
        required_one_of=(
            ['path', 'content'],
        ),
        mutually_exclusive=(
            ['path', 'content'],
        ),
        supports_check_mode=True,
    )

    try:
        if module.params['path'] is not None:
            base_dir = os.path.dirname(module.params['path']) or '.'
            if not os.path.isdir(base_dir):
                module.fail_json(
                    name=base_dir,
                    msg='The directory %s does not exist or the path is not a directory' % base_dir
                )

        backend = module.params['select_crypto_backend']
        if backend == 'auto':
            # Detect what backend we can use
            can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
            can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)

            # If cryptography is available we'll use it
            if can_use_cryptography:
                backend = 'cryptography'
            elif can_use_pyopenssl:
                backend = 'pyopenssl'

            # Fail if no backend has been found
            if backend == 'auto':
                module.fail_json(msg=("Can't detect any of the required Python libraries "
                                      "cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
                                          MINIMAL_CRYPTOGRAPHY_VERSION,
                                          MINIMAL_PYOPENSSL_VERSION))

        if backend == 'pyopenssl':
            if not PYOPENSSL_FOUND:
                module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
                                 exception=PYOPENSSL_IMP_ERR)
            try:
                getattr(crypto.X509Req, 'get_extensions')
            except AttributeError:
                module.fail_json(msg='You need to have PyOpenSSL>=0.15')

            module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
                             version='2.13', collection_name='ansible.builtin')
            certificate = CertificateInfoPyOpenSSL(module)
        elif backend == 'cryptography':
            if not CRYPTOGRAPHY_FOUND:
                module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
                                 exception=CRYPTOGRAPHY_IMP_ERR)
            certificate = CertificateInfoCryptography(module)

        result = certificate.get_info()
        module.exit_json(**result)
    except crypto_utils.OpenSSLObjectError as exc:
        module.fail_json(msg=to_native(exc))


if __name__ == "__main__":
    main()
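The C(valid_at) results built in C(get_info()) above are plain booleans keyed by the caller's check names, and C(get_relative_time_option) accepts either a C(%Y%m%d%H%M%SZ) timestamp or a relative offset such as C(+1d). A minimal usage sketch, assuming this file is the C(openssl_certificate_info) module (the file name is not shown in this hunk); paths and check names are illustrative:

# Sketch only: module name and paths are assumptions, not part of this diff.
- name: Gather certificate facts, checking validity at two points in time
  openssl_certificate_info:
    path: /etc/ssl/crt/ansible.com.crt
    valid_at:
      in_one_day: "+1d"
      fixed_point: "20301231235959Z"   # must match TIMESTAMP_FORMAT above
  register: cert_info

- name: Fail if the certificate does not cover the next day
  assert:
    that:
      - cert_info.valid_at.in_one_day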
File diff suppressed because it is too large
@@ -1,944 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2016, Yanis Guenane <yanis+ansible@guenane.org>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = r'''
---
module: openssl_privatekey
version_added: "2.3"
short_description: Generate OpenSSL private keys
description:
    - This module allows one to (re)generate OpenSSL private keys.
    - One can generate L(RSA,https://en.wikipedia.org/wiki/RSA_%28cryptosystem%29),
      L(DSA,https://en.wikipedia.org/wiki/Digital_Signature_Algorithm),
      L(ECC,https://en.wikipedia.org/wiki/Elliptic-curve_cryptography) or
      L(EdDSA,https://en.wikipedia.org/wiki/EdDSA) private keys.
    - Keys are generated in PEM format.
    - "Please note that the module regenerates private keys if they don't match
      the module's options. In particular, if you provide another passphrase
      (or specify none), change the keysize, etc., the private key will be
      regenerated. If you are concerned that this could **overwrite your private key**,
      consider using the I(backup) option."
    - The module can use the cryptography Python library, or the pyOpenSSL Python
      library. By default, it tries to detect which one is available. This can be
      overridden with the I(select_crypto_backend) option. Please note that the
      PyOpenSSL backend was deprecated in Ansible 2.9 and will be removed in Ansible 2.13.
requirements:
    - Either cryptography >= 1.2.3 (older versions might work as well)
    - Or pyOpenSSL
author:
    - Yanis Guenane (@Spredzy)
    - Felix Fontein (@felixfontein)
options:
    state:
        description:
            - Whether the private key should exist or not, taking action if the state is different from what is stated.
        type: str
        default: present
        choices: [ absent, present ]
    size:
        description:
            - Size (in bits) of the TLS/SSL key to generate.
        type: int
        default: 4096
    type:
        description:
            - The algorithm used to generate the TLS/SSL private key.
            - Note that C(ECC), C(X25519), C(X448), C(Ed25519) and C(Ed448) require the C(cryptography) backend.
              C(X25519) needs cryptography 2.5 or newer, while C(X448), C(Ed25519) and C(Ed448) require
              cryptography 2.6 or newer. For C(ECC), the minimal cryptography version required depends on the
              I(curve) option.
        type: str
        default: RSA
        choices: [ DSA, ECC, Ed25519, Ed448, RSA, X25519, X448 ]
    curve:
        description:
            - Note that not all curves are supported by all versions of C(cryptography).
            - For maximal interoperability, C(secp384r1) or C(secp256r1) should be used.
            - We use the curve names as defined in the
              L(IANA registry for TLS,https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-8).
        type: str
        choices:
            - secp384r1
            - secp521r1
            - secp224r1
            - secp192r1
            - secp256r1
            - secp256k1
            - brainpoolP256r1
            - brainpoolP384r1
            - brainpoolP512r1
            - sect571k1
            - sect409k1
            - sect283k1
            - sect233k1
            - sect163k1
            - sect571r1
            - sect409r1
            - sect283r1
            - sect233r1
            - sect163r2
        version_added: "2.8"
    force:
        description:
            - Should the key be regenerated even if it already exists.
        type: bool
        default: no
    path:
        description:
            - Name of the file in which the generated TLS/SSL private key will be written. It will have 0600 mode.
        type: path
        required: true
    passphrase:
        description:
            - The passphrase for the private key.
        type: str
        version_added: "2.4"
    cipher:
        description:
            - The cipher to encrypt the private key. (Valid values can be found by
              running `openssl list -cipher-algorithms` or `openssl list-cipher-algorithms`,
              depending on your OpenSSL version.)
            - When using the C(cryptography) backend, use C(auto).
        type: str
        version_added: "2.4"
    select_crypto_backend:
        description:
            - Determines which crypto backend to use.
            - The default choice is C(auto), which tries to use C(cryptography) if available, and falls back to C(pyopenssl).
            - If set to C(pyopenssl), will try to use the L(pyOpenSSL,https://pypi.org/project/pyOpenSSL/) library.
            - If set to C(cryptography), will try to use the L(cryptography,https://cryptography.io/) library.
            - Please note that the C(pyopenssl) backend has been deprecated in Ansible 2.9, and will be removed in Ansible 2.13.
              From that point on, only the C(cryptography) backend will be available.
        type: str
        default: auto
        choices: [ auto, cryptography, pyopenssl ]
        version_added: "2.8"
    format:
        description:
            - Determines which format the private key is written in. By default, PKCS1 (traditional OpenSSL format)
              is used for all keys which support it. Please note that not every key can be exported in any format.
            - The value C(auto) selects a format based on the key format. The value C(auto_ignore) does the same,
              but for existing private key files, it will not force a regeneration when their format is not the
              automatically selected one for generation.
            - Note that if the format for an existing private key mismatches, the key is *regenerated* by default.
              To change this behavior, use the I(format_mismatch) option.
            - The I(format) option is only supported by the C(cryptography) backend. The C(pyopenssl) backend will
              fail if a value different from C(auto_ignore) is used.
        type: str
        default: auto_ignore
        choices: [ pkcs1, pkcs8, raw, auto, auto_ignore ]
        version_added: "2.10"
    format_mismatch:
        description:
            - Determines behavior of the module if the format of a private key does not match the expected format, but all
              other parameters are as expected.
            - If set to C(regenerate) (default), generates a new private key.
            - If set to C(convert), the key will be converted to the new format instead.
            - Only supported by the C(cryptography) backend.
        type: str
        default: regenerate
        choices: [ regenerate, convert ]
        version_added: "2.10"
    backup:
        description:
            - Create a backup file including a timestamp so you can get
              the original private key back if you overwrote it with a new one by accident.
        type: bool
        default: no
        version_added: "2.8"
    return_content:
        description:
            - If set to C(yes), will return the (current or generated) private key's content as I(privatekey).
            - Note that especially if the private key is not encrypted, you have to make sure that the returned
              value is treated appropriately and not accidentally written to logs etc.! Use with care!
        type: bool
        default: no
        version_added: "2.10"
    regenerate:
        description:
            - Allows configuring in which situations the module is allowed to regenerate private keys.
              The module will always generate a new key if the destination file does not exist.
            - By default, the key will be regenerated when it doesn't match the module's options,
              except when the key cannot be read or the passphrase does not match. Please note that
              this B(changed) for Ansible 2.10. For Ansible 2.9, the behavior was as if C(full_idempotence)
              was specified.
            - If set to C(never), the module will fail if the key cannot be read or the passphrase
              does not match, and will never regenerate an existing key.
            - If set to C(fail), the module will fail if the key does not correspond to the module's
              options.
            - If set to C(partial_idempotence), the key will be regenerated if it does not conform to
              the module's options. The key is B(not) regenerated if it cannot be read (broken file),
              the key is protected by an unknown passphrase, or when the key is not protected by a
              passphrase, but a passphrase is specified.
            - If set to C(full_idempotence), the key will be regenerated if it does not conform to the
              module's options. This is also the case if the key cannot be read (broken file), the key
              is protected by an unknown passphrase, or when the key is not protected by a passphrase,
              but a passphrase is specified. Make sure you have a B(backup) when using this option!
            - If set to C(always), the module will always regenerate the key. This is equivalent to
              setting I(force) to C(yes).
            - Note that if I(format_mismatch) is set to C(convert) and everything matches except the
              format, the key will always be converted, except if I(regenerate) is set to C(always).
        type: str
        choices:
            - never
            - fail
            - partial_idempotence
            - full_idempotence
            - always
        default: full_idempotence
        version_added: '2.10'
extends_documentation_fragment:
- files
seealso:
- module: openssl_certificate
- module: openssl_csr
- module: openssl_dhparam
- module: openssl_pkcs12
- module: openssl_publickey
'''

EXAMPLES = r'''
- name: Generate an OpenSSL private key with the default values (4096 bits, RSA)
  openssl_privatekey:
    path: /etc/ssl/private/ansible.com.pem

- name: Generate an OpenSSL private key with the default values (4096 bits, RSA) and a passphrase
  openssl_privatekey:
    path: /etc/ssl/private/ansible.com.pem
    passphrase: ansible
    cipher: aes256

- name: Generate an OpenSSL private key with a different size (2048 bits)
  openssl_privatekey:
    path: /etc/ssl/private/ansible.com.pem
    size: 2048

- name: Force regenerate an OpenSSL private key if it already exists
  openssl_privatekey:
    path: /etc/ssl/private/ansible.com.pem
    force: yes

- name: Generate an OpenSSL private key with a different algorithm (DSA)
  openssl_privatekey:
    path: /etc/ssl/private/ansible.com.pem
    type: DSA
'''
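None of the examples above exercise the I(curve), I(format) or I(format_mismatch) options documented earlier. A minimal sketch built from the argument spec further down; paths are illustrative:

# Sketch only: paths are illustrative, not part of this diff.
- name: Generate an ECC private key on the secp256r1 curve, stored in PKCS#8 format
  openssl_privatekey:
    path: /etc/ssl/private/ecc.ansible.com.pem
    type: ECC
    curve: secp256r1
    format: pkcs8

- name: Convert an existing key to PKCS#8 on format mismatch instead of regenerating it
  openssl_privatekey:
    path: /etc/ssl/private/ansible.com.pem
    format: pkcs8
    format_mismatch: convert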

RETURN = r'''
size:
    description: Size (in bits) of the TLS/SSL private key.
    returned: changed or success
    type: int
    sample: 4096
type:
    description: Algorithm used to generate the TLS/SSL private key.
    returned: changed or success
    type: str
    sample: RSA
curve:
    description: Elliptic curve used to generate the TLS/SSL private key.
    returned: changed or success, and I(type) is C(ECC)
    type: str
    sample: secp256r1
filename:
    description: Path to the generated TLS/SSL private key file.
    returned: changed or success
    type: str
    sample: /etc/ssl/private/ansible.com.pem
fingerprint:
    description:
        - The fingerprint of the public key. Fingerprint will be generated for each C(hashlib.algorithms) available.
        - The PyOpenSSL backend requires PyOpenSSL >= 16.0 for meaningful output.
    returned: changed or success
    type: dict
    sample:
      md5: "84:75:71:72:8d:04:b5:6c:4d:37:6d:66:83:f5:4c:29"
      sha1: "51:cc:7c:68:5d:eb:41:43:88:7e:1a:ae:c7:f8:24:72:ee:71:f6:10"
      sha224: "b1:19:a6:6c:14:ac:33:1d:ed:18:50:d3:06:5c:b2:32:91:f1:f1:52:8c:cb:d5:75:e9:f5:9b:46"
      sha256: "41:ab:c7:cb:d5:5f:30:60:46:99:ac:d4:00:70:cf:a1:76:4f:24:5d:10:24:57:5d:51:6e:09:97:df:2f:de:c7"
      sha384: "85:39:50:4e:de:d9:19:33:40:70:ae:10:ab:59:24:19:51:c3:a2:e4:0b:1c:b1:6e:dd:b3:0c:d9:9e:6a:46:af:da:18:f8:ef:ae:2e:c0:9a:75:2c:9b:b3:0f:3a:5f:3d"
      sha512: "fd:ed:5e:39:48:5f:9f:fe:7f:25:06:3f:79:08:cd:ee:a5:e7:b3:3d:13:82:87:1f:84:e1:f5:c7:28:77:53:94:86:56:38:69:f0:d9:35:22:01:1e:a6:60:...:0f:9b"
backup_file:
    description: Name of backup file created.
    returned: changed and if I(backup) is C(yes)
    type: str
    sample: /path/to/privatekey.pem.2019-03-09@11:22~
privatekey:
    description:
        - The (current or generated) private key's content.
        - Will be Base64-encoded if the key is in raw format.
    returned: if I(state) is C(present) and I(return_content) is C(yes)
    type: str
    version_added: "2.10"
'''

import abc
import base64
import os
import traceback
from distutils.version import LooseVersion

MINIMAL_PYOPENSSL_VERSION = '0.6'
MINIMAL_CRYPTOGRAPHY_VERSION = '1.2.3'

PYOPENSSL_IMP_ERR = None
try:
    import OpenSSL
    from OpenSSL import crypto
    PYOPENSSL_VERSION = LooseVersion(OpenSSL.__version__)
except ImportError:
    PYOPENSSL_IMP_ERR = traceback.format_exc()
    PYOPENSSL_FOUND = False
else:
    PYOPENSSL_FOUND = True

CRYPTOGRAPHY_IMP_ERR = None
try:
    import cryptography
    import cryptography.exceptions
    import cryptography.hazmat.backends
    import cryptography.hazmat.primitives.serialization
    import cryptography.hazmat.primitives.asymmetric.rsa
    import cryptography.hazmat.primitives.asymmetric.dsa
    import cryptography.hazmat.primitives.asymmetric.ec
    import cryptography.hazmat.primitives.asymmetric.utils
    CRYPTOGRAPHY_VERSION = LooseVersion(cryptography.__version__)
except ImportError:
    CRYPTOGRAPHY_IMP_ERR = traceback.format_exc()
    CRYPTOGRAPHY_FOUND = False
else:
    CRYPTOGRAPHY_FOUND = True

from ansible.module_utils.crypto import (
    CRYPTOGRAPHY_HAS_X25519,
    CRYPTOGRAPHY_HAS_X25519_FULL,
    CRYPTOGRAPHY_HAS_X448,
    CRYPTOGRAPHY_HAS_ED25519,
    CRYPTOGRAPHY_HAS_ED448,
)

from ansible.module_utils import crypto as crypto_utils
from ansible.module_utils._text import to_native, to_bytes
from ansible.module_utils.basic import AnsibleModule, missing_required_lib


class PrivateKeyError(crypto_utils.OpenSSLObjectError):
    pass


class PrivateKeyBase(crypto_utils.OpenSSLObject):

    def __init__(self, module):
        super(PrivateKeyBase, self).__init__(
            module.params['path'],
            module.params['state'],
            module.params['force'],
            module.check_mode
        )
        self.size = module.params['size']
        self.passphrase = module.params['passphrase']
        self.cipher = module.params['cipher']
        self.privatekey = None
        self.fingerprint = {}
        self.format = module.params['format']
        self.format_mismatch = module.params['format_mismatch']
        self.privatekey_bytes = None
        self.return_content = module.params['return_content']
        self.regenerate = module.params['regenerate']
        if self.regenerate == 'always':
            self.force = True

        self.backup = module.params['backup']
        self.backup_file = None

        if module.params['mode'] is None:
            module.params['mode'] = '0600'

    @abc.abstractmethod
    def _generate_private_key(self):
        """(Re-)Generate private key."""
        pass

    @abc.abstractmethod
    def _ensure_private_key_loaded(self):
        """Make sure that the private key has been loaded."""
        pass

    @abc.abstractmethod
    def _get_private_key_data(self):
        """Return bytes for self.privatekey"""
        pass

    @abc.abstractmethod
    def _get_fingerprint(self):
        pass

    def generate(self, module):
        """Generate a keypair."""

        if not self.check(module, perms_required=False, ignore_conversion=True) or self.force:
            # Regenerate
            if self.backup:
                self.backup_file = module.backup_local(self.path)
            self._generate_private_key()
            privatekey_data = self._get_private_key_data()
            if self.return_content:
                self.privatekey_bytes = privatekey_data
            crypto_utils.write_file(module, privatekey_data, 0o600)
            self.changed = True
        elif not self.check(module, perms_required=False, ignore_conversion=False):
            # Convert
            if self.backup:
                self.backup_file = module.backup_local(self.path)
            self._ensure_private_key_loaded()
            privatekey_data = self._get_private_key_data()
            if self.return_content:
                self.privatekey_bytes = privatekey_data
            crypto_utils.write_file(module, privatekey_data, 0o600)
            self.changed = True

        self.fingerprint = self._get_fingerprint()
        file_args = module.load_file_common_arguments(module.params)
        if module.set_fs_attributes_if_different(file_args, False):
            self.changed = True

    def remove(self, module):
        if self.backup:
            self.backup_file = module.backup_local(self.path)
        super(PrivateKeyBase, self).remove(module)

    @abc.abstractmethod
    def _check_passphrase(self):
        pass

    @abc.abstractmethod
    def _check_size_and_type(self):
        pass

    @abc.abstractmethod
    def _check_format(self):
        pass

    def check(self, module, perms_required=True, ignore_conversion=True):
        """Ensure the resource is in its desired state."""

        state_and_perms = super(PrivateKeyBase, self).check(module, perms_required=False)

        if not state_and_perms:
            # key does not exist
            return False

        if not self._check_passphrase():
            if self.regenerate in ('full_idempotence', 'always'):
                return False
            module.fail_json(msg='Unable to read the key. The key is protected with another passphrase, has no passphrase, or is broken.'
                                 ' Will not proceed. To force regeneration, call the module with `regenerate`'
                                 ' set to `full_idempotence` or `always`, or with `force=yes`.')

        if self.regenerate != 'never':
            if not self._check_size_and_type():
                if self.regenerate in ('partial_idempotence', 'full_idempotence', 'always'):
                    return False
                module.fail_json(msg='Key has wrong type and/or size.'
                                     ' Will not proceed. To force regeneration, call the module with `regenerate`'
                                     ' set to `partial_idempotence`, `full_idempotence` or `always`, or with `force=yes`.')

            if not self._check_format():
                # During conversion step, convert if format does not match and format_mismatch == 'convert'
                if not ignore_conversion and self.format_mismatch == 'convert':
                    return False
                # During generation step, regenerate if format does not match and format_mismatch == 'regenerate'
                if ignore_conversion and self.format_mismatch == 'regenerate' and self.regenerate != 'never':
                    if not ignore_conversion or self.regenerate in ('partial_idempotence', 'full_idempotence', 'always'):
                        return False
                    module.fail_json(msg='Key has wrong format.'
                                         ' Will not proceed. To force regeneration, call the module with `regenerate`'
                                         ' set to `partial_idempotence`, `full_idempotence` or `always`, or with `force=yes`.'
                                         ' To convert the key, set `format_mismatch` to `convert`.')

        # check whether permissions are correct (in case that needs to be checked)
        return not perms_required or super(PrivateKeyBase, self).check(module, perms_required=perms_required)

    def dump(self):
        """Serialize the object into a dictionary."""

        result = {
            'size': self.size,
            'filename': self.path,
            'changed': self.changed,
            'fingerprint': self.fingerprint,
        }
        if self.backup_file:
            result['backup_file'] = self.backup_file
        if self.return_content:
            if self.privatekey_bytes is None:
                self.privatekey_bytes = crypto_utils.load_file_if_exists(self.path, ignore_errors=True)
            if self.privatekey_bytes:
                if crypto_utils.identify_private_key_format(self.privatekey_bytes) == 'raw':
                    result['privatekey'] = base64.b64encode(self.privatekey_bytes)
                else:
                    result['privatekey'] = self.privatekey_bytes.decode('utf-8')
            else:
                result['privatekey'] = None

        return result


# Implementation using pyOpenSSL
class PrivateKeyPyOpenSSL(PrivateKeyBase):

    def __init__(self, module):
        super(PrivateKeyPyOpenSSL, self).__init__(module)

        if module.params['type'] == 'RSA':
            self.type = crypto.TYPE_RSA
        elif module.params['type'] == 'DSA':
            self.type = crypto.TYPE_DSA
        else:
            module.fail_json(msg="PyOpenSSL backend only supports RSA and DSA keys.")

        if self.format != 'auto_ignore':
            module.fail_json(msg="PyOpenSSL backend only supports auto_ignore format.")

    def _generate_private_key(self):
        """(Re-)Generate private key."""
        self.privatekey = crypto.PKey()
        try:
            self.privatekey.generate_key(self.type, self.size)
        except (TypeError, ValueError) as exc:
            raise PrivateKeyError(exc)

    def _ensure_private_key_loaded(self):
        """Make sure that the private key has been loaded."""
        if self.privatekey is None:
            try:
                self.privatekey = crypto_utils.load_privatekey(self.path, self.passphrase)
            except crypto_utils.OpenSSLBadPassphraseError as exc:
                raise PrivateKeyError(exc)

    def _get_private_key_data(self):
        """Return bytes for self.privatekey"""
        if self.cipher and self.passphrase:
            return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey,
                                          self.cipher, to_bytes(self.passphrase))
        else:
            return crypto.dump_privatekey(crypto.FILETYPE_PEM, self.privatekey)

    def _get_fingerprint(self):
        return crypto_utils.get_fingerprint(self.path, self.passphrase)

    def _check_passphrase(self):
        try:
            crypto_utils.load_privatekey(self.path, self.passphrase)
            return True
        except Exception as dummy:
            return False

    def _check_size_and_type(self):
        def _check_size(privatekey):
            return self.size == privatekey.bits()

        def _check_type(privatekey):
            return self.type == privatekey.type()

        self._ensure_private_key_loaded()
        return _check_size(self.privatekey) and _check_type(self.privatekey)

    def _check_format(self):
        # Not supported by this backend
        return True

    def dump(self):
        """Serialize the object into a dictionary."""

        result = super(PrivateKeyPyOpenSSL, self).dump()

        if self.type == crypto.TYPE_RSA:
            result['type'] = 'RSA'
        else:
            result['type'] = 'DSA'

        return result


# Implementation using cryptography
class PrivateKeyCryptography(PrivateKeyBase):

    def _get_ec_class(self, ectype):
        ecclass = cryptography.hazmat.primitives.asymmetric.ec.__dict__.get(ectype)
        if ecclass is None:
            self.module.fail_json(msg='Your cryptography version does not support {0}'.format(ectype))
        return ecclass

    def _add_curve(self, name, ectype, deprecated=False):
        def create(size):
            ecclass = self._get_ec_class(ectype)
            return ecclass()

        def verify(privatekey):
            ecclass = self._get_ec_class(ectype)
            return isinstance(privatekey.private_numbers().public_numbers.curve, ecclass)

        self.curves[name] = {
            'create': create,
            'verify': verify,
            'deprecated': deprecated,
        }

    def __init__(self, module):
        super(PrivateKeyCryptography, self).__init__(module)

        self.curves = dict()
        self._add_curve('secp384r1', 'SECP384R1')
        self._add_curve('secp521r1', 'SECP521R1')
        self._add_curve('secp224r1', 'SECP224R1')
        self._add_curve('secp192r1', 'SECP192R1')
        self._add_curve('secp256r1', 'SECP256R1')
        self._add_curve('secp256k1', 'SECP256K1')
        self._add_curve('brainpoolP256r1', 'BrainpoolP256R1', deprecated=True)
        self._add_curve('brainpoolP384r1', 'BrainpoolP384R1', deprecated=True)
        self._add_curve('brainpoolP512r1', 'BrainpoolP512R1', deprecated=True)
        self._add_curve('sect571k1', 'SECT571K1', deprecated=True)
        self._add_curve('sect409k1', 'SECT409K1', deprecated=True)
        self._add_curve('sect283k1', 'SECT283K1', deprecated=True)
        self._add_curve('sect233k1', 'SECT233K1', deprecated=True)
        self._add_curve('sect163k1', 'SECT163K1', deprecated=True)
        self._add_curve('sect571r1', 'SECT571R1', deprecated=True)
        self._add_curve('sect409r1', 'SECT409R1', deprecated=True)
        self._add_curve('sect283r1', 'SECT283R1', deprecated=True)
        self._add_curve('sect233r1', 'SECT233R1', deprecated=True)
        self._add_curve('sect163r2', 'SECT163R2', deprecated=True)

        self.module = module
        self.cryptography_backend = cryptography.hazmat.backends.default_backend()

        self.type = module.params['type']
        self.curve = module.params['curve']
        if not CRYPTOGRAPHY_HAS_X25519 and self.type == 'X25519':
            self.module.fail_json(msg='Your cryptography version does not support X25519')
        if not CRYPTOGRAPHY_HAS_X25519_FULL and self.type == 'X25519':
            self.module.fail_json(msg='Your cryptography version does not support X25519 serialization')
        if not CRYPTOGRAPHY_HAS_X448 and self.type == 'X448':
            self.module.fail_json(msg='Your cryptography version does not support X448')
        if not CRYPTOGRAPHY_HAS_ED25519 and self.type == 'Ed25519':
            self.module.fail_json(msg='Your cryptography version does not support Ed25519')
        if not CRYPTOGRAPHY_HAS_ED448 and self.type == 'Ed448':
            self.module.fail_json(msg='Your cryptography version does not support Ed448')

    def _get_wanted_format(self):
        if self.format not in ('auto', 'auto_ignore'):
            return self.format
        if self.type in ('X25519', 'X448', 'Ed25519', 'Ed448'):
            return 'pkcs8'
        else:
            return 'pkcs1'

    def _generate_private_key(self):
        """(Re-)Generate private key."""
        try:
            if self.type == 'RSA':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.rsa.generate_private_key(
                    public_exponent=65537,  # OpenSSL always uses this
                    key_size=self.size,
                    backend=self.cryptography_backend
                )
            if self.type == 'DSA':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.dsa.generate_private_key(
                    key_size=self.size,
                    backend=self.cryptography_backend
                )
            if CRYPTOGRAPHY_HAS_X25519_FULL and self.type == 'X25519':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.generate()
            if CRYPTOGRAPHY_HAS_X448 and self.type == 'X448':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey.generate()
            if CRYPTOGRAPHY_HAS_ED25519 and self.type == 'Ed25519':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.generate()
            if CRYPTOGRAPHY_HAS_ED448 and self.type == 'Ed448':
                self.privatekey = cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey.generate()
            if self.type == 'ECC' and self.curve in self.curves:
                if self.curves[self.curve]['deprecated']:
                    self.module.warn('Elliptic curves of type {0} should not be used for new keys!'.format(self.curve))
                self.privatekey = cryptography.hazmat.primitives.asymmetric.ec.generate_private_key(
                    curve=self.curves[self.curve]['create'](self.size),
                    backend=self.cryptography_backend
                )
        except cryptography.exceptions.UnsupportedAlgorithm as dummy:
            self.module.fail_json(msg='Cryptography backend does not support the algorithm required for {0}'.format(self.type))

    def _ensure_private_key_loaded(self):
        """Make sure that the private key has been loaded."""
        if self.privatekey is None:
            self.privatekey = self._load_privatekey()

    def _get_private_key_data(self):
        """Return bytes for self.privatekey"""
        # Select export format and encoding
        try:
            export_format = self._get_wanted_format()
            export_encoding = cryptography.hazmat.primitives.serialization.Encoding.PEM
            if export_format == 'pkcs1':
                # "TraditionalOpenSSL" format is PKCS1
                export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.TraditionalOpenSSL
            elif export_format == 'pkcs8':
                export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.PKCS8
            elif export_format == 'raw':
                export_format = cryptography.hazmat.primitives.serialization.PrivateFormat.Raw
                export_encoding = cryptography.hazmat.primitives.serialization.Encoding.Raw
        except AttributeError:
            self.module.fail_json(msg='Cryptography backend does not support the selected output format "{0}"'.format(self.format))

        # Select key encryption
        encryption_algorithm = cryptography.hazmat.primitives.serialization.NoEncryption()
        if self.cipher and self.passphrase:
            if self.cipher == 'auto':
                encryption_algorithm = cryptography.hazmat.primitives.serialization.BestAvailableEncryption(to_bytes(self.passphrase))
            else:
                self.module.fail_json(msg='Cryptography backend can only use "auto" for cipher option.')

        # Serialize key
        try:
            return self.privatekey.private_bytes(
                encoding=export_encoding,
                format=export_format,
                encryption_algorithm=encryption_algorithm
            )
        except ValueError as dummy:
            self.module.fail_json(
                msg='Cryptography backend cannot serialize the private key in the required format "{0}"'.format(self.format)
            )
        except Exception as dummy:
            self.module.fail_json(
                msg='Error while serializing the private key in the required format "{0}"'.format(self.format),
                exception=traceback.format_exc()
            )

    def _load_privatekey(self):
        try:
            # Read bytes
            with open(self.path, 'rb') as f:
                data = f.read()
            # Interpret bytes depending on format.
            format = crypto_utils.identify_private_key_format(data)
            if format == 'raw':
                if len(data) == 56 and CRYPTOGRAPHY_HAS_X448:
                    return cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey.from_private_bytes(data)
                if len(data) == 57 and CRYPTOGRAPHY_HAS_ED448:
                    return cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey.from_private_bytes(data)
                if len(data) == 32:
                    if CRYPTOGRAPHY_HAS_X25519 and (self.type == 'X25519' or not CRYPTOGRAPHY_HAS_ED25519):
                        return cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.from_private_bytes(data)
                    if CRYPTOGRAPHY_HAS_ED25519 and (self.type == 'Ed25519' or not CRYPTOGRAPHY_HAS_X25519):
                        return cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.from_private_bytes(data)
                    if CRYPTOGRAPHY_HAS_X25519 and CRYPTOGRAPHY_HAS_ED25519:
                        try:
                            return cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey.from_private_bytes(data)
                        except Exception:
                            return cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey.from_private_bytes(data)
                raise PrivateKeyError('Cannot load raw key')
            else:
                return cryptography.hazmat.primitives.serialization.load_pem_private_key(
                    data,
                    None if self.passphrase is None else to_bytes(self.passphrase),
                    backend=self.cryptography_backend
                )
        except Exception as e:
            raise PrivateKeyError(e)

    def _get_fingerprint(self):
        # Get bytes of public key
        private_key = self._load_privatekey()
        public_key = private_key.public_key()
        public_key_bytes = public_key.public_bytes(
            cryptography.hazmat.primitives.serialization.Encoding.DER,
            cryptography.hazmat.primitives.serialization.PublicFormat.SubjectPublicKeyInfo
        )
        # Get fingerprints of public_key_bytes
        return crypto_utils.get_fingerprint_of_bytes(public_key_bytes)

    def _check_passphrase(self):
        try:
            with open(self.path, 'rb') as f:
                data = f.read()
            format = crypto_utils.identify_private_key_format(data)
            if format == 'raw':
                # Raw keys cannot be encrypted. To avoid incompatibilities, we try to
                # actually load the key (and return False when this fails).
                self._load_privatekey()
                # Loading the key succeeded. Only return True when no passphrase was
                # provided.
                return self.passphrase is None
            else:
                return cryptography.hazmat.primitives.serialization.load_pem_private_key(
                    data,
                    None if self.passphrase is None else to_bytes(self.passphrase),
                    backend=self.cryptography_backend
                )
        except Exception as dummy:
            return False

    def _check_size_and_type(self):
        self._ensure_private_key_loaded()

        if isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.rsa.RSAPrivateKey):
            return self.type == 'RSA' and self.size == self.privatekey.key_size
        if isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.dsa.DSAPrivateKey):
            return self.type == 'DSA' and self.size == self.privatekey.key_size
        if CRYPTOGRAPHY_HAS_X25519 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.x25519.X25519PrivateKey):
            return self.type == 'X25519'
        if CRYPTOGRAPHY_HAS_X448 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.x448.X448PrivateKey):
            return self.type == 'X448'
        if CRYPTOGRAPHY_HAS_ED25519 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.ed25519.Ed25519PrivateKey):
            return self.type == 'Ed25519'
        if CRYPTOGRAPHY_HAS_ED448 and isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.ed448.Ed448PrivateKey):
            return self.type == 'Ed448'
        if isinstance(self.privatekey, cryptography.hazmat.primitives.asymmetric.ec.EllipticCurvePrivateKey):
            if self.type != 'ECC':
                return False
            if self.curve not in self.curves:
                return False
            return self.curves[self.curve]['verify'](self.privatekey)

        return False

    def _check_format(self):
        if self.format == 'auto_ignore':
            return True
        try:
            with open(self.path, 'rb') as f:
                content = f.read()
            format = crypto_utils.identify_private_key_format(content)
            return format == self._get_wanted_format()
        except Exception as dummy:
            return False

    def dump(self):
        """Serialize the object into a dictionary."""
        result = super(PrivateKeyCryptography, self).dump()
        result['type'] = self.type
        if self.type == 'ECC':
            result['curve'] = self.curve
        return result


def main():
|
||||
|
||||
module = AnsibleModule(
|
||||
argument_spec=dict(
|
||||
state=dict(type='str', default='present', choices=['present', 'absent']),
|
||||
size=dict(type='int', default=4096),
|
||||
type=dict(type='str', default='RSA', choices=[
|
||||
'DSA', 'ECC', 'Ed25519', 'Ed448', 'RSA', 'X25519', 'X448'
|
||||
]),
|
||||
curve=dict(type='str', choices=[
|
||||
'secp384r1', 'secp521r1', 'secp224r1', 'secp192r1', 'secp256r1',
|
||||
'secp256k1', 'brainpoolP256r1', 'brainpoolP384r1', 'brainpoolP512r1',
|
||||
'sect571k1', 'sect409k1', 'sect283k1', 'sect233k1', 'sect163k1',
|
||||
'sect571r1', 'sect409r1', 'sect283r1', 'sect233r1', 'sect163r2',
|
||||
]),
|
||||
force=dict(type='bool', default=False),
|
||||
path=dict(type='path', required=True),
|
||||
passphrase=dict(type='str', no_log=True),
|
||||
cipher=dict(type='str'),
|
||||
backup=dict(type='bool', default=False),
|
||||
format=dict(type='str', default='auto_ignore', choices=['pkcs1', 'pkcs8', 'raw', 'auto', 'auto_ignore']),
|
||||
format_mismatch=dict(type='str', default='regenerate', choices=['regenerate', 'convert']),
|
||||
select_crypto_backend=dict(type='str', choices=['auto', 'pyopenssl', 'cryptography'], default='auto'),
|
||||
return_content=dict(type='bool', default=False),
|
||||
regenerate=dict(
|
||||
type='str',
|
||||
default='full_idempotence',
|
||||
choices=['never', 'fail', 'partial_idempotence', 'full_idempotence', 'always']
|
||||
),
|
||||
),
|
||||
supports_check_mode=True,
|
||||
add_file_common_args=True,
|
||||
required_together=[
|
||||
['cipher', 'passphrase']
|
||||
],
|
||||
required_if=[
|
||||
['type', 'ECC', ['curve']],
|
||||
],
|
||||
)

    base_dir = os.path.dirname(module.params['path']) or '.'
    if not os.path.isdir(base_dir):
        module.fail_json(
            name=base_dir,
            msg='The directory %s does not exist or the file is not a directory' % base_dir
        )

    backend = module.params['select_crypto_backend']
    if backend == 'auto':
        # Detect what is possible
        can_use_cryptography = CRYPTOGRAPHY_FOUND and CRYPTOGRAPHY_VERSION >= LooseVersion(MINIMAL_CRYPTOGRAPHY_VERSION)
        can_use_pyopenssl = PYOPENSSL_FOUND and PYOPENSSL_VERSION >= LooseVersion(MINIMAL_PYOPENSSL_VERSION)

        # Decision
        if module.params['cipher'] and module.params['passphrase'] and module.params['cipher'] != 'auto':
            # First try pyOpenSSL, then cryptography
            if can_use_pyopenssl:
                backend = 'pyopenssl'
            elif can_use_cryptography:
                backend = 'cryptography'
        else:
            # First try cryptography, then pyOpenSSL
            if can_use_cryptography:
                backend = 'cryptography'
            elif can_use_pyopenssl:
                backend = 'pyopenssl'

        # Success?
        if backend == 'auto':
            module.fail_json(msg=("Can't detect any of the required Python libraries "
                                  "cryptography (>= {0}) or PyOpenSSL (>= {1})").format(
                MINIMAL_CRYPTOGRAPHY_VERSION,
                MINIMAL_PYOPENSSL_VERSION))
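The auto-detection above is just a version-gated preference order. A standalone sketch of the same decision; the minimum versions below are placeholders, not the module's MINIMAL_* constants:

# Sketch with placeholder minimum versions; the real module compares the
# installed libraries' version strings against its MINIMAL_* constants.
from distutils.version import LooseVersion

def pick_backend(cryptography_version, pyopenssl_version, prefer_pyopenssl=False):
    can_use_cryptography = cryptography_version and LooseVersion(cryptography_version) >= LooseVersion('1.2.3')
    can_use_pyopenssl = pyopenssl_version and LooseVersion(pyopenssl_version) >= LooseVersion('0.6')
    order = ('pyopenssl', 'cryptography') if prefer_pyopenssl else ('cryptography', 'pyopenssl')
    usable = {'cryptography': can_use_cryptography, 'pyopenssl': can_use_pyopenssl}
    for name in order:
        if usable[name]:
            return name
    return None  # caller then fails with the error shown above

print(pick_backend('2.9.2', None))  # -> cryptography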
    try:
        if backend == 'pyopenssl':
            if not PYOPENSSL_FOUND:
                module.fail_json(msg=missing_required_lib('pyOpenSSL >= {0}'.format(MINIMAL_PYOPENSSL_VERSION)),
                                 exception=PYOPENSSL_IMP_ERR)
            module.deprecate('The module is using the PyOpenSSL backend. This backend has been deprecated',
                             version='2.13', collection_name='ansible.builtin')
            private_key = PrivateKeyPyOpenSSL(module)
        elif backend == 'cryptography':
            if not CRYPTOGRAPHY_FOUND:
                module.fail_json(msg=missing_required_lib('cryptography >= {0}'.format(MINIMAL_CRYPTOGRAPHY_VERSION)),
                                 exception=CRYPTOGRAPHY_IMP_ERR)
            private_key = PrivateKeyCryptography(module)

        if private_key.state == 'present':
            if module.check_mode:
                result = private_key.dump()
                result['changed'] = private_key.force \
                    or not private_key.check(module, ignore_conversion=True) \
                    or not private_key.check(module, ignore_conversion=False)
                module.exit_json(**result)

            private_key.generate(module)
        else:
            if module.check_mode:
                result = private_key.dump()
                result['changed'] = os.path.exists(module.params['path'])
                module.exit_json(**result)

            private_key.remove(module)

        result = private_key.dump()
        module.exit_json(**result)
    except crypto_utils.OpenSSLObjectError as exc:
        module.fail_json(msg=to_native(exc))


if __name__ == '__main__':
    main()
@@ -1,266 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2012, Derek Carter <goozbach@friocorte.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['stableinterface'],
    'supported_by': 'core'
}

DOCUMENTATION = r'''
---
module: selinux
short_description: Change policy and state of SELinux
description:
  - Configures the SELinux mode and policy.
  - A reboot may be required after usage.
  - Ansible will not issue this reboot but will let you know when it is required.
version_added: "0.7"
options:
  policy:
    description:
      - The name of the SELinux policy to use (e.g. C(targeted)). Required if state is not C(disabled).
  state:
    description:
      - The SELinux mode.
    required: true
    choices: [ disabled, enforcing, permissive ]
  configfile:
    description:
      - The path to the SELinux configuration file, if non-standard.
    default: /etc/selinux/config
    aliases: [ conf, file ]
requirements: [ libselinux-python ]
author:
- Derek Carter (@goozbach) <goozbach@friocorte.com>
'''

EXAMPLES = r'''
- name: Enable SELinux
  selinux:
    policy: targeted
    state: enforcing

- name: Put SELinux in permissive mode, logging actions that would be blocked.
  selinux:
    policy: targeted
    state: permissive

- name: Disable SELinux
  selinux:
    state: disabled
'''

RETURN = r'''
msg:
    description: Messages that describe changes that were made.
    returned: always
    type: str
    sample: Config SELinux state changed from 'disabled' to 'permissive'
configfile:
    description: Path to SELinux configuration file.
    returned: always
    type: str
    sample: /etc/selinux/config
policy:
    description: Name of the SELinux policy.
    returned: always
    type: str
    sample: targeted
state:
    description: SELinux mode.
    returned: always
    type: str
    sample: enforcing
reboot_required:
    description: Whether or not a reboot is required for the changes to take effect.
    returned: always
    type: bool
    sample: true
'''

import os
import re
import tempfile
import traceback

SELINUX_IMP_ERR = None
try:
    import selinux
    HAS_SELINUX = True
except ImportError:
    SELINUX_IMP_ERR = traceback.format_exc()
    HAS_SELINUX = False

from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.facts.utils import get_file_lines


# getter subroutines
def get_config_state(configfile):
    lines = get_file_lines(configfile, strip=False)

    for line in lines:
        stateline = re.match(r'^SELINUX=.*$', line)
        if stateline:
            return line.split('=')[1].strip()


def get_config_policy(configfile):
    lines = get_file_lines(configfile, strip=False)

    for line in lines:
        stateline = re.match(r'^SELINUXTYPE=.*$', line)
        if stateline:
            return line.split('=')[1].strip()
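Both getters scan the config file for a KEY=value line and return the value. Applied to sample /etc/selinux/config content:

# The same parse as get_config_state(), applied to sample config lines.
import re

sample = [
    '# This file controls the state of SELinux on the system.',
    'SELINUX=enforcing',
    'SELINUXTYPE=targeted',
]
for line in sample:
    if re.match(r'^SELINUX=.*$', line):
        print(line.split('=')[1].strip())  # -> enforcing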


# setter subroutines
def set_config_state(module, state, configfile):
    # SELINUX=permissive
    # edit config file with state value
    stateline = 'SELINUX=%s' % state
    lines = get_file_lines(configfile, strip=False)

    tmpfd, tmpfile = tempfile.mkstemp()

    with open(tmpfile, "w") as write_file:
        for line in lines:
            write_file.write(re.sub(r'^SELINUX=.*', stateline, line) + '\n')

    module.atomic_move(tmpfile, configfile)
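module.atomic_move() is Ansible's helper for replacing the target file in one step. A plain-Python approximation of the same write-temp-then-rename pattern (unlike atomic_move, this sketch does not preserve ownership, permissions, or SELinux context):

# Plain-Python approximation of the pattern above; module.atomic_move() also
# restores ownership, mode, and SELinux context, which this sketch skips.
import os
import tempfile

def atomic_rewrite(path, new_lines):
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path) or '.')
    with os.fdopen(fd, 'w') as handle:
        handle.write('\n'.join(new_lines) + '\n')
    os.replace(tmp, path)  # atomic when tmp and path are on the same filesystem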


def set_state(module, state):
    if state == 'enforcing':
        selinux.security_setenforce(1)
    elif state == 'permissive':
        selinux.security_setenforce(0)
    elif state == 'disabled':
        pass
    else:
        msg = 'trying to set invalid runtime state %s' % state
        module.fail_json(msg=msg)


def set_config_policy(module, policy, configfile):
    if not os.path.exists('/etc/selinux/%s/policy' % policy):
        module.fail_json(msg='Policy %s does not exist in /etc/selinux/' % policy)

    # edit config file with state value
    # SELINUXTYPE=targeted
    policyline = 'SELINUXTYPE=%s' % policy
    lines = get_file_lines(configfile, strip=False)

    tmpfd, tmpfile = tempfile.mkstemp()

    with open(tmpfile, "w") as write_file:
        for line in lines:
            write_file.write(re.sub(r'^SELINUXTYPE=.*', policyline, line) + '\n')

    module.atomic_move(tmpfile, configfile)


def main():
    module = AnsibleModule(
        argument_spec=dict(
            policy=dict(type='str'),
            state=dict(type='str', required=True, choices=['enforcing', 'permissive', 'disabled']),
            configfile=dict(type='str', default='/etc/selinux/config', aliases=['conf', 'file']),
        ),
        supports_check_mode=True,
    )

    if not HAS_SELINUX:
        module.fail_json(msg=missing_required_lib('libselinux-python'), exception=SELINUX_IMP_ERR)

    # global vars
    changed = False
    msgs = []
    configfile = module.params['configfile']
    policy = module.params['policy']
    state = module.params['state']
    runtime_enabled = selinux.is_selinux_enabled()
    runtime_policy = selinux.selinux_getpolicytype()[1]
    runtime_state = 'disabled'
    reboot_required = False

    if runtime_enabled:
        # enabled means 'enforcing' or 'permissive'
        if selinux.security_getenforce():
            runtime_state = 'enforcing'
        else:
            runtime_state = 'permissive'

    if not os.path.isfile(configfile):
        module.fail_json(msg="Unable to find file {0}".format(configfile),
                         details="Please install the SELinux-policy package "
                                 "if it is not already installed.")

    config_policy = get_config_policy(configfile)
    config_state = get_config_state(configfile)

    # check to see if policy is set if state is not 'disabled'
    if state != 'disabled':
        if not policy:
            module.fail_json(msg="Policy is required if state is not 'disabled'")
    else:
        if not policy:
            policy = config_policy

    # check changed values and run changes
    if policy != runtime_policy:
        if module.check_mode:
            module.exit_json(changed=True)
        # cannot change runtime policy
        msgs.append("Running SELinux policy changed from '%s' to '%s'" % (runtime_policy, policy))
        changed = True

    if policy != config_policy:
        if module.check_mode:
            module.exit_json(changed=True)
        set_config_policy(module, policy, configfile)
        msgs.append("SELinux policy configuration in '%s' changed from '%s' to '%s'" % (configfile, config_policy, policy))
        changed = True

    if state != runtime_state:
        if runtime_enabled:
            if state == 'disabled':
                if runtime_state != 'permissive':
                    # Temporarily set state to permissive
                    if not module.check_mode:
                        set_state(module, 'permissive')
                        module.warn("SELinux state temporarily changed from '%s' to 'permissive'. State change will take effect next reboot." % (runtime_state))
                        changed = True
                    else:
                        module.warn('SELinux state change will take effect next reboot')
                reboot_required = True
            else:
                if not module.check_mode:
                    set_state(module, state)
                    msgs.append("SELinux state changed from '%s' to '%s'" % (runtime_state, state))

                    # Only report changes if the file is changed.
                    # This prevents the task from reporting changes every time the task is run.
                    changed = True
        else:
            module.warn("Reboot is required to set SELinux state to '%s'" % state)
            reboot_required = True

    if state != config_state:
        if not module.check_mode:
            set_config_state(module, state, configfile)
        msgs.append("Config SELinux state changed from '%s' to '%s'" % (config_state, state))
        changed = True

    module.exit_json(changed=changed, msg=', '.join(msgs), configfile=configfile, policy=policy, state=state, reboot_required=reboot_required)


if __name__ == '__main__':
    main()
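main() derives the runtime triple (enabled, mode, policy) from the libselinux-python binding used throughout the module. Condensed into a standalone snippet, using only the calls that appear above:

# Stand-alone version of the runtime queries in main(), guarded so it
# degrades gracefully on hosts without SELinux bindings.
try:
    import selinux
    if selinux.is_selinux_enabled():
        mode = 'enforcing' if selinux.security_getenforce() else 'permissive'
    else:
        mode = 'disabled'
    print(mode, selinux.selinux_getpolicytype()[1])
except ImportError:
    print('libselinux-python is not installed')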
@@ -1,598 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright: (c) 2014, Ahti Kitsik <ak@ahtik.com>
# Copyright: (c) 2014, Jarno Keskikangas <jarno.keskikangas@gmail.com>
# Copyright: (c) 2013, Aleksey Ovcharenko <aleksey.ovcharenko@gmail.com>
# Copyright: (c) 2013, James Martin <jmartin@basho.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type

ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

DOCUMENTATION = r'''
---
module: ufw
short_description: Manage firewall with UFW
description:
  - Manage firewall with UFW.
version_added: 1.6
author:
  - Aleksey Ovcharenko (@ovcharenko)
  - Jarno Keskikangas (@pyykkis)
  - Ahti Kitsik (@ahtik)
notes:
  - See C(man ufw) for more examples.
requirements:
  - C(ufw) package
options:
  state:
    description:
      - C(enabled) reloads firewall and enables firewall on boot.
      - C(disabled) unloads firewall and disables firewall on boot.
      - C(reloaded) reloads firewall.
      - C(reset) disables and resets firewall to installation defaults.
    type: str
    choices: [ disabled, enabled, reloaded, reset ]
  default:
    description:
      - Change the default policy for incoming or outgoing traffic.
    type: str
    choices: [ allow, deny, reject ]
    aliases: [ policy ]
  direction:
    description:
      - Select direction for a rule or default policy command. Mutually
        exclusive with I(interface_in) and I(interface_out).
    type: str
    choices: [ in, incoming, out, outgoing, routed ]
  logging:
    description:
      - Toggles logging. Logged packets use the LOG_KERN syslog facility.
    type: str
    choices: [ 'on', 'off', low, medium, high, full ]
  insert:
    description:
      - Insert the corresponding rule as rule number NUM.
      - Note that ufw numbers rules starting with 1.
    type: int
  insert_relative_to:
    description:
      - Allows interpreting the index in I(insert) relative to a position.
      - C(zero) interprets the rule number as an absolute index (i.e. 1 is
        the first rule).
      - C(first-ipv4) interprets the rule number relative to the index of the
        first IPv4 rule, or relative to the position where the first IPv4 rule
        would be if there is currently none.
      - C(last-ipv4) interprets the rule number relative to the index of the
        last IPv4 rule, or relative to the position where the last IPv4 rule
        would be if there is currently none.
      - C(first-ipv6) interprets the rule number relative to the index of the
        first IPv6 rule, or relative to the position where the first IPv6 rule
        would be if there is currently none.
      - C(last-ipv6) interprets the rule number relative to the index of the
        last IPv6 rule, or relative to the position where the last IPv6 rule
        would be if there is currently none.
    type: str
    choices: [ first-ipv4, first-ipv6, last-ipv4, last-ipv6, zero ]
    default: zero
    version_added: "2.8"
  rule:
    description:
      - Add firewall rule.
    type: str
    choices: [ allow, deny, limit, reject ]
  log:
    description:
      - Log new connections matched to this rule.
    type: bool
  from_ip:
    description:
      - Source IP address.
    type: str
    default: any
    aliases: [ from, src ]
  from_port:
    description:
      - Source port.
    type: str
  to_ip:
    description:
      - Destination IP address.
    type: str
    default: any
    aliases: [ dest, to ]
  to_port:
    description:
      - Destination port.
    type: str
    aliases: [ port ]
  proto:
    description:
      - TCP/IP protocol.
    type: str
    choices: [ any, tcp, udp, ipv6, esp, ah, gre, igmp ]
    aliases: [ protocol ]
  name:
    description:
      - Use profile located in C(/etc/ufw/applications.d).
    type: str
    aliases: [ app ]
  delete:
    description:
      - Delete rule.
    type: bool
  interface:
    description:
      - Specify interface for the rule. The direction (in or out) used
        for the interface depends on the value of I(direction). See
        I(interface_in) and I(interface_out) for routed rules that need
        to supply both an input and output interface. Mutually
        exclusive with I(interface_in) and I(interface_out).
    type: str
    aliases: [ if ]
  interface_in:
    description:
      - Specify input interface for the rule. This is mutually
        exclusive with I(direction) and I(interface). However, it is
        compatible with I(interface_out) for routed rules.
    type: str
    aliases: [ if_in ]
    version_added: "2.10"
  interface_out:
    description:
      - Specify output interface for the rule. This is mutually
        exclusive with I(direction) and I(interface). However, it is
        compatible with I(interface_in) for routed rules.
    type: str
    aliases: [ if_out ]
    version_added: "2.10"
  route:
    description:
      - Apply the rule to routed/forwarded packets.
    type: bool
  comment:
    description:
      - Add a comment to the rule. Requires UFW version >=0.35.
    type: str
    version_added: "2.4"
'''

EXAMPLES = r'''
- name: Allow everything and enable UFW
  ufw:
    state: enabled
    policy: allow

- name: Set logging
  ufw:
    logging: 'on'

# Sometimes it is desirable to let the sender know when traffic is
# being denied, rather than simply ignoring it. In these cases, use
# reject instead of deny. In addition, log rejected connections:
- ufw:
    rule: reject
    port: auth
    log: yes

# ufw supports connection rate limiting, which is useful for protecting
# against brute-force login attacks. ufw will deny connections if an IP
# address has attempted to initiate 6 or more connections in the last
# 30 seconds. See http://www.debian-administration.org/articles/187
# for details. Typical usage is:
- ufw:
    rule: limit
    port: ssh
    proto: tcp

# Allow OpenSSH. (Note that as ufw manages its own state, simply removing
# a rule=allow task can leave those ports exposed. Either use delete=yes
# or a separate state=reset task)
- ufw:
    rule: allow
    name: OpenSSH

- name: Delete OpenSSH rule
  ufw:
    rule: allow
    name: OpenSSH
    delete: yes

- name: Deny all access to port 53
  ufw:
    rule: deny
    port: '53'

- name: Allow port range 60000-61000
  ufw:
    rule: allow
    port: 60000:61000
    proto: tcp

- name: Allow all access to tcp port 80
  ufw:
    rule: allow
    port: '80'
    proto: tcp

- name: Allow all access from RFC1918 networks to this host
  ufw:
    rule: allow
    src: '{{ item }}'
  loop:
    - 10.0.0.0/8
    - 172.16.0.0/12
    - 192.168.0.0/16

- name: Deny access to udp port 514 from host 1.2.3.4 and include a comment
  ufw:
    rule: deny
    proto: udp
    src: 1.2.3.4
    port: '514'
    comment: Block syslog

- name: Allow incoming access to eth0 from 1.2.3.5 port 5469 to 1.2.3.4 port 5469
  ufw:
    rule: allow
    interface: eth0
    direction: in
    proto: udp
    src: 1.2.3.5
    from_port: '5469'
    dest: 1.2.3.4
    to_port: '5469'

# Note that IPv6 must be enabled in /etc/default/ufw for IPv6 firewalling to work.
- name: Deny all traffic from the IPv6 2001:db8::/32 to tcp port 25 on this host
  ufw:
    rule: deny
    proto: tcp
    src: 2001:db8::/32
    port: '25'

- name: Deny all IPv6 traffic to tcp port 20 on this host
  # this should be the first IPv6 rule
  ufw:
    rule: deny
    proto: tcp
    port: '20'
    to_ip: "::"
    insert: 0
    insert_relative_to: first-ipv6

- name: Deny all IPv4 traffic to tcp port 20 on this host
  # This should be the third to last IPv4 rule
  # (insert: -1 addresses the second to last IPv4 rule;
  # so the new rule will be inserted before the second
  # to last IPv4 rule, and will become the third to last
  # IPv4 rule.)
  ufw:
    rule: deny
    proto: tcp
    port: '20'
    to_ip: "::"
    insert: -1
    insert_relative_to: last-ipv4

# Can be used to further restrict a global FORWARD policy set to allow
- name: Deny forwarded/routed traffic from subnet 1.2.3.0/24 to subnet 4.5.6.0/24
  ufw:
    rule: deny
    route: yes
    src: 1.2.3.0/24
    dest: 4.5.6.0/24
'''

import re

from operator import itemgetter

from ansible.module_utils.basic import AnsibleModule


def compile_ipv4_regexp():
    r = r"((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}"
    r += r"(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])"
    return re.compile(r)


def compile_ipv6_regexp():
    """
    validation pattern provided by:
    https://stackoverflow.com/questions/53497/regular-expression-that-matches-valid-ipv6-addresses#answer-17871737
    """
    r = r"(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:"
    r += r"|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}"
    r += r"(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4})"
    r += r"{1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]"
    r += r"{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]"
    r += r"{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4})"
    r += r"{0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]"
    r += r"|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}"
    r += r"[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}"
    r += r"[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))"
    return re.compile(r)
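A quick check of what the two matchers accept:

# Usage example for the two matchers above.
ipv4_regexp = compile_ipv4_regexp()
ipv6_regexp = compile_ipv6_regexp()

print(bool(ipv4_regexp.match('1.2.3.4')))      # -> True
print(bool(ipv6_regexp.match('2001:db8::1')))  # -> True
print(bool(ipv4_regexp.match('any')))          # -> False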


def main():
    command_keys = ['state', 'default', 'rule', 'logging']

    module = AnsibleModule(
        argument_spec=dict(
            state=dict(type='str', choices=['enabled', 'disabled', 'reloaded', 'reset']),
            default=dict(type='str', aliases=['policy'], choices=['allow', 'deny', 'reject']),
            logging=dict(type='str', choices=['full', 'high', 'low', 'medium', 'off', 'on']),
            direction=dict(type='str', choices=['in', 'incoming', 'out', 'outgoing', 'routed']),
            delete=dict(type='bool', default=False),
            route=dict(type='bool', default=False),
            insert=dict(type='int'),
            insert_relative_to=dict(choices=['zero', 'first-ipv4', 'last-ipv4', 'first-ipv6', 'last-ipv6'], default='zero'),
            rule=dict(type='str', choices=['allow', 'deny', 'limit', 'reject']),
            interface=dict(type='str', aliases=['if']),
            interface_in=dict(type='str', aliases=['if_in']),
            interface_out=dict(type='str', aliases=['if_out']),
            log=dict(type='bool', default=False),
            from_ip=dict(type='str', default='any', aliases=['from', 'src']),
            from_port=dict(type='str'),
            to_ip=dict(type='str', default='any', aliases=['dest', 'to']),
            to_port=dict(type='str', aliases=['port']),
            proto=dict(type='str', aliases=['protocol'], choices=['ah', 'any', 'esp', 'ipv6', 'tcp', 'udp', 'gre', 'igmp']),
            name=dict(type='str', aliases=['app']),
            comment=dict(type='str'),
        ),
        supports_check_mode=True,
        mutually_exclusive=[
            ['name', 'proto', 'logging'],
            # Mutual exclusivity with `interface` implied by `required_by`.
            ['direction', 'interface_in'],
            ['direction', 'interface_out'],
        ],
        required_one_of=[command_keys],
        required_by=dict(
            interface=('direction', ),
        ),
    )

    cmds = []

    ipv4_regexp = compile_ipv4_regexp()
    ipv6_regexp = compile_ipv6_regexp()

    def filter_line_that_not_start_with(pattern, content):
        # Despite the name, this keeps only the lines that do start with pattern.
        return ''.join([line for line in content.splitlines(True) if line.startswith(pattern)])

    def filter_line_that_contains(pattern, content):
        return [line for line in content.splitlines(True) if pattern in line]

    def filter_line_that_not_contains(pattern, content):
        return ''.join([line for line in content.splitlines(True) if pattern not in line])

    def filter_line_that_match_func(match_func, content):
        return ''.join([line for line in content.splitlines(True) if match_func(line) is not None])

    def filter_line_that_contains_ipv4(content):
        return filter_line_that_match_func(ipv4_regexp.search, content)

    def filter_line_that_contains_ipv6(content):
        return filter_line_that_match_func(ipv6_regexp.search, content)

    def is_starting_by_ipv4(ip):
        return ipv4_regexp.match(ip) is not None

    def is_starting_by_ipv6(ip):
        return ipv6_regexp.match(ip) is not None

    def execute(cmd, ignore_error=False):
        cmd = ' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd)))

        cmds.append(cmd)
        (rc, out, err) = module.run_command(cmd, environ_update={"LANG": "C"})

        if rc != 0 and not ignore_error:
            module.fail_json(msg=err or out, commands=cmds)

        return out
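execute() builds the command line from [condition, text] fragments: filter(itemgetter(0), ...) drops fragments whose first element is falsy, and itemgetter(-1) then takes the last element of each survivor, so single-element fragments are unconditional. For example:

# How the fragment trick in execute() collapses conditional pieces.
from operator import itemgetter

cmd = [['/usr/sbin/ufw'], [False, '--dry-run'], [True, 'route'], ['allow']]
print(' '.join(map(itemgetter(-1), filter(itemgetter(0), cmd))))
# -> /usr/sbin/ufw route allow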

    def get_current_rules():
        user_rules_files = ["/lib/ufw/user.rules",
                            "/lib/ufw/user6.rules",
                            "/etc/ufw/user.rules",
                            "/etc/ufw/user6.rules",
                            "/var/lib/ufw/user.rules",
                            "/var/lib/ufw/user6.rules"]

        cmd = [[grep_bin], ["-h"], ["'^### tuple'"]]

        cmd.extend([[f] for f in user_rules_files])
        return execute(cmd, ignore_error=True)

    def ufw_version():
        """
        Returns the major and minor version of ufw installed on the system.
        """
        out = execute([[ufw_bin], ["--version"]])

        lines = [x for x in out.split('\n') if x.strip() != '']
        if len(lines) == 0:
            module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)

        matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', lines[0])
        if matches is None:
            module.fail_json(msg="Failed to get ufw version.", rc=0, out=out)

        # Convert version to numbers
        major = int(matches.group(1))
        minor = int(matches.group(2))
        rev = 0
        if matches.group(3) is not None:
            rev = int(matches.group(3))

        return major, minor, rev
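The version regex applied to typical `ufw --version` output (the sample string below is illustrative):

# The parse above applied to sample `ufw --version` output.
import re

out = 'ufw 0.36\nCopyright 2008-2015 Canonical Ltd.\n'
first_line = [x for x in out.split('\n') if x.strip() != ''][0]
matches = re.search(r'^ufw.+(\d+)\.(\d+)(?:\.(\d+))?.*$', first_line)
print(int(matches.group(1)), int(matches.group(2)), int(matches.group(3) or 0))
# -> 0 36 0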

    params = module.params

    commands = dict((key, params[key]) for key in command_keys if params[key])

    # Ensure ufw is available
    ufw_bin = module.get_bin_path('ufw', True)
    grep_bin = module.get_bin_path('grep', True)

    # Save the pre state and rules in order to recognize changes
    pre_state = execute([[ufw_bin], ['status verbose']])
    pre_rules = get_current_rules()

    changed = False

    # Execute filter
    for (command, value) in commands.items():

        cmd = [[ufw_bin], [module.check_mode, '--dry-run']]

        if command == 'state':
            states = {'enabled': 'enable', 'disabled': 'disable',
                      'reloaded': 'reload', 'reset': 'reset'}

            if value in ['reloaded', 'reset']:
                changed = True

            if module.check_mode:
                # "active" would also match "inactive", hence the space
                ufw_enabled = pre_state.find(" active") != -1
                if (value == 'disabled' and ufw_enabled) or (value == 'enabled' and not ufw_enabled):
                    changed = True
            else:
                execute(cmd + [['-f'], [states[value]]])

        elif command == 'logging':
            extract = re.search(r'Logging: (on|off)(?: \(([a-z]+)\))?', pre_state)
            if extract:
                current_level = extract.group(2)
                current_on_off_value = extract.group(1)
                if value != "off":
                    if current_on_off_value == "off":
                        changed = True
                    elif value != "on" and value != current_level:
                        changed = True
                elif current_on_off_value != "off":
                    changed = True
            else:
                changed = True

            if not module.check_mode:
                execute(cmd + [[command], [value]])

        elif command == 'default':
            if params['direction'] not in ['outgoing', 'incoming', 'routed', None]:
                module.fail_json(msg='For default, direction must be one of "outgoing", "incoming" and "routed", or direction must not be specified.')
            if module.check_mode:
                regexp = r'Default: (deny|allow|reject) \(incoming\), (deny|allow|reject) \(outgoing\), (deny|allow|reject|disabled) \(routed\)'
                extract = re.search(regexp, pre_state)
                if extract is not None:
                    current_default_values = {}
                    current_default_values["incoming"] = extract.group(1)
                    current_default_values["outgoing"] = extract.group(2)
                    current_default_values["routed"] = extract.group(3)
                    v = current_default_values[params['direction'] or 'incoming']
                    if v not in (value, 'disabled'):
                        changed = True
                else:
                    changed = True
            else:
                execute(cmd + [[command], [value], [params['direction']]])

        elif command == 'rule':
            if params['direction'] not in ['in', 'out', None]:
                module.fail_json(msg='For rules, direction must be one of "in" and "out", or direction must not be specified.')
            if not params['route'] and params['interface_in'] and params['interface_out']:
                module.fail_json(msg='Only route rules can combine '
                                     'interface_in and interface_out')
            # Rules are constructed according to the long format
            #
            # ufw [--dry-run] [route] [delete] [insert NUM] allow|deny|reject|limit [in|out on INTERFACE] [log|log-all] \
            #     [from ADDRESS [port PORT]] [to ADDRESS [port PORT]] \
            #     [proto protocol] [app application] [comment COMMENT]
            cmd.append([module.boolean(params['route']), 'route'])
            cmd.append([module.boolean(params['delete']), 'delete'])
            if params['insert'] is not None:
                relative_to_cmd = params['insert_relative_to']
                if relative_to_cmd == 'zero':
                    insert_to = params['insert']
                else:
                    (dummy, numbered_state, dummy) = module.run_command([ufw_bin, 'status', 'numbered'])
                    numbered_line_re = re.compile(r'^\[ *([0-9]+)\] ')
                    lines = [(numbered_line_re.match(line), '(v6)' in line) for line in numbered_state.splitlines()]
                    lines = [(int(matcher.group(1)), ipv6) for (matcher, ipv6) in lines if matcher]
                    last_number = max([no for (no, ipv6) in lines]) if lines else 0
                    has_ipv4 = any([not ipv6 for (no, ipv6) in lines])
                    has_ipv6 = any([ipv6 for (no, ipv6) in lines])
                    if relative_to_cmd == 'first-ipv4':
                        relative_to = 1
                    elif relative_to_cmd == 'last-ipv4':
                        relative_to = max([no for (no, ipv6) in lines if not ipv6]) if has_ipv4 else 1
                    elif relative_to_cmd == 'first-ipv6':
                        relative_to = max([no for (no, ipv6) in lines if not ipv6]) + 1 if has_ipv4 else 1
                    elif relative_to_cmd == 'last-ipv6':
                        relative_to = last_number if has_ipv6 else last_number + 1
                    insert_to = params['insert'] + relative_to
                    if insert_to > last_number:
                        # ufw does not like it when the insert number is larger than the
                        # maximal rule number for IPv4/IPv6.
                        insert_to = None
                cmd.append([insert_to is not None, "insert %s" % insert_to])
            cmd.append([value])
            cmd.append([params['direction'], "%s" % params['direction']])
            cmd.append([params['interface'], "on %s" % params['interface']])
            cmd.append([params['interface_in'], "in on %s" % params['interface_in']])
            cmd.append([params['interface_out'], "out on %s" % params['interface_out']])
            cmd.append([module.boolean(params['log']), 'log'])

            for (key, template) in [('from_ip', "from %s"), ('from_port', "port %s"),
                                    ('to_ip', "to %s"), ('to_port', "port %s"),
                                    ('proto', "proto %s"), ('name', "app '%s'")]:
                value = params[key]
                cmd.append([value, template % (value)])

            ufw_major, ufw_minor, dummy = ufw_version()
            # comment is supported only in ufw version 0.35 and later
            if (ufw_major == 0 and ufw_minor >= 35) or ufw_major > 0:
                cmd.append([params['comment'], "comment '%s'" % params['comment']])

            rules_dry = execute(cmd)

            if module.check_mode:

                nb_skipping_line = len(filter_line_that_contains("Skipping", rules_dry))

                if not (nb_skipping_line > 0 and nb_skipping_line == len(rules_dry.splitlines(True))):

                    rules_dry = filter_line_that_not_start_with("### tuple", rules_dry)
                    # ufw dry-run doesn't send all rules, so we have to compare ipv4 or ipv6 rules
                    if is_starting_by_ipv4(params['from_ip']) or is_starting_by_ipv4(params['to_ip']):
                        if filter_line_that_contains_ipv4(pre_rules) != filter_line_that_contains_ipv4(rules_dry):
                            changed = True
                    elif is_starting_by_ipv6(params['from_ip']) or is_starting_by_ipv6(params['to_ip']):
                        if filter_line_that_contains_ipv6(pre_rules) != filter_line_that_contains_ipv6(rules_dry):
                            changed = True
                    elif pre_rules != rules_dry:
                        changed = True

    # Get the new state
    if module.check_mode:
        return module.exit_json(changed=changed, commands=cmds)
    else:
        post_state = execute([[ufw_bin], ['status'], ['verbose']])
        if not changed:
            post_rules = get_current_rules()
            changed = (pre_state != post_state) or (pre_rules != post_rules)
        return module.exit_json(changed=changed, commands=cmds, msg=post_state.rstrip())


if __name__ == '__main__':
    main()
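The insert_relative_to branch above turns a relative index into an absolute ufw rule number, falling back to appending when the result would point past the last rule. Extracted as a pure function and exercised against a rule table of three IPv4 rules followed by two IPv6 rules:

# Pure-function extraction of the insert_relative_to arithmetic in main().
# `lines` mirrors the parsed `ufw status numbered` output: (rule_no, is_ipv6).
def resolve_insert(insert, relative_to_cmd, lines):
    if relative_to_cmd == 'zero':
        return insert
    last_number = max(no for no, ipv6 in lines) if lines else 0
    has_ipv4 = any(not ipv6 for no, ipv6 in lines)
    has_ipv6 = any(ipv6 for no, ipv6 in lines)
    if relative_to_cmd == 'first-ipv4':
        relative_to = 1
    elif relative_to_cmd == 'last-ipv4':
        relative_to = max(no for no, ipv6 in lines if not ipv6) if has_ipv4 else 1
    elif relative_to_cmd == 'first-ipv6':
        relative_to = max(no for no, ipv6 in lines if not ipv6) + 1 if has_ipv4 else 1
    else:  # 'last-ipv6'
        relative_to = last_number if has_ipv6 else last_number + 1
    insert_to = insert + relative_to
    # ufw rejects insert numbers beyond the last rule, so None means "append".
    return None if insert_to > last_number else insert_to

rules = [(1, False), (2, False), (3, False), (4, True), (5, True)]
print(resolve_insert(0, 'first-ipv6', rules))  # -> 4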
Some files were not shown because too many files have changed in this diff.