Add modules for NetApp SANtricity storage platform (#2929)
The modules prefixed with netapp_e* are built to support the SANtricity storage platform. They provide idempotent provisioning for volume groups, disk pools, standard volumes, thin volumes, LUN mapping, hosts, host groups (clusters), volume snapshots, consistency groups, and asynchronous mirroring. The modules require the SANtricity WebServices Proxy, free software available at the NetApp Software Download site: http://mysupport.netapp.com/NOW/download/software/eseries_webservices/1.40.X000.0009/ Starting with the E2800 platform (11.30 OS), the modules work directly with the storage array, since REST API requests are handled on the box itself; such arrays can still be managed by proxy for large-scale deployments.
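The proxy-versus-embedded distinction above comes down to where the `/devmgr/v2` REST endpoint lives; the modules speak the same API either way. A minimal sketch of such a call, with a hypothetical host, credentials, and library usage that are not part of this commit:

```python
# Minimal sketch: list the storage systems known to a SANtricity endpoint.
# Works against the WebServices Proxy or, on an E2800 (11.30 OS), the
# array's embedded REST API -- the /devmgr/v2 surface is the same.
import base64
import json
from urllib.request import Request, urlopen

API_URL = "http://proxy.example.com:8080/devmgr/v2"  # hypothetical endpoint
USER, PWD = "rw", "rw"                               # hypothetical credentials

creds = base64.b64encode(("%s:%s" % (USER, PWD)).encode()).decode()
req = Request(API_URL + "/storage-systems",
              headers={"Accept": "application/json",
                       "Authorization": "Basic " + creds})
for system in json.loads(urlopen(req).read()):
    print(system["id"], system["name"])
```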
parent 3dedecd81e
commit 68823a4e2d
17 changed files with 6063 additions and 14 deletions
lib/ansible/modules/extras/storage/netapp/README.md (new file, 454 lines)
@@ -0,0 +1,454 @@
# NetApp Storage Modules

This directory contains modules that support the storage platforms in the NetApp portfolio.

## SANtricity Modules

The modules prefixed with *netapp\_e* are built to support the SANtricity storage platform. They require the SANtricity
WebServices Proxy. The WebServices Proxy is free software available at the [NetApp Software Download site](http://mysupport.netapp.com/NOW/download/software/eseries_webservices/1.40.X000.0009/).
Starting with the E2800 platform (11.30 OS), the modules work directly with the storage array: REST API requests are
handled on the box itself. Such arrays can still be managed by proxy for large-scale deployments.
The modules provide idempotent provisioning for volume groups, disk pools, standard volumes, thin volumes, LUN mapping,
hosts, host groups (clusters), volume snapshots, consistency groups, and asynchronous mirroring.

### Prerequisites

| Software | Version |
| -------- |:-------:|
| SANtricity Web Services Proxy* | 1.4 or 2.0 |
| Ansible | 2.2** |

\* Not required for *E2800 with 11.30 OS*<br/>
\*\* The modules were developed with this version. Ansible forward and backward compatibility applies.

### Questions and Contribution

Please feel free to submit pull requests with improvements. Issues for these modules should be routed to @hulquest, but
we also try to keep an eye on the list for issues specific to these modules. General questions can be sent to our
[development team](mailto:ng-hsg-engcustomer-esolutions-support@netapp.com).

### Examples

These examples are not comprehensive but are intended to help you get started when integrating storage provisioning into
your playbooks.
```yml
- name: NetApp Test All Modules
  hosts: proxy20
  gather_facts: yes
  connection: local
  vars:
    storage_systems:
      ansible1:
        address1: "10.251.230.41"
        address2: "10.251.230.42"
      ansible2:
        address1: "10.251.230.43"
        address2: "10.251.230.44"
      ansible3:
        address1: "10.251.230.45"
        address2: "10.251.230.46"
      ansible4:
        address1: "10.251.230.47"
        address2: "10.251.230.48"
    storage_pools:
      Disk_Pool_1:
        raid_level: raidDiskPool
        criteria_drive_count: 11
      Disk_Pool_2:
        raid_level: raidDiskPool
        criteria_drive_count: 11
      Disk_Pool_3:
        raid_level: raid0
        criteria_drive_count: 2
    volumes:
      vol_1:
        storage_pool_name: Disk_Pool_1
        size: 10
        thin_provision: false
        thin_volume_repo_size: 7
      vol_2:
        storage_pool_name: Disk_Pool_2
        size: 10
        thin_provision: false
        thin_volume_repo_size: 7
      vol_3:
        storage_pool_name: Disk_Pool_3
        size: 10
        thin_provision: false
        thin_volume_repo_size: 7
      thin_vol_1:
        storage_pool_name: Disk_Pool_1
        size: 10
        thin_provision: true
        thin_volume_repo_size: 7
    hosts:
      ANSIBLE-1:
        host_type: 1
        index: 1
        ports:
          - type: 'fc'
            label: 'fpPort1'
            port: '2100000E1E191B01'

    netapp_api_host: 10.251.230.29
    netapp_api_url: http://{{ netapp_api_host }}/devmgr/v2
    netapp_api_username: rw
    netapp_api_password: rw
    ssid: ansible1
    auth: no
    lun_mapping: no
    netapp_api_validate_certs: False
    snapshot: no
    gather_facts: no
    amg_create: no
    remove_volume: no
    make_volume: no
    check_thins: no
    remove_storage_pool: yes
    check_storage_pool: yes
    remove_storage_system: no
    check_storage_system: yes
    change_role: no
    flash_cache: False
    configure_hostgroup: no
    configure_async_mirror: False
    configure_snapshot: no
    copy_volume: False
    volume_copy_source_volume_id:
    volume_destination_source_volume_id:
    snapshot_volume_storage_pool_name: Disk_Pool_3
    snapshot_volume_image_id: 3400000060080E5000299B640063074057BC5C5E
    snapshot_volume: no
    snapshot_volume_name: vol_1_snap_vol
    host_type_index: 1
    host_name: ANSIBLE-1
    set_host: no
    remove_host: no
    amg_member_target_array:
    amg_member_primary_pool:
    amg_member_secondary_pool:
    amg_member_primary_volume:
    amg_member_secondary_volume:
    set_amg_member: False
    amg_array_name: foo
    amg_name: amg_made_by_ansible
    amg_secondaryArrayId: ansible2
    amg_sync_name: foo
    amg_sync: no

  tasks:

    - name: Get array facts
      netapp_e_facts:
        ssid: "{{ item.key }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        validate_certs: "{{ netapp_api_validate_certs }}"
      with_dict: "{{ storage_systems }}"
      when: gather_facts

    - name: Presence of storage system
      netapp_e_storage_system:
        ssid: "{{ item.key }}"
        state: present
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        validate_certs: "{{ netapp_api_validate_certs }}"
        controller_addresses:
          - "{{ item.value.address1 }}"
          - "{{ item.value.address2 }}"
      with_dict: "{{ storage_systems }}"
      when: check_storage_system

    - name: Create Snapshot
      netapp_e_snapshot_images:
        ssid: "{{ ssid }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        validate_certs: "{{ netapp_api_validate_certs }}"
        snapshot_group: "ansible_snapshot_group"
        state: 'create'
      when: snapshot

    - name: Auth Module Example
      netapp_e_auth:
        ssid: "{{ ssid }}"
        current_password: 'Infinit2'
        new_password: 'Infinit1'
        set_admin: yes
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
      when: auth

    - name: No disk groups
      netapp_e_storagepool:
        ssid: "{{ ssid }}"
        name: "{{ item }}"
        state: absent
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        validate_certs: "{{ netapp_api_validate_certs }}"
        remove_volumes: yes
      with_items:
        - Disk_Pool_1
        - Disk_Pool_2
        - Disk_Pool_3
      when: remove_storage_pool

    - name: Make disk groups
      netapp_e_storagepool:
        ssid: "{{ ssid }}"
        name: "{{ item.key }}"
        state: present
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        validate_certs: "{{ netapp_api_validate_certs }}"
        raid_level: "{{ item.value.raid_level }}"
        criteria_drive_count: "{{ item.value.criteria_drive_count }}"
      with_dict: "{{ storage_pools }}"
      when: check_storage_pool

    - name: No thin volume
      netapp_e_volume:
        ssid: "{{ ssid }}"
        name: NewThinVolumeByAnsible
        state: absent
        thin_provision: yes
        log_path: /tmp/volume.log
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        validate_certs: "{{ netapp_api_validate_certs }}"
      when: check_thins

    - name: Make a thin volume
      netapp_e_volume:
        ssid: "{{ ssid }}"
        name: NewThinVolumeByAnsible
        state: present
        thin_provision: yes
        thin_volume_repo_size: 7
        size: 10
        log_path: /tmp/volume.log
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        validate_certs: "{{ netapp_api_validate_certs }}"
        storage_pool_name: Disk_Pool_1
      when: check_thins

    - name: Remove standard/thick volumes
      netapp_e_volume:
        ssid: "{{ ssid }}"
        name: "{{ item.key }}"
        state: absent
        log_path: /tmp/volume.log
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        validate_certs: "{{ netapp_api_validate_certs }}"
      with_dict: "{{ volumes }}"
      when: remove_volume

    - name: Make a volume
      netapp_e_volume:
        ssid: "{{ ssid }}"
        name: "{{ item.key }}"
        state: present
        storage_pool_name: "{{ item.value.storage_pool_name }}"
        size: "{{ item.value.size }}"
        thin_provision: "{{ item.value.thin_provision }}"
        thin_volume_repo_size: "{{ item.value.thin_volume_repo_size }}"
        log_path: /tmp/volume.log
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        validate_certs: "{{ netapp_api_validate_certs }}"
      with_dict: "{{ volumes }}"
      when: make_volume

    - name: No storage system
      netapp_e_storage_system:
        ssid: "{{ item.key }}"
        state: absent
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        validate_certs: "{{ netapp_api_validate_certs }}"
      with_dict: "{{ storage_systems }}"
      when: remove_storage_system

    - name: Update the role of a storage array
      netapp_e_amg_role:
        name: "{{ amg_name }}"
        role: primary
        force: true
        noSync: true
        ssid: "{{ ssid }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        validate_certs: "{{ netapp_api_validate_certs }}"
      when: change_role

    - name: Flash Cache
      netapp_e_flashcache:
        ssid: "{{ ssid }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        validate_certs: "{{ netapp_api_validate_certs }}"
        name: SSDCacheBuiltByAnsible
      when: flash_cache

    - name: Configure Hostgroup
      netapp_e_hostgroup:
        ssid: "{{ ssid }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        validate_certs: "{{ netapp_api_validate_certs }}"
        state: absent
        name: "ansible-host-group"
      when: configure_hostgroup

    - name: Configure Snapshot group
      netapp_e_snapshot_group:
        ssid: "{{ ssid }}"
        state: present
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        validate_certs: "{{ netapp_api_validate_certs }}"
        base_volume_name: vol_3
        name: ansible_snapshot_group
        repo_pct: 20
        warning_threshold: 85
        delete_limit: 30
        full_policy: purgepit
        storage_pool_name: Disk_Pool_3
        rollback_priority: medium
      when: configure_snapshot

    - name: Copy volume
      netapp_e_volume_copy:
        ssid: "{{ ssid }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        status: present
        source_volume_id: "{{ volume_copy_source_volume_id }}"
        destination_volume_id: "{{ volume_destination_source_volume_id }}"
      when: copy_volume

    - name: Snapshot volume
      netapp_e_snapshot_volume:
        ssid: "{{ ssid }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        state: present
        storage_pool_name: "{{ snapshot_volume_storage_pool_name }}"
        snapshot_image_id: "{{ snapshot_volume_image_id }}"
        name: "{{ snapshot_volume_name }}"
      when: snapshot_volume

    - name: Remove hosts
      netapp_e_host:
        ssid: "{{ ssid }}"
        state: absent
        name: "{{ item.key }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        host_type_index: "{{ host_type_index }}"
      with_dict: "{{ hosts }}"
      when: remove_host

    - name: Ensure/add hosts
      netapp_e_host:
        ssid: "{{ ssid }}"
        state: present
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        name: "{{ item.key }}"
        host_type_index: "{{ item.value.index }}"
        ports:
          - type: 'fc'
            label: 'fpPort1'
            port: '2100000E1E191B01'
      with_dict: "{{ hosts }}"
      when: set_host

    - name: Unmap a volume
      netapp_e_lun_mapping:
        state: absent
        ssid: "{{ ssid }}"
        lun: 2
        target: "{{ host_name }}"
        volume_name: "thin_vol_1"
        target_type: host
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
      when: lun_mapping

    - name: Map a volume
      netapp_e_lun_mapping:
        state: present
        ssid: "{{ ssid }}"
        lun: 16
        target: "{{ host_name }}"
        volume_name: "thin_vol_1"
        target_type: host
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
      when: lun_mapping

    - name: Update LUN Id
      netapp_e_lun_mapping:
        state: present
        ssid: "{{ ssid }}"
        lun: 2
        target: "{{ host_name }}"
        volume_name: "thin_vol_1"
        target_type: host
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
      when: lun_mapping

    - name: AMG removal
      netapp_e_amg:
        state: absent
        ssid: "{{ ssid }}"
        secondaryArrayId: "{{ amg_secondaryArrayId }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        new_name: "{{ amg_array_name }}"
        name: "{{ amg_name }}"
      when: amg_create

    - name: AMG create
      netapp_e_amg:
        state: present
        ssid: "{{ ssid }}"
        secondaryArrayId: "{{ amg_secondaryArrayId }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        new_name: "{{ amg_array_name }}"
        name: "{{ amg_name }}"
      when: amg_create

    - name: start AMG async
      netapp_e_amg_sync:
        name: "{{ amg_name }}"
        state: running
        ssid: "{{ ssid }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
      when: amg_sync
```
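
Note that every task above is gated by one of the boolean vars defined at the top of the play (for example `make_volume` or `lun_mapping`), so individual provisioning steps can be toggled per run, or overridden with `--extra-vars`, without editing the tasks themselves.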

lib/ansible/modules/extras/storage/netapp/netapp_e_amg.py (new file, 328 lines)
@@ -0,0 +1,328 @@
#!/usr/bin/python
# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: netapp_e_amg
short_description: Create, Remove, and Update Asynchronous Mirror Groups
description:
    - Allows for the creation, removal and updating of Asynchronous Mirror Groups for NetApp E-series storage arrays
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
    api_username:
        required: true
        description:
        - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
    api_password:
        required: true
        description:
        - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
    api_url:
        required: true
        description:
        - The url to the SANtricity WebServices Proxy or embedded REST API.
        example:
        - https://prod-1.wahoo.acme.com/devmgr/v2
    validate_certs:
        required: false
        default: true
        description:
        - Should https certificates be validated?
    name:
        description:
            - The name of the async array you wish to target, or create.
            - If C(state) is present and the name isn't found, it will attempt to create.
        required: yes
    secondaryArrayId:
        description:
            - The ID of the secondary array to be used in the mirroring process
        required: yes
    syncIntervalMinutes:
        description:
            - The synchronization interval in minutes
        required: no
        default: 10
    manualSync:
        description:
            - Setting this to true will cause other synchronization values to be ignored
        required: no
        default: no
    recoveryWarnThresholdMinutes:
        description:
            - Recovery point warning threshold (minutes). The user will be warned when the age of the last good recovery point exceeds this value
        required: no
        default: 20
    repoUtilizationWarnThreshold:
        description:
            - Repository utilization warning threshold (percent)
        required: no
        default: 80
    interfaceType:
        description:
            - The intended protocol to use if both Fibre and iSCSI are available.
        choices:
            - iscsi
            - fibre
        required: no
        default: null
    syncWarnThresholdMinutes:
        description:
            - The threshold (in minutes) for notifying the user that periodic synchronization has taken too long to complete.
        required: no
        default: 10
    ssid:
        description:
            - The ID of the primary storage array for the async mirror action
        required: yes
    state:
        description:
            - A C(state) of present will either create or update the async mirror group.
            - A C(state) of absent will remove the async mirror group.
        required: yes
"""

EXAMPLES = """
    - name: AMG removal
      netapp_e_amg:
        state: absent
        ssid: "{{ ssid }}"
        secondaryArrayId: "{{ amg_secondaryArrayId }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        new_name: "{{ amg_array_name }}"
        name: "{{ amg_name }}"
      when: amg_create

    - name: AMG create
      netapp_e_amg:
        state: present
        ssid: "{{ ssid }}"
        secondaryArrayId: "{{ amg_secondaryArrayId }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        new_name: "{{ amg_array_name }}"
        name: "{{ amg_name }}"
      when: amg_create
"""

RETURN = """
msg:
    description: Successful removal
    returned: success
    type: string
    sample: "Async mirror group removed."

msg:
    description: Successful creation
    returned: success
    type: string
    sample: '{"changed": true, "connectionType": "fc", "groupRef": "3700000060080E5000299C24000006E857AC7EEC", "groupState": "optimal", "id": "3700000060080E5000299C24000006E857AC7EEC", "label": "amg_made_by_ansible", "localRole": "primary", "mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC", "orphanGroup": false, "recoveryPointAgeAlertThresholdMinutes": 20, "remoteRole": "secondary", "remoteTarget": {"nodeName": {"ioInterfaceType": "fc", "iscsiNodeName": null, "remoteNodeWWN": "20040080E5299F1C"}, "remoteRef": "9000000060080E5000299C24005B06E557AC7EEC", "scsiinitiatorTargetBaseProperties": {"ioInterfaceType": "fc", "iscsiinitiatorTargetBaseParameters": null}}, "remoteTargetId": "ansible2", "remoteTargetName": "Ansible2", "remoteTargetWwn": "60080E5000299F880000000056A25D56", "repositoryUtilizationWarnThreshold": 80, "roleChangeProgress": "none", "syncActivity": "idle", "syncCompletionTimeAlertThresholdMinutes": 10, "syncIntervalMinutes": 10, "worldWideName": "60080E5000299C24000006E857AC7EEC"}'
"""

import json

from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule, get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError

HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json",
}


def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=False, ignore_errors=False):
    """Issue an HTTP(S) request and return (status_code, decoded_json_body)."""
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError:
        # HTTP errors still carry a response body; fall through and parse it.
        err = get_exception()
        r = err.fp

    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            data = None
    except Exception:
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data
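

# Illustrative sketch (not part of the original module): how request() is
# typically driven by the functions below. The endpoint, credentials, and
# ssid '1' are hypothetical placeholders, not values from this commit.
def _example_list_amgs():
    rc, amgs = request('http://proxy.example.com:8080/devmgr/v2/storage-systems/1/async-mirrors',
                       url_username='rw', url_password='rw', headers=HEADERS)
    for amg in amgs:
        print(amg['label'], amg['groupState'])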


def has_match(module, ssid, api_url, api_pwd, api_usr, body):
    compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes',
                    'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold']
    desired_state = dict((x, (body.get(x))) for x in compare_keys)
    label_exists = False
    matches_spec = False
    current_state = None
    async_id = None
    api_data = None
    desired_name = body.get('name')
    endpoint = 'storage-systems/%s/async-mirrors' % ssid
    url = api_url + endpoint
    try:
        rc, data = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS)
    except Exception:
        error = get_exception()
        module.exit_json(exception="Error finding a match. Message: %s" % str(error))

    for async_group in data:
        if async_group['label'] == desired_name:
            label_exists = True
            api_data = async_group
            async_id = async_group['groupRef']
            current_state = dict(
                syncIntervalMinutes=async_group['syncIntervalMinutes'],
                syncWarnThresholdMinutes=async_group['syncCompletionTimeAlertThresholdMinutes'],
                recoveryWarnThresholdMinutes=async_group['recoveryPointAgeAlertThresholdMinutes'],
                repoUtilizationWarnThreshold=async_group['repositoryUtilizationWarnThreshold'],
            )

    if current_state == desired_state:
        matches_spec = True

    return label_exists, matches_spec, api_data, async_id
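

# Illustrative sketch (not part of the original module): the idempotency test
# above boils down to projecting both sides onto the same keys and comparing
# dicts. Standalone, with made-up field values:
def _example_spec_comparison():
    compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes']
    desired = {'syncIntervalMinutes': 10, 'syncWarnThresholdMinutes': 10}
    current = {'syncIntervalMinutes': 10, 'syncWarnThresholdMinutes': 15}
    projected = dict((k, current.get(k)) for k in compare_keys)
    return projected == desired  # False -> an update is required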


def create_async(module, ssid, api_url, api_pwd, api_usr, body):
    endpoint = 'storage-systems/%s/async-mirrors' % ssid
    url = api_url + endpoint
    post_data = json.dumps(body)
    try:
        rc, data = request(url, data=post_data, method='POST', url_username=api_usr, url_password=api_pwd,
                           headers=HEADERS)
    except Exception:
        error = get_exception()
        module.exit_json(exception="Exception while creating async mirror group. Message: %s" % str(error))
    return data


def update_async(module, ssid, api_url, pwd, user, body, new_name, async_id):
    endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id)
    url = api_url + endpoint
    compare_keys = ['syncIntervalMinutes', 'syncWarnThresholdMinutes',
                    'recoveryWarnThresholdMinutes', 'repoUtilizationWarnThreshold']
    desired_state = dict((x, (body.get(x))) for x in compare_keys)

    if new_name:
        desired_state['new_name'] = new_name

    post_data = json.dumps(desired_state)

    try:
        rc, data = request(url, data=post_data, method='POST', headers=HEADERS,
                           url_username=user, url_password=pwd)
    except Exception:
        error = get_exception()
        module.exit_json(exception="Exception while updating async mirror group. Message: %s" % str(error))

    return data


def remove_amg(module, ssid, api_url, pwd, user, async_id):
    endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, async_id)
    url = api_url + endpoint
    try:
        rc, data = request(url, method='DELETE', url_username=user, url_password=pwd,
                           headers=HEADERS)
    except Exception:
        error = get_exception()
        module.exit_json(exception="Exception while removing async mirror group. Message: %s" % str(error))

    return


def main():
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(dict(
        api_username=dict(type='str', required=True),
        api_password=dict(type='str', required=True, no_log=True),
        api_url=dict(type='str', required=True),
        name=dict(required=True, type='str'),
        new_name=dict(required=False, type='str'),
        secondaryArrayId=dict(required=True, type='str'),
        syncIntervalMinutes=dict(required=False, default=10, type='int'),
        manualSync=dict(required=False, default=False, type='bool'),
        recoveryWarnThresholdMinutes=dict(required=False, default=20, type='int'),
        repoUtilizationWarnThreshold=dict(required=False, default=80, type='int'),
        interfaceType=dict(required=False, choices=['fibre', 'iscsi'], type='str'),
        ssid=dict(required=True, type='str'),
        state=dict(required=True, choices=['present', 'absent']),
        syncWarnThresholdMinutes=dict(required=False, default=10, type='int')
    ))

    module = AnsibleModule(argument_spec=argument_spec)

    p = module.params

    ssid = p.pop('ssid')
    api_url = p.pop('api_url')
    user = p.pop('api_username')
    pwd = p.pop('api_password')
    new_name = p.pop('new_name')
    state = p.pop('state')

    if not api_url.endswith('/'):
        api_url += '/'

    name_exists, spec_matches, api_data, async_id = has_match(module, ssid, api_url, pwd, user, p)

    if state == 'present':
        if name_exists and spec_matches:
            module.exit_json(changed=False, msg="Desired state met", **api_data)
        elif name_exists and not spec_matches:
            results = update_async(module, ssid, api_url, pwd, user,
                                   p, new_name, async_id)
            module.exit_json(changed=True,
                             msg="Async mirror group updated", async_id=async_id,
                             **results)
        elif not name_exists:
            # Pass the password before the username to match create_async's
            # (api_pwd, api_usr) parameter order.
            results = create_async(module, ssid, api_url, pwd, user, p)
            module.exit_json(changed=True, **results)

    elif state == 'absent':
        if name_exists:
            remove_amg(module, ssid, api_url, pwd, user, async_id)
            module.exit_json(changed=True, msg="Async mirror group removed.",
                             async_id=async_id)
        else:
            module.exit_json(changed=False,
                             msg="Async Mirror group: %s already absent" % p['name'])


if __name__ == '__main__':
    main()

lib/ansible/modules/extras/storage/netapp/netapp_e_amg_role.py (new file, 239 lines)
@@ -0,0 +1,239 @@
#!/usr/bin/python

# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: netapp_e_amg_role
short_description: Update the role of a storage array within an Asynchronous Mirror Group (AMG).
description:
    - Update a storage array to become the primary or secondary instance in an asynchronous mirror group
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
    api_username:
        required: true
        description:
        - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
    api_password:
        required: true
        description:
        - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
    api_url:
        required: true
        description:
        - The url to the SANtricity WebServices Proxy or embedded REST API.
        example:
        - https://prod-1.wahoo.acme.com/devmgr/v2
    validate_certs:
        required: false
        default: true
        description:
        - Should https certificates be validated?
    ssid:
        description:
            - The ID of the primary storage array for the async mirror action
        required: yes
    role:
        description:
            - Whether the array should be the primary or secondary array for the AMG
        required: yes
        choices: ['primary', 'secondary']
    noSync:
        description:
            - Whether to avoid synchronization prior to role reversal
        required: no
        default: no
        choices: [yes, no]
    force:
        description:
            - Whether to force the role reversal regardless of the online-state of the primary
        required: no
        default: no
"""

EXAMPLES = """
    - name: Update the role of a storage array
      netapp_e_amg_role:
        name: updating amg role
        role: primary
        ssid: "{{ ssid }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        validate_certs: "{{ netapp_api_validate_certs }}"
"""

RETURN = """
msg:
    description: Failure message
    returned: failure
    type: string
    sample: "No Async Mirror Group with the name."
"""
import json

from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule

from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError

HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json",
}


def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError:
        err = get_exception()
        r = err.fp

    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            # No response body; report the parsed data as None.
            data = None
    except Exception:
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data


def has_match(module, ssid, api_url, api_pwd, api_usr, body, name):
    amg_exists = False
    has_desired_role = False
    amg_id = None
    amg_data = None
    get_amgs = 'storage-systems/%s/async-mirrors' % ssid
    url = api_url + get_amgs
    try:
        amg_rc, amgs = request(url, url_username=api_usr, url_password=api_pwd,
                               headers=HEADERS)
    except Exception:
        module.fail_json(msg="Failed to find AMGs on storage array. Id [%s]" % ssid)

    for amg in amgs:
        if amg['label'] == name:
            amg_exists = True
            amg_id = amg['id']
            amg_data = amg
            if amg['localRole'] == body.get('role'):
                has_desired_role = True

    return amg_exists, has_desired_role, amg_id, amg_data


def update_amg(module, ssid, api_url, api_usr, api_pwd, body, amg_id):
    endpoint = 'storage-systems/%s/async-mirrors/%s/role' % (ssid, amg_id)
    url = api_url + endpoint
    post_data = json.dumps(body)
    try:
        request(url, data=post_data, method='POST', url_username=api_usr,
                url_password=api_pwd, headers=HEADERS)
    except Exception:
        err = get_exception()
        module.fail_json(
            msg="Failed to change role of AMG. Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, str(err)))

    status_endpoint = 'storage-systems/%s/async-mirrors/%s' % (ssid, amg_id)
    status_url = api_url + status_endpoint
    try:
        rc, status = request(status_url, method='GET', url_username=api_usr,
                             url_password=api_pwd, headers=HEADERS)
    except Exception:
        err = get_exception()
        module.fail_json(
            msg="Failed to check status of AMG after role reversal. " +
                "Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, str(err)))

    # Here we wait for the role reversal to complete; note that this polls
    # the status endpoint continuously, with no delay between requests.
    if 'roleChangeProgress' in status:
        while status['roleChangeProgress'] != "none":
            try:
                rc, status = request(status_url, method='GET',
                                     url_username=api_usr, url_password=api_pwd, headers=HEADERS)
            except Exception:
                err = get_exception()
                module.fail_json(
                    msg="Failed to check status of AMG after role reversal. " +
                        "Id [%s]. AMG Id [%s]. Error [%s]" % (ssid, amg_id, str(err)))
    return status
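

# Illustrative sketch (not part of the original module): the status loop
# above polls back-to-back. A variant that sleeps between polls might look
# like the following; time.sleep and the two-second interval are an
# assumption, not something this commit does.
def _example_poll_with_delay(status_url, api_usr, api_pwd):
    import time
    rc, status = request(status_url, method='GET', url_username=api_usr,
                         url_password=api_pwd, headers=HEADERS)
    while status.get('roleChangeProgress') != "none":
        time.sleep(2)  # avoid hammering the proxy while the reversal runs
        rc, status = request(status_url, method='GET', url_username=api_usr,
                             url_password=api_pwd, headers=HEADERS)
    return status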


def main():
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(dict(
        name=dict(required=True, type='str'),
        role=dict(required=True, choices=['primary', 'secondary']),
        noSync=dict(required=False, type='bool', default=False),
        force=dict(required=False, type='bool', default=False),
        ssid=dict(required=True, type='str'),
        api_url=dict(required=True),
        api_username=dict(required=False),
        api_password=dict(required=False, no_log=True),
    ))

    module = AnsibleModule(argument_spec=argument_spec)

    p = module.params

    ssid = p.pop('ssid')
    api_url = p.pop('api_url')
    user = p.pop('api_username')
    pwd = p.pop('api_password')
    name = p.pop('name')

    if not api_url.endswith('/'):
        api_url += '/'

    amg_exists, has_desired_role, async_id, amg_data = has_match(module, ssid, api_url, pwd, user, p, name)

    if not amg_exists:
        module.fail_json(msg="No Async Mirror Group with the name: '%s' was found" % name)
    elif has_desired_role:
        module.exit_json(changed=False, **amg_data)

    else:
        amg_data = update_amg(module, ssid, api_url, user, pwd, p, async_id)
        if amg_data:
            module.exit_json(changed=True, **amg_data)
        else:
            module.exit_json(changed=True, msg="AMG role changed.")


if __name__ == '__main__':
    main()

lib/ansible/modules/extras/storage/netapp/netapp_e_amg_sync.py (new file, 269 lines)
@@ -0,0 +1,269 @@
#!/usr/bin/python

# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: netapp_e_amg_sync
short_description: Conduct synchronization actions on asynchronous mirror groups.
description:
    - Allows for the initialization, suspension and resumption of an asynchronous mirror group's synchronization for NetApp E-series storage arrays.
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
    api_username:
        required: true
        description:
        - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
    api_password:
        required: true
        description:
        - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
    api_url:
        required: true
        description:
        - The url to the SANtricity WebServices Proxy or embedded REST API.
        example:
        - https://prod-1.wahoo.acme.com/devmgr/v2
    validate_certs:
        required: false
        default: true
        description:
        - Should https certificates be validated?
    ssid:
        description:
            - The ID of the storage array containing the AMG you wish to target
    name:
        description:
            - The name of the async mirror group you wish to target
        required: yes
    state:
        description:
            - The synchronization action you'd like to take.
            - If C(running) then it will begin syncing if there is no active sync or will resume a suspended sync. If there is already a sync in progress, it will return with an OK status.
            - If C(suspended) it will suspend any ongoing sync action, but return OK if there is no active sync or if the sync is already suspended.
        choices:
            - running
            - suspended
        required: yes
    delete_recovery_point:
        description:
            - Indicates whether the recovery point can be deleted on the secondary if necessary to achieve the synchronization.
            - If true, and if the amount of unsynchronized data exceeds the CoW repository capacity on the secondary for any member volume, the last recovery point will be deleted and synchronization will continue.
            - If false, the synchronization will be suspended if the amount of unsynchronized data exceeds the CoW repository capacity on the secondary, and the recovery point will be preserved.
            - "NOTE: This only has impact for newly launched syncs."
        choices:
            - yes
            - no
        default: no
"""
EXAMPLES = """
    - name: start AMG async
      netapp_e_amg_sync:
        name: "{{ amg_sync_name }}"
        state: running
        ssid: "{{ ssid }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
"""
RETURN = """
json:
    description: The object attributes of the AMG.
    returned: success
    type: string
    example:
        {
            "changed": false,
            "connectionType": "fc",
            "groupRef": "3700000060080E5000299C24000006EF57ACAC70",
            "groupState": "optimal",
            "id": "3700000060080E5000299C24000006EF57ACAC70",
            "label": "made_with_ansible",
            "localRole": "primary",
            "mirrorChannelRemoteTarget": "9000000060080E5000299C24005B06E557AC7EEC",
            "orphanGroup": false,
            "recoveryPointAgeAlertThresholdMinutes": 20,
            "remoteRole": "secondary",
            "remoteTarget": {
                "nodeName": {
                    "ioInterfaceType": "fc",
                    "iscsiNodeName": null,
                    "remoteNodeWWN": "20040080E5299F1C"
                },
                "remoteRef": "9000000060080E5000299C24005B06E557AC7EEC",
                "scsiinitiatorTargetBaseProperties": {
                    "ioInterfaceType": "fc",
                    "iscsiinitiatorTargetBaseParameters": null
                }
            },
            "remoteTargetId": "ansible2",
            "remoteTargetName": "Ansible2",
            "remoteTargetWwn": "60080E5000299F880000000056A25D56",
            "repositoryUtilizationWarnThreshold": 80,
            "roleChangeProgress": "none",
            "syncActivity": "idle",
            "syncCompletionTimeAlertThresholdMinutes": 10,
            "syncIntervalMinutes": 10,
            "worldWideName": "60080E5000299C24000006EF57ACAC70"
        }
"""
import json

from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule

from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError


def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError:
        err = get_exception()
        r = err.fp

    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            # No response body; report the parsed data as None.
            data = None
    except Exception:
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data


class AMGsync(object):
    def __init__(self):
        argument_spec = basic_auth_argument_spec()
        argument_spec.update(dict(
            api_username=dict(type='str', required=True),
            api_password=dict(type='str', required=True, no_log=True),
            api_url=dict(type='str', required=True),
            name=dict(required=True, type='str'),
            ssid=dict(required=True, type='str'),
            state=dict(required=True, type='str', choices=['running', 'suspended']),
            delete_recovery_point=dict(required=False, type='bool', default=False)
        ))
        self.module = AnsibleModule(argument_spec=argument_spec)
        args = self.module.params
        self.name = args['name']
        self.ssid = args['ssid']
        self.state = args['state']
        self.delete_recovery_point = args['delete_recovery_point']
        try:
            self.user = args['api_username']
            self.pwd = args['api_password']
            self.url = args['api_url']
        except KeyError:
            self.module.fail_json(msg="You must pass in api_username "
                                      "and api_password and api_url to the module.")
        self.certs = args['validate_certs']

        self.post_headers = {
            "Accept": "application/json",
            "Content-Type": "application/json"
        }
        self.amg_id, self.amg_obj = self.get_amg()

    def get_amg(self):
        endpoint = self.url + '/storage-systems/%s/async-mirrors' % self.ssid
        (rc, amg_objs) = request(endpoint, url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
                                 headers=self.post_headers)
        try:
            # list() keeps this working on both Python 2 and 3, where
            # filter() returns an iterator rather than a list.
            matches = list(filter(lambda d: d['label'] == self.name, amg_objs))
            amg_id = matches[0]['id']
            amg_obj = matches[0]
        except IndexError:
            self.module.fail_json(
                msg="There is no async mirror group %s associated with storage array %s" % (self.name, self.ssid))
        return amg_id, amg_obj

    @property
    def current_state(self):
        amg_id, amg_obj = self.get_amg()
        return amg_obj['syncActivity']

    def run_sync_action(self):
        # If we get to this point we know that the states differ, and there is no 'err' state,
        # so no need to revalidate

        post_body = dict()
        if self.state == 'running':
            if self.current_state == 'idle':
                if self.delete_recovery_point:
                    post_body.update(dict(deleteRecoveryPointIfNecessary=self.delete_recovery_point))
                suffix = 'sync'
            else:
                # In a suspended state
                suffix = 'resume'
        else:
            suffix = 'suspend'

        endpoint = self.url + "/storage-systems/%s/async-mirrors/%s/%s" % (self.ssid, self.amg_id, suffix)

        (rc, resp) = request(endpoint, method='POST', url_username=self.user, url_password=self.pwd,
                             validate_certs=self.certs, data=json.dumps(post_body), headers=self.post_headers,
                             ignore_errors=True)

        if not str(rc).startswith('2'):
            self.module.fail_json(msg=str(resp['errorMessage']))

        return resp

    def apply(self):
        state_map = dict(
            running=['active'],
            suspended=['userSuspended', 'internallySuspended', 'paused'],
            err=['unkown', '_UNDEFINED'])

        if self.current_state not in state_map[self.state]:
            if self.current_state in state_map['err']:
                self.module.fail_json(
                    msg=("The sync is in a state of '%s', this requires manual intervention. "
                         "Please investigate and try again") % self.current_state)
            else:
                self.amg_obj = self.run_sync_action()

        (ret, amg) = self.get_amg()
        self.module.exit_json(changed=False, **amg)
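

# Illustrative sketch (not part of the original module): apply() above reports
# changed=False even when a sync action ran. One hedged way to surface the
# change, simplified to the state='running' case, would be to track whether
# run_sync_action() was actually invoked:
def _example_apply_with_changed_flag(sync):
    acted = False
    if sync.current_state not in ['active']:
        sync.run_sync_action()
        acted = True
    ret, amg = sync.get_amg()
    sync.module.exit_json(changed=acted, **amg)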


def main():
    sync = AMGsync()
    sync.apply()


if __name__ == '__main__':
    main()

lib/ansible/modules/extras/storage/netapp/netapp_e_auth.py (new file, 283 lines)
@@ -0,0 +1,283 @@
#!/usr/bin/python

# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: netapp_e_auth
short_description: Sets or updates the password for a storage array.
description:
    - Sets or updates the password for a storage array. When the password is updated on the storage array, it must also be updated on the SANtricity Web Services proxy. Note that not all storage arrays have a Monitor or RO role.
version_added: "2.2"
author: Kevin Hulquest (@hulquest)
options:
    api_username:
        required: true
        description:
        - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
        - This can optionally be set via an environment variable, API_USERNAME
    api_password:
        required: true
        description:
        - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
        - This can optionally be set via an environment variable, API_PASSWORD
    api_url:
        required: true
        description:
        - The full url to the SANtricity WebServices Proxy or embedded REST API.
        - "Example: http://ENDPOINT:8080/devmgr/v2"
        - This can optionally be set via an environment variable, API_URL
    validate_certs:
        required: false
        default: true
        description:
        - Should https certificates be validated?
    name:
        description:
            - The name of the storage array. Note that if more than one storage array with this name is detected, the task will fail and you'll have to use the ID instead.
        required: False
    ssid:
        description:
            - the identifier of the storage array in the Web Services Proxy.
        required: False
    set_admin:
        description:
            - Boolean value on whether to update the admin password. If set to false then the RO account is updated.
        default: False
    current_password:
        description:
            - The current admin password. This is not required if the password hasn't been set before.
        required: False
    new_password:
        description:
            - The password you would like to set. Cannot be more than 30 characters.
        required: True
'''

EXAMPLES = '''
- name: Test module
  netapp_e_auth:
    name: trex
    current_password: 'B4Dpwd'
    new_password: 'W0rs3P4sswd'
    set_admin: yes
    api_url: "{{ netapp_api_url }}"
    api_username: "{{ netapp_api_username }}"
    api_password: "{{ netapp_api_password }}"
'''

RETURN = '''
msg:
    description: Success message
    returned: success
    type: string
    sample: "Password Updated Successfully"
'''
import json

from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule

from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError


HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json"
}


def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError:
        err = get_exception()
        r = err.fp

    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            # No response body; report the parsed data as None.
            data = None
    except Exception:
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data


def get_ssid(module, name, api_url, user, pwd):
    count = 0
    all_systems = 'storage-systems'
    systems_url = api_url + all_systems
    rc, data = request(systems_url, headers=HEADERS, url_username=user, url_password=pwd)
    for system in data:
        if system['name'] == name:
            count += 1
            if count > 1:
                module.fail_json(
                    msg="You supplied a name for the Storage Array but more than 1 array was found with that name. " +
                        "Use the id instead")
            else:
                ssid = system['id']
        else:
            continue

    if count == 0:
        module.fail_json(msg="No storage array with the name %s was found" % name)

    else:
        return ssid


def get_pwd_status(module, ssid, api_url, user, pwd):
    pwd_status = "storage-systems/%s/passwords" % ssid
    url = api_url + pwd_status
    try:
        rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd)
        return data['readOnlyPasswordSet'], data['adminPasswordSet']
    except HTTPError:
        error = get_exception()
        module.fail_json(msg="There was an issue with connecting, please check that your "
                             "endpoint is properly defined and your credentials are correct: %s" % str(error))


def update_storage_system_pwd(module, ssid, pwd, api_url, api_usr, api_pwd):
    update_pwd = 'storage-systems/%s' % ssid
    url = api_url + update_pwd
    post_body = json.dumps(dict(storedPassword=pwd))
    try:
        rc, data = request(url, data=post_body, method='POST', headers=HEADERS, url_username=api_usr,
                           url_password=api_pwd)
    except Exception:
        err = get_exception()
        module.fail_json(msg="Failed to update system password. Id [%s]. Error [%s]" % (ssid, str(err)))
    return data


def set_password(module, ssid, api_url, user, pwd, current_password=None, new_password=None, set_admin=False):
    set_pass = "storage-systems/%s/passwords" % ssid
    url = api_url + set_pass

    if not current_password:
        current_password = ""

    post_body = json.dumps(
        dict(currentAdminPassword=current_password, adminPassword=set_admin, newPassword=new_password))

    try:
        rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd,
                           ignore_errors=True)
    except Exception:
        err = get_exception()
        module.fail_json(msg="Failed to set system password. Id [%s]. Error [%s]" % (ssid, str(err)))

    if rc == 422:
        # Retry once with an empty current admin password before giving up.
        post_body = json.dumps(dict(currentAdminPassword='', adminPassword=set_admin, newPassword=new_password))
        try:
            rc, data = request(url, method='POST', data=post_body, headers=HEADERS, url_username=user, url_password=pwd)
        except Exception:
            module.fail_json(msg="Wrong or no admin password supplied. Please update your playbook and try again")

    update_data = update_storage_system_pwd(module, ssid, new_password, api_url, user, pwd)

    if int(rc) == 204:
        return update_data
    else:
        module.fail_json(msg="%s:%s" % (rc, data))
|
||||
|
||||
|
||||
def main():
|
||||
argument_spec = basic_auth_argument_spec()
|
||||
argument_spec.update(dict(
|
||||
name=dict(required=False, type='str'),
|
||||
ssid=dict(required=False, type='str'),
|
||||
current_password=dict(required=False, no_log=True),
|
||||
new_password=dict(required=True, no_log=True),
|
||||
set_admin=dict(required=True, type='bool'),
|
||||
api_url=dict(required=True),
|
||||
api_username=dict(required=False),
|
||||
api_password=dict(required=False, no_log=True)
|
||||
)
|
||||
)
|
||||
module = AnsibleModule(argument_spec=argument_spec, mutually_exclusive=[['name', 'ssid']],
|
||||
required_one_of=[['name', 'ssid']])
|
||||
|
||||
name = module.params['name']
|
||||
ssid = module.params['ssid']
|
||||
current_password = module.params['current_password']
|
||||
new_password = module.params['new_password']
|
||||
set_admin = module.params['set_admin']
|
||||
user = module.params['api_username']
|
||||
pwd = module.params['api_password']
|
||||
api_url = module.params['api_url']
|
||||
|
||||
if not api_url.endswith('/'):
|
||||
api_url += '/'
|
||||
|
||||
if name:
|
||||
ssid = get_ssid(module, name, api_url, user, pwd)
|
||||
|
||||
ro_pwd, admin_pwd = get_pwd_status(module, ssid, api_url, user, pwd)
|
||||
|
||||
if admin_pwd and not current_password:
|
||||
module.fail_json(
|
||||
msg="Admin account has a password set. " +
|
||||
"You must supply current_password in order to update the RO or Admin passwords")
|
||||
|
||||
if len(new_password) > 30:
|
||||
module.fail_json(msg="Passwords must not be greater than 30 characters in length")
|
||||
|
||||
success = set_password(module, ssid, api_url, user, pwd, current_password=current_password,
|
||||
new_password=new_password,
|
||||
set_admin=set_admin)
|
||||
|
||||
module.exit_json(changed=True, msg="Password Updated Successfully", **success)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
main()
|
|
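A minimal task built from the argument spec above. The file header for this hunk is not shown, so the module name (netapp_e_auth) is an assumption, and all values are placeholders:

```yml
    - name: Set the admin password on a proxy-managed array
      netapp_e_auth:
        name: ansible1
        new_password: "{{ new_array_password }}"
        set_admin: yes
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
```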
@@ -18,7 +18,7 @@
 # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
 #
 DOCUMENTATION = '''
-module: na_eseries_facts
+module: netapp_e_facts
 version_added: '2.2'
 short_description: Get facts about NetApp E-Series arrays
 options:

@@ -55,7 +55,7 @@ author: Kevin Hulquest (@hulquest)
 EXAMPLES = """
 ---
     - name: Get array facts
-      na_eseries_facts:
+      netapp_e_facts:
        array_id: "{{ netapp_array_id }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"

@@ -68,8 +68,6 @@ msg: Gathered facts for <StorageArrayId>.
 """
 import json
-
 import os
-
 from ansible.module_utils.api import basic_auth_argument_spec
 from ansible.module_utils.basic import AnsibleModule, get_exception
 from ansible.module_utils.urls import open_url

@@ -173,8 +171,7 @@ def main():
                                available_capacity=sp['freeSpace'],
                                total_capacity=sp['totalRaidedSpace'],
                                used_capacity=sp['usedSpace']
-                               ) for sp in resp['volumeGroup']
-        ]
+                               ) for sp in resp['volumeGroup']]

     all_volumes = list(resp['volume'])
     # all_volumes.extend(resp['thinVolume'])

@@ -187,8 +184,7 @@ def main():
                        parent_storage_pool_id=v['volumeGroupRef'],
                        capacity=v['capacity'],
                        is_thin_provisioned=v['thinProvisioned']
-                       ) for v in all_volumes
-        ]
+                       ) for v in all_volumes]

     features = [f for f in resp['sa']['capabilities']]
     features.extend([f['capability'] for f in resp['sa']['premiumFeatures'] if f['isEnabled']])

420  lib/ansible/modules/extras/storage/netapp/netapp_e_flashcache.py  Normal file
@@ -0,0 +1,420 @@
#!/usr/bin/python

# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
module: netapp_e_flashcache
author: Kevin Hulquest (@hulquest)
version_added: '2.2'
short_description: Manage NetApp SSD caches
description:
- Create or remove SSD caches on a NetApp E-Series storage array.
options:
  api_username:
    required: true
    description:
    - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
  api_password:
    required: true
    description:
    - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
  api_url:
    required: true
    description:
    - The url to the SANtricity WebServices Proxy or embedded REST API.
    example:
    - https://prod-1.wahoo.acme.com/devmgr/v2
  validate_certs:
    required: false
    default: true
    description:
    - Should https certificates be validated?
  ssid:
    required: true
    description:
    - The ID of the array to manage (as configured on the web services proxy).
  state:
    required: false
    description:
    - Whether the specified SSD cache should exist or not.
    choices: ['present', 'absent']
    default: present
  name:
    required: true
    description:
    - The name of the SSD cache to manage.
  io_type:
    description:
    - The type of workload to optimize the cache for.
    choices: ['filesystem','database','media']
    default: filesystem
  disk_count:
    description:
    - The minimum number of disks to use for building the cache. The cache will be expanded if this number exceeds the number of disks already in place.
  size_unit:
    description:
    - The unit to be applied to size arguments.
    choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
    default: gb
  cache_size_min:
    description:
    - The minimum size (in units of I(size_unit)) of the SSD cache. The cache will be expanded if this exceeds the current size of the cache.
'''

EXAMPLES = """
    - name: Flash Cache
      netapp_e_flashcache:
        ssid: "{{ ssid }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        validate_certs: "{{ netapp_api_validate_certs }}"
        name: SSDCacheBuiltByAnsible
"""

RETURN = """
msg:
    description: Success message
    returned: success
    type: string
    sample: json for newly created flash cache
"""
import json
import logging
import sys
# functools.reduce is needed for the capacity summation below on Python 3
# (reduce was a builtin on Python 2).
from functools import reduce

from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url

from ansible.module_utils.six.moves.urllib.error import HTTPError

def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError:
        err = get_exception()
        r = err.fp

    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            raw_data = None
    except:
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data


class NetAppESeriesFlashCache(object):
    def __init__(self):
        self.name = None
        self.log_mode = None
        self.log_path = None
        self.api_url = None
        self.api_username = None
        self.api_password = None
        self.ssid = None
        self.validate_certs = None
        self.disk_count = None
        self.size_unit = None
        self.cache_size_min = None
        self.io_type = None
        self.driveRefs = None
        self.state = None
        self._size_unit_map = dict(
            bytes=1,
            b=1,
            kb=1024,
            mb=1024 ** 2,
            gb=1024 ** 3,
            tb=1024 ** 4,
            pb=1024 ** 5,
            eb=1024 ** 6,
            zb=1024 ** 7,
            yb=1024 ** 8
        )

        argument_spec = basic_auth_argument_spec()
        argument_spec.update(dict(
            api_username=dict(type='str', required=True),
            api_password=dict(type='str', required=True, no_log=True),
            api_url=dict(type='str', required=True),
            state=dict(default='present', choices=['present', 'absent'], type='str'),
            ssid=dict(required=True, type='str'),
            name=dict(required=True, type='str'),
            disk_count=dict(type='int'),
            disk_refs=dict(type='list'),
            cache_size_min=dict(type='int'),
            io_type=dict(default='filesystem', choices=['filesystem', 'database', 'media']),
            size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'],
                           type='str'),
            criteria_disk_phy_type=dict(choices=['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata'],
                                        type='str'),
            log_mode=dict(type='str'),
            log_path=dict(type='str'),
        ))
        self.module = AnsibleModule(
            argument_spec=argument_spec,
            required_if=[

            ],
            mutually_exclusive=[

            ],
            # TODO: update validation for various selection criteria
            supports_check_mode=True
        )

        self.__dict__.update(self.module.params)

        # logging setup
        self._logger = logging.getLogger(self.__class__.__name__)
        self.debug = self._logger.debug

        if self.log_mode == 'file' and self.log_path:
            logging.basicConfig(level=logging.DEBUG, filename=self.log_path)
        elif self.log_mode == 'stderr':
            logging.basicConfig(level=logging.DEBUG, stream=sys.stderr)

        self.post_headers = dict(Accept="application/json")
        self.post_headers['Content-Type'] = 'application/json'

    def get_candidate_disks(self, disk_count, size_unit='gb', capacity=None):
        self.debug("getting candidate disks...")

        drives_req = dict(
            driveCount=disk_count,
            sizeUnit=size_unit,
            driveType='ssd',
        )

        if capacity:
            drives_req['targetUsableCapacity'] = capacity

        (rc, drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid),
                                    data=json.dumps(drives_req), headers=self.post_headers, method='POST',
                                    url_username=self.api_username, url_password=self.api_password,
                                    validate_certs=self.validate_certs)

        if rc == 204:
            self.module.fail_json(msg='Cannot find disks to match requested criteria for ssd cache')

        disk_ids = [d['id'] for d in drives_resp]
        bytes = reduce(lambda s, d: s + int(d['usableCapacity']), drives_resp, 0)

        return (disk_ids, bytes)

    def create_cache(self):
        (disk_ids, bytes) = self.get_candidate_disks(disk_count=self.disk_count, size_unit=self.size_unit,
                                                     capacity=self.cache_size_min)

        self.debug("creating ssd cache...")

        create_fc_req = dict(
            driveRefs=disk_ids,
            name=self.name
        )

        (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid),
                                  data=json.dumps(create_fc_req), headers=self.post_headers, method='POST',
                                  url_username=self.api_username, url_password=self.api_password,
                                  validate_certs=self.validate_certs)

    def update_cache(self):
        self.debug('updating flash cache config...')
        update_fc_req = dict(
            name=self.name,
            configType=self.io_type
        )

        (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/configure" % (self.ssid),
                                  data=json.dumps(update_fc_req), headers=self.post_headers, method='POST',
                                  url_username=self.api_username, url_password=self.api_password,
                                  validate_certs=self.validate_certs)

    def delete_cache(self):
        self.debug('deleting flash cache...')
        (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid), method='DELETE',
                                  url_username=self.api_username, url_password=self.api_password,
                                  validate_certs=self.validate_certs, ignore_errors=True)

    @property
    def needs_more_disks(self):
        if len(self.cache_detail['driveRefs']) < self.disk_count:
            self.debug("needs resize: current disk count %s < requested count %s" % (
                len(self.cache_detail['driveRefs']), self.disk_count))
            return True

    @property
    def needs_less_disks(self):
        if len(self.cache_detail['driveRefs']) > self.disk_count:
            self.debug("needs resize: current disk count %s > requested count %s" % (
                len(self.cache_detail['driveRefs']), self.disk_count))
            return True

    @property
    def current_size_bytes(self):
        return int(self.cache_detail['fcDriveInfo']['fcWithDrives']['usedCapacity'])

    @property
    def requested_size_bytes(self):
        if self.cache_size_min:
            return self.cache_size_min * self._size_unit_map[self.size_unit]
        else:
            return 0

    @property
    def needs_more_capacity(self):
        if self.current_size_bytes < self.requested_size_bytes:
            self.debug("needs resize: current capacity %sb is less than requested minimum %sb" % (
                self.current_size_bytes, self.requested_size_bytes))
            return True

    @property
    def needs_resize(self):
        return self.needs_more_disks or self.needs_more_capacity or self.needs_less_disks

    def resize_cache(self):
        # increase up to disk count first, then iteratively add disks until we meet requested capacity

        # TODO: perform this calculation in check mode
        current_disk_count = len(self.cache_detail['driveRefs'])
        proposed_new_disks = 0

        proposed_additional_bytes = 0
        proposed_disk_ids = []

        if self.needs_more_disks:
            proposed_disk_count = self.disk_count - current_disk_count

            (disk_ids, bytes) = self.get_candidate_disks(disk_count=proposed_disk_count)
            proposed_additional_bytes = bytes
            proposed_disk_ids = disk_ids

            while self.current_size_bytes + proposed_additional_bytes < self.requested_size_bytes:
                proposed_new_disks += 1
                (disk_ids, bytes) = self.get_candidate_disks(disk_count=proposed_new_disks)
                proposed_disk_ids = disk_ids
                proposed_additional_bytes = bytes

            add_drives_req = dict(
                driveRef=proposed_disk_ids
            )

            self.debug("adding drives to flash-cache...")
            (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/addDrives" % (self.ssid),
                                      data=json.dumps(add_drives_req), headers=self.post_headers, method='POST',
                                      url_username=self.api_username, url_password=self.api_password,
                                      validate_certs=self.validate_certs)

        elif self.needs_less_disks and self.driveRefs:
            rm_drives = dict(driveRef=self.driveRefs)
            (rc, self.resp) = request(self.api_url + "/storage-systems/%s/flash-cache/removeDrives" % (self.ssid),
                                      data=json.dumps(rm_drives), headers=self.post_headers, method='POST',
                                      url_username=self.api_username, url_password=self.api_password,
                                      validate_certs=self.validate_certs)

    def apply(self):
        result = dict(changed=False)
        (rc, cache_resp) = request(self.api_url + "/storage-systems/%s/flash-cache" % (self.ssid),
                                   url_username=self.api_username, url_password=self.api_password,
                                   validate_certs=self.validate_certs, ignore_errors=True)

        if rc == 200:
            self.cache_detail = cache_resp
        else:
            self.cache_detail = None

        if rc not in [200, 404]:
            raise Exception(
                "Unexpected error code %s fetching flash cache detail. Response data was %s" % (rc, cache_resp))

        if self.state == 'present':
            if self.cache_detail:
                # TODO: verify parameters against detail for changes
                if self.cache_detail['name'] != self.name:
                    self.debug("CHANGED: name differs")
                    result['changed'] = True
                if self.cache_detail['flashCacheBase']['configType'] != self.io_type:
                    self.debug("CHANGED: io_type differs")
                    result['changed'] = True
                if self.needs_resize:
                    self.debug("CHANGED: resize required")
                    result['changed'] = True
            else:
                self.debug("CHANGED: requested state is 'present' but cache does not exist")
                result['changed'] = True
        else:  # requested state is absent
            if self.cache_detail:
                self.debug("CHANGED: requested state is 'absent' but cache exists")
                result['changed'] = True

        if not result['changed']:
            self.debug("no changes, exiting...")
            self.module.exit_json(**result)

        if self.module.check_mode:
            self.debug("changes pending in check mode, exiting early...")
            self.module.exit_json(**result)

        if self.state == 'present':
            if not self.cache_detail:
                self.create_cache()
            else:
                if self.needs_resize:
                    self.resize_cache()

                # run update here as well, since io_type can't be set on creation
                self.update_cache()

        elif self.state == 'absent':
            self.delete_cache()

        # TODO: include other details about the storage pool (size, type, id, etc)
        # A DELETE may return no body, so guard against self.resp being None.
        self.module.exit_json(changed=result['changed'], **(self.resp or {}))


def main():
    sp = NetAppESeriesFlashCache()
    try:
        sp.apply()
    except Exception:
        e = get_exception()
        sp.debug("Exception in apply(): \n%s" % str(e))
        sp.module.fail_json(msg="Failed to create flash cache. Error[%s]" % str(e))


if __name__ == '__main__':
    main()
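A sketch of a resize-oriented task; every parameter comes straight from the argument spec above and all values are placeholders:

```yml
    - name: Grow the SSD cache to at least 6 disks and 500 GB
      netapp_e_flashcache:
        ssid: "{{ ssid }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        state: present
        name: SSDCacheBuiltByAnsible
        disk_count: 6
        cache_size_min: 500
        size_unit: gb
        io_type: database
```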
425  lib/ansible/modules/extras/storage/netapp/netapp_e_host.py  Normal file
@@ -0,0 +1,425 @@
#!/usr/bin/python

# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: netapp_e_host
short_description: Manage NetApp E-Series hosts
description:
    - Create, update, or remove hosts on NetApp E-Series storage arrays.
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
    api_username:
        required: true
        description:
        - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
    api_password:
        required: true
        description:
        - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
    api_url:
        required: true
        description:
        - The url to the SANtricity WebServices Proxy or embedded REST API.
        example:
        - https://prod-1.wahoo.acme.com/devmgr/v2
    validate_certs:
        required: false
        default: true
        description:
        - Should https certificates be validated?
    ssid:
        description:
            - The id of the storage array you wish to act against.
        required: True
    name:
        description:
            - If the host doesn't yet exist, the label to assign at creation time.
            - If the host already exists, this is the label used to identify the host when applying any desired changes.
        required: True
    host_type_index:
        description:
            - The index that maps to the host type you wish to create. It is recommended to use the M(netapp_e_facts) module to gather this information. Alternatively you can use the WSP portal to retrieve the information.
        required: True
    ports:
        description:
            - A list of dictionaries of host ports you wish to associate with the newly created host.
        required: False
    force_port:
        description:
            - Allow ports that are already assigned to a different host to be reassigned to this one.
        required: False
        default: False
    group:
        description:
            - The group you want the host to be a member of.
        required: False

"""

EXAMPLES = """
    - name: Set Host Info
      netapp_e_host:
        ssid: "{{ ssid }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        state: present
        name: "{{ host_name }}"
        host_type_index: "{{ host_type_index }}"
"""

RETURN = """
msg:
    description: Success message
    returned: success
    type: string
    sample: The host has been created.
"""
import json

from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError

HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json",
}


def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError:
        err = get_exception()
        r = err.fp

    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            raw_data = None
    except:
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data


class Host(object):
    def __init__(self):
        argument_spec = basic_auth_argument_spec()
        argument_spec.update(dict(
            api_username=dict(type='str', required=True),
            api_password=dict(type='str', required=True, no_log=True),
            api_url=dict(type='str', required=True),
            ssid=dict(type='str', required=True),
            state=dict(type='str', required=True, choices=['absent', 'present']),
            group=dict(type='str', required=False),
            ports=dict(type='list', required=False),
            force_port=dict(type='bool', default=False),
            name=dict(type='str', required=True),
            host_type_index=dict(type='int', required=True)
        ))

        self.module = AnsibleModule(argument_spec=argument_spec)
        args = self.module.params
        self.group = args['group']
        self.ports = args['ports']
        self.force_port = args['force_port']
        self.name = args['name']
        self.host_type_index = args['host_type_index']
        self.state = args['state']
        self.ssid = args['ssid']
        self.url = args['api_url']
        self.user = args['api_username']
        self.pwd = args['api_password']
        self.certs = args['validate_certs']
        self.ports = args['ports']
        self.post_body = dict()

        if not self.url.endswith('/'):
            self.url += '/'

    @property
    def valid_host_type(self):
        try:
            (rc, host_types) = request(self.url + 'storage-systems/%s/host-types' % self.ssid, url_password=self.pwd,
                                       url_username=self.user, validate_certs=self.certs, headers=HEADERS)
        except Exception:
            err = get_exception()
            self.module.fail_json(
                msg="Failed to get host types. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))

        try:
            match = filter(lambda host_type: host_type['index'] == self.host_type_index, host_types)[0]
            return True
        except IndexError:
            self.module.fail_json(msg="There is no host type with index %s" % self.host_type_index)

    @property
    def hostports_available(self):
        used_ids = list()
        try:
            (rc, self.available_ports) = request(self.url + 'storage-systems/%s/unassociated-host-ports' % self.ssid,
                                                 url_password=self.pwd, url_username=self.user,
                                                 validate_certs=self.certs,
                                                 headers=HEADERS)
        except:
            err = get_exception()
            self.module.fail_json(
                msg="Failed to get unassociated host ports. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))

        if len(self.available_ports) > 0 and len(self.ports) <= len(self.available_ports):
            for port in self.ports:
                for free_port in self.available_ports:
                    # Desired type matches, but also make sure we haven't already used the ID
                    if not free_port['id'] in used_ids:
                        # update the port arg to have an id attribute
                        used_ids.append(free_port['id'])
                        break

            if len(used_ids) != len(self.ports) and not self.force_port:
                self.module.fail_json(
                    msg="There are not enough free host ports with the specified port types to proceed")
            else:
                return True

        else:
            self.module.fail_json(msg="There are no host ports available OR there are not enough unassigned host ports")

    @property
    def group_id(self):
        if self.group:
            try:
                (rc, all_groups) = request(self.url + 'storage-systems/%s/host-groups' % self.ssid,
                                           url_password=self.pwd,
                                           url_username=self.user, validate_certs=self.certs, headers=HEADERS)
            except:
                err = get_exception()
                self.module.fail_json(
                    msg="Failed to get host groups. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))

            try:
                group_obj = filter(lambda group: group['name'] == self.group, all_groups)[0]
                return group_obj['id']
            except IndexError:
                self.module.fail_json(msg="No group with the name: %s exists" % self.group)
        else:
            # Return the value equivalent of no group
            return "0000000000000000000000000000000000000000"

    @property
    def host_exists(self):
        try:
            (rc, all_hosts) = request(self.url + 'storage-systems/%s/hosts' % self.ssid, url_password=self.pwd,
                                      url_username=self.user, validate_certs=self.certs, headers=HEADERS)
        except:
            err = get_exception()
            self.module.fail_json(
                msg="Failed to determine host existence. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))

        self.all_hosts = all_hosts
        try:  # Try to grab the host object
            self.host_obj = filter(lambda host: host['label'] == self.name, all_hosts)[0]
            return True
        except IndexError:
            # Host with the name passed in does not exist
            return False

    @property
    def needs_update(self):
        needs_update = False
        self.force_port_update = False

        if self.host_obj['clusterRef'] != self.group_id or \
                self.host_obj['hostTypeIndex'] != self.host_type_index:
            needs_update = True

        if self.ports:
            if not self.host_obj['ports']:
                needs_update = True
            for arg_port in self.ports:
                # First a quick check to see if the port is mapped to a different host
                if not self.port_on_diff_host(arg_port):
                    for obj_port in self.host_obj['ports']:
                        if arg_port['label'] == obj_port['label']:
                            # Confirmed that port arg passed in exists on the host
                            # port_id = self.get_port_id(obj_port['label'])
                            if arg_port['type'] != obj_port['portId']['ioInterfaceType']:
                                needs_update = True
                            if 'iscsiChapSecret' in arg_port:
                                # No way to know the current secret attr, so always return True just in case
                                needs_update = True
                else:
                    # If the user wants the ports to be reassigned, do it
                    if self.force_port:
                        self.force_port_update = True
                        needs_update = True
                    else:
                        self.module.fail_json(
                            msg="The port you specified:\n%s\n is associated with a different host. Specify force_port as True or try a different port spec" % arg_port)

        return needs_update

    def port_on_diff_host(self, arg_port):
        """ Checks to see if a passed in port arg is present on a different host """
        for host in self.all_hosts:
            # Only check 'other' hosts, skipping the host this task manages
            if host['name'] != self.name:
                for port in host['ports']:
                    # Check if the port label is found in the port dict list of each host
                    if arg_port['label'] == port['label']:
                        self.other_host = host
                        return True
        return False

    def reassign_ports(self, apply=True):
        if not self.post_body:
            self.post_body = dict(
                portsToUpdate=dict()
            )

        for port in self.ports:
            if self.port_on_diff_host(port):
                self.post_body['portsToUpdate'].update(dict(
                    portRef=self.other_host['hostPortRef'],
                    hostRef=self.host_obj['id'],
                    # Doesn't yet address port identifier or chap secret
                ))

        if apply:
            try:
                (rc, self.host_obj) = request(
                    self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, self.host_obj['id']),
                    url_username=self.user, url_password=self.pwd, headers=HEADERS,
                    validate_certs=self.certs, method='POST', data=json.dumps(self.post_body))
            except:
                err = get_exception()
                self.module.fail_json(
                    msg="Failed to reassign host port. Host Id [%s]. Array Id [%s]. Error [%s]." % (
                        self.host_obj['id'], self.ssid, str(err)))

    def update_host(self):
        if self.ports:
            if self.hostports_available:
                if self.force_port_update is True:
                    self.reassign_ports(apply=False)
                    # Make sure that only ports that aren't being reassigned are passed into the ports attr
                    self.ports = [port for port in self.ports if not self.port_on_diff_host(port)]

                self.post_body['ports'] = self.ports

        if self.group:
            self.post_body['groupId'] = self.group_id

        self.post_body['hostType'] = dict(index=self.host_type_index)

        try:
            (rc, self.host_obj) = request(self.url + 'storage-systems/%s/hosts/%s' % (self.ssid, self.host_obj['id']),
                                          url_username=self.user, url_password=self.pwd, headers=HEADERS,
                                          validate_certs=self.certs, method='POST', data=json.dumps(self.post_body))
        except:
            err = get_exception()
            self.module.fail_json(msg="Failed to update host. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))

        self.module.exit_json(changed=True, **self.host_obj)

    def create_host(self):
        post_body = dict(
            name=self.name,
            host_type=dict(index=self.host_type_index),
            groupId=self.group_id,
            ports=self.ports
        )
        if self.ports:
            # Check that all supplied port args are valid
            if self.hostports_available:
                post_body.update(ports=self.ports)
            elif not self.force_port:
                self.module.fail_json(
                    msg="You supplied ports that are already in use. Set force_port to True if you wish to reassign the ports")

        if not self.host_exists:
            try:
                (rc, create_resp) = request(self.url + "storage-systems/%s/hosts" % self.ssid, method='POST',
                                            url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
                                            data=json.dumps(post_body), headers=HEADERS)
            except:
                err = get_exception()
                self.module.fail_json(
                    msg="Failed to create host. Array Id [%s]. Error [%s]." % (self.ssid, str(err)))
        else:
            self.module.exit_json(changed=False,
                                  msg="Host already exists. Id [%s]. Host [%s]." % (self.ssid, self.name))

        self.host_obj = create_resp

        if self.ports and self.force_port:
            self.reassign_ports()

        self.module.exit_json(changed=True, **self.host_obj)

    def remove_host(self):
        try:
            (rc, resp) = request(self.url + "storage-systems/%s/hosts/%s" % (self.ssid, self.host_obj['id']),
                                 method='DELETE',
                                 url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
        except:
            err = get_exception()
            self.module.fail_json(
                msg="Failed to remove host. Host [%s]. Array Id [%s]. Error [%s]." % (self.host_obj['id'],
                                                                                      self.ssid,
                                                                                      str(err)))

    def apply(self):
        if self.state == 'present':
            if self.host_exists:
                if self.needs_update and self.valid_host_type:
                    self.update_host()
                else:
                    self.module.exit_json(changed=False, msg="Host already present.", id=self.ssid, label=self.name)
            elif self.valid_host_type:
                self.create_host()
        else:
            if self.host_exists:
                self.remove_host()
                self.module.exit_json(changed=True, msg="Host removed.")
            else:
                self.module.exit_json(changed=False, msg="Host already absent.", id=self.ssid, label=self.name)


def main():
    host = Host()
    host.apply()


if __name__ == '__main__':
    main()
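A sketch that also attaches a host port. Only the label, type, and iscsiChapSecret keys are referenced in the code above; the exact port-dict shape (including the type values the API accepts) is an assumption here, and all values are placeholders:

```yml
    - name: Set Host Info with a port
      netapp_e_host:
        ssid: "{{ ssid }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        state: present
        name: "{{ host_name }}"
        host_type_index: "{{ host_type_index }}"
        group: LinuxCluster
        force_port: no
        ports:
          - label: fc_1
            type: fc
```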
413  lib/ansible/modules/extras/storage/netapp/netapp_e_hostgroup.py  Normal file
@@ -0,0 +1,413 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
#
DOCUMENTATION = '''
---
module: netapp_e_hostgroup
version_added: "2.2"
short_description: Manage NetApp Storage Array Host Groups
author: Kevin Hulquest (@hulquest)
description:
- Create, update or destroy host groups on a NetApp E-Series storage array.
options:
  api_username:
    required: true
    description:
    - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
  api_password:
    required: true
    description:
    - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
  api_url:
    required: true
    description:
    - The url to the SANtricity WebServices Proxy or embedded REST API.
    example:
    - https://prod-1.wahoo.acme.com/devmgr/v2
  validate_certs:
    required: false
    default: true
    description:
    - Should https certificates be validated?
  ssid:
    required: true
    description:
    - The ID of the array to manage (as configured on the web services proxy).
  state:
    required: true
    description:
    - Whether the specified host group should exist or not.
    choices: ['present', 'absent']
  name:
    required: false
    description:
    - The name of the host group to manage. Either this or C(id) must be supplied.
  new_name:
    required: false
    description:
    - Specify this when you need to update the name of a host group.
  id:
    required: false
    description:
    - The id number of the host group to manage. Either this or C(name) must be supplied.
  hosts:
    required: false
    description:
    - A list of host names/labels to add to the group.
'''
EXAMPLES = '''
    - name: Configure Hostgroup
      netapp_e_hostgroup:
        ssid: "{{ ssid }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        validate_certs: "{{ netapp_api_validate_certs }}"
        state: present
        name: example_hostgroup
'''
RETURN = '''
clusterRef:
    description: The unique identification value for this object. Other objects may use this reference value to refer to the cluster.
    returned: always except when state is absent
    type: string
    sample: "3233343536373839303132333100000000000000"
confirmLUNMappingCreation:
    description: If true, indicates that creation of LUN-to-volume mappings should require careful confirmation from the end-user, since such a mapping will alter the volume access rights of other clusters, in addition to this one.
    returned: always
    type: boolean
    sample: false
hosts:
    description: A list of the hosts that are part of the host group after all operations.
    returned: always except when state is absent
    type: list
    sample: ["HostA","HostB"]
id:
    description: The id number of the hostgroup.
    returned: always except when state is absent
    type: string
    sample: "3233343536373839303132333100000000000000"
isSAControlled:
    description: If true, indicates that I/O accesses from this cluster are subject to the storage array's default LUN-to-volume mappings. If false, indicates that I/O accesses from the cluster are subject to cluster-specific LUN-to-volume mappings.
    returned: always except when state is absent
    type: boolean
    sample: false
label:
    description: The user-assigned, descriptive label string for the cluster.
    returned: always
    type: string
    sample: "MyHostGroup"
name:
    description: Same as label.
    returned: always except when state is absent
    type: string
    sample: "MyHostGroup"
protectionInformationCapableAccessMethod:
    description: This field is true if the host has a PI capable access method.
    returned: always except when state is absent
    type: boolean
    sample: true
'''

import json

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception

from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError

HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json"
}


def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError:
        err = get_exception()
        r = err.fp

    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            raw_data = None
    except:
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data


def group_exists(module, id_type, ident, ssid, api_url, user, pwd):
    rc, data = get_hostgroups(module, ssid, api_url, user, pwd)
    for group in data:
        if group[id_type] == ident:
            return True, data
        else:
            continue

    return False, data


def get_hostgroups(module, ssid, api_url, user, pwd):
    groups = "storage-systems/%s/host-groups" % ssid
    url = api_url + groups
    try:
        rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd)
        return rc, data
    except HTTPError:
        err = get_exception()
        module.fail_json(msg="Failed to get host groups. Id [%s]. Error [%s]." % (ssid, str(err)))


def get_hostref(module, ssid, name, api_url, user, pwd):
    all_hosts = 'storage-systems/%s/hosts' % ssid
    url = api_url + all_hosts
    try:
        rc, data = request(url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
    except Exception:
        err = get_exception()
        module.fail_json(msg="Failed to get hosts. Id [%s]. Error [%s]." % (ssid, str(err)))

    for host in data:
        if host['name'] == name:
            return host['hostRef']
        else:
            continue

    module.fail_json(msg="No host with the name %s could be found" % name)


def create_hostgroup(module, ssid, name, api_url, user, pwd, hosts=None):
    groups = "storage-systems/%s/host-groups" % ssid
    url = api_url + groups
    hostrefs = []

    if hosts:
        for host in hosts:
            href = get_hostref(module, ssid, host, api_url, user, pwd)
            hostrefs.append(href)

    post_data = json.dumps(dict(name=name, hosts=hostrefs))
    try:
        rc, data = request(url, method='POST', data=post_data, headers=HEADERS, url_username=user, url_password=pwd)
    except Exception:
        err = get_exception()
        module.fail_json(msg="Failed to create host group. Id [%s]. Error [%s]." % (ssid, str(err)))

    return rc, data


def update_hostgroup(module, ssid, name, api_url, user, pwd, hosts=None, new_name=None):
    gid = get_hostgroup_id(module, ssid, name, api_url, user, pwd)
    groups = "storage-systems/%s/host-groups/%s" % (ssid, gid)
    url = api_url + groups
    hostrefs = []

    if hosts:
        for host in hosts:
            href = get_hostref(module, ssid, host, api_url, user, pwd)
            hostrefs.append(href)

    if new_name:
        post_data = json.dumps(dict(name=new_name, hosts=hostrefs))
    else:
        post_data = json.dumps(dict(hosts=hostrefs))

    try:
        rc, data = request(url, method='POST', data=post_data, headers=HEADERS, url_username=user, url_password=pwd)
    except Exception:
        err = get_exception()
        module.fail_json(msg="Failed to update host group. Group [%s]. Id [%s]. Error [%s]." % (gid, ssid,
                                                                                                str(err)))

    return rc, data


def delete_hostgroup(module, ssid, group_id, api_url, user, pwd):
    groups = "storage-systems/%s/host-groups/%s" % (ssid, group_id)
    url = api_url + groups
    # TODO: Loop through hosts, do mapping to href, make new list to pass to data
    try:
        rc, data = request(url, method='DELETE', headers=HEADERS, url_username=user, url_password=pwd)
    except Exception:
        err = get_exception()
        module.fail_json(msg="Failed to delete host group. Group [%s]. Id [%s]. Error [%s]." % (group_id, ssid, str(err)))

    return rc, data


def get_hostgroup_id(module, ssid, name, api_url, user, pwd):
    all_groups = 'storage-systems/%s/host-groups' % ssid
    url = api_url + all_groups
    rc, data = request(url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
    for hg in data:
        if hg['name'] == name:
            return hg['id']
        else:
            continue

    module.fail_json(msg="A hostgroup with the name %s could not be found" % name)


def get_hosts_in_group(module, ssid, group_name, api_url, user, pwd):
    all_groups = 'storage-systems/%s/host-groups' % ssid
    g_url = api_url + all_groups
    try:
        g_rc, g_data = request(g_url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
    except Exception:
        err = get_exception()
        module.fail_json(
            msg="Failed in first step getting hosts from group. Group: [%s]. Id [%s]. Error [%s]." % (group_name,
                                                                                                      ssid,
                                                                                                      str(err)))

    all_hosts = 'storage-systems/%s/hosts' % ssid
    h_url = api_url + all_hosts
    try:
        h_rc, h_data = request(h_url, method='GET', headers=HEADERS, url_username=user, url_password=pwd)
    except Exception:
        err = get_exception()
        module.fail_json(
            msg="Failed in second step getting hosts from group. Group: [%s]. Id [%s]. Error [%s]." % (
                group_name,
                ssid,
                str(err)))

    hosts_in_group = []

    for hg in g_data:
        if hg['name'] == group_name:
            clusterRef = hg['clusterRef']

            for host in h_data:
                if host['clusterRef'] == clusterRef:
                    hosts_in_group.append(host['name'])

    return hosts_in_group


def main():
    module = AnsibleModule(
        argument_spec=dict(
            name=dict(required=False),
            new_name=dict(required=False),
            ssid=dict(required=True),
            id=dict(required=False),
            state=dict(required=True, choices=['present', 'absent']),
            hosts=dict(required=False, type='list'),
            api_url=dict(required=True),
            api_username=dict(required=True),
            validate_certs=dict(required=False, type='bool', default=True),
            api_password=dict(required=True, no_log=True)
        ),
        supports_check_mode=False,
        mutually_exclusive=[['name', 'id']],
        required_one_of=[['name', 'id']]
    )

    name = module.params['name']
    new_name = module.params['new_name']
    ssid = module.params['ssid']
    id_num = module.params['id']
    state = module.params['state']
    hosts = module.params['hosts']
    user = module.params['api_username']
    pwd = module.params['api_password']
    api_url = module.params['api_url']

    if not api_url.endswith('/'):
        api_url += '/'

    if name:
        id_type = 'name'
        id_key = name
    elif id_num:
        id_type = 'id'
        id_key = id_num

    exists, group_data = group_exists(module, id_type, id_key, ssid, api_url, user, pwd)

    if state == 'present':
        if not exists:
            try:
                rc, data = create_hostgroup(module, ssid, name, api_url, user, pwd, hosts)
            except Exception:
                err = get_exception()
                module.fail_json(msg="Failed to create a host group. Id [%s]. Error [%s]." % (ssid, str(err)))

            hosts = get_hosts_in_group(module, ssid, name, api_url, user, pwd)
            module.exit_json(changed=True, hosts=hosts, **data)
        else:
            current_hosts = get_hosts_in_group(module, ssid, name, api_url, user, pwd)

            if not current_hosts:
                current_hosts = []

            if not hosts:
                hosts = []

            if set(current_hosts) != set(hosts):
                try:
                    rc, data = update_hostgroup(module, ssid, name, api_url, user, pwd, hosts, new_name)
                except Exception:
                    err = get_exception()
                    module.fail_json(
                        msg="Failed to update host group. Group: [%s]. Id [%s]. Error [%s]." % (name, ssid, str(err)))
                module.exit_json(changed=True, hosts=hosts, **data)
            else:
                for group in group_data:
                    if group['name'] == name:
                        module.exit_json(changed=False, hosts=current_hosts, **group)

    elif state == 'absent':
        if exists:
            # Resolve the group id from the name only when an id wasn't supplied directly
            if id_num:
                hg_id = id_num
            else:
                hg_id = get_hostgroup_id(module, ssid, name, api_url, user, pwd)
            try:
                rc, data = delete_hostgroup(module, ssid, hg_id, api_url, user, pwd)
            except Exception:
                err = get_exception()
                module.fail_json(
                    msg="Failed to delete host group. Group: [%s]. Id [%s]. Error [%s]." % (name, ssid, str(err)))

            module.exit_json(changed=True, msg="Host Group deleted")
        else:
            module.exit_json(changed=False, msg="Host Group is already absent")


if __name__ == '__main__':
    main()
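A sketch that renames a group and sets its membership in one task; the parameters come from the argument spec above, the host labels match the RETURN sample, and all values are placeholders:

```yml
    - name: Rename a host group and set its members
      netapp_e_hostgroup:
        ssid: "{{ ssid }}"
        api_url: "{{ netapp_api_url }}"
        api_username: "{{ netapp_api_username }}"
        api_password: "{{ netapp_api_password }}"
        state: present
        name: example_hostgroup
        new_name: example_hostgroup_renamed
        hosts:
          - HostA
          - HostB
```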
@ -0,0 +1,365 @@
|
|||
#!/usr/bin/python
|
||||
|
||||
# (c) 2016, NetApp, Inc
|
||||
#
|
||||
# This file is part of Ansible
|
||||
#
|
||||
# Ansible is free software: you can redistribute it and/or modify
|
||||
# it under the terms of the GNU General Public License as published by
|
||||
# the Free Software Foundation, either version 3 of the License, or
|
||||
# (at your option) any later version.
|
||||
#
|
||||
# Ansible is distributed in the hope that it will be useful,
|
||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
# GNU General Public License for more details.
|
||||
#
|
||||
# You should have received a copy of the GNU General Public License
|
||||
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
||||
#
|
||||
DOCUMENTATION = '''
|
||||
---
|
||||
module: netapp_e_lun_mapping
|
||||
author: Kevin Hulquest (@hulquest)
|
||||
short_description:
|
||||
- Create or Remove LUN Mappings
|
||||
description:
|
||||
- Allows for the creation and removal of volume to host mappings for NetApp E-series storage arrays.
|
||||
version_added: "2.2"
|
||||
options:
|
||||
api_username:
|
||||
required: true
|
||||
description:
|
||||
- The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
|
||||
api_password:
|
||||
required: true
|
||||
description:
|
||||
- The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
|
||||
api_url:
|
||||
required: true
|
||||
description:
|
||||
- The url to the SANtricity WebServices Proxy or embedded REST API.
|
||||
example:
|
||||
- https://prod-1.wahoo.acme.com/devmgr/v2
|
||||
validate_certs:
|
||||
required: false
|
||||
default: true
|
||||
description:
|
||||
- Should https certificates be validated?
|
||||
ssid:
|
||||
description:
|
||||
- "The storage system array identifier."
|
||||
required: False
|
||||
lun:
|
||||
description:
|
||||
- The LUN number you wish to give the mapping
|
||||
- If the supplied I(volume_name) is associated with a different LUN, it will be updated to what is supplied here.
|
||||
required: False
|
||||
default: 0
|
||||
target:
|
||||
description:
|
||||
- The name of host or hostgroup you wish to assign to the mapping
|
||||
- If omitted, the default hostgroup is used.
|
||||
- If the supplied I(volume_name) is associated with a different target, it will be updated to what is supplied here.
|
||||
required: False
|
||||
volume_name:
|
||||
description:
|
||||
- The name of the volume you wish to include in the mapping.
|
||||
required: True
|
||||
target_type:
|
||||
description:
|
||||
- Whether the target is a host or group.
|
||||
- Required if supplying an explicit target.
|
||||
required: False
|
||||
choices: ["host", "group"]
|
||||
state:
|
||||
description:
|
||||
- Present will ensure the mapping exists, absent will remove the mapping.
|
||||
- All parameters I(lun), I(target), I(target_type) and I(volume_name) must still be supplied.
|
||||
required: True
|
||||
choices: ["present", "absent"]
|
||||
api_url:
|
||||
description:
|
||||
- "The full API url. Example: http://ENDPOINT:8080/devmgr/v2"
|
||||
- This can optionally be set via an environment variable, API_URL
|
||||
required: False
|
||||
api_username:
|
||||
description:
|
||||
- The username used to authenticate against the API. This can optionally be set via an environment variable, API_USERNAME
|
||||
required: False
|
||||
api_password:
|
||||
description:
|
||||
- The password used to authenticate against the API. This can optionally be set via an environment variable, API_PASSWORD
|
||||
required: False
|
||||
'''
|
||||
|
||||
EXAMPLES = '''
|
||||
---
|
||||
- name: Lun Mapping Example
|
||||
netapp_e_lun_mapping:
|
||||
state: present
|
||||
ssid: 1
|
||||
lun: 12
|
||||
target: Wilson
|
||||
volume_name: Colby1
|
||||
target_type: group
|
||||
api_url: "{{ netapp_api_url }}"
|
||||
api_username: "{{ netapp_api_username }}"
|
||||
api_password: "{{ netapp_api_password }}"
|
||||
'''
|
||||
RETURN = '''
|
||||
msg: Mapping exists.
|
||||
msg: Mapping removed.
|
||||
'''
|
||||
import json
|
||||
|
||||
from ansible.module_utils.api import basic_auth_argument_spec
|
||||
from ansible.module_utils.pycompat24 import get_exception
|
||||
from ansible.module_utils.urls import open_url
|
||||
|
||||
from ansible.module_utils.basic import AnsibleModule
|
||||
from ansible.module_utils.six.moves.urllib.error import HTTPError
|
||||
|
||||
HEADERS = {
|
||||
"Content-Type": "application/json",
|
||||
"Accept": "application/json"
|
||||
}


def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError:
        err = get_exception()
        r = err.fp

    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            raw_data = None
    except:
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data
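
# Illustrative use of the helper above (the endpoint and credentials here are
# assumptions, not part of the module): fetch proxy metadata as decoded JSON.
#   rc, about = request('http://localhost:8080/devmgr/v2/utils/about',
#                       headers=HEADERS, url_username='admin', url_password='admin')
#   # rc -> HTTP status code (int), about -> decoded JSON body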


def get_host_and_group_map(module, ssid, api_url, user, pwd):
    mapping = dict(host=dict(), group=dict())

    hostgroups = 'storage-systems/%s/host-groups' % ssid
    groups_url = api_url + hostgroups
    try:
        hg_rc, hg_data = request(groups_url, headers=HEADERS, url_username=user, url_password=pwd)
    except:
        err = get_exception()
        module.fail_json(msg="Failed to get host groups. Id [%s]. Error [%s]" % (ssid, str(err)))

    for group in hg_data:
        mapping['group'][group['name']] = group['id']

    hosts = 'storage-systems/%s/hosts' % ssid
    hosts_url = api_url + hosts
    try:
        h_rc, h_data = request(hosts_url, headers=HEADERS, url_username=user, url_password=pwd)
    except:
        err = get_exception()
        module.fail_json(msg="Failed to get hosts. Id [%s]. Error [%s]" % (ssid, str(err)))

    for host in h_data:
        mapping['host'][host['name']] = host['id']

    return mapping


def get_volume_id(module, data, ssid, name, api_url, user, pwd):
    # Collect every volume whose name matches so duplicates can be rejected
    # and the matched volume (not the last one iterated) is returned.
    matches = [volume for volume in data if volume['name'] == name]

    if len(matches) > 1:
        module.fail_json(msg="More than one volume with the name: %s was found, "
                             "please use the volume WWN instead" % name)
    elif not matches:
        module.fail_json(msg="No volume with the name: %s, was found" % name)

    return matches[0]['wwn']


def get_hostgroups(module, ssid, api_url, user, pwd):
    groups = "storage-systems/%s/host-groups" % ssid
    url = api_url + groups
    try:
        rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd)
        return data
    except Exception:
        module.fail_json(msg="There was an issue with connecting, please check that your "
                             "endpoint is properly defined and your credentials are correct")


def get_volumes(module, ssid, api_url, user, pwd, mappable):
    volumes = 'storage-systems/%s/%s' % (ssid, mappable)
    url = api_url + volumes
    try:
        rc, data = request(url, url_username=user, url_password=pwd)
    except Exception:
        err = get_exception()
        module.fail_json(
            msg="Failed to fetch mappable objects. Type [%s]. Id [%s]. Error [%s]." % (mappable, ssid, str(err)))
    return data


def get_lun_mappings(ssid, api_url, user, pwd, get_all=None):
    mappings = 'storage-systems/%s/volume-mappings' % ssid
    url = api_url + mappings
    rc, data = request(url, url_username=user, url_password=pwd)

    if not get_all:
        remove_keys = ('ssid', 'perms', 'lunMappingRef', 'type', 'id')

        for key in remove_keys:
            for mapping in data:
                del mapping[key]

    return data
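
# After trimming, each mapping should reduce to exactly the keys that main()
# later compares against: volumeRef, mapRef and lun. For example
# (reference values here are illustrative, not real identifiers):
#   {'volumeRef': '0200...', 'mapRef': '8400...', 'lun': 0}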


def create_mapping(module, ssid, lun_map, vol_name, api_url, user, pwd):
    mappings = 'storage-systems/%s/volume-mappings' % ssid
    url = api_url + mappings
    post_body = json.dumps(dict(
        mappableObjectId=lun_map['volumeRef'],
        targetId=lun_map['mapRef'],
        lun=lun_map['lun']
    ))

    rc, data = request(url, data=post_body, method='POST', url_username=user, url_password=pwd, headers=HEADERS,
                       ignore_errors=True)

    # A 422 means the volume is already part of a different LUN mapping, so
    # move the existing mapping to the requested target instead of failing.
    if rc == 422:
        data = move_lun(module, ssid, lun_map, vol_name, api_url, user, pwd)

    return data


def move_lun(module, ssid, lun_map, vol_name, api_url, user, pwd):
    lun_id = get_lun_id(module, ssid, lun_map, api_url, user, pwd)
    move_lun_path = "storage-systems/%s/volume-mappings/%s/move" % (ssid, lun_id)
    url = api_url + move_lun_path
    post_body = json.dumps(dict(targetId=lun_map['mapRef'], lun=lun_map['lun']))
    rc, data = request(url, data=post_body, method='POST', url_username=user, url_password=pwd, headers=HEADERS)
    return data


def get_lun_id(module, ssid, lun_mapping, api_url, user, pwd):
    data = get_lun_mappings(ssid, api_url, user, pwd, get_all=True)

    for lun_map in data:
        if lun_map['volumeRef'] == lun_mapping['volumeRef']:
            return lun_map['id']
    # This shouldn't ever get called
    module.fail_json(msg="No LUN map found.")


def remove_mapping(module, ssid, lun_mapping, api_url, user, pwd):
    lun_id = get_lun_id(module, ssid, lun_mapping, api_url, user, pwd)
    lun_del = "storage-systems/%s/volume-mappings/%s" % (ssid, lun_id)
    url = api_url + lun_del
    rc, data = request(url, method='DELETE', url_username=user, url_password=pwd, headers=HEADERS)
    return data


def main():
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(dict(
        api_username=dict(type='str', required=True),
        api_password=dict(type='str', required=True, no_log=True),
        api_url=dict(type='str', required=True),
        state=dict(required=True, choices=['present', 'absent']),
        target=dict(required=False, default=None),
        target_type=dict(required=False, choices=['host', 'group']),
        lun=dict(required=False, type='int', default=0),
        ssid=dict(required=False),
        volume_name=dict(required=True),
    ))

    module = AnsibleModule(argument_spec=argument_spec)

    state = module.params['state']
    target = module.params['target']
    target_type = module.params['target_type']
    lun = module.params['lun']
    ssid = module.params['ssid']
    vol_name = module.params['volume_name']
    user = module.params['api_username']
    pwd = module.params['api_password']
    api_url = module.params['api_url']

    if not api_url.endswith('/'):
        api_url += '/'

    volume_map = get_volumes(module, ssid, api_url, user, pwd, "volumes")
    thin_volume_map = get_volumes(module, ssid, api_url, user, pwd, "thin-volumes")
    volref = None

    for vol in volume_map:
        if vol['label'] == vol_name:
            volref = vol['volumeRef']

    if not volref:
        for vol in thin_volume_map:
            if vol['label'] == vol_name:
                volref = vol['volumeRef']

    if not volref:
        module.fail_json(changed=False, msg="No volume with the name %s was found" % vol_name)

    host_and_group_mapping = get_host_and_group_map(module, ssid, api_url, user, pwd)

    desired_lun_mapping = dict(
        mapRef=host_and_group_mapping[target_type][target],
        lun=lun,
        volumeRef=volref
    )

    lun_mappings = get_lun_mappings(ssid, api_url, user, pwd)

    if state == 'present':
        if desired_lun_mapping in lun_mappings:
            module.exit_json(changed=False, msg="Mapping exists")
        else:
            result = create_mapping(module, ssid, desired_lun_mapping, vol_name, api_url, user, pwd)
            module.exit_json(changed=True, **result)

    elif state == 'absent':
        if desired_lun_mapping in lun_mappings:
            result = remove_mapping(module, ssid, desired_lun_mapping, api_url, user, pwd)
            module.exit_json(changed=True, msg="Mapping removed")
        else:
            module.exit_json(changed=False, msg="Mapping absent")


if __name__ == '__main__':
    main()
@@ -0,0 +1,382 @@
#!/usr/bin/python

# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: netapp_e_snapshot_group
short_description: Manage snapshot groups
description:
    - Create, update, delete snapshot groups for NetApp E-series storage arrays.
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
    api_username:
        required: true
        description:
            - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
    api_password:
        required: true
        description:
            - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
    api_url:
        required: true
        description:
            - The url to the SANtricity WebServices Proxy or embedded REST API.
        example:
            - https://prod-1.wahoo.acme.com/devmgr/v2
    validate_certs:
        required: false
        default: true
        description:
            - Should https certificates be validated?
    state:
        description:
            - Whether to ensure the group is present or absent.
        required: True
        choices:
            - present
            - absent
    name:
        description:
            - The name to give the snapshot group.
        required: True
    base_volume_name:
        description:
            - The name of the base volume or thin volume to use as the base for the new snapshot group.
            - If a snapshot group with an identical C(name) already exists but with a different base volume
              an error will be returned.
        required: True
    repo_pct:
        description:
            - The size of the repository in relation to the size of the base volume.
        required: False
        default: 20
    warning_threshold:
        description:
            - The repository utilization warning threshold, as a percentage of the repository volume capacity.
        required: False
        default: 80
    delete_limit:
        description:
            - The automatic deletion indicator.
            - If non-zero, the oldest snapshot image will be automatically deleted when creating a new snapshot image to keep the total number of snapshot images limited to the number specified.
            - This value is overridden by the consistency group setting if this snapshot group is associated with a consistency group.
        required: False
        default: 30
    full_policy:
        description:
            - The behavior when the data repository becomes full.
            - This value is overridden by the consistency group setting if this snapshot group is associated with a consistency group.
        required: False
        default: purgepit
        choices:
            - purgepit
            - unknown
            - failbasewrites
            - __UNDEFINED
    storage_pool_name:
        required: True
        description:
            - The name of the storage pool on which to allocate the repository volume.
    rollback_priority:
        required: False
        description:
            - The importance of the rollback operation.
            - This value is overridden by the consistency group setting if this snapshot group is associated with a consistency group.
        choices:
            - highest
            - high
            - medium
            - low
            - lowest
            - __UNDEFINED
        default: medium
"""

EXAMPLES = """
- name: Configure Snapshot group
  netapp_e_snapshot_group:
    ssid: "{{ ssid }}"
    api_url: "{{ netapp_api_url }}"
    api_username: "{{ netapp_api_username }}"
    api_password: "{{ netapp_api_password }}"
    validate_certs: "{{ netapp_api_validate_certs }}"
    state: present
    base_volume_name: SSGroup_test
    name: OOSS_Group
    repo_pct: 20
    warning_threshold: 85
    delete_limit: 30
    full_policy: purgepit
    storage_pool_name: Disk_Pool_1
    rollback_priority: medium
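
# A hypothetical companion play that removes the same group; the values are
# assumptions carried over from the example above:
- name: Remove Snapshot group
  netapp_e_snapshot_group:
    ssid: "{{ ssid }}"
    api_url: "{{ netapp_api_url }}"
    api_username: "{{ netapp_api_username }}"
    api_password: "{{ netapp_api_password }}"
    validate_certs: "{{ netapp_api_validate_certs }}"
    state: absent
    name: OOSS_Group
    base_volume_name: SSGroup_test
    storage_pool_name: Disk_Pool_1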
"""
RETURN = """
msg:
    description: Success message
    returned: success
    type: string
    sample: json facts for newly created snapshot group.
"""
HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json",
}
import json

from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule

from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError


def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError:
        err = get_exception()
        r = err.fp

    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            raw_data = None
    except:
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data


class SnapshotGroup(object):
    def __init__(self):

        argument_spec = basic_auth_argument_spec()
        argument_spec.update(
            api_username=dict(type='str', required=True),
            api_password=dict(type='str', required=True, no_log=True),
            api_url=dict(type='str', required=True),
            state=dict(required=True, choices=['present', 'absent']),
            base_volume_name=dict(required=True),
            name=dict(required=True),
            repo_pct=dict(default=20, type='int'),
            warning_threshold=dict(default=80, type='int'),
            delete_limit=dict(default=30, type='int'),
            full_policy=dict(default='purgepit', choices=['unknown', 'failbasewrites', 'purgepit']),
            rollback_priority=dict(default='medium', choices=['highest', 'high', 'medium', 'low', 'lowest']),
            storage_pool_name=dict(type='str'),
            ssid=dict(required=True),
        )

        self.module = AnsibleModule(argument_spec=argument_spec)

        self.post_data = dict()
        self.warning_threshold = self.module.params['warning_threshold']
        self.base_volume_name = self.module.params['base_volume_name']
        self.name = self.module.params['name']
        self.repo_pct = self.module.params['repo_pct']
        self.delete_limit = self.module.params['delete_limit']
        self.full_policy = self.module.params['full_policy']
        self.rollback_priority = self.module.params['rollback_priority']
        self.storage_pool_name = self.module.params['storage_pool_name']
        self.state = self.module.params['state']

        self.url = self.module.params['api_url']
        self.user = self.module.params['api_username']
        self.pwd = self.module.params['api_password']
        self.certs = self.module.params['validate_certs']
        self.ssid = self.module.params['ssid']

        if not self.url.endswith('/'):
            self.url += '/'

        self.changed = False

    @property
    def pool_id(self):
        pools = 'storage-systems/%s/storage-pools' % self.ssid
        url = self.url + pools
        try:
            (rc, data) = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd)
        except:
            err = get_exception()
            self.module.fail_json(msg="Snapshot group module - Failed to fetch storage pools. " +
                                      "Id [%s]. Error [%s]." % (self.ssid, str(err)))

        for pool in data:
            if pool['name'] == self.storage_pool_name:
                self.pool_data = pool
                return pool['id']

        self.module.fail_json(msg="No storage pool with the name: '%s' was found" % self.storage_pool_name)

    @property
    def volume_id(self):
        volumes = 'storage-systems/%s/volumes' % self.ssid
        url = self.url + volumes
        try:
            rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
                               validate_certs=self.certs)
        except:
            err = get_exception()
            self.module.fail_json(msg="Snapshot group module - Failed to fetch volumes. " +
                                      "Id [%s]. Error [%s]." % (self.ssid, str(err)))
        # Collect every matching volume so duplicates can be rejected and the
        # matched volume (not the last one iterated) is returned.
        matches = [volume for volume in data if volume['name'] == self.base_volume_name]

        if len(matches) > 1:
            self.module.fail_json(msg="More than one volume with the name: %s was found, "
                                      "please ensure your volume has a unique name" % self.base_volume_name)
        elif not matches:
            self.module.fail_json(msg="No volume with the name: %s, was found" % self.base_volume_name)

        self.volume = matches[0]
        return self.volume['id']

    @property
    def snapshot_group_id(self):
        url = self.url + 'storage-systems/%s/snapshot-groups' % self.ssid
        try:
            rc, data = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
                               validate_certs=self.certs)
        except:
            err = get_exception()
            self.module.fail_json(msg="Failed to fetch snapshot groups. " +
                                      "Id [%s]. Error [%s]." % (self.ssid, str(err)))
        for ssg in data:
            if ssg['name'] == self.name:
                self.ssg_data = ssg
                return ssg['id']

        return None

    @property
    def ssg_needs_update(self):
        if self.ssg_data['fullWarnThreshold'] != self.warning_threshold or \
                self.ssg_data['autoDeleteLimit'] != self.delete_limit or \
                self.ssg_data['repFullPolicy'] != self.full_policy or \
                self.ssg_data['rollbackPriority'] != self.rollback_priority:
            return True
        else:
            return False

    def create_snapshot_group(self):
        self.post_data = dict(
            baseMappableObjectId=self.volume_id,
            name=self.name,
            repositoryPercentage=self.repo_pct,
            warningThreshold=self.warning_threshold,
            autoDeleteLimit=self.delete_limit,
            fullPolicy=self.full_policy,
            storagePoolId=self.pool_id,
        )
        snapshot = 'storage-systems/%s/snapshot-groups' % self.ssid
        url = self.url + snapshot
        try:
            rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS,
                                        url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
        except:
            err = get_exception()
            self.module.fail_json(msg="Failed to create snapshot group. " +
                                      "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
                                                                                     self.ssid,
                                                                                     str(err)))

        # snapshot_group_id is a read-only property that re-queries the array,
        # so the newly created group is picked up on its next access.
        if self.ssg_needs_update:
            self.update_ssg()
        else:
            self.module.exit_json(changed=True, **self.ssg_data)

    def update_ssg(self):
        self.post_data = dict(
            warningThreshold=self.warning_threshold,
            autoDeleteLimit=self.delete_limit,
            fullPolicy=self.full_policy,
            rollbackPriority=self.rollback_priority
        )

        url = self.url + "storage-systems/%s/snapshot-groups/%s" % (self.ssid, self.snapshot_group_id)
        try:
            rc, self.ssg_data = request(url, data=json.dumps(self.post_data), method='POST', headers=HEADERS,
                                        url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
        except:
            err = get_exception()
            self.module.fail_json(msg="Failed to update snapshot group. " +
                                      "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
                                                                                     self.ssid,
                                                                                     str(err)))

    def apply(self):
        if self.state == 'absent':
            if self.snapshot_group_id:
                try:
                    rc, resp = request(
                        self.url + 'storage-systems/%s/snapshot-groups/%s' % (self.ssid, self.snapshot_group_id),
                        method='DELETE', headers=HEADERS, url_password=self.pwd, url_username=self.user,
                        validate_certs=self.certs)
                except:
                    err = get_exception()
                    self.module.fail_json(msg="Failed to delete snapshot group. " +
                                              "Snapshot group [%s]. Id [%s]. Error [%s]." % (self.name,
                                                                                             self.ssid,
                                                                                             str(err)))
                self.module.exit_json(changed=True, msg="Snapshot group removed", **self.ssg_data)
            else:
                self.module.exit_json(changed=False, msg="Snapshot group absent")

        elif self.snapshot_group_id:
            if self.ssg_needs_update:
                self.update_ssg()
                self.module.exit_json(changed=True, **self.ssg_data)
            else:
                self.module.exit_json(changed=False, **self.ssg_data)
        else:
            self.create_snapshot_group()


def main():
    vg = SnapshotGroup()
    vg.apply()


if __name__ == '__main__':
    main()
@@ -0,0 +1,250 @@
#!/usr/bin/python

# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: netapp_e_snapshot_images
short_description: Create and delete snapshot images
description:
    - Create and delete snapshot images on snapshot groups for NetApp E-series storage arrays.
    - Only the oldest snapshot image can be deleted so consistency is preserved.
    - "Related: Snapshot volumes are created from snapshot images."
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
    api_username:
        required: true
        description:
            - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
    api_password:
        required: true
        description:
            - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
    api_url:
        required: true
        description:
            - The url to the SANtricity WebServices Proxy or embedded REST API.
        example:
            - https://prod-1.wahoo.acme.com/devmgr/v2
    validate_certs:
        required: false
        default: true
        description:
            - Should https certificates be validated?
    ssid:
        required: true
        description:
            - The storage system array identifier.
    snapshot_group:
        description:
            - The name of the snapshot group in which you want to create a snapshot image.
        required: True
    state:
        description:
            - Whether a new snapshot image should be created or the oldest one deleted.
        required: True
        choices: ['create', 'remove']
"""
EXAMPLES = """
- name: Create Snapshot
  netapp_e_snapshot_images:
    ssid: "{{ ssid }}"
    api_url: "{{ netapp_api_url }}"
    api_username: "{{ netapp_api_username }}"
    api_password: "{{ netapp_api_password }}"
    validate_certs: "{{ validate_certs }}"
    snapshot_group: "3300000060080E5000299C24000005B656D9F394"
    state: 'create'
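
# A hypothetical companion play that deletes the oldest image in the same
# group; the values are assumptions carried over from the example above:
- name: Remove oldest Snapshot
  netapp_e_snapshot_images:
    ssid: "{{ ssid }}"
    api_url: "{{ netapp_api_url }}"
    api_username: "{{ netapp_api_username }}"
    api_password: "{{ netapp_api_password }}"
    validate_certs: "{{ validate_certs }}"
    snapshot_group: "3300000060080E5000299C24000005B656D9F394"
    state: 'remove'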
"""
RETURN = """
---
changed: true
msg: "Created snapshot image"
image_id: "3400000060080E5000299B640063074057BC5C5E"
"""

HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json",
}
import json

from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule

from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError


def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError:
        err = get_exception()
        r = err.fp

    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            raw_data = None
    except:
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data


def snapshot_group_from_name(module, ssid, api_url, api_pwd, api_usr, name):
    snap_groups = 'storage-systems/%s/snapshot-groups' % ssid
    snap_groups_url = api_url + snap_groups
    (ret, snapshot_groups) = request(snap_groups_url, url_username=api_usr, url_password=api_pwd, headers=HEADERS,
                                     validate_certs=module.params['validate_certs'])

    snapshot_group_id = None
    for snapshot_group in snapshot_groups:
        if name == snapshot_group['label']:
            snapshot_group_id = snapshot_group['pitGroupRef']
            break
    if snapshot_group_id is None:
        module.fail_json(msg="Failed to lookup snapshot group. Group [%s]. Id [%s]." % (name, ssid))

    return snapshot_group


def oldest_image(module, ssid, api_url, api_pwd, api_usr, name):
    get_status = 'storage-systems/%s/snapshot-images' % ssid
    url = api_url + get_status

    try:
        (ret, images) = request(url, url_username=api_usr, url_password=api_pwd, headers=HEADERS,
                                validate_certs=module.params['validate_certs'])
    except:
        err = get_exception()
        module.fail_json(msg="Failed to get snapshot images for group. Group [%s]. Id [%s]. Error [%s]" %
                             (name, ssid, str(err)))
    if not images:
        module.exit_json(msg="There are no snapshot images to remove. Group [%s]. Id [%s]." % (name, ssid))

    # The oldest image is the one with the lowest point-in-time sequence number.
    oldest = min(images, key=lambda x: x['pitSequenceNumber'])
    if oldest is None or "pitRef" not in oldest:
        module.fail_json(msg="Failed to lookup oldest snapshot group. Group [%s]. Id [%s]." % (name, ssid))

    return oldest


def create_image(module, ssid, api_url, pwd, user, p, snapshot_group):
    snapshot_group_obj = snapshot_group_from_name(module, ssid, api_url, pwd, user, snapshot_group)
    snapshot_group_id = snapshot_group_obj['pitGroupRef']
    endpoint = 'storage-systems/%s/snapshot-images' % ssid
    url = api_url + endpoint
    post_data = json.dumps({'groupId': snapshot_group_id})

    image_data = request(url, data=post_data, method='POST', url_username=user, url_password=pwd, headers=HEADERS,
                         validate_certs=module.params['validate_certs'])

    if image_data[1]['status'] == 'optimal':
        status = True
        id = image_data[1]['id']
    else:
        status = False
        id = ''

    return status, id


def delete_image(module, ssid, api_url, pwd, user, snapshot_group):
    image = oldest_image(module, ssid, api_url, pwd, user, snapshot_group)
    image_id = image['pitRef']
    endpoint = 'storage-systems/%s/snapshot-images/%s' % (ssid, image_id)
    url = api_url + endpoint

    try:
        (ret, image_data) = request(url, method='DELETE', url_username=user, url_password=pwd, headers=HEADERS,
                                    validate_certs=module.params['validate_certs'])
    except Exception:
        e = get_exception()
        # request() raises Exception(resp_code, data) on HTTP errors, so
        # recover the status code and body from the exception arguments.
        ret, image_data = e.args[0], e.args

    if ret == 204:
        deleted_status = True
        error_message = ''
    else:
        deleted_status = False
        error_message = image_data[1]['errorMessage']

    return deleted_status, error_message


def main():
    argument_spec = basic_auth_argument_spec()
    argument_spec.update(dict(
        snapshot_group=dict(required=True, type='str'),
        ssid=dict(required=True, type='str'),
        api_url=dict(required=True),
        api_username=dict(required=False),
        api_password=dict(required=False, no_log=True),
        validate_certs=dict(required=False, type='bool', default=True),
        state=dict(required=True, choices=['create', 'remove'], type='str'),
    ))
    module = AnsibleModule(argument_spec)

    p = module.params

    ssid = p.pop('ssid')
    api_url = p.pop('api_url')
    user = p.pop('api_username')
    pwd = p.pop('api_password')
    snapshot_group = p.pop('snapshot_group')
    desired_state = p.pop('state')

    if not api_url.endswith('/'):
        api_url += '/'

    if desired_state == 'create':
        created_status, snapshot_id = create_image(module, ssid, api_url, pwd, user, p, snapshot_group)

        if created_status:
            module.exit_json(changed=True, msg='Created snapshot image', image_id=snapshot_id)
        else:
            module.fail_json(
                msg="Could not create snapshot image on system %s, in snapshot group %s" % (ssid, snapshot_group))
    else:
        deleted, error_msg = delete_image(module, ssid, api_url, pwd, user, snapshot_group)

        if deleted:
            module.exit_json(changed=True, msg='Deleted snapshot image for snapshot group [%s]' % (snapshot_group))
        else:
            module.fail_json(
                msg="Could not delete snapshot image on system %s, in snapshot group %s --- %s" % (
                    ssid, snapshot_group, error_msg))


if __name__ == '__main__':
    main()
@@ -0,0 +1,287 @@
#!/usr/bin/python

# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: netapp_e_snapshot_volume
short_description: Manage E/EF-Series snapshot volumes.
description:
    - Create, update, remove snapshot volumes for NetApp E/EF-Series storage arrays.
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
notes:
    - Only I(full_threshold) is supported for update operations. If the snapshot volume already exists and the threshold matches, then an C(ok) status
      will be returned; no other changes can be made to a pre-existing snapshot volume.
options:
    api_username:
        required: true
        description:
            - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
    api_password:
        required: true
        description:
            - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
    api_url:
        required: true
        description:
            - The url to the SANtricity WebServices Proxy or embedded REST API.
        example:
            - https://prod-1.wahoo.acme.com/devmgr/v2
    validate_certs:
        required: false
        default: true
        description:
            - Should https certificates be validated?
    ssid:
        description:
            - storage array ID
        required: True
    snapshot_image_id:
        required: True
        description:
            - The identifier of the snapshot image used to create the new snapshot volume.
            - "Note: You'll likely want to use the M(netapp_e_facts) module to find the ID of the image you want."
    full_threshold:
        description:
            - The repository utilization warning threshold percentage.
        default: 85
    name:
        required: True
        description:
            - The name you wish to give the snapshot volume.
    view_mode:
        required: False
        default: readOnly
        description:
            - The snapshot volume access mode.
        choices:
            - modeUnknown
            - readWrite
            - readOnly
            - __UNDEFINED
    repo_percentage:
        description:
            - The size of the view in relation to the size of the base volume.
        default: 20
    storage_pool_name:
        description:
            - Name of the storage pool on which to allocate the repository volume.
        required: True
    state:
        description:
            - Whether to create or remove the snapshot volume.
        required: True
        choices:
            - absent
            - present
"""
EXAMPLES = """
- name: Snapshot volume
  netapp_e_snapshot_volume:
    ssid: "{{ ssid }}"
    api_url: "{{ netapp_api_url }}"
    api_username: "{{ netapp_api_username }}"
    api_password: "{{ netapp_api_password }}"
    state: present
    storage_pool_name: "{{ snapshot_volume_storage_pool_name }}"
    snapshot_image_id: "{{ snapshot_volume_image_id }}"
    name: "{{ snapshot_volume_name }}"
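
# A hypothetical companion play that removes the same snapshot volume; the
# variables are assumptions carried over from the example above:
- name: Remove Snapshot volume
  netapp_e_snapshot_volume:
    ssid: "{{ ssid }}"
    api_url: "{{ netapp_api_url }}"
    api_username: "{{ netapp_api_username }}"
    api_password: "{{ netapp_api_password }}"
    state: absent
    storage_pool_name: "{{ snapshot_volume_storage_pool_name }}"
    snapshot_image_id: "{{ snapshot_volume_image_id }}"
    name: "{{ snapshot_volume_name }}"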
"""
RETURN = """
msg:
    description: Success message
    returned: success
    type: string
    sample: Json facts for the volume that was created.
"""
HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json",
}
import json

from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule

from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError


def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError:
        err = get_exception()
        r = err.fp

    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            raw_data = None
    except:
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data


class SnapshotVolume(object):
    def __init__(self):
        argument_spec = basic_auth_argument_spec()
        argument_spec.update(dict(
            api_username=dict(type='str', required=True),
            api_password=dict(type='str', required=True, no_log=True),
            api_url=dict(type='str', required=True),
            ssid=dict(type='str', required=True),
            snapshot_image_id=dict(type='str', required=True),
            full_threshold=dict(type='int', default=85),
            name=dict(type='str', required=True),
            view_mode=dict(type='str', default='readOnly',
                           choices=['readOnly', 'readWrite', 'modeUnknown', '__Undefined']),
            repo_percentage=dict(type='int', default=20),
            storage_pool_name=dict(type='str', required=True),
            state=dict(type='str', required=True, choices=['absent', 'present'])
        ))

        self.module = AnsibleModule(argument_spec=argument_spec)
        args = self.module.params
        self.state = args['state']
        self.ssid = args['ssid']
        self.snapshot_image_id = args['snapshot_image_id']
        self.full_threshold = args['full_threshold']
        self.name = args['name']
        self.view_mode = args['view_mode']
        self.repo_percentage = args['repo_percentage']
        self.storage_pool_name = args['storage_pool_name']
        self.url = args['api_url']
        self.user = args['api_username']
        self.pwd = args['api_password']
        self.certs = args['validate_certs']

        if not self.url.endswith('/'):
            self.url += '/'

    @property
    def pool_id(self):
        pools = 'storage-systems/%s/storage-pools' % self.ssid
        url = self.url + pools
        (rc, data) = request(url, headers=HEADERS, url_username=self.user, url_password=self.pwd,
                             validate_certs=self.certs)

        for pool in data:
            if pool['name'] == self.storage_pool_name:
                self.pool_data = pool
                return pool['id']

        self.module.fail_json(msg="No storage pool with the name: '%s' was found" % self.storage_pool_name)

    @property
    def ss_vol_exists(self):
        rc, ss_vols = request(self.url + 'storage-systems/%s/snapshot-volumes' % self.ssid, headers=HEADERS,
                              url_username=self.user, url_password=self.pwd, validate_certs=self.certs)
        if ss_vols:
            for ss_vol in ss_vols:
                if ss_vol['name'] == self.name:
                    self.ss_vol = ss_vol
                    return True
        else:
            return False

        return False

    @property
    def ss_vol_needs_update(self):
        if self.ss_vol['fullWarnThreshold'] != self.full_threshold:
            return True
        else:
            return False

    def create_ss_vol(self):
        post_data = dict(
            snapshotImageId=self.snapshot_image_id,
            fullThreshold=self.full_threshold,
            name=self.name,
            viewMode=self.view_mode,
            repositoryPercentage=self.repo_percentage,
            repositoryPoolId=self.pool_id
        )

        rc, create_resp = request(self.url + 'storage-systems/%s/snapshot-volumes' % self.ssid,
                                  data=json.dumps(post_data), headers=HEADERS, url_username=self.user,
                                  url_password=self.pwd, validate_certs=self.certs, method='POST')

        self.ss_vol = create_resp
        # Doing a check after creation because the creation call fails to set the specified warning threshold
        if self.ss_vol_needs_update:
            self.update_ss_vol()
        else:
            self.module.exit_json(changed=True, **create_resp)

    def update_ss_vol(self):
        post_data = dict(
            fullThreshold=self.full_threshold,
        )

        rc, resp = request(self.url + 'storage-systems/%s/snapshot-volumes/%s' % (self.ssid, self.ss_vol['id']),
                           data=json.dumps(post_data), headers=HEADERS, url_username=self.user, url_password=self.pwd,
                           method='POST', validate_certs=self.certs)

        self.module.exit_json(changed=True, **resp)

    def remove_ss_vol(self):
        rc, resp = request(self.url + 'storage-systems/%s/snapshot-volumes/%s' % (self.ssid, self.ss_vol['id']),
                           headers=HEADERS, url_username=self.user, url_password=self.pwd, validate_certs=self.certs,
                           method='DELETE')
        self.module.exit_json(changed=True, msg="Volume successfully deleted")

    def apply(self):
        if self.state == 'present':
            if self.ss_vol_exists:
                if self.ss_vol_needs_update:
                    self.update_ss_vol()
                else:
                    self.module.exit_json(changed=False, **self.ss_vol)
            else:
                self.create_ss_vol()
        else:
            if self.ss_vol_exists:
                self.remove_ss_vol()
            else:
                self.module.exit_json(changed=False, msg="Volume already absent")


def main():
    sv = SnapshotVolume()
    sv.apply()


if __name__ == '__main__':
    main()
@@ -18,7 +18,7 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
module: na_eseries_storage_system
module: netapp_e_storage_system
version_added: "2.2"
short_description: Add/remove arrays from the Web Services Proxy
description:
@@ -80,7 +80,7 @@ author: Kevin Hulquest (@hulquest)
EXAMPLES = '''
---
- name: Presence of storage system
  na_eseries_storage_system:
  netapp_e_storage_system:
    ssid: "{{ item.key }}"
    state: present
    api_url: "{{ netapp_api_url }}"
@@ -99,8 +99,7 @@ msg: Storage system removed.
msg: Storage system added.
'''
import json
import os
from datetime import datetime as dt, timedelta, time
from datetime import datetime as dt, timedelta
from time import sleep

from ansible.module_utils.api import basic_auth_argument_spec
@@ -267,7 +266,6 @@ def main():
module.fail_json(msg="Failed to add storage system. Id[%s]. Request body [%s]. Error[%s]." %
|
||||
(ssid, request_data, str(err)))
|
||||
|
||||
|
||||
else: # array exists, modify...
|
||||
post_headers = dict(Accept="application/json")
|
||||
post_headers['Content-Type'] = 'application/json'
@@ -286,7 +284,6 @@ def main():
module.fail_json(msg="Failed to update storage system. Id[%s]. Request body [%s]. Error[%s]." %
|
||||
(ssid, post_body, str(err)))
|
||||
|
||||
|
||||
elif state == 'absent':
|
||||
# delete the array
|
||||
try:
@@ -0,0 +1,884 @@
#!/usr/bin/python

# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: netapp_e_storagepool
short_description: Manage disk groups and disk pools
version_added: '2.2'
description:
    - Create or remove disk groups and disk pools for NetApp E-series storage arrays.
options:
    api_username:
        required: true
        description:
            - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
    api_password:
        required: true
        description:
            - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
    api_url:
        required: true
        description:
            - The url to the SANtricity WebServices Proxy or embedded REST API.
        example:
            - https://prod-1.wahoo.acme.com/devmgr/v2
    validate_certs:
        required: false
        default: true
        description:
            - Should https certificates be validated?
    ssid:
        required: true
        description:
            - The ID of the array to manage (as configured on the web services proxy).
    state:
        required: true
        description:
            - Whether the specified storage pool should exist or not.
            - Note that removing a storage pool currently requires the removal of all defined volumes first.
        choices: ['present', 'absent']
    name:
        required: true
        description:
            - The name of the storage pool to manage.
    criteria_drive_count:
        description:
            - The number of disks to use for building the storage pool. The pool will be expanded if this number exceeds the number of disks already in place.
    criteria_drive_type:
        description:
            - The type of disk (hdd or ssd) to use when searching for candidates to use.
        choices: ['hdd','ssd']
    criteria_size_unit:
        description:
            - The unit used to interpret size parameters.
        choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
        default: 'gb'
    criteria_drive_min_size:
        description:
            - The minimum individual drive size (in size_unit) to consider when choosing drives for the storage pool.
    criteria_min_usable_capacity:
        description:
            - The minimum size of the storage pool (in size_unit). The pool will be expanded if this value exceeds its current size.
    criteria_drive_interface_type:
        description:
            - The interface type to use when selecting drives for the storage pool (no value means all interface types will be considered).
        choices: ['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata']
    criteria_drive_require_fde:
        description:
            - Whether full disk encryption ability is required for drives to be added to the storage pool.
    raid_level:
        required: true
        choices: ['raidAll', 'raid0', 'raid1', 'raid3', 'raid5', 'raid6', 'raidDiskPool']
        description:
            - "Only required when the requested state is 'present'. The RAID level of the storage pool to be created."
    erase_secured_drives:
        required: false
        choices: ['true', 'false']
        description:
            - Whether to erase secured disks before adding to storage pool.
    secure_pool:
        required: false
        choices: ['true', 'false']
        description:
            - Whether to convert to a secure storage pool. Will only work if all drives in the pool are security capable.
    reserve_drive_count:
        required: false
        description:
            - Set the number of drives reserved by the storage pool for reconstruction operations. Only valid on raid disk pools.
    remove_volumes:
        required: false
        default: False
        description:
            - Prior to removing a storage pool, delete all volumes in the pool.
author: Kevin Hulquest (@hulquest)

'''
EXAMPLES = '''
- name: No disk groups
  netapp_e_storagepool:
    ssid: "{{ ssid }}"
    name: "{{ item }}"
    state: absent
    api_url: "{{ netapp_api_url }}"
    api_username: "{{ netapp_api_username }}"
    api_password: "{{ netapp_api_password }}"
    validate_certs: "{{ netapp_api_validate_certs }}"
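
# A hypothetical creation example; the pool name, RAID level and drive count
# below are illustrative assumptions, not values mandated by the module:
- name: Disk pool present
  netapp_e_storagepool:
    ssid: "{{ ssid }}"
    name: Disk_Pool_1
    state: present
    raid_level: raidDiskPool
    criteria_drive_count: 11
    api_url: "{{ netapp_api_url }}"
    api_username: "{{ netapp_api_username }}"
    api_password: "{{ netapp_api_password }}"
    validate_certs: "{{ netapp_api_validate_certs }}"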
'''
RETURN = '''
msg:
    description: Success message
    returned: success
    type: string
    sample: Json facts for the pool that was created.
'''

import json
import logging
from traceback import format_exc

from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError


def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError:
        err = get_exception()
        r = err.fp

    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            raw_data = None
    except:
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data


def select(predicate, iterable):
    # python 2, 3 generic filtering.
    if predicate is None:
        predicate = bool
    for x in iterable:
        if predicate(x):
            yield x


class groupby(object):
    # python 2, 3 generic grouping.
    def __init__(self, iterable, key=None):
        if key is None:
            key = lambda x: x
        self.keyfunc = key
        self.it = iter(iterable)
        self.tgtkey = self.currkey = self.currvalue = object()

    def __iter__(self):
        return self

    def next(self):
        while self.currkey == self.tgtkey:
            self.currvalue = next(self.it)  # Exit on StopIteration
            self.currkey = self.keyfunc(self.currvalue)
        self.tgtkey = self.currkey
        return (self.currkey, self._grouper(self.tgtkey))

    # Python 3 iterators call __next__ rather than next; alias it so the
    # class actually works on both interpreter versions as the comment claims.
    __next__ = next

    def _grouper(self, tgtkey):
        while self.currkey == tgtkey:
            yield self.currvalue
            self.currvalue = next(self.it)  # Exit on StopIteration
            self.currkey = self.keyfunc(self.currvalue)
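
    # Illustrative use (assumed data): consecutive drives sharing a raw
    # capacity come back as one group, mirroring itertools.groupby semantics.
    #   for capacity, group in groupby(drives, key=lambda d: int(d['rawCapacity'])):
    #       same_size_drives = list(group)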


class NetAppESeriesStoragePool(object):
    def __init__(self):
        self._sp_drives_cached = None

        self._size_unit_map = dict(
            bytes=1,
            b=1,
            kb=1024,
            mb=1024 ** 2,
            gb=1024 ** 3,
            tb=1024 ** 4,
            pb=1024 ** 5,
            eb=1024 ** 6,
            zb=1024 ** 7,
            yb=1024 ** 8
        )

        argument_spec = basic_auth_argument_spec()
        argument_spec.update(dict(
            api_username=dict(type='str', required=True),
            api_password=dict(type='str', required=True, no_log=True),
            api_url=dict(type='str', required=True),
            state=dict(required=True, choices=['present', 'absent'], type='str'),
            ssid=dict(required=True, type='str'),
            name=dict(required=True, type='str'),
            criteria_size_unit=dict(default='gb', type='str'),
            criteria_drive_count=dict(type='int'),
            criteria_drive_interface_type=dict(choices=['sas', 'sas4k', 'fibre', 'fibre520b', 'scsi', 'sata', 'pata'],
                                               type='str'),
            criteria_drive_type=dict(choices=['ssd', 'hdd'], type='str'),
            criteria_drive_min_size=dict(type='int'),
            criteria_drive_require_fde=dict(type='bool'),
            criteria_min_usable_capacity=dict(type='int'),
            raid_level=dict(
                choices=['raidUnsupported', 'raidAll', 'raid0', 'raid1', 'raid3', 'raid5', 'raid6', 'raidDiskPool']),
            erase_secured_drives=dict(type='bool'),
            log_path=dict(type='str'),
            remove_drives=dict(type='list'),
            secure_pool=dict(type='bool', default=False),
            reserve_drive_count=dict(type='int'),
            remove_volumes=dict(type='bool', default=False)
        ))

        self.module = AnsibleModule(
            argument_spec=argument_spec,
            required_if=[
                ('state', 'present', ['raid_level'])
            ],
            mutually_exclusive=[

            ],
            # TODO: update validation for various selection criteria
            supports_check_mode=True
        )

        p = self.module.params

        log_path = p['log_path']

        # logging setup
        self._logger = logging.getLogger(self.__class__.__name__)
        self.debug = self._logger.debug

        if log_path:
            logging.basicConfig(level=logging.DEBUG, filename=log_path)

        self.state = p['state']
        self.ssid = p['ssid']
        self.name = p['name']
        self.validate_certs = p['validate_certs']

        self.criteria_drive_count = p['criteria_drive_count']
        self.criteria_drive_type = p['criteria_drive_type']
        self.criteria_size_unit = p['criteria_size_unit']
        self.criteria_drive_min_size = p['criteria_drive_min_size']
        self.criteria_min_usable_capacity = p['criteria_min_usable_capacity']
        self.criteria_drive_interface_type = p['criteria_drive_interface_type']
        self.criteria_drive_require_fde = p['criteria_drive_require_fde']

        self.raid_level = p['raid_level']
        self.erase_secured_drives = p['erase_secured_drives']
        self.remove_drives = p['remove_drives']
        self.secure_pool = p['secure_pool']
        self.reserve_drive_count = p['reserve_drive_count']
        self.remove_volumes = p['remove_volumes']

        try:
            self.api_usr = p['api_username']
            self.api_pwd = p['api_password']
            self.api_url = p['api_url']
        except KeyError:
            self.module.fail_json(msg="You must pass in api_username "
                                      "and api_password and api_url to the module.")

        self.post_headers = dict(Accept="application/json")
        self.post_headers['Content-Type'] = 'application/json'

    # Quick and dirty drive selector, since the one provided by web service proxy is broken for min_disk_size as of 2016-03-12.
    # Doesn't really need to be a class once this is in module_utils or retired- just groups everything together so we
    # can copy/paste to other modules more easily.
    # Filters all disks by specified criteria, then groups remaining disks by capacity, interface and disk type, and selects
    # the first set that matches the specified count and/or aggregate capacity.
    # class DriveSelector(object):
    def filter_drives(
            self,
            drives,  # raw drives resp
            interface_type=None,  # sas, sata, fibre, etc
            drive_type=None,  # ssd/hdd
            spindle_speed=None,  # 7200, 10000, 15000, ssd (=0)
            min_drive_size=None,
            max_drive_size=None,
            fde_required=None,
            size_unit='gb',
            min_total_capacity=None,
            min_drive_count=None,
            exact_drive_count=None,
            raid_level=None
    ):
        if min_total_capacity is None and exact_drive_count is None:
            raise Exception("One of criteria_min_total_capacity or criteria_drive_count must be specified.")

        if min_total_capacity:
            min_total_capacity = min_total_capacity * self._size_unit_map[size_unit]

        # filter clearly invalid/unavailable drives first
        drives = select(lambda d: self._is_valid_drive(d), drives)

        if interface_type:
            drives = select(lambda d: d['phyDriveType'] == interface_type, drives)

        if drive_type:
            drives = select(lambda d: d['driveMediaType'] == drive_type, drives)

        if spindle_speed is not None:  # 0 is valid for ssds
            drives = select(lambda d: d['spindleSpeed'] == spindle_speed, drives)

        if min_drive_size:
            min_drive_size_bytes = min_drive_size * self._size_unit_map[size_unit]
            drives = select(lambda d: int(d['rawCapacity']) >= min_drive_size_bytes, drives)

        if max_drive_size:
            max_drive_size_bytes = max_drive_size * self._size_unit_map[size_unit]
            drives = select(lambda d: int(d['rawCapacity']) <= max_drive_size_bytes, drives)

        if fde_required:
            drives = select(lambda d: d['fdeCapable'], drives)

        # initial implementation doesn't have a preference for any of these values...
        # just return the first set we find that matches the requested disk count and/or minimum total capacity
        for (cur_capacity, drives_by_capacity) in groupby(drives, lambda d: int(d['rawCapacity'])):
            for (cur_interface_type, drives_by_interface_type) in groupby(drives_by_capacity,
                                                                          lambda d: d['phyDriveType']):
                for (cur_drive_type, drives_by_drive_type) in groupby(drives_by_interface_type,
                                                                      lambda d: d['driveMediaType']):
                    # listify so we can consume more than once
                    drives_by_drive_type = list(drives_by_drive_type)
                    candidate_set = list()  # reset candidate list on each iteration of the innermost loop

                    if exact_drive_count:
                        if len(drives_by_drive_type) < exact_drive_count:
                            continue  # we know this set is too small, move on

                    for drive in drives_by_drive_type:
                        candidate_set.append(drive)
                        if self._candidate_set_passes(candidate_set, min_capacity_bytes=min_total_capacity,
                                                      min_drive_count=min_drive_count,
                                                      exact_drive_count=exact_drive_count, raid_level=raid_level):
                            return candidate_set

        raise Exception("couldn't find an available set of disks to match specified criteria")

    def _is_valid_drive(self, d):
        is_valid = d['available'] \
            and d['status'] == 'optimal' \
            and not d['pfa'] \
            and not d['removed'] \
            and not d['uncertified'] \
            and not d['invalidDriveData'] \
            and not d['nonRedundantAccess']

        return is_valid

    def _candidate_set_passes(self, candidate_set, min_capacity_bytes=None, min_drive_count=None,
                              exact_drive_count=None, raid_level=None):
        if not self._is_drive_count_valid(len(candidate_set), min_drive_count=min_drive_count,
                                          exact_drive_count=exact_drive_count, raid_level=raid_level):
            return False
        # TODO: this assumes candidate_set is all the same size - if we want to allow wastage, need to update to use min size of set
        if min_capacity_bytes is not None and self._calculate_usable_capacity(int(candidate_set[0]['rawCapacity']),
                                                                              len(candidate_set),
                                                                              raid_level=raid_level) < min_capacity_bytes:
            return False

        return True

    def _calculate_usable_capacity(self, disk_size_bytes, disk_count, raid_level=None):
        if raid_level in [None, 'raid0']:
            return disk_size_bytes * disk_count
        if raid_level == 'raid1':
            return (disk_size_bytes * disk_count) / 2
        if raid_level in ['raid3', 'raid5']:
            return (disk_size_bytes * disk_count) - disk_size_bytes
        if raid_level in ['raid6', 'raidDiskPool']:
            return (disk_size_bytes * disk_count) - (disk_size_bytes * 2)
        raise Exception("unsupported raid_level: %s" % raid_level)
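
    # Worked example of the formulas above, with six 1 TiB drives:
    #   raid0              -> 6 TiB (pure striping, no redundancy)
    #   raid1              -> 3 TiB (mirroring halves the raw capacity)
    #   raid3/raid5        -> 5 TiB (one drive's worth of parity)
    #   raid6/raidDiskPool -> 4 TiB (two drives' worth of parity/reserve)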

    def _is_drive_count_valid(self, drive_count, min_drive_count=0, exact_drive_count=None, raid_level=None):
        if exact_drive_count and exact_drive_count != drive_count:
            return False
        if raid_level == 'raidDiskPool':
            if drive_count < 11:
                return False
        if raid_level == 'raid1':
            if drive_count % 2 != 0:
                return False
        if raid_level in ['raid3', 'raid5']:
            if drive_count < 3:
                return False
        if raid_level == 'raid6':
            if drive_count < 4:
                return False
        if min_drive_count and drive_count < min_drive_count:
            return False

        return True

    def get_storage_pool(self, storage_pool_name):
        self.debug("fetching storage pools")
        # map the storage pool name to its id
        try:
            (rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid),
                                 headers=dict(Accept="application/json"), url_username=self.api_usr,
                                 url_password=self.api_pwd, validate_certs=self.validate_certs)
        except Exception:
            err = get_exception()
            rc = err.args[0]
            if rc == 404 and self.state == 'absent':
                self.module.exit_json(
                    msg="Storage pool [%s] did not exist." % (self.name))
            else:
                self.module.exit_json(
                    msg="Failed to get storage pools. Array id [%s]. Error[%s]. State[%s]. RC[%s]." %
                        (self.ssid, str(err), self.state, rc))

        self.debug("searching for storage pool '%s'" % storage_pool_name)

        pool_detail = next(select(lambda a: a['name'] == storage_pool_name, resp), None)

        if pool_detail:
            self.debug('found')
        else:
            self.debug('not found')

        return pool_detail

    def get_candidate_disks(self):
        self.debug("getting candidate disks...")

        # driveCapacityMin is broken on /drives POST. Per NetApp request we built our own.
        # Switch back to the commented code below if it gets fixed.
        # drives_req = dict(
        #     driveCount = self.criteria_drive_count,
        #     sizeUnit = 'mb',
        #     raidLevel = self.raid_level
        # )
        #
        # if self.criteria_drive_type:
        #     drives_req['driveType'] = self.criteria_drive_type
        # if self.criteria_disk_min_aggregate_size_mb:
        #     drives_req['targetUsableCapacity'] = self.criteria_disk_min_aggregate_size_mb
        #
        # # TODO: this arg appears to be ignored, uncomment if it isn't
        # #if self.criteria_disk_min_size_gb:
        # #    drives_req['driveCapacityMin'] = self.criteria_disk_min_size_gb * 1024
        # (rc, drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), data=json.dumps(drives_req), headers=self.post_headers, method='POST', url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs)
        #
        # if rc == 204:
        #     self.module.fail_json(msg='Cannot find disks to match requested criteria for storage pool')
        #
        # disk_ids = [d['id'] for d in drives_resp]

        try:
            (rc, drives_resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), method='GET',
                                        url_username=self.api_usr, url_password=self.api_pwd,
                                        validate_certs=self.validate_certs)
        except Exception:
            err = get_exception()
            self.module.exit_json(
                msg="Failed to fetch disk drives. Array id [%s]. Error[%s]." % (self.ssid, str(err)))

        try:
            candidate_set = self.filter_drives(drives_resp,
                                               exact_drive_count=self.criteria_drive_count,
                                               drive_type=self.criteria_drive_type,
                                               min_drive_size=self.criteria_drive_min_size,
                                               raid_level=self.raid_level,
                                               size_unit=self.criteria_size_unit,
                                               min_total_capacity=self.criteria_min_usable_capacity,
                                               interface_type=self.criteria_drive_interface_type,
                                               fde_required=self.criteria_drive_require_fde
                                               )
        except Exception:
            err = get_exception()
            self.module.fail_json(
                msg="Failed to allocate adequate drive count. Id [%s]. Error [%s]." % (self.ssid, str(err)))

        disk_ids = [d['id'] for d in candidate_set]

        return disk_ids

    def create_storage_pool(self):
        self.debug("creating storage pool...")

        sp_add_req = dict(
            raidLevel=self.raid_level,
            diskDriveIds=self.disk_ids,
            name=self.name
        )

        if self.erase_secured_drives:
            sp_add_req['eraseSecuredDrives'] = self.erase_secured_drives

        try:
            (rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid),
                                 data=json.dumps(sp_add_req), headers=self.post_headers, method='POST',
                                 url_username=self.api_usr, url_password=self.api_pwd,
                                 validate_certs=self.validate_certs,
                                 timeout=120)
        except Exception:
            err = get_exception()
            # the pool doesn't exist yet at this point, so report its name rather than an id
            self.module.exit_json(
                msg="Failed to create storage pool. Pool [%s]. Array id [%s]. Error[%s]." % (self.name,
                                                                                             self.ssid,
                                                                                             str(err)))

        self.pool_detail = self.get_storage_pool(self.name)

        if self.secure_pool:
            secure_pool_data = dict(securePool=True)
            try:
                (retc, r) = request(
                    self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail['id']),
                    data=json.dumps(secure_pool_data), headers=self.post_headers, method='POST',
                    url_username=self.api_usr,
                    url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120, ignore_errors=True)
            except Exception:
                err = get_exception()
                pool_id = self.pool_detail['id']
                self.module.exit_json(
                    msg="Failed to update storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
                                                                                                    self.ssid,
                                                                                                    str(err)))

    @property
    def needs_raid_level_migration(self):
        current_raid_level = self.pool_detail['raidLevel']
        needs_migration = self.raid_level != current_raid_level

        if needs_migration:  # sanity check some things so we can fail early/check-mode
            if current_raid_level == 'raidDiskPool':
                self.module.fail_json(msg="raid level cannot be changed for disk pools")

        return needs_migration

    def migrate_raid_level(self):
        self.debug("migrating storage pool to raid level '%s'..." % self.raid_level)
        sp_raid_migrate_req = dict(
            raidLevel=self.raid_level
        )
        try:
            (rc, resp) = request(
                self.api_url + "/storage-systems/%s/storage-pools/%s/raid-type-migration" % (self.ssid,
                                                                                             self.name),
                data=json.dumps(sp_raid_migrate_req), headers=self.post_headers, method='POST',
                url_username=self.api_usr,
                url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
        except Exception:
            err = get_exception()
            pool_id = self.pool_detail['id']
            self.module.exit_json(
                msg="Failed to change the raid level of storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
                    pool_id, self.ssid, str(err)))

    @property
    def sp_drives(self, exclude_hotspares=True):
        # note: accessed as a property, so exclude_hotspares always takes its
        # default and hot spares are excluded from the returned list
        if not self._sp_drives_cached:

            self.debug("fetching drive list...")
            try:
                (rc, resp) = request(self.api_url + "/storage-systems/%s/drives" % (self.ssid), method='GET',
                                     url_username=self.api_usr, url_password=self.api_pwd,
                                     validate_certs=self.validate_certs)
            except Exception:
                err = get_exception()
                pool_id = self.pool_detail['id']
                self.module.exit_json(
                    msg="Failed to fetch disk drives. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id, self.ssid, str(err)))

            sp_id = self.pool_detail['id']
            if exclude_hotspares:
                self._sp_drives_cached = [d for d in resp if d['currentVolumeGroupRef'] == sp_id and not d['hotSpare']]
            else:
                self._sp_drives_cached = [d for d in resp if d['currentVolumeGroupRef'] == sp_id]

        return self._sp_drives_cached

    @property
    def reserved_drive_count_differs(self):
        if int(self.pool_detail['volumeGroupData']['diskPoolData'][
                   'reconstructionReservedDriveCount']) != self.reserve_drive_count:
            return True
        return False

    @property
    def needs_expansion(self):
        if self.criteria_drive_count > len(self.sp_drives):
            return True
        # TODO: is totalRaidedSpace the best attribute for "how big is this SP"?
        if self.criteria_min_usable_capacity and \
                (self.criteria_min_usable_capacity * self._size_unit_map[self.criteria_size_unit]) > int(self.pool_detail['totalRaidedSpace']):
            return True

        return False

    def get_expansion_candidate_drives(self):
        # sanity checks; don't call this if we can't/don't need to expand
        if not self.needs_expansion:
            self.module.fail_json(msg="can't get expansion candidates when pool doesn't need expansion")

        self.debug("fetching expansion candidate drives...")
        try:
            (rc, resp) = request(
                self.api_url + "/storage-systems/%s/storage-pools/%s/expand" % (self.ssid,
                                                                                self.pool_detail['id']),
                method='GET', url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
                timeout=120)
        except Exception:
            err = get_exception()
            pool_id = self.pool_detail['id']
            self.module.exit_json(
                msg="Failed to fetch candidate drives for storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
                    pool_id, self.ssid, str(err)))

        current_drive_count = len(self.sp_drives)
        current_capacity_bytes = int(self.pool_detail['totalRaidedSpace'])  # TODO: is this the right attribute to use?

        if self.criteria_min_usable_capacity:
            requested_capacity_bytes = self.criteria_min_usable_capacity * self._size_unit_map[self.criteria_size_unit]
        else:
            requested_capacity_bytes = current_capacity_bytes

        if self.criteria_drive_count:
            minimum_disks_to_add = max((self.criteria_drive_count - current_drive_count), 1)
        else:
            minimum_disks_to_add = 1

        minimum_bytes_to_add = max(requested_capacity_bytes - current_capacity_bytes, 0)

        # FUTURE: allow more control over expansion candidate selection?
        # loop over candidate disk sets and add until we've met both criteria
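        # For example (hypothetical numbers): a pool of 4 drives with
        # criteria_drive_count=6 and 1 TiB of additional requested capacity needs at
        # least 2 more disks and 1 TiB more bytes; candidate sets are accumulated
        # below until both minimums are satisfied.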

        added_drive_count = 0
        added_capacity_bytes = 0

        drives_to_add = set()

        for s in resp:
            # don't trust the API not to give us duplicate drives across candidate sets, especially in multi-drive sets
            candidate_drives = s['drives']
            if len(drives_to_add.intersection(candidate_drives)) != 0:
                # duplicate, skip
                continue
            drives_to_add.update(candidate_drives)
            added_drive_count += len(candidate_drives)
            added_capacity_bytes += int(s['usableCapacity'])

            if added_drive_count >= minimum_disks_to_add and added_capacity_bytes >= minimum_bytes_to_add:
                break

        if (added_drive_count < minimum_disks_to_add) or (added_capacity_bytes < minimum_bytes_to_add):
            self.module.fail_json(
                msg="unable to find at least %s drives to add that would add at least %s bytes of capacity" % (
                    minimum_disks_to_add, minimum_bytes_to_add))

        return list(drives_to_add)

    def expand_storage_pool(self):
        drives_to_add = self.get_expansion_candidate_drives()

        self.debug("adding %s drives to storage pool..." % len(drives_to_add))
        sp_expand_req = dict(
            drives=drives_to_add
        )
        try:
            request(
                self.api_url + "/storage-systems/%s/storage-pools/%s/expand" % (self.ssid,
                                                                                self.pool_detail['id']),
                data=json.dumps(sp_expand_req), headers=self.post_headers, method='POST', url_username=self.api_usr,
                url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
        except Exception:
            err = get_exception()
            pool_id = self.pool_detail['id']
            self.module.exit_json(
                msg="Failed to add drives to storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
                                                                                                       self.ssid,
                                                                                                       str(err)))

        # TODO: check response
        # TODO: support blocking wait?

    def reduce_drives(self, drive_list):
        # ensure all the drives passed in are currently present in the storage pool
        if not all(drive in self.sp_drives for drive in drive_list):
            self.module.fail_json(
                msg="One of the drives you wish to remove does not currently exist in the storage pool you specified")

        try:
            (rc, resp) = request(
                self.api_url + "/storage-systems/%s/storage-pools/%s/reduction" % (self.ssid,
                                                                                   self.pool_detail['id']),
                data=json.dumps(drive_list), headers=self.post_headers, method='POST', url_username=self.api_usr,
                url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
        except Exception:
            err = get_exception()
            pool_id = self.pool_detail['id']
            self.module.exit_json(
                msg="Failed to remove drives from storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
                    pool_id, self.ssid, str(err)))

    def update_reserve_drive_count(self, qty):
        data = dict(reservedDriveCount=qty)
        try:
            (rc, resp) = request(
                self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid, self.pool_detail['id']),
                data=json.dumps(data), headers=self.post_headers, method='POST', url_username=self.api_usr,
                url_password=self.api_pwd, validate_certs=self.validate_certs, timeout=120)
        except Exception:
            err = get_exception()
            pool_id = self.pool_detail['id']
            self.module.exit_json(
                msg="Failed to update reserve drive count. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
                                                                                                       self.ssid,
                                                                                                       str(err)))

    def apply(self):
        changed = False
        pool_exists = False

        self.pool_detail = self.get_storage_pool(self.name)

        if self.pool_detail:
            pool_exists = True
            pool_id = self.pool_detail['id']

            if self.state == 'absent':
                self.debug("CHANGED: storage pool exists, but requested state is 'absent'")
                changed = True
            elif self.state == 'present':
                # sanity checks first - we can't change these, so we'll bomb if they're specified
                if self.criteria_drive_type and self.criteria_drive_type != self.pool_detail['driveMediaType']:
                    self.module.fail_json(
                        msg="drive media type %s cannot be changed to %s" % (self.pool_detail['driveMediaType'],
                                                                             self.criteria_drive_type))

                # now the things we can change...
                if self.needs_expansion:
                    self.debug("CHANGED: storage pool needs expansion")
                    changed = True

                if self.needs_raid_level_migration:
                    self.debug(
                        "CHANGED: raid level migration required; storage pool uses '%s', requested is '%s'" % (
                            self.pool_detail['raidLevel'], self.raid_level))
                    changed = True

                # if self.reserved_drive_count_differs:
                #     changed = True

                # TODO: validate other state details? (pool priority, alert threshold)

                # per FPoole and others, pool reduce operations will not be supported. Automatic "smart" reduction
                # presents a difficult parameter issue, as the disk count can increase due to expansion, so we
                # can't just use disk count > criteria_drive_count.

        else:  # pool does not exist
            if self.state == 'present':
                self.debug("CHANGED: storage pool does not exist, but requested state is 'present'")
                changed = True

                # ensure we can get back a workable set of disks
                # (doing this early so candidate selection runs under check mode)
                self.disk_ids = self.get_candidate_disks()
            else:
                self.module.exit_json(msg="Storage pool [%s] did not exist." % (self.name))

        if changed and not self.module.check_mode:
            # apply changes
            if self.state == 'present':
                if not pool_exists:
                    self.create_storage_pool()
                else:  # pool exists but differs, modify...
                    if self.needs_expansion:
                        self.expand_storage_pool()

                    if self.remove_drives:
                        self.reduce_drives(self.remove_drives)

                    if self.needs_raid_level_migration:
                        self.migrate_raid_level()

                    # if self.reserved_drive_count_differs:
                    #     self.update_reserve_drive_count(self.reserve_drive_count)

                    if self.secure_pool:
                        secure_pool_data = dict(securePool=True)
                        try:
                            (retc, r) = request(
                                self.api_url + "/storage-systems/%s/storage-pools/%s" % (self.ssid,
                                                                                         self.pool_detail['id']),
                                data=json.dumps(secure_pool_data), headers=self.post_headers, method='POST',
                                url_username=self.api_usr, url_password=self.api_pwd,
                                validate_certs=self.validate_certs, timeout=120, ignore_errors=True)
                        except Exception:
                            err = get_exception()
                            self.module.exit_json(
                                msg="Failed to secure storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (
                                    pool_id, self.ssid, str(err)))

                        if int(retc) == 422:
                            self.module.fail_json(
                                msg="Error in enabling secure pool. One of the drives in the specified storage pool is likely not security capable")

            elif self.state == 'absent':
                # delete the storage pool
                try:
                    remove_vol_opt = ''
                    if self.remove_volumes:
                        remove_vol_opt = '?delete-volumes=true'
                    (rc, resp) = request(
                        self.api_url + "/storage-systems/%s/storage-pools/%s%s" % (self.ssid, pool_id,
                                                                                   remove_vol_opt),
                        method='DELETE',
                        url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
                        timeout=120)
                except Exception:
                    err = get_exception()
                    self.module.exit_json(
                        msg="Failed to delete storage pool. Pool id [%s]. Array id [%s]. Error[%s]." % (pool_id,
                                                                                                        self.ssid,
                                                                                                        str(err)))

        self.module.exit_json(changed=changed, **self.pool_detail)


def main():
    sp = NetAppESeriesStoragePool()
    try:
        sp.apply()
    except Exception:
        e = get_exception()
        sp.debug("Exception in apply(): \n%s" % format_exc(e))
        raise


if __name__ == '__main__':
    main()
618
lib/ansible/modules/extras/storage/netapp/netapp_e_volume.py
Normal file
@@ -0,0 +1,618 @@
#!/usr/bin/python

# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#

from ansible.module_utils.api import basic_auth_argument_spec

DOCUMENTATION = '''
---
module: netapp_e_volume
version_added: "2.2"
short_description: Manage storage volumes (standard and thin)
description:
    - Create or remove volumes (standard and thin) for NetApp E/EF-series storage arrays.
options:
  api_username:
    required: true
    description:
    - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
  api_password:
    required: true
    description:
    - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
  api_url:
    required: true
    description:
    - The url to the SANtricity WebServices Proxy or embedded REST API.
    example:
    - https://prod-1.wahoo.acme.com/devmgr/v2
  validate_certs:
    required: false
    default: true
    description:
    - Should https certificates be validated?
  ssid:
    required: true
    description:
    - The ID of the array to manage (as configured on the web services proxy).
  state:
    required: true
    description:
    - Whether the specified volume should exist or not.
    choices: ['present', 'absent']
  name:
    required: true
    description:
    - The name of the volume to manage
  storage_pool_name:
    required: true
    description:
    - "Required only when requested state is 'present'. The name of the storage pool the volume should exist on."
  size_unit:
    description:
    - The unit used to interpret the size parameter
    choices: ['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb']
    default: 'gb'
  size:
    required: true
    description:
    - "Required only when state = 'present'. The size of the volume in (size_unit)."
  segment_size_kb:
    description:
    - The segment size of the new volume
    default: 128
  thin_provision:
    description:
    - Whether the volume should be thin provisioned. Thin volumes can only be created on disk pools (raidDiskPool).
    default: False
    choices: ['yes','no','true','false']
  thin_volume_repo_size:
    description:
    - Initial size of the thin volume repository volume (in size_unit)
    - Required only when thin_provision is enabled.
  thin_volume_max_repo_size:
    description:
    - Maximum size that the thin volume repository volume will automatically expand to
    default: same as size (in size_unit)
  ssd_cache_enabled:
    description:
    - Whether an existing SSD cache should be enabled on the volume (fails if no SSD cache defined)
    default: None (ignores existing SSD cache setting)
    choices: ['yes','no','true','false']
  data_assurance_enabled:
    description:
    - If data assurance should be enabled for the volume
    default: false

# TODO: document thin volume parameters

author: Kevin Hulquest (@hulquest)

'''
EXAMPLES = '''
- name: No thin volume
  netapp_e_volume:
    ssid: "{{ ssid }}"
    name: NewThinVolumeByAnsible
    state: absent
    log_path: /tmp/volume.log
    api_url: "{{ netapp_api_url }}"
    api_username: "{{ netapp_api_username }}"
    api_password: "{{ netapp_api_password }}"
    validate_certs: "{{ netapp_api_validate_certs }}"
  when: check_volume


- name: No fat volume
  netapp_e_volume:
    ssid: "{{ ssid }}"
    name: NewVolumeByAnsible
    state: absent
    log_path: /tmp/volume.log
    api_url: "{{ netapp_api_url }}"
    api_username: "{{ netapp_api_username }}"
    api_password: "{{ netapp_api_password }}"
    validate_certs: "{{ netapp_api_validate_certs }}"
  when: check_volume
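
# For completeness, a creation example; the pool name and sizes below are
# illustrative placeholders:
- name: Standard 10 GB volume
  netapp_e_volume:
    ssid: "{{ ssid }}"
    name: NewVolumeByAnsible
    state: present
    storage_pool_name: Disk_Pool_1
    size: 10
    size_unit: gb
    api_url: "{{ netapp_api_url }}"
    api_username: "{{ netapp_api_username }}"
    api_password: "{{ netapp_api_password }}"
    validate_certs: "{{ netapp_api_validate_certs }}"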
'''
RETURN = '''
---
msg: "Standard volume [workload_vol_1] has been created."
msg: "Thin volume [workload_thin_vol] has been created."
msg: "Volume [workload_vol_1] has been expanded."
msg: "Volume [workload_vol_1] has been deleted."
msg: "Volume [workload_vol_1] did not exist."
msg: "Volume [workload_vol_1] already exists."
'''

import json
import logging
import time
from traceback import format_exc

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError


def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError:
        err = get_exception()
        r = err.fp

    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            raw_data = None
    except Exception:
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data
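

# Usage sketch (illustrative values): request() returns the HTTP status code and
# the decoded JSON body, and raises on status >= 400 unless ignore_errors is set:
#   (rc, volumes) = request(api_url + "/storage-systems/1/volumes",
#                           url_username="admin", url_password="secret")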


def ifilter(predicate, iterable):
    # python 2, 3 generic filtering.
    if predicate is None:
        predicate = bool
    for x in iterable:
        if predicate(x):
            yield x
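

# For example, next(ifilter(lambda v: v['name'] == volume_name, volumes), None)
# yields the first matching element or None; the lookups below rely on this.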


class NetAppESeriesVolume(object):
    def __init__(self):
        self._size_unit_map = dict(
            bytes=1,
            b=1,
            kb=1024,
            mb=1024 ** 2,
            gb=1024 ** 3,
            tb=1024 ** 4,
            pb=1024 ** 5,
            eb=1024 ** 6,
            zb=1024 ** 7,
            yb=1024 ** 8
        )

        self._post_headers = dict(Accept="application/json")
        self._post_headers['Content-Type'] = 'application/json'

        argument_spec = basic_auth_argument_spec()
        argument_spec.update(dict(
            state=dict(required=True, choices=['present', 'absent']),
            ssid=dict(required=True, type='str'),
            name=dict(required=True, type='str'),
            storage_pool_name=dict(type='str'),
            size_unit=dict(default='gb', choices=['bytes', 'b', 'kb', 'mb', 'gb', 'tb', 'pb', 'eb', 'zb', 'yb'],
                           type='str'),
            size=dict(type='int'),
            segment_size_kb=dict(default=128, choices=[8, 16, 32, 64, 128, 256, 512], type='int'),
            ssd_cache_enabled=dict(type='bool'),  # no default, leave existing setting alone
            data_assurance_enabled=dict(default=False, type='bool'),
            thin_provision=dict(default=False, type='bool'),
            thin_volume_repo_size=dict(type='int'),
            thin_volume_max_repo_size=dict(type='int'),
            # TODO: add cache, owning controller support, thin expansion policy, etc
            log_path=dict(type='str'),
            api_url=dict(type='str'),
            api_username=dict(type='str'),
            api_password=dict(type='str'),
            validate_certs=dict(type='bool'),
        ))

        self.module = AnsibleModule(argument_spec=argument_spec,
                                    required_if=[
                                        ('state', 'present', ['storage_pool_name', 'size']),
                                        # thin_provision is a bool, so compare against True rather than 'true'
                                        ('thin_provision', True, ['thin_volume_repo_size'])
                                    ],
                                    supports_check_mode=True)
        p = self.module.params

        log_path = p['log_path']

        # logging setup
        self._logger = logging.getLogger(self.__class__.__name__)
        self.debug = self._logger.debug

        if log_path:
            logging.basicConfig(level=logging.DEBUG, filename=log_path)

        self.state = p['state']
        self.ssid = p['ssid']
        self.name = p['name']
        self.storage_pool_name = p['storage_pool_name']
        self.size_unit = p['size_unit']
        self.size = p['size']
        self.segment_size_kb = p['segment_size_kb']
        self.ssd_cache_enabled = p['ssd_cache_enabled']
        self.data_assurance_enabled = p['data_assurance_enabled']
        self.thin_provision = p['thin_provision']
        self.thin_volume_repo_size = p['thin_volume_repo_size']
        self.thin_volume_max_repo_size = p['thin_volume_max_repo_size']

        if not self.thin_volume_max_repo_size:
            self.thin_volume_max_repo_size = self.size

        self.validate_certs = p['validate_certs']

        try:
            self.api_usr = p['api_username']
            self.api_pwd = p['api_password']
            self.api_url = p['api_url']
        except KeyError:
            self.module.fail_json(msg="You must pass in api_username "
                                      "and api_password and api_url to the module.")

    def get_volume(self, volume_name):
        self.debug('fetching volumes')
        # fetch the list of volume objects and look for one with a matching name (we'll need to merge volumes and thin-volumes)
        try:
            (rc, volumes) = request(self.api_url + "/storage-systems/%s/volumes" % (self.ssid),
                                    headers=dict(Accept="application/json"), url_username=self.api_usr,
                                    url_password=self.api_pwd, validate_certs=self.validate_certs)
        except Exception:
            err = get_exception()
            self.module.fail_json(
                msg="Failed to obtain list of standard/thick volumes. Array Id [%s]. Error[%s]." % (self.ssid,
                                                                                                    str(err)))

        try:
            self.debug('fetching thin-volumes')
            (rc, thinvols) = request(self.api_url + "/storage-systems/%s/thin-volumes" % (self.ssid),
                                     headers=dict(Accept="application/json"), url_username=self.api_usr,
                                     url_password=self.api_pwd, validate_certs=self.validate_certs)
        except Exception:
            err = get_exception()
            self.module.fail_json(
                msg="Failed to obtain list of thin volumes. Array Id [%s]. Error[%s]." % (self.ssid, str(err)))

        volumes.extend(thinvols)

        self.debug("searching for volume '%s'" % volume_name)
        volume_detail = next(ifilter(lambda a: a['name'] == volume_name, volumes), None)

        if volume_detail:
            self.debug('found')
        else:
            self.debug('not found')

        return volume_detail

    def get_storage_pool(self, storage_pool_name):
        self.debug("fetching storage pools")
        # map the storage pool name to its id
        try:
            (rc, resp) = request(self.api_url + "/storage-systems/%s/storage-pools" % (self.ssid),
                                 headers=dict(Accept="application/json"), url_username=self.api_usr,
                                 url_password=self.api_pwd, validate_certs=self.validate_certs)
        except Exception:
            err = get_exception()
            self.module.fail_json(
                msg="Failed to obtain list of storage pools. Array Id [%s]. Error[%s]." % (self.ssid, str(err)))

        self.debug("searching for storage pool '%s'" % storage_pool_name)
        pool_detail = next(ifilter(lambda a: a['name'] == storage_pool_name, resp), None)

        if pool_detail:
            self.debug('found')
        else:
            self.debug('not found')

        return pool_detail

    def create_volume(self, pool_id, name, size_unit, size, segment_size_kb, data_assurance_enabled):
        volume_add_req = dict(
            name=name,
            poolId=pool_id,
            sizeUnit=size_unit,
            size=size,
            segSize=segment_size_kb,
            dataAssuranceEnabled=data_assurance_enabled,
        )

        self.debug("creating volume '%s'" % name)
        try:
            (rc, resp) = request(self.api_url + "/storage-systems/%s/volumes" % (self.ssid),
                                 data=json.dumps(volume_add_req), headers=self._post_headers, method='POST',
                                 url_username=self.api_usr, url_password=self.api_pwd,
                                 validate_certs=self.validate_certs,
                                 timeout=120)
        except Exception:
            err = get_exception()
            self.module.fail_json(
                msg="Failed to create volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name, self.ssid,
                                                                                         str(err)))

    def create_thin_volume(self, pool_id, name, size_unit, size, thin_volume_repo_size,
                           thin_volume_max_repo_size, data_assurance_enabled):
        thin_volume_add_req = dict(
            name=name,
            poolId=pool_id,
            sizeUnit=size_unit,
            virtualSize=size,
            repositorySize=thin_volume_repo_size,
            maximumRepositorySize=thin_volume_max_repo_size,
            dataAssuranceEnabled=data_assurance_enabled,
        )

        self.debug("creating thin-volume '%s'" % name)
        try:
            (rc, resp) = request(self.api_url + "/storage-systems/%s/thin-volumes" % (self.ssid),
                                 data=json.dumps(thin_volume_add_req), headers=self._post_headers, method='POST',
                                 url_username=self.api_usr, url_password=self.api_pwd,
                                 validate_certs=self.validate_certs,
                                 timeout=120)
        except Exception:
            err = get_exception()
            self.module.fail_json(
                msg="Failed to create thin volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
                                                                                              self.ssid,
                                                                                              str(err)))

    def delete_volume(self):
        # delete the volume
        self.debug("deleting volume '%s'" % self.volume_detail['name'])
        try:
            (rc, resp) = request(
                self.api_url + "/storage-systems/%s/%s/%s" % (self.ssid, self.volume_resource_name,
                                                              self.volume_detail['id']),
                method='DELETE', url_username=self.api_usr, url_password=self.api_pwd,
                validate_certs=self.validate_certs, timeout=120)
        except Exception:
            err = get_exception()
            self.module.fail_json(
                msg="Failed to delete volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name, self.ssid,
                                                                                         str(err)))

    @property
    def volume_resource_name(self):
        if self.volume_detail['thinProvisioned']:
            return 'thin-volumes'
        else:
            return 'volumes'

    @property
    def volume_properties_changed(self):
        return self.volume_ssdcache_setting_changed  # or with other props here when extended

    # TODO: add support for r/w cache settings, owning controller, scan settings, expansion policy, growth alert threshold

    @property
    def volume_ssdcache_setting_changed(self):
        # None means ignore existing setting
        if self.ssd_cache_enabled is not None and self.ssd_cache_enabled != self.volume_detail['flashCached']:
            self.debug("flash cache setting changed")
            return True

    def update_volume_properties(self):
        update_volume_req = dict()

        # conditionally add values so we ignore unspecified props
        if self.volume_ssdcache_setting_changed:
            update_volume_req['flashCache'] = self.ssd_cache_enabled

        self.debug("updating volume properties...")
        try:
            (rc, resp) = request(
                self.api_url + "/storage-systems/%s/%s/%s/" % (self.ssid, self.volume_resource_name,
                                                               self.volume_detail['id']),
                data=json.dumps(update_volume_req), headers=self._post_headers, method='POST',
                url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
                timeout=120)
        except Exception:
            err = get_exception()
            self.module.fail_json(
                msg="Failed to update volume properties. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
                                                                                                    self.ssid,
                                                                                                    str(err)))

    @property
    def volume_needs_expansion(self):
        current_size_bytes = int(self.volume_detail['capacity'])
        requested_size_bytes = self.size * self._size_unit_map[self.size_unit]

        # TODO: check requested/current repo volume size for thin-volumes as well

        # TODO: do we need to build any kind of slop factor in here?
        return requested_size_bytes > current_size_bytes

    def expand_volume(self):
        is_thin = self.volume_detail['thinProvisioned']
        if is_thin:
            # TODO: support manual repo expansion as well
            self.debug('expanding thin volume')
            thin_volume_expand_req = dict(
                newVirtualSize=self.size,
                sizeUnit=self.size_unit
            )
            try:
                (rc, resp) = request(self.api_url + "/storage-systems/%s/thin-volumes/%s/expand" % (self.ssid,
                                                                                                    self.volume_detail['id']),
                                     data=json.dumps(thin_volume_expand_req), headers=self._post_headers,
                                     method='POST', url_username=self.api_usr, url_password=self.api_pwd,
                                     validate_certs=self.validate_certs, timeout=120)
            except Exception:
                err = get_exception()
                self.module.fail_json(
                    msg="Failed to expand thin volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
                                                                                                  self.ssid,
                                                                                                  str(err)))

            # TODO: check return code
        else:
            self.debug('expanding volume')
            volume_expand_req = dict(
                expansionSize=self.size,
                sizeUnit=self.size_unit
            )
            try:
                (rc, resp) = request(
                    self.api_url + "/storage-systems/%s/volumes/%s/expand" % (self.ssid,
                                                                              self.volume_detail['id']),
                    data=json.dumps(volume_expand_req), headers=self._post_headers, method='POST',
                    url_username=self.api_usr, url_password=self.api_pwd, validate_certs=self.validate_certs,
                    timeout=120)
            except Exception:
                err = get_exception()
                self.module.fail_json(
                    msg="Failed to expand volume. Volume [%s]. Array Id [%s]. Error[%s]." % (self.name,
                                                                                             self.ssid,
                                                                                             str(err)))

            self.debug('polling for completion...')

            while True:
                try:
                    (rc, resp) = request(self.api_url + "/storage-systems/%s/volumes/%s/expand" % (self.ssid,
                                                                                                   self.volume_detail['id']),
                                         method='GET', url_username=self.api_usr, url_password=self.api_pwd,
                                         validate_certs=self.validate_certs)
                except Exception:
                    err = get_exception()
                    self.module.fail_json(
                        msg="Failed to get volume expansion progress. Volume [%s]. Array Id [%s]. Error[%s]." % (
                            self.name, self.ssid, str(err)))

                action = resp['action']
                percent_complete = resp['percentComplete']

                self.debug('expand action %s, %s complete...' % (action, percent_complete))

                if action == 'none':
                    self.debug('expand complete')
                    break
                else:
                    time.sleep(5)

    def apply(self):
        changed = False
        volume_exists = False
        msg = None

        self.volume_detail = self.get_volume(self.name)

        if self.volume_detail:
            volume_exists = True

            if self.state == 'absent':
                self.debug("CHANGED: volume exists, but requested state is 'absent'")
                changed = True
            elif self.state == 'present':
                # check requested volume size, see if expansion is necessary
                if self.volume_needs_expansion:
                    self.debug("CHANGED: requested volume size %s%s is larger than current size %sb" % (self.size,
                                                                                                        self.size_unit,
                                                                                                        self.volume_detail['capacity']))
                    changed = True

                if self.volume_properties_changed:
                    self.debug("CHANGED: one or more volume properties have changed")
                    changed = True

        else:
            if self.state == 'present':
                self.debug("CHANGED: volume does not exist, but requested state is 'present'")
                changed = True

        if changed:
            if self.module.check_mode:
                self.debug('skipping changes due to check mode')
            else:
                if self.state == 'present':
                    if not volume_exists:
                        pool_detail = self.get_storage_pool(self.storage_pool_name)

                        if not pool_detail:
                            self.module.fail_json(msg='Requested storage pool (%s) not found' % self.storage_pool_name)

                        if self.thin_provision and not pool_detail['diskPool']:
                            self.module.fail_json(
                                msg='Thin provisioned volumes can only be located on disk pools (not volume groups)')

                        pool_id = pool_detail['id']

                        if not self.thin_provision:
                            self.create_volume(pool_id, self.name, self.size_unit, self.size, self.segment_size_kb,
                                               self.data_assurance_enabled)
                            msg = "Standard volume [%s] has been created." % (self.name)

                        else:
                            self.create_thin_volume(pool_id, self.name, self.size_unit, self.size,
                                                    self.thin_volume_repo_size, self.thin_volume_max_repo_size,
                                                    self.data_assurance_enabled)
                            msg = "Thin volume [%s] has been created." % (self.name)

                    else:  # volume exists but differs, modify...
                        if self.volume_needs_expansion:
                            self.expand_volume()
                            msg = "Volume [%s] has been expanded." % (self.name)

                    # this stuff always needs to run on present (since props can't be set on creation)
                    if self.volume_properties_changed:
                        self.update_volume_properties()
                        msg = "Properties of volume [%s] have been updated." % (self.name)

                elif self.state == 'absent':
                    self.delete_volume()
                    msg = "Volume [%s] has been deleted." % (self.name)
        else:
            self.debug("exiting with no changes")
            if self.state == 'absent':
                msg = "Volume [%s] did not exist." % (self.name)
            else:
                msg = "Volume [%s] already exists." % (self.name)

        self.module.exit_json(msg=msg, changed=changed)


def main():
    v = NetAppESeriesVolume()

    try:
        v.apply()
    except Exception:
        e = get_exception()
        v.debug("Exception in apply(): \n%s" % format_exc(e))
        v.module.fail_json(msg="Module failed. Error [%s]." % (str(e)))


if __name__ == '__main__':
    main()
@@ -0,0 +1,439 @@
#!/usr/bin/python

# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = """
---
module: netapp_e_volume_copy
short_description: Create volume copy pairs
description:
    - Create and delete volume copy pairs on NetApp E-series storage arrays.
version_added: '2.2'
author: Kevin Hulquest (@hulquest)
options:
  api_username:
    required: true
    description:
    - The username to authenticate with the SANtricity WebServices Proxy or embedded REST API.
  api_password:
    required: true
    description:
    - The password to authenticate with the SANtricity WebServices Proxy or embedded REST API.
  api_url:
    required: true
    description:
    - The url to the SANtricity WebServices Proxy or embedded REST API.
    example:
    - https://prod-1.wahoo.acme.com/devmgr/v2
  validate_certs:
    required: false
    default: true
    description:
    - Should https certificates be validated?
  source_volume_id:
    description:
    - The id of the volume copy source.
    - If used, must be paired with destination_volume_id
    - Mutually exclusive with volume_copy_pair_id, and search_volume_id
  destination_volume_id:
    description:
    - The id of the volume copy destination.
    - If used, must be paired with source_volume_id
    - Mutually exclusive with volume_copy_pair_id, and search_volume_id
  volume_copy_pair_id:
    description:
    - The id of a given volume copy pair
    - Mutually exclusive with destination_volume_id, source_volume_id, and search_volume_id
    - Can be used to delete or check the presence of volume pairs
    - Must specify this or (destination_volume_id and source_volume_id)
  status:
    description:
    - Whether the specified volume copy pair should exist or not.
    required: True
    choices: ['present', 'absent']
  create_copy_pair_if_does_not_exist:
    description:
    - Defines if a copy pair will be created if it does not exist.
    - If set to True destination_volume_id and source_volume_id are required.
    choices: [True, False]
    default: True
  start_stop_copy:
    description:
    - starts a re-copy or stops a copy in progress
    - "Note: If you stop the initial copy before it is done the copy pair will be destroyed"
    - Requires volume_copy_pair_id
  search_volume_id:
    description:
    - Searches for all valid potential target and source volumes that could be used in a copy_pair
    - Mutually exclusive with volume_copy_pair_id, destination_volume_id and source_volume_id
"""
EXAMPLES = """
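---
# A minimal example task; the volume ids below are illustrative placeholders:
- name: Create volume copy pair
  netapp_e_volume_copy:
    ssid: "{{ ssid }}"
    api_url: "{{ netapp_api_url }}"
    api_username: "{{ netapp_api_username }}"
    api_password: "{{ netapp_api_password }}"
    validate_certs: "{{ netapp_api_validate_certs }}"
    status: present
    source_volume_id: "{{ source_volume_id }}"
    destination_volume_id: "{{ destination_volume_id }}"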
"""
RETURN = """
msg:
    description: Success message
    returned: success
    type: string
    sample: Created Volume Copy Pair with ID
"""

import json

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.six.moves.urllib.error import HTTPError

HEADERS = {
    "Content-Type": "application/json",
    "Accept": "application/json",
}


def request(url, data=None, headers=None, method='GET', use_proxy=True,
            force=False, last_mod_time=None, timeout=10, validate_certs=True,
            url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
    try:
        r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
                     force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
                     url_username=url_username, url_password=url_password, http_agent=http_agent,
                     force_basic_auth=force_basic_auth)
    except HTTPError:
        err = get_exception()
        r = err.fp

    try:
        raw_data = r.read()
        if raw_data:
            data = json.loads(raw_data)
        else:
            raw_data = None
    except Exception:
        if ignore_errors:
            pass
        else:
            raise Exception(raw_data)

    resp_code = r.getcode()

    if resp_code >= 400 and not ignore_errors:
        raise Exception(resp_code, data)
    else:
        return resp_code, data


def find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(params):
    get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
    url = params['api_url'] + get_status

    (rc, resp) = request(url, method='GET', url_username=params['api_username'],
                         url_password=params['api_password'], headers=HEADERS,
                         validate_certs=params['validate_certs'])

    volume_copy_pair_id = None
    # a pair matches only when both its source and destination volumes match
    for potential_copy_pair in resp:
        if potential_copy_pair['sourceVolume'] == params['source_volume_id']:
            if potential_copy_pair['targetVolume'] == params['destination_volume_id']:
                volume_copy_pair_id = potential_copy_pair['id']

    return volume_copy_pair_id


def create_copy_pair(params):
    get_status = 'storage-systems/%s/volume-copy-jobs' % params['ssid']
    url = params['api_url'] + get_status

    rData = {
        "sourceId": params['source_volume_id'],
        "targetId": params['destination_volume_id']
    }

    (rc, resp) = request(url, data=json.dumps(rData), ignore_errors=True, method='POST',
                         url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
                         validate_certs=params['validate_certs'])
    if rc != 200:
        return False, (rc, resp)
    else:
        return True, (rc, resp)


def delete_copy_pair_by_copy_pair_id(params):
    get_status = 'storage-systems/%s/volume-copy-jobs/%s?retainRepositories=false' % (
        params['ssid'], params['volume_copy_pair_id'])
    url = params['api_url'] + get_status

    (rc, resp) = request(url, ignore_errors=True, method='DELETE',
                         url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
                         validate_certs=params['validate_certs'])
    if rc != 204:
        return False, (rc, resp)
    else:
        return True, (rc, resp)


def find_volume_copy_pair_id_by_volume_copy_pair_id(params):
    get_status = 'storage-systems/%s/volume-copy-jobs/%s' % (
        params['ssid'], params['volume_copy_pair_id'])
    url = params['api_url'] + get_status

    # this is a lookup, so fetch the pair rather than deleting it
    (rc, resp) = request(url, ignore_errors=True, method='GET',
                         url_username=params['api_username'], url_password=params['api_password'], headers=HEADERS,
                         validate_certs=params['validate_certs'])
    if rc != 200:
        return False, (rc, resp)
    else:
        return True, (rc, resp)


def start_stop_copy(params):
    get_status = 'storage-systems/%s/volume-copy-jobs-control/%s?control=%s' % (
        params['ssid'], params['volume_copy_pair_id'], params['start_stop_copy'])
    url = params['api_url'] + get_status

    (response_code, response_data) = request(url, ignore_errors=True, method='POST',
                                             url_username=params['api_username'], url_password=params['api_password'],
                                             headers=HEADERS,
                                             validate_certs=params['validate_certs'])

    if response_code == 200:
        return True, response_data[0]['percentComplete']
    else:
        return False, response_data


def check_copy_status(params):
    get_status = 'storage-systems/%s/volume-copy-jobs-control/%s' % (
        params['ssid'], params['volume_copy_pair_id'])
    url = params['api_url'] + get_status

    (response_code, response_data) = request(url, ignore_errors=True, method='GET',
                                             url_username=params['api_username'], url_password=params['api_password'],
                                             headers=HEADERS,
                                             validate_certs=params['validate_certs'])

    if response_code == 200:
        if response_data['percentComplete'] != -1:
            return True, response_data['percentComplete']
        else:
            return False, response_data['percentComplete']
    else:
        return False, response_data


def find_valid_copy_pair_targets_and_sources(params):
    get_status = 'storage-systems/%s/volumes' % params['ssid']
    url = params['api_url'] + get_status

    (response_code, response_data) = request(url, ignore_errors=True, method='GET',
                                             url_username=params['api_username'], url_password=params['api_password'],
                                             headers=HEADERS,
                                             validate_certs=params['validate_certs'])

    if response_code == 200:
        source_capacity = None
        candidates = []
        for volume in response_data:
            if volume['id'] == params['search_volume_id']:
                source_capacity = volume['capacity']
            else:
                candidates.append(volume)

        potential_sources = []
        potential_targets = []

        for volume in candidates:
            # capacities come back as strings, so compare them numerically
            if int(volume['capacity']) > int(source_capacity):
                if volume['volumeCopyTarget'] is False:
                    if volume['volumeCopySource'] is False:
                        potential_targets.append(volume['id'])
            else:
                if volume['volumeCopyTarget'] is False:
                    if volume['volumeCopySource'] is False:
                        potential_sources.append(volume['id'])

        return potential_targets, potential_sources

    else:
        raise Exception("Response [%s]" % response_code)
|
||||
|
||||
|
||||
def main():
    module = AnsibleModule(
        argument_spec=dict(
            source_volume_id=dict(type='str'),
            destination_volume_id=dict(type='str'),
            copy_priority=dict(required=False, default=0, type='int'),
            ssid=dict(required=True, type='str'),
            api_url=dict(required=True),
            api_username=dict(required=False),
            api_password=dict(required=False, no_log=True),
            validate_certs=dict(required=False, default=True, type='bool'),
            targetWriteProtected=dict(required=False, default=True, type='bool'),
            onlineCopy=dict(required=False, default=False, type='bool'),
            volume_copy_pair_id=dict(type='str'),
            status=dict(required=True, choices=['present', 'absent'], type='str'),
            create_copy_pair_if_does_not_exist=dict(required=False, default=True, type='bool'),
            start_stop_copy=dict(required=False, choices=['start', 'stop'], type='str'),
            search_volume_id=dict(type='str'),
        ),
        mutually_exclusive=[['volume_copy_pair_id', 'destination_volume_id'],
                            ['volume_copy_pair_id', 'source_volume_id'],
                            ['volume_copy_pair_id', 'search_volume_id'],
                            ['search_volume_id', 'destination_volume_id'],
                            ['search_volume_id', 'source_volume_id'],
                            ],
        required_together=[['source_volume_id', 'destination_volume_id'],
                           ],
        required_if=[['create_copy_pair_if_does_not_exist', True, ['source_volume_id', 'destination_volume_id']],
                     ['start_stop_copy', 'stop', ['volume_copy_pair_id']],
                     ['start_stop_copy', 'start', ['volume_copy_pair_id']],
                     ]
    )
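    # The constraints above enforce three mutually exclusive modes: searching
    # for candidate volumes (search_volume_id), controlling an existing copy
    # job (start_stop_copy with volume_copy_pair_id), and creating or deleting
    # a pair (source_volume_id/destination_volume_id, or volume_copy_pair_id).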
    params = module.params

    if not params['api_url'].endswith('/'):
        params['api_url'] += '/'

    # Check if we want to search for valid copy pair candidates
    if params['search_volume_id'] is not None:
        try:
            potential_targets, potential_sources = find_valid_copy_pair_targets_and_sources(params)
        except Exception:
            e = get_exception()
            module.fail_json(msg="Failed to find valid copy pair candidates. Error [%s]" % str(e))

        module.exit_json(changed=False,
                         msg='Valid source devices found: %s. Valid target devices found: %s.' % (len(potential_sources), len(potential_targets)),
                         search_volume_id=params['search_volume_id'],
                         valid_targets=potential_targets,
                         valid_sources=potential_sources)
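    # Note: starting an already-running job or stopping an already-idle one is
    # reported as changed=False, which keeps repeated plays idempotent.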
    # Check if we want to start or stop a copy operation
    if params['start_stop_copy'] == 'start' or params['start_stop_copy'] == 'stop':

        # Get the current status info
        currently_running, status_info = check_copy_status(params)

        # If we want to start
        if params['start_stop_copy'] == 'start':

            # If the copy has already started
            if currently_running is True:
                module.exit_json(changed=False, msg='Volume Copy Pair copy has started.',
                                 volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=status_info)
            # If we need to start it
            else:
                start_status, info = start_stop_copy(params)

                if start_status is True:
                    module.exit_json(changed=True, msg='Volume Copy Pair copy has started.',
                                     volume_copy_pair_id=params['volume_copy_pair_id'], percent_done=info)
                else:
                    module.fail_json(msg="Could not start volume copy pair. Error: %s" % info)

        # If we want to stop
        else:
            # If it has already stopped
            if currently_running is False:
                module.exit_json(changed=False, msg='Volume Copy Pair copy is stopped.',
                                 volume_copy_pair_id=params['volume_copy_pair_id'])
            # If we need to stop it
            else:
                start_status, info = start_stop_copy(params)

                if start_status is True:
                    module.exit_json(changed=True, msg='Volume Copy Pair copy has been stopped.',
                                     volume_copy_pair_id=params['volume_copy_pair_id'])
                else:
                    module.fail_json(msg="Could not stop volume copy pair. Error: %s" % info)
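    # For both 'present' and 'absent', the pair can be identified either
    # directly by volume_copy_pair_id or indirectly by looking it up from the
    # source and destination volume IDs.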
    # If we want the copy pair to exist
    if params['status'] == 'present':

        # We need to check whether it exists first
        if params['volume_copy_pair_id'] is None:
            params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
                params)

        # If no volume copy pair is found, we need to create it
        if params['volume_copy_pair_id'] is None:

            # A pair cannot be created from a volume_copy_pair_id alone
            copy_began_status, (rc, resp) = create_copy_pair(params)

            if copy_began_status is True:
                module.exit_json(changed=True, msg='Created Volume Copy Pair with ID: %s' % resp['id'])
            else:
                module.fail_json(msg="Could not create volume copy pair. Code: %s Error: %s" % (rc, resp))

        # If it does exist, we do nothing beyond verifying it
        else:
            exist_status, (exist_status_code, exist_status_data) = find_volume_copy_pair_id_by_volume_copy_pair_id(
                params)

            if exist_status:
                module.exit_json(changed=False,
                                 msg='Volume Copy Pair with ID: %s exists' % params['volume_copy_pair_id'])
            else:
                if exist_status_code == 404:
                    module.fail_json(
                        msg='Volume Copy Pair with ID: %s does not exist. Cannot create without source_volume_id and destination_volume_id' %
                            params['volume_copy_pair_id'])
                else:
                    module.fail_json(msg="Could not find volume copy pair. Code: %s Error: %s" % (
                        exist_status_code, exist_status_data))

        # All branches above exit; reaching this point indicates an unexpected fall-through
        module.fail_json(msg="Unexpected error: no action was taken for status 'present'")
    # If we want the pair to not exist
    else:
        if params['volume_copy_pair_id'] is None:
            params['volume_copy_pair_id'] = find_volume_copy_pair_id_from_source_volume_id_and_destination_volume_id(
                params)

        # We delete it by the volume_copy_pair_id
        delete_status, (delete_status_code, delete_status_data) = delete_copy_pair_by_copy_pair_id(params)

        if delete_status is True:
            module.exit_json(changed=True,
                             msg='Volume Copy Pair with ID: %s was deleted' % params['volume_copy_pair_id'])
        else:
            if delete_status_code == 404:
                module.exit_json(changed=False,
                                 msg='Volume Copy Pair with ID: %s does not exist' % params['volume_copy_pair_id'])
            else:
                module.fail_json(msg="Could not delete volume copy pair. Code: %s Error: %s" % (
                    delete_status_code, delete_status_data))


if __name__ == '__main__':
    main()