Native YAML - cloud/misc (#3615)

* Native YAML - cloud/misc

* Fix mistake
This commit is contained in:
Fabio Alessandro Locati 2016-12-02 14:49:23 +00:00 committed by Matt Clay
parent 75f9cb30e1
commit 2f369dff88
5 changed files with 205 additions and 49 deletions

View file

@@ -176,37 +176,114 @@ author: "Sergei Antipov @UnderGreen"
EXAMPLES = '''
# Create new container with minimal options
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- proxmox:
vmid: 100
node: uk-mc02
api_user: root@pam
api_password: 1q2w3e
api_host: node1
password: 123456
hostname: example.org
ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
# Create new container with minimal options with force(it will rewrite existing container)
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz' force=yes
- proxmox:
vmid: 100
node: uk-mc02
api_user: root@pam
api_password: 1q2w3e
api_host: node1
password: 123456
hostname: example.org
ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
force: yes
# Create new container with minimal options use environment PROXMOX_PASSWORD variable(you should export it before)
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
- proxmox:
vmid: 100
node: uk-mc02
api_user: root@pam
api_host: node1
password: 123456
hostname: example.org
ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
# Create new container with minimal options defining network interface with dhcp
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz' netif='{"net0":"name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0"}'
- proxmox:
vmid: 100
node: uk-mc02
api_user: root@pam
api_password: 1q2w3e
api_host: node1
password: 123456
hostname: example.org
ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
netif: '{"net0":"name=eth0,ip=dhcp,ip6=dhcp,bridge=vmbr0"}'
# Create new container with minimal options defining network interface with static ip
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz' netif='{"net0":"name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0"}'
- proxmox:
vmid: 100
node: uk-mc02
api_user: root@pam
api_password: 1q2w3e
api_host: node1
password: 123456
hostname: example.org
ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
netif: '{"net0":"name=eth0,gw=192.168.0.1,ip=192.168.0.2/24,bridge=vmbr0"}'
# Create new container with minimal options defining a mount
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz' mounts='{"mp0":"local:8,mp=/mnt/test/"}'
- proxmox:
vmid: 100
node: uk-mc02
api_user: root@pam
api_password: 1q2w3e
api_host: node1
password: 123456
hostname: example.org
ostemplate: 'local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
mounts: '{"mp0":"local:8,mp=/mnt/test/"}'
# Start container
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=started
- proxmox:
vmid: 100
api_user: root@pam
api_password: 1q2w3e
api_host: node1
state: started
# Stop container
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=stopped
- proxmox:
vmid: 100
api_user: root@pam
api_password: 1q2w3e
api_host: node1
state: stopped
# Stop container with force
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' force=yes state=stopped
- proxmox:
vmid: 100
api_user: root@pam
api_password: 1q2w3e
api_host: node1
force: yes
state: stopped
# Restart container(stopped or mounted container you can't restart)
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=stopped
- proxmox:
vmid: 100
api_user: root@pam
api_password: 1q2w3e
api_host: node1
state: stopped
# Remove container
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=absent
- proxmox:
vmid: 100
api_user: root@pam
api_password: 1q2w3e
api_host: node1
state: absent
'''
import os

View file

@@ -98,16 +98,39 @@ author: "Sergei Antipov @UnderGreen"
EXAMPLES = '''
# Upload new openvz template with minimal options
- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' src='~/ubuntu-14.04-x86_64.tar.gz'
- proxmox_template:
node: uk-mc02
api_user: root@pam
api_password: 1q2w3e
api_host: node1
src: ~/ubuntu-14.04-x86_64.tar.gz
# Upload new openvz template with minimal options use environment PROXMOX_PASSWORD variable(you should export it before)
- proxmox_template: node='uk-mc02' api_user='root@pam' api_host='node1' src='~/ubuntu-14.04-x86_64.tar.gz'
- proxmox_template:
node: uk-mc02
api_user: root@pam
api_host: node1
src: ~/ubuntu-14.04-x86_64.tar.gz
# Upload new openvz template with all options and force overwrite
- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' storage='local' content_type='vztmpl' src='~/ubuntu-14.04-x86_64.tar.gz' force=yes
- proxmox_template:
node: uk-mc02
api_user: root@pam
api_password: 1q2w3e
api_host: node1
storage: local
content_type: vztmpl
src: ~/ubuntu-14.04-x86_64.tar.gz
force: yes
# Delete template with minimal options
- proxmox_template: node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' template='ubuntu-14.04-x86_64.tar.gz' state=absent
- proxmox_template:
node: uk-mc02
api_user: root@pam
api_password: 1q2w3e
api_host: node1
template: ubuntu-14.04-x86_64.tar.gz
state: absent
'''
import os

View file

@@ -66,7 +66,9 @@ author:
EXAMPLES = '''
# a playbook task line:
- virt: name=alpha state=running
- virt:
name: alpha
state: running
# /usr/bin/ansible invocations
ansible host -m virt -a "name=alpha command=status"
@@ -76,12 +78,16 @@ ansible host -m virt -a "name=alpha command=create uri=lxc:///"
# a playbook example of defining and launching an LXC guest
tasks:
- name: define vm
virt: name=foo
command=define
xml="{{ lookup('template', 'container-template.xml.j2') }}"
uri=lxc:///
virt:
name: foo
command: define
  xml: "{{ lookup('template', 'container-template.xml.j2') }}"
uri: 'lxc:///'
- name: start vm
virt: name=foo state=running uri=lxc:///
virt:
name: foo
state: running
uri: 'lxc:///'
'''
RETURN = '''

View file

@@ -74,43 +74,66 @@ requirements:
EXAMPLES = '''
# Define a new network
- virt_net: command=define name=br_nat xml='{{ lookup("template", "network/bridge.xml.j2") }}'
- virt_net:
command: define
name: br_nat
xml: '{{ lookup("template", "network/bridge.xml.j2") }}'
# Start a network
- virt_net: command=create name=br_nat
- virt_net:
command: create
name: br_nat
# List available networks
- virt_net: command=list_nets
- virt_net:
command: list_nets
# Get XML data of a specified network
- virt_net: command=get_xml name=br_nat
- virt_net:
command: get_xml
name: br_nat
# Stop a network
- virt_net: command=destroy name=br_nat
- virt_net:
command: destroy
name: br_nat
# Undefine a network
- virt_net: command=undefine name=br_nat
- virt_net:
command: undefine
name: br_nat
# Gather facts about networks
# Facts will be available as 'ansible_libvirt_networks'
- virt_net: command=facts
- virt_net:
command: facts
# Gather information about network managed by 'libvirt' remotely using uri
- virt_net: command=info uri='{{ item }}'
with_items: "{{ libvirt_uris }}"
- virt_net:
command: info
uri: '{{ item }}'
with_items: '{{ libvirt_uris }}'
register: networks
# Ensure that a network is active (needs to be defined and built first)
- virt_net: state=active name=br_nat
- virt_net:
state: active
name: br_nat
# Ensure that a network is inactive
- virt_net: state=inactive name=br_nat
- virt_net:
state: inactive
name: br_nat
# Ensure that a given network will be started at boot
- virt_net: autostart=yes name=br_nat
- virt_net:
autostart: yes
name: br_nat
# Disable autostart for a given network
- virt_net: autostart=no name=br_nat
- virt_net:
autostart: no
name: br_nat
'''
VIRT_FAILED = 1

View file

@@ -79,49 +79,76 @@ requirements:
EXAMPLES = '''
# Define a new storage pool
- virt_pool: command=define name=vms xml='{{ lookup("template", "pool/dir.xml.j2") }}'
- virt_pool:
command: define
name: vms
xml: '{{ lookup("template", "pool/dir.xml.j2") }}'
# Build a storage pool if it does not exist
- virt_pool: command=build name=vms
- virt_pool:
command: build
name: vms
# Start a storage pool
- virt_pool: command=create name=vms
- virt_pool:
command: create
name: vms
# List available pools
- virt_pool: command=list_pools
- virt_pool:
command: list_pools
# Get XML data of a specified pool
- virt_pool: command=get_xml name=vms
- virt_pool:
command: get_xml
name: vms
# Stop a storage pool
- virt_pool: command=destroy name=vms
- virt_pool:
command: destroy
name: vms
# Delete a storage pool (destroys contents)
- virt_pool: command=delete name=vms
- virt_pool:
command: delete
name: vms
# Undefine a storage pool
- virt_pool: command=undefine name=vms
- virt_pool:
command: undefine
name: vms
# Gather facts about storage pools
# Facts will be available as 'ansible_libvirt_pools'
- virt_pool: command=facts
- virt_pool:
command: facts
# Gather information about pools managed by 'libvirt' remotely using uri
- virt_pool: command=info uri='{{ item }}'
with_items: "{{ libvirt_uris }}"
- virt_pool:
command: info
uri: '{{ item }}'
with_items: '{{ libvirt_uris }}'
register: storage_pools
# Ensure that a pool is active (needs to be defined and built first)
- virt_pool: state=active name=vms
- virt_pool:
state: active
name: vms
# Ensure that a pool is inactive
- virt_pool: state=inactive name=vms
- virt_pool:
state: inactive
name: vms
# Ensure that a given pool will be started at boot
- virt_pool: autostart=yes name=vms
- virt_pool:
autostart: yes
name: vms
# Disable autostart for a given pool
- virt_pool: autostart=no name=vms
- virt_pool:
autostart: no
name: vms
'''
VIRT_FAILED = 1