Not core.
This commit is contained in: parent 42ab1eab36, commit b2bf4b9bf7
2 changed files with 0 additions and 918 deletions

@@ -1,425 +0,0 @@
#!/usr/bin/python

# (c) 2013, Vincent Van der Kussen <vincent at vanderkussen.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: ovirt
author: Vincent Van der Kussen
short_description: oVirt/RHEV platform management
description:
  - allows you to create new instances, either from scratch or an image, in addition to deleting or stopping instances on the oVirt/RHEV platform
version_added: "1.4"
options:
  user:
    description:
      - the user to authenticate with
    default: null
    required: true
    aliases: []
  url:
    description:
      - the url of the oVirt instance
    default: null
    required: true
    aliases: []
  instance_name:
    description:
      - the name of the instance to use
    default: null
    required: true
    aliases: [ vmname ]
  password:
    description:
      - password of the user to authenticate with
    default: null
    required: true
    aliases: []
  image:
    description:
      - template to use for the instance
    default: null
    required: false
    aliases: []
  resource_type:
    description:
      - whether you want to deploy an image or create an instance from scratch.
    default: null
    required: false
    aliases: []
    choices: [ 'new', 'template' ]
  zone:
    description:
      - deploy the image to this oVirt cluster
    default: null
    required: false
    aliases: []
  instance_disksize:
    description:
      - size of the instance's disk in GB
    default: null
    required: false
    aliases: [ vm_disksize ]
  instance_cpus:
    description:
      - the instance's number of CPUs
    default: 1
    required: false
    aliases: [ vmcpus ]
  instance_nic:
    description:
      - name of the network interface in oVirt/RHEV
    default: null
    required: false
    aliases: [ vmnic ]
  instance_network:
    description:
      - the logical network the machine should belong to
    default: rhevm
    required: false
    aliases: [ vmnetwork ]
  instance_mem:
    description:
      - the instance's amount of memory in MB
    default: null
    required: false
    aliases: [ vmmem ]
  instance_type:
    description:
      - define if the instance is a server or desktop
    default: server
    required: false
    aliases: [ vmtype ]
    choices: [ 'server', 'desktop' ]
  disk_alloc:
    description:
      - define if disk is thin or preallocated
    default: thin
    required: false
    aliases: []
    choices: [ 'thin', 'preallocated' ]
  disk_int:
    description:
      - interface type of the disk
    default: virtio
    required: false
    aliases: []
    choices: [ 'virtio', 'ide' ]
  instance_os:
    description:
      - type of Operating System
    default: null
    required: false
    aliases: [ vmos ]
  instance_cores:
    description:
      - define the instance's number of cores
    default: 1
    required: false
    aliases: [ vmcores ]
  sdomain:
    description:
      - the Storage Domain where you want to create the instance's disk on.
    default: null
    required: false
    aliases: []
  region:
    description:
      - the oVirt/RHEV datacenter where you want to deploy to
    default: null
    required: false
    aliases: []
  state:
    description:
      - create, terminate or remove instances
    default: 'present'
    required: false
    aliases: []
    choices: ['present', 'absent', 'shutdown', 'started', 'restarted']

requirements: [ "ovirt-engine-sdk" ]
'''
EXAMPLES = '''
# Basic example provisioning from image.

action: ovirt >
    user=admin@internal
    url=https://ovirt.example.com
    instance_name=ansiblevm04
    password=secret
    image=centos_64
    zone=cluster01
    resource_type=template

# Full example to create new instance from scratch
action: ovirt >
    instance_name=testansible
    resource_type=new
    instance_type=server
    user=admin@internal
    password=secret
    url=https://ovirt.example.com
    instance_disksize=10
    zone=cluster01
    region=datacenter1
    instance_cpus=1
    instance_nic=nic1
    instance_network=rhevm
    instance_mem=1000
    disk_alloc=thin
    sdomain=FIBER01
    instance_cores=1
    instance_os=rhel_6x64
    disk_int=virtio

# stopping an instance
action: ovirt >
    instance_name=testansible
    state=shutdown
    user=admin@internal
    password=secret
    url=https://ovirt.example.com

# starting an instance
action: ovirt >
    instance_name=testansible
    state=started
    user=admin@internal
    password=secret
    url=https://ovirt.example.com

'''

import sys
import time

try:
    from ovirtsdk.api import API
    from ovirtsdk.xml import params
except ImportError:
    print "failed=True msg='ovirtsdk required for this module'"
    sys.exit(1)

# ------------------------------------------------------------------- #
# create connection with API
#
def conn(url, user, password):
    api = API(url=url, username=user, password=password, insecure=True)
    try:
        value = api.test()
    except:
        print "error connecting to the oVirt API"
        sys.exit(1)
    return api

# ------------------------------------------------------------------- #
# Create VM from scratch
def create_vm(conn, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int):
    if vmdisk_alloc == 'thin':
        # define VM params
        vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos), template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem), cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))), type_=vmtype)
        # define disk params
        vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=True, interface=vmdisk_int, type_="System", format='cow',
                             storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
        # define network parameters
        network_net = params.Network(name=vmnetwork)
        nic_net1 = params.NIC(name='nic1', network=network_net, interface='virtio')
    elif vmdisk_alloc == 'preallocated':
        # define VM params
        vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), os=params.OperatingSystem(type_=vmos), template=conn.templates.get(name="Blank"), memory=1024 * 1024 * int(vmmem), cpu=params.CPU(topology=params.CpuTopology(cores=int(vmcores))), type_=vmtype)
        # define disk params
        vmdisk = params.Disk(size=1024 * 1024 * 1024 * int(vmdisk_size), wipe_after_delete=True, sparse=False, interface=vmdisk_int, type_="System", format='raw',
                             storage_domains=params.StorageDomains(storage_domain=[conn.storagedomains.get(name=sdomain)]))
        # define network parameters
        network_net = params.Network(name=vmnetwork)
        nic_net1 = params.NIC(name=vmnic, network=network_net, interface='virtio')

    try:
        conn.vms.add(vmparams)
    except:
        print "Error creating VM with specified parameters"
        sys.exit(1)
    vm = conn.vms.get(name=vmname)
    try:
        vm.disks.add(vmdisk)
    except:
        print "Error attaching disk"
    try:
        vm.nics.add(nic_net1)
    except:
        print "Error adding nic"

# create an instance from a template
def create_vm_template(conn, vmname, image, zone):
    vmparams = params.VM(name=vmname, cluster=conn.clusters.get(name=zone), template=conn.templates.get(name=image), disks=params.Disks(clone=True))
    try:
        conn.vms.add(vmparams)
    except:
        print 'error adding template %s' % image
        sys.exit(1)

# start instance
def vm_start(conn, vmname):
    vm = conn.vms.get(name=vmname)
    vm.start()

# Stop instance
def vm_stop(conn, vmname):
    vm = conn.vms.get(name=vmname)
    vm.stop()

# restart instance
def vm_restart(conn, vmname):
    state = vm_status(conn, vmname)
    vm = conn.vms.get(name=vmname)
    vm.stop()
    while conn.vms.get(vmname).get_status().get_state() != 'down':
        time.sleep(5)
    vm.start()

# remove an instance
def vm_remove(conn, vmname):
    vm = conn.vms.get(name=vmname)
    vm.delete()

# ------------------------------------------------------------------- #
# VM statuses
#
# Get the VMs status
def vm_status(conn, vmname):
    status = conn.vms.get(name=vmname).status.state
    print "vm status is : %s" % status
    return status

# Get VM object and return its name if the object exists
def get_vm(conn, vmname):
    vm = conn.vms.get(name=vmname)
    if vm == None:
        name = "empty"
        print "vmname: %s" % name
    else:
        name = vm.get_name()
        print "vmname: %s" % name
    return name

# ------------------------------------------------------------------- #
# Hypervisor operations
#
# not available yet
# ------------------------------------------------------------------- #
# Main

def main():

    module = AnsibleModule(
        argument_spec = dict(
            state = dict(default='present', choices=['present', 'absent', 'shutdown', 'started', 'restart']),
            #name = dict(required=True),
            user = dict(required=True),
            url = dict(required=True),
            instance_name = dict(required=True, aliases=['vmname']),
            password = dict(required=True),
            image = dict(),
            resource_type = dict(choices=['new', 'template']),
            zone = dict(),
            instance_disksize = dict(aliases=['vm_disksize']),
            instance_cpus = dict(default=1, aliases=['vmcpus']),
            instance_nic = dict(aliases=['vmnic']),
            instance_network = dict(default='rhevm', aliases=['vmnetwork']),
            instance_mem = dict(aliases=['vmmem']),
            instance_type = dict(default='server', aliases=['vmtype'], choices=['server', 'desktop']),
            disk_alloc = dict(default='thin', choices=['thin', 'preallocated']),
            disk_int = dict(default='virtio', choices=['virtio', 'ide']),
            instance_os = dict(aliases=['vmos']),
            instance_cores = dict(default=1, aliases=['vmcores']),
            sdomain = dict(),
            region = dict(),
        )
    )

    state = module.params['state']
    user = module.params['user']
    url = module.params['url']
    vmname = module.params['instance_name']
    password = module.params['password']
    image = module.params['image']                      # name of the image to deploy
    resource_type = module.params['resource_type']      # template or from scratch
    zone = module.params['zone']                        # oVirt cluster
    vmdisk_size = module.params['instance_disksize']    # disk size
    vmcpus = module.params['instance_cpus']             # number of cpu
    vmnic = module.params['instance_nic']               # network interface
    vmnetwork = module.params['instance_network']       # logical network
    vmmem = module.params['instance_mem']               # mem size
    vmdisk_alloc = module.params['disk_alloc']          # thin, preallocated
    vmdisk_int = module.params['disk_int']              # disk interface virtio or ide
    vmos = module.params['instance_os']                 # Operating System
    vmtype = module.params['instance_type']             # server or desktop
    vmcores = module.params['instance_cores']           # number of cores
    sdomain = module.params['sdomain']                  # storage domain to store disk on
    region = module.params['region']                    # oVirt datacenter

    # initialize connection
    c = conn(url+"/api", user, password)

    if state == 'present':
        if get_vm(c, vmname) == "empty":
            if resource_type == 'template':
                create_vm_template(c, vmname, image, zone)
                module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmname, image))
            elif resource_type == 'new':
                # FIXME: refactor, use keyword args.
                create_vm(c, vmtype, vmname, zone, vmdisk_size, vmcpus, vmnic, vmnetwork, vmmem, vmdisk_alloc, sdomain, vmcores, vmos, vmdisk_int)
                module.exit_json(changed=True, msg="deployed VM %s from scratch" % vmname)
            else:
                module.exit_json(changed=False, msg="You did not specify a resource type")
        else:
            module.exit_json(changed=False, msg="VM %s already exists" % vmname)

    if state == 'started':
        if vm_status(c, vmname) == 'up':
            module.exit_json(changed=False, msg="VM %s is already running" % vmname)
        else:
            vm_start(c, vmname)
            module.exit_json(changed=True, msg="VM %s started" % vmname)

    if state == 'shutdown':
        if vm_status(c, vmname) == 'down':
            module.exit_json(changed=False, msg="VM %s is already shutdown" % vmname)
        else:
            vm_stop(c, vmname)
            module.exit_json(changed=True, msg="VM %s is shutting down" % vmname)

    if state == 'restart':
        if vm_status(c, vmname) == 'up':
            vm_restart(c, vmname)
            module.exit_json(changed=True, msg="VM %s is restarted" % vmname)
        else:
            module.exit_json(changed=False, msg="VM %s is not running" % vmname)

    if state == 'absent':
        if get_vm(c, vmname) == "empty":
            module.exit_json(changed=False, msg="VM %s does not exist" % vmname)
        else:
            vm_remove(c, vmname)
            module.exit_json(changed=True, msg="VM %s removed" % vmname)


# import module snippets
from ansible.module_utils.basic import *
main()
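
For reference, the module above drives everything through ovirt-engine-sdk. Below is a minimal standalone sketch of the same flow (connect, then deploy from a template), using only the SDK calls the module itself makes; the URL, credentials, and the "myvm"/"centos_64"/"cluster01" values are placeholders, not anything taken from this commit:

from ovirtsdk.api import API
from ovirtsdk.xml import params

# connect to the engine API, as conn() does above
api = API(url='https://ovirt.example.com/api', username='admin@internal',
          password='secret', insecure=True)

# deploy a VM from a template, as create_vm_template() does above
vmparams = params.VM(name='myvm',
                     cluster=api.clusters.get(name='cluster01'),
                     template=api.templates.get(name='centos_64'),
                     disks=params.Disks(clone=True))
api.vms.add(vmparams)

# report the new VM's state, as vm_status() does above
print api.vms.get(name='myvm').status.state
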
@@ -1,493 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-

"""
Virt management features

Copyright 2007, 2012 Red Hat, Inc
Michael DeHaan <michael.dehaan@gmail.com>
Seth Vidal <skvidal@fedoraproject.org>

This software may be freely redistributed under the terms of the GNU
general public license.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""

DOCUMENTATION = '''
---
module: virt
short_description: Manages virtual machines supported by libvirt
description:
     - Manages virtual machines supported by I(libvirt).
version_added: "0.2"
options:
  name:
    description:
      - name of the guest VM being managed. Note that VM must be previously
        defined with xml.
    required: true
    default: null
    aliases: []
  state:
    description:
      - Note that there may be some lag for state requests like C(shutdown)
        since these refer only to VM states. After starting a guest, it may not
        be immediately accessible.
    required: false
    choices: [ "running", "shutdown", "destroyed", "paused" ]
    default: "no"
  command:
    description:
      - in addition to state management, various non-idempotent commands are available. See examples
    required: false
    choices: ["create", "status", "start", "stop", "pause", "unpause",
              "shutdown", "undefine", "destroy", "get_xml", "autostart",
              "freemem", "list_vms", "info", "nodeinfo", "virttype", "define"]
  uri:
    description:
      - libvirt connection uri
    required: false
    default: qemu:///system
  xml:
    description:
      - XML document used with the define command
    required: false
    default: null
requirements: [ "libvirt" ]
author: Michael DeHaan, Seth Vidal
'''

EXAMPLES = '''
# a playbook task line:
- virt: name=alpha state=running

# /usr/bin/ansible invocations
ansible host -m virt -a "name=alpha command=status"
ansible host -m virt -a "name=alpha command=get_xml"
ansible host -m virt -a "name=alpha command=create uri=lxc:///"

# a playbook example of defining and launching an LXC guest
tasks:
  - name: define vm
    virt: name=foo
          command=define
          xml="{{ lookup('template', 'container-template.xml.j2') }}"
          uri=lxc:///
  - name: start vm
    virt: name=foo state=running uri=lxc:///
'''

VIRT_FAILED = 1
VIRT_SUCCESS = 0
VIRT_UNAVAILABLE = 2

import sys

try:
    import libvirt
except ImportError:
    print "failed=True msg='libvirt python module unavailable'"
    sys.exit(1)

ALL_COMMANDS = []
VM_COMMANDS = ['create', 'status', 'start', 'stop', 'pause', 'unpause',
               'shutdown', 'undefine', 'destroy', 'get_xml', 'autostart', 'define']
HOST_COMMANDS = ['freemem', 'list_vms', 'info', 'nodeinfo', 'virttype']
ALL_COMMANDS.extend(VM_COMMANDS)
ALL_COMMANDS.extend(HOST_COMMANDS)

# keyed by the numeric domain state codes libvirt returns from info()
VIRT_STATE_NAME_MAP = {
    0: "running",
    1: "running",
    2: "running",
    3: "paused",
    4: "shutdown",
    5: "shutdown",
    6: "crashed"
}

class VMNotFound(Exception):
    pass

class LibvirtConnection(object):

    def __init__(self, uri, module):

        self.module = module

        cmd = "uname -r"
        rc, stdout, stderr = self.module.run_command(cmd)

        if "xen" in stdout:
            conn = libvirt.open(None)
        else:
            conn = libvirt.open(uri)

        if not conn:
            raise Exception("hypervisor connection failure")

        self.conn = conn

    def find_vm(self, vmid):
        """
        Extra bonus feature: vmid = -1 returns a list of everything
        """
        conn = self.conn

        vms = []

        # this block of code borrowed from virt-manager:
        # get working domain's name
        ids = conn.listDomainsID()
        for id in ids:
            vm = conn.lookupByID(id)
            vms.append(vm)
        # get defined domain
        names = conn.listDefinedDomains()
        for name in names:
            vm = conn.lookupByName(name)
            vms.append(vm)

        if vmid == -1:
            return vms

        for vm in vms:
            if vm.name() == vmid:
                return vm

        raise VMNotFound("virtual machine %s not found" % vmid)

    def shutdown(self, vmid):
        return self.find_vm(vmid).shutdown()

    def pause(self, vmid):
        return self.suspend(vmid)

    def unpause(self, vmid):
        return self.resume(vmid)

    def suspend(self, vmid):
        return self.find_vm(vmid).suspend()

    def resume(self, vmid):
        return self.find_vm(vmid).resume()

    def create(self, vmid):
        return self.find_vm(vmid).create()

    def destroy(self, vmid):
        return self.find_vm(vmid).destroy()

    def undefine(self, vmid):
        return self.find_vm(vmid).undefine()

    def get_status2(self, vm):
        state = vm.info()[0]
        return VIRT_STATE_NAME_MAP.get(state, "unknown")

    def get_status(self, vmid):
        state = self.find_vm(vmid).info()[0]
        return VIRT_STATE_NAME_MAP.get(state, "unknown")

    def nodeinfo(self):
        return self.conn.getInfo()

    def get_type(self):
        return self.conn.getType()

    def get_xml(self, vmid):
        vm = self.conn.lookupByName(vmid)
        return vm.XMLDesc(0)

    def get_maxVcpus(self, vmid):
        vm = self.conn.lookupByName(vmid)
        return vm.maxVcpus()

    def get_maxMemory(self, vmid):
        vm = self.conn.lookupByName(vmid)
        return vm.maxMemory()

    def getFreeMemory(self):
        return self.conn.getFreeMemory()

    def get_autostart(self, vmid):
        vm = self.conn.lookupByName(vmid)
        return vm.autostart()

    def set_autostart(self, vmid, val):
        vm = self.conn.lookupByName(vmid)
        return vm.setAutostart(val)

    def define_from_xml(self, xml):
        return self.conn.defineXML(xml)


class Virt(object):

    def __init__(self, uri, module):
        self.module = module
        self.uri = uri

    def __get_conn(self):
        self.conn = LibvirtConnection(self.uri, self.module)
        return self.conn

    def get_vm(self, vmid):
        self.__get_conn()
        return self.conn.find_vm(vmid)

    def state(self):
        vms = self.list_vms()
        state = []
        for vm in vms:
            state_blurb = self.conn.get_status(vm)
            state.append("%s %s" % (vm, state_blurb))
        return state

    def info(self):
        vms = self.list_vms()
        info = dict()
        for vm in vms:
            data = self.conn.find_vm(vm).info()
            # libvirt returns maxMem, memory, and cpuTime as long()'s, which
            # xmlrpclib tries to convert to regular int's during serialization.
            # This throws exceptions, so convert them to strings here and
            # assume the other end of the xmlrpc connection can figure things
            # out or doesn't care.
            info[vm] = {
                "state" : VIRT_STATE_NAME_MAP.get(data[0], "unknown"),
                "maxMem" : str(data[1]),
                "memory" : str(data[2]),
                "nrVirtCpu" : data[3],
                "cpuTime" : str(data[4]),
            }
            info[vm]["autostart"] = self.conn.get_autostart(vm)

        return info

    def nodeinfo(self):
        self.__get_conn()
        info = dict()
        data = self.conn.nodeinfo()
        info = {
            "cpumodel" : str(data[0]),
            "phymemory" : str(data[1]),
            "cpus" : str(data[2]),
            "cpumhz" : str(data[3]),
            "numanodes" : str(data[4]),
            "sockets" : str(data[5]),
            "cpucores" : str(data[6]),
            "cputhreads" : str(data[7])
        }
        return info

    def list_vms(self, state=None):
        self.conn = self.__get_conn()
        vms = self.conn.find_vm(-1)
        results = []
        for x in vms:
            try:
                if state:
                    vmstate = self.conn.get_status2(x)
                    if vmstate == state:
                        results.append(x.name())
                else:
                    results.append(x.name())
            except:
                pass
        return results

    def virttype(self):
        return self.__get_conn().get_type()

    def autostart(self, vmid):
        self.conn = self.__get_conn()
        return self.conn.set_autostart(vmid, True)

    def freemem(self):
        self.conn = self.__get_conn()
        return self.conn.getFreeMemory()

    def shutdown(self, vmid):
        """ Make the machine with the given vmid stop running. Whatever that takes. """
        self.__get_conn()
        self.conn.shutdown(vmid)
        return 0

    def pause(self, vmid):
        """ Pause the machine with the given vmid. """

        self.__get_conn()
        return self.conn.suspend(vmid)

    def unpause(self, vmid):
        """ Unpause the machine with the given vmid. """

        self.__get_conn()
        return self.conn.resume(vmid)

    def create(self, vmid):
        """ Start the machine via the given vmid """

        self.__get_conn()
        return self.conn.create(vmid)

    def start(self, vmid):
        """ Start the machine via the given id/name """

        self.__get_conn()
        return self.conn.create(vmid)

    def destroy(self, vmid):
        """ Pull the virtual power from the virtual domain, giving it virtually no time to virtually shut down. """
        self.__get_conn()
        return self.conn.destroy(vmid)

    def undefine(self, vmid):
        """ Stop a domain, and then wipe it from the face of the earth. (delete disk/config file) """

        self.__get_conn()
        return self.conn.undefine(vmid)

    def status(self, vmid):
        """
        Return a state suitable for server consumption. Aka, codes.py values, not XM output.
        """
        self.__get_conn()
        return self.conn.get_status(vmid)

    def get_xml(self, vmid):
        """
        Receive a Vm id as input
        Return an xml describing vm config returned by a libvirt call
        """

        self.__get_conn()
        return self.conn.get_xml(vmid)

    def get_maxVcpus(self, vmid):
        """
        Gets the max number of VCPUs on a guest
        """

        self.__get_conn()
        return self.conn.get_maxVcpus(vmid)

    def get_max_memory(self, vmid):
        """
        Gets the max memory on a guest
        """

        self.__get_conn()
        return self.conn.get_maxMemory(vmid)

    def define(self, xml):
        """
        Define a guest with the given xml
        """
        self.__get_conn()
        return self.conn.define_from_xml(xml)

def core(module):

    state      = module.params.get('state', None)
    guest      = module.params.get('name', None)
    command    = module.params.get('command', None)
    uri        = module.params.get('uri', None)
    xml        = module.params.get('xml', None)

    v = Virt(uri, module)
    res = {}

    if state and command == 'list_vms':
        res = v.list_vms(state=state)
        if type(res) != dict:
            res = { command: res }
        return VIRT_SUCCESS, res

    if state:
        if not guest:
            module.fail_json(msg = "state change requires a guest specified")

        res['changed'] = False
        if state == 'running':
            if v.status(guest) == 'paused':
                res['changed'] = True
                res['msg'] = v.unpause(guest)
            elif v.status(guest) != 'running':
                res['changed'] = True
                res['msg'] = v.start(guest)
        elif state == 'shutdown':
            if v.status(guest) != 'shutdown':
                res['changed'] = True
                res['msg'] = v.shutdown(guest)
        elif state == 'destroyed':
            if v.status(guest) != 'shutdown':
                res['changed'] = True
                res['msg'] = v.destroy(guest)
        elif state == 'paused':
            if v.status(guest) == 'running':
                res['changed'] = True
                res['msg'] = v.pause(guest)
        else:
            module.fail_json(msg="unexpected state")

        return VIRT_SUCCESS, res

    if command:
        if command in VM_COMMANDS:
            if not guest:
                module.fail_json(msg = "%s requires 1 argument: guest" % command)
            if command == 'define':
                if not xml:
                    module.fail_json(msg = "define requires xml argument")
                try:
                    v.get_vm(guest)
                except VMNotFound:
                    v.define(xml)
                    res = {'changed': True, 'created': guest}
                return VIRT_SUCCESS, res
            res = getattr(v, command)(guest)
            if type(res) != dict:
                res = { command: res }
            return VIRT_SUCCESS, res

        elif hasattr(v, command):
            res = getattr(v, command)()
            if type(res) != dict:
                res = { command: res }
            return VIRT_SUCCESS, res

        else:
            module.fail_json(msg="Command %s not recognized" % command)

    module.fail_json(msg="expected state or command parameter to be specified")

def main():

    module = AnsibleModule(argument_spec=dict(
        name = dict(aliases=['guest']),
        state = dict(choices=['running', 'shutdown', 'destroyed', 'paused']),
        command = dict(choices=ALL_COMMANDS),
        uri = dict(default='qemu:///system'),
        xml = dict(),
    ))

    rc = VIRT_SUCCESS
    try:
        rc, result = core(module)
    except Exception, e:
        module.fail_json(msg=str(e))

    if rc != 0: # something went wrong emit the msg
        module.fail_json(rc=rc, msg=result)
    else:
        module.exit_json(**result)


# import module snippets
from ansible.module_utils.basic import *
main()
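
The removed virt module's status reporting boils down to mapping dom.info()[0] through VIRT_STATE_NAME_MAP. Below is a minimal standalone sketch of that lookup against a local hypervisor; the 'qemu:///system' URI and the guest name 'alpha' are placeholder values, and only libvirt calls the module itself uses appear here:

import libvirt

VIRT_STATE_NAME_MAP = {0: "running", 1: "running", 2: "running",
                       3: "paused", 4: "shutdown", 5: "shutdown", 6: "crashed"}

conn = libvirt.open('qemu:///system')   # same call the module makes on non-Xen hosts
dom = conn.lookupByName('alpha')        # find the guest by name
state_code = dom.info()[0]              # first field of info() is the numeric state code
print VIRT_STATE_NAME_MAP.get(state_code, "unknown")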