vmware_guest: use the datacenter as a context for clone specs (#26511)
* Slight refactor on vmware_guest to fix path searching and vcsim compatibility.
* Clean up pep8 errors
* Fix more pep8 issues
* Add assertions
* Remove debug failure
* Update docstring for folder with examples
* Revise _get_vm_prop based on suggestions
* Implement folder walker to find a folder path
* More enhancements for datacenter->datacenter cloning
* Fix a few pep8 issues
* Remove useless check for subclass of None
* Fix a few suggestions
* Move serialize_spec to a util function
* Group exceptions together

Fixes #25011
parent 5b898a7732
commit 10fc4417f7

6 changed files with 364 additions and 37 deletions
lib/ansible/module_utils/vmware.py

@@ -213,6 +213,17 @@ def find_host_portgroup_by_name(host, portgroup_name):
     return None
 
 
+def _get_vm_prop(vm, attributes):
+    """Safely get a property or return None"""
+    result = vm
+    for attribute in attributes:
+        try:
+            result = getattr(result, attribute)
+        except (AttributeError, IndexError):
+            return None
+    return result
+
+
 def gather_vm_facts(content, vm):
     """ Gather facts from vim.VirtualMachine object. """
     facts = {
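As an illustration (not part of the commit), a minimal sketch of how _get_vm_prop traverses nested attributes; the SimpleNamespace objects are hypothetical stand-ins for pyVmomi objects:

    from types import SimpleNamespace

    guest = SimpleNamespace(toolsRunningStatus='guestToolsRunning')
    vm = SimpleNamespace(guest=guest)

    _get_vm_prop(vm, ('guest', 'toolsRunningStatus'))  # -> 'guestToolsRunning'
    _get_vm_prop(vm, ('guest', 'net'))                 # -> None, no exception raised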
@@ -225,8 +236,8 @@ def gather_vm_facts(content, vm):
         'hw_processor_count': vm.config.hardware.numCPU,
         'hw_memtotal_mb': vm.config.hardware.memoryMB,
         'hw_interfaces': [],
-        'guest_tools_status': vm.guest.toolsRunningStatus,
-        'guest_tools_version': vm.guest.toolsVersion,
+        'guest_tools_status': _get_vm_prop(vm, ('guest', 'toolsRunningStatus')),
+        'guest_tools_version': _get_vm_prop(vm, ('guest', 'toolsVersion')),
         'ipv4': None,
         'ipv6': None,
         'annotation': vm.config.annotation,
@@ -249,8 +260,10 @@ def gather_vm_facts(content, vm):
             facts['customvalues'][kn] = value_obj.value
 
     net_dict = {}
-    for device in vm.guest.net:
-        net_dict[device.macAddress] = list(device.ipAddress)
+    vmnet = _get_vm_prop(vm, ('guest', 'net'))
+    if vmnet:
+        for device in vmnet:
+            net_dict[device.macAddress] = list(device.ipAddress)
 
     for k, v in iteritems(net_dict):
         for ipaddress in v:
@@ -317,6 +330,9 @@ def get_current_snap_obj(snapshots, snapob):
 
 def list_snapshots(vm):
     result = {}
+    snapshot = _get_vm_prop(vm, ('vm', 'snapshot'))
+    if not snapshot:
+        return result
     if vm.snapshot is None:
         return result
 
@@ -360,7 +376,7 @@ def connect_to_api(module, disconnect_atexit=True):
             service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password, sslContext=context)
         else:
             module.fail_json(msg="Unable to connect to vCenter or ESXi API on TCP/443.", apierror=str(connection_error))
-    except Exception as e:
+    except:
         context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
         context.verify_mode = ssl.CERT_NONE
         service_instance = connect.SmartConnect(host=hostname, user=username, pwd=password, sslContext=context)
@@ -533,3 +549,58 @@ def run_command_in_guest(content, vm, username, password, program_path, program_
         result['failed'] = True
 
     return result
+
+
+def serialize_spec(clonespec):
+    """Serialize a clonespec or a relocation spec"""
+    data = {}
+    attrs = dir(clonespec)
+    attrs = [x for x in attrs if not x.startswith('_')]
+    for x in attrs:
+        xo = getattr(clonespec, x)
+        if callable(xo):
+            continue
+        xt = type(xo)
+        if xo is None:
+            data[x] = None
+        elif issubclass(xt, list):
+            data[x] = []
+            for xe in xo:
+                data[x].append(serialize_spec(xe))
+        elif issubclass(xt, str):
+            data[x] = xo
+        elif issubclass(xt, unicode):
+            data[x] = xo
+        elif issubclass(xt, int):
+            data[x] = xo
+        elif issubclass(xt, float):
+            data[x] = xo
+        elif issubclass(xt, long):
+            data[x] = xo
+        elif issubclass(xt, bool):
+            data[x] = xo
+        elif issubclass(xt, dict):
+            data[x] = {}
+            for k, v in xo.items():
+                data[x][k] = serialize_spec(v)
+        elif isinstance(xo, vim.vm.ConfigSpec):
+            data[x] = serialize_spec(xo)
+        elif isinstance(xo, vim.vm.RelocateSpec):
+            data[x] = serialize_spec(xo)
+        elif isinstance(xo, vim.vm.device.VirtualDisk):
+            data[x] = serialize_spec(xo)
+        elif isinstance(xo, vim.Description):
+            data[x] = {
+                'dynamicProperty': serialize_spec(xo.dynamicProperty),
+                'dynamicType': serialize_spec(xo.dynamicType),
+                'label': serialize_spec(xo.label),
+                'summary': serialize_spec(xo.summary),
+            }
+        elif hasattr(xo, 'name'):
+            data[x] = str(xo) + ':' + xo.name
+        elif isinstance(xo, vim.vm.ProfileSpec):
+            pass
+        else:
+            data[x] = str(xt)
+
+    return data
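A minimal sketch of what serialize_spec produces, with serialize_spec as defined above; the Dummy class is hypothetical and stands in for a pyVmomi spec object:

    class Dummy(object):
        def __init__(self):
            self.numCPUs = 2
            self.annotation = None
            self.deviceChange = []

    serialize_spec(Dummy())
    # -> {'annotation': None, 'deviceChange': [], 'numCPUs': 2}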

lib/ansible/modules/cloud/vmware/vmware_guest.py
@@ -74,8 +74,21 @@ options:
     version_added: '2.3'
   folder:
     description:
-    - Destination folder, absolute path to find an existing guest or create the new guest.
-    default: /
+    - Destination folder, absolute or relative path to find an existing guest or create the new guest.
+    - The folder should include the datacenter. ESX's datacenter is ha-datacenter.
+    - 'Examples:'
+    - '   folder: /ha-datacenter/vm'
+    - '   folder: ha-datacenter/vm'
+    - '   folder: /datacenter1/vm'
+    - '   folder: datacenter1/vm'
+    - '   folder: /datacenter1/vm/folder1'
+    - '   folder: datacenter1/vm/folder1'
+    - '   folder: /folder1/datacenter1/vm'
+    - '   folder: folder1/datacenter1/vm'
+    - '   folder: /folder1/datacenter1/vm/folder2'
+    - '   folder: vm/folder2'
+    - '   folder: folder2'
+    default: /vm
   hardware:
     description:
     - Manage some VM hardware attributes.
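A hedged sketch of how the folder examples above map onto inventory searches; si is assumed to be a connected pyVmomi ServiceInstance and the inventory names are illustrative, not part of the commit:

    # Both forms should resolve the same folder object, since
    # FindByInventoryPath no longer needs a leading '/'.
    folder_abs = si.content.searchIndex.FindByInventoryPath('/datacenter1/vm/folder1')
    folder_rel = si.content.searchIndex.FindByInventoryPath('datacenter1/vm/folder1')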
@@ -303,9 +316,8 @@ import time
 
 from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.pycompat24 import get_exception
-from ansible.module_utils.six import iteritems
 from ansible.module_utils.urls import fetch_url
 from ansible.module_utils.vmware import connect_to_api, find_obj, gather_vm_facts, get_all_objs
+from ansible.module_utils.vmware import serialize_spec
 
 try:
     import json
@@ -424,30 +436,77 @@ class PyVmomiDeviceHelper(object):
 
 class PyVmomiCache(object):
     """ This class caches references to objects which are requested multiples times but not modified """
-    def __init__(self, content):
+    def __init__(self, content, dc_name=None):
         self.content = content
+        self.dc_name = dc_name
         self.networks = {}
         self.clusters = {}
         self.esx_hosts = {}
+        self.parent_datacenters = {}
+
+    def find_obj(self, content, types, name, confine_to_datacenter=True):
+        """ Wrapper around find_obj to set datacenter context """
+        result = find_obj(content, types, name)
+        if result and confine_to_datacenter:
+            if self.get_parent_datacenter(result).name != self.dc_name:
+                objects = self.get_all_objs(content, types, confine_to_datacenter=True)
+                for obj in objects:
+                    if obj.name == name:
+                        return obj
+
+        return result
+
+    def get_all_objs(self, content, types, confine_to_datacenter=True):
+        """ Wrapper around get_all_objs to set datacenter context """
+        objects = get_all_objs(content, [vim.Datastore])
+        if confine_to_datacenter:
+            if hasattr(objects, 'items'):
+                # resource pools come back as a dictionary
+                for k, v in objects.items():
+                    parent_dc = self.get_parent_datacenter(k)
+                    if parent_dc.name != self.dc_name:
+                        objects.pop(k, None)
+            else:
+                # everything else should be a list
+                objects = [x for x in objects if self.get_parent_datacenter(x).name == self.dc_name]
+
+        return objects
 
     def get_network(self, network):
         if network not in self.networks:
-            self.networks[network] = find_obj(self.content, [vim.Network], network)
+            self.networks[network] = self.find_obj(self.content, [vim.Network], network)
 
         return self.networks[network]
 
     def get_cluster(self, cluster):
         if cluster not in self.clusters:
-            self.clusters[cluster] = find_obj(self.content, [vim.ClusterComputeResource], cluster)
+            self.clusters[cluster] = self.find_obj(self.content, [vim.ClusterComputeResource], cluster)
 
         return self.clusters[cluster]
 
     def get_esx_host(self, host):
         if host not in self.esx_hosts:
-            self.esx_hosts[host] = find_obj(self.content, [vim.HostSystem], host)
+            self.esx_hosts[host] = self.find_obj(self.content, [vim.HostSystem], host)
 
         return self.esx_hosts[host]
 
+    def get_parent_datacenter(self, obj):
+        """ Walk the parent tree to find the objects datacenter """
+        if isinstance(obj, vim.Datacenter):
+            return obj
+        if obj in self.parent_datacenters:
+            return self.parent_datacenters[obj]
+        datacenter = None
+        while True:
+            if not hasattr(obj, 'parent'):
+                break
+            obj = obj.parent
+            if isinstance(obj, vim.Datacenter):
+                datacenter = obj
+                break
+        self.parent_datacenters[obj] = datacenter
+        return datacenter
+
 
 class PyVmomiHelper(object):
     def __init__(self, module):
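A hypothetical usage sketch of the datacenter-scoped cache; content is assumed to be a connected ServiceContent and the names are illustrative:

    cache = PyVmomiCache(content, dc_name='datacenter1')

    # find_obj() falls back to a search confined to datacenter1 when the
    # global match lives under a different datacenter.
    ds = cache.find_obj(content, [vim.Datastore], 'datastore1')
    dc = cache.get_parent_datacenter(ds)  # walks obj.parent up to a vim.Datacenter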
@@ -463,7 +522,7 @@ class PyVmomiHelper(object):
         self.change_detected = False
         self.customspec = None
         self.current_vm_obj = None
-        self.cache = PyVmomiCache(self.content)
+        self.cache = PyVmomiCache(self.content, dc_name=self.params['datacenter'])
 
     def getvm(self, name=None, uuid=None, folder=None):
 
@@ -476,19 +535,21 @@ class PyVmomiHelper(object):
         if uuid:
             vm = self.content.searchIndex.FindByUuid(uuid=uuid, vmSearch=True)
         elif folder:
-            # Build the absolute folder path to pass into the search method
-            if not self.params['folder'].startswith('/'):
-                self.module.fail_json(msg="Folder %(folder)s needs to be an absolute path, starting with '/'." % self.params)
-            searchpath = '%(datacenter)s%(folder)s' % self.params
+            # searchpaths do not need to be absolute
+            searchpath = self.params['folder']
 
             # get all objects for this path ...
             f_obj = self.content.searchIndex.FindByInventoryPath(searchpath)
 
             if f_obj:
+                if isinstance(f_obj, vim.Datacenter):
+                    f_obj = f_obj.vmFolder
+
                 for c_obj in f_obj.childEntity:
 
                     if not isinstance(c_obj, vim.VirtualMachine):
                         continue
 
                     if c_obj.name == name:
                         vm = c_obj
                         if self.params['name_match'] == 'first':
@@ -650,7 +711,7 @@ class PyVmomiHelper(object):
                 self.module.fail_json(msg="Network '%(name)s' does not exists" % network)
 
             elif 'vlan' in network:
-                dvps = get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup])
+                dvps = self.cache.get_all_objs(self.content, [vim.dvs.DistributedVirtualPortgroup])
                 for dvp in dvps:
                     if hasattr(dvp.config.defaultPortConfig, 'vlan') and dvp.config.defaultPortConfig.vlan.vlanId == network['vlan']:
                         network['name'] = dvp.config.name
@@ -994,13 +1055,32 @@ class PyVmomiHelper(object):
 
         return hostsystem
 
+    def autoselect_datastore(self):
+        datastore = None
+        datastore_name = None
+        datastores = self.cache.get_all_objs(self.content, [vim.Datastore])
+
+        if datastores is None or len(datastores) == 0:
+            self.module.fail_json(msg="Unable to find a datastore list when autoselecting")
+
+        datastore_freespace = 0
+        for ds in datastores:
+            if ds.summary.freeSpace > datastore_freespace:
+                datastore = ds
+                datastore_name = datastore.name
+                datastore_freespace = ds.summary.freeSpace
+
+        return datastore
+
     def select_datastore(self, vm_obj=None):
         datastore = None
         datastore_name = None
 
         if len(self.params['disk']) != 0:
             # TODO: really use the datastore for newly created disks
             if 'autoselect_datastore' in self.params['disk'][0] and self.params['disk'][0]['autoselect_datastore']:
-                datastores = get_all_objs(self.content, [vim.Datastore])
+                datastores = self.cache.get_all_objs(self.content, [vim.Datastore])
+                datastores = [x for x in datastores if self.cache.get_parent_datacenter(x).name == self.params['datacenter']]
                 if datastores is None or len(datastores) == 0:
                     self.module.fail_json(msg="Unable to find a datastore list when autoselecting")
 
@@ -1019,14 +1099,23 @@ class PyVmomiHelper(object):
 
             elif 'datastore' in self.params['disk'][0]:
                 datastore_name = self.params['disk'][0]['datastore']
-                datastore = find_obj(self.content, [vim.Datastore], datastore_name)
+                datastore = self.cache.find_obj(self.content, [vim.Datastore], datastore_name)
             else:
                 self.module.fail_json(msg="Either datastore or autoselect_datastore should be provided to select datastore")
 
         if not datastore and self.params['template']:
             # use the template's existing DS
             disks = [x for x in vm_obj.config.hardware.device if isinstance(x, vim.vm.device.VirtualDisk)]
-            datastore = disks[0].backing.datastore
-            datastore_name = datastore.name
+            if disks:
+                datastore = disks[0].backing.datastore
+                datastore_name = datastore.name
+            # validation
+            if datastore:
+                dc = self.cache.get_parent_datacenter(datastore)
+                if dc.name != self.params['datacenter']:
+                    datastore = self.autoselect_datastore()
+                    datastore_name = datastore.name
 
         if not datastore:
             self.module.fail_json(msg="Failed to find a matching datastore")
@@ -1045,13 +1134,13 @@ class PyVmomiHelper(object):
         return False
 
     def select_resource_pool_by_name(self, resource_pool_name):
-        resource_pool = find_obj(self.content, [vim.ResourcePool], resource_pool_name)
+        resource_pool = self.cache.find_obj(self.content, [vim.ResourcePool], resource_pool_name)
         if resource_pool is None:
             self.module.fail_json(msg='Could not find resource_pool "%s"' % resource_pool_name)
         return resource_pool
 
     def select_resource_pool_by_host(self, host):
-        resource_pools = get_all_objs(self.content, [vim.ResourcePool])
+        resource_pools = self.cache.get_all_objs(self.content, [vim.ResourcePool])
         for rp in resource_pools.items():
             if not rp[0]:
                 continue
@@ -1082,6 +1171,39 @@ class PyVmomiHelper(object):
             self.module.fail_json(msg="hardware.scsi attribute should be 'paravirtual' or 'lsilogic'")
         return disk_controller_type
 
+    def find_folder(self, searchpath):
+        """ Walk inventory objects one position of the searchpath at a time """
+
+        # split the searchpath so we can iterate through it
+        paths = [x.replace('/', '') for x in searchpath.split('/')]
+        paths_total = len(paths) - 1
+        position = 0
+
+        # recursive walk while looking for next element in searchpath
+        root = self.content.rootFolder
+        while root and position <= paths_total:
+            change = False
+            if hasattr(root, 'childEntity'):
+                for child in root.childEntity:
+                    if child.name == paths[position]:
+                        root = child
+                        position += 1
+                        change = True
+                        break
+            elif isinstance(root, vim.Datacenter):
+                if hasattr(root, 'vmFolder'):
+                    if root.vmFolder.name == paths[position]:
+                        root = root.vmFolder
+                        position += 1
+                        change = True
+            else:
+                root = None
+
+            if not change:
+                root = None
+
+        return root
+
     def deploy_vm(self):
         # https://github.com/vmware/pyvmomi-community-samples/blob/master/samples/clone_vm.py
         # https://www.vmware.com/support/developer/vc-sdk/visdk25pubs/ReferenceGuide/vim.vm.CloneSpec.html
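To make the walker concrete, this is how it tokenizes a relative searchpath (pure Python; the names come from the vcsim test layout referenced below):

    searchpath = 'F0/DC0/vm/F0'
    paths = [x.replace('/', '') for x in searchpath.split('/')]
    # paths == ['F0', 'DC0', 'vm', 'F0']
    # The walk starts at content.rootFolder, matches folder F0, then
    # datacenter DC0, then DC0's vmFolder (named 'vm'), then child folder F0.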
@@ -1094,15 +1216,21 @@ class PyVmomiHelper(object):
         #   - static IPs
 
         # datacenters = get_all_objs(self.content, [vim.Datacenter])
-        datacenter = find_obj(self.content, [vim.Datacenter], self.params['datacenter'])
+        datacenter = self.cache.find_obj(self.content, [vim.Datacenter], self.params['datacenter'])
         if datacenter is None:
             self.module.fail_json(msg='No datacenter named %(datacenter)s was found' % self.params)
 
         destfolder = None
-        if not self.params['folder'].startswith('/'):
-            self.module.fail_json(msg="Folder %(folder)s needs to be an absolute path, starting with '/'." % self.params)
 
-        f_obj = self.content.searchIndex.FindByInventoryPath('/%(datacenter)s%(folder)s' % self.params)
+        # make an attempt with findinventorybypath, although this works poorly
+        # when datacenters are nested under folders
+        f_obj = self.content.searchIndex.FindByInventoryPath('%(folder)s' % self.params)
+
+        # retry by walking down the object tree
+        if f_obj is None:
+            f_obj = self.find_folder(self.params['folder'])
+
+        # abort if no strategy was successful
+        if f_obj is None:
+            self.module.fail_json(msg='No folder matched the path: %(folder)s' % self.params)
         destfolder = f_obj
@@ -1115,8 +1243,13 @@ class PyVmomiHelper(object):
         else:
             vm_obj = None
 
-        if self.params['resource_pool']:
-            resource_pool = self.select_resource_pool_by_name(self.params['resource_pool'])
+        # need a resource pool if cloning from template
+        if self.params['resource_pool'] or self.params['template']:
+            if self.params['esxi_hostname']:
+                host = self.select_host()
+                resource_pool = self.select_resource_pool_by_host(host)
+            else:
+                resource_pool = self.select_resource_pool_by_name(self.params['resource_pool'])
 
             if resource_pool is None:
                 self.module.fail_json(msg='Unable to find resource pool "%(resource_pool)s"' % self.params)
@@ -1143,6 +1276,8 @@ class PyVmomiHelper(object):
         if len(self.params['customization']) > 0 or network_changes is True:
             self.customize_vm(vm_obj=vm_obj)
 
+        clonespec = None
+        clone_method = None
         try:
             if self.params['template']:
                 # create the relocation spec
@@ -1153,8 +1288,9 @@ class PyVmomiHelper(object):
                     relospec.host = self.select_host()
                 relospec.datastore = datastore
 
-                if self.params['resource_pool']:
-                    relospec.pool = resource_pool
+                # https://www.vmware.com/support/developer/vc-sdk/visdk41pubs/ApiReference/vim.vm.RelocateSpec.html
+                # > pool: For a clone operation from a template to a virtual machine, this argument is required.
+                relospec.pool = resource_pool
 
                 if self.params['snapshot_src'] is not None and self.params['linked_clone']:
                     relospec.diskMoveType = vim.vm.RelocateSpec.DiskMoveOptions.createNewChildDiskBacking
@@ -1171,6 +1307,7 @@ class PyVmomiHelper(object):
                         clonespec.snapshot = snapshot[0].snapshot
 
                 clonespec.config = self.configspec
+                clone_method = 'Clone'
                 task = vm_obj.Clone(folder=destfolder, name=self.params['name'], spec=clonespec)
                 self.change_detected = True
             else:
@@ -1181,6 +1318,7 @@ class PyVmomiHelper(object):
                                                 suspendDirectory=None,
                                                 vmPathName="[" + datastore_name + "] " + self.params["name"])
 
+                clone_method = 'CreateVM_Task'
                 task = destfolder.CreateVM_Task(config=self.configspec, pool=resource_pool)
                 self.change_detected = True
             self.wait_for_task(task)
@@ -1191,7 +1329,20 @@ class PyVmomiHelper(object):
         if task.info.state == 'error':
             # https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2021361
             # https://kb.vmware.com/selfservice/microsites/search.do?language=en_US&cmd=displayKC&externalId=2173
-            return {'changed': self.change_detected, 'failed': True, 'msg': task.info.error.msg}
+
+            # provide these to the user for debugging
+            clonespec_json = serialize_spec(clonespec)
+            configspec_json = serialize_spec(self.configspec)
+            kwargs = {
+                'changed': self.change_detected,
+                'failed': True,
+                'msg': task.info.error.msg,
+                'clonespec': clonespec_json,
+                'configspec': configspec_json,
+                'clone_method': clone_method
+            }
+
+            return kwargs
         else:
             # set annotation
             vm = task.info.result
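For reference, a sketch of the failure payload a caller now receives; the values are placeholders, the keys match the kwargs dict above:

    # {
    #     'failed': True,
    #     'changed': ...,          # self.change_detected
    #     'msg': ...,              # task.info.error.msg
    #     'clonespec': {...},      # serialize_spec(clonespec)
    #     'configspec': {...},     # serialize_spec(self.configspec)
    #     'clone_method': 'Clone'  # or 'CreateVM_Task'
    # }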
@@ -1340,9 +1491,8 @@ def main():
 
     result = {'failed': False, 'changed': False}
 
-    # Prepend /vm if it was missing from the folder path, also strip trailing slashes
-    if not module.params['folder'].startswith('/vm') and module.params['folder'].startswith('/'):
-        module.params['folder'] = '/vm%(folder)s' % module.params
+    # FindByInventoryPath() does not require an absolute path
+    # so we should leave the input folder path unmodified
     module.params['folder'] = module.params['folder'].rstrip('/')
 
     pyv = PyVmomiHelper(module)

test/integration/targets/vmware_guest/aliases — Normal file (3 lines)

@@ -0,0 +1,3 @@
+posix/ci/cloud/vcenter
+cloud/vcenter
+skip/python3

test/integration/targets/vmware_guest/tasks/main.yml — Normal file (12 lines)

@@ -0,0 +1,12 @@
+#- name: make sure pyvmomi is installed
+#  pip:
+#    name: pyvmomi
+#    state: latest
+
+- name: store the vcenter container ip
+  set_fact:
+    vcsim: "{{ lookup('env', 'vcenter_host') }}"
+- debug: var=vcsim
+
+- include: poweroff_d1_c1_f0.yml
+- include: poweroff_d1_c1_f1.yml

test/integration/targets/vmware_guest/tasks/poweroff_d1_c1_f0.yml — Normal file (the first task file included above)

@@ -0,0 +1,40 @@
+#- name: make sure pyvmomi is installed
+#  pip:
+#    name: pyvmomi
+#    state: latest
+
+- name: kill vcsim
+  uri:
+    url: "{{ 'http://' + vcsim + ':5000/killall' }}"
+- name: start vcsim with no folders
+  uri:
+    url: "{{ 'http://' + vcsim + ':5000/spawn?datacenter=1&cluster=1&folder=0' }}"
+  register: vcsim_instance
+
+- name: get a list of VMS from vcsim
+  uri:
+    url: "{{ 'http://' + vcsim + ':5000/govc_find?filter=VM' }}"
+  register: vmlist
+
+- debug: var=vcsim_instance
+- debug: var=vmlist
+
+- name: set state to poweroff on all VMs
+  vmware_guest:
+    validate_certs: False
+    hostname: "{{ vcsim }}"
+    username: "{{ vcsim_instance['json']['username'] }}"
+    password: "{{ vcsim_instance['json']['password'] }}"
+    name: "{{ item|basename }}"
+    datacenter: "{{ (item|basename).split('_')[0] }}"
+    state: poweredoff
+    folder: "{{ item|dirname }}"
+  with_items: "{{ vmlist['json'] }}"
+  register: poweroff_d1_c1_f0
+
+- debug: var=poweroff_d1_c1_f0
+
+- name: make sure no changes were made
+  assert:
+    that:
+      - "poweroff_d1_c1_f0.results|map(attribute='changed')|unique|list == [False]"

test/integration/targets/vmware_guest/tasks/poweroff_d1_c1_f1.yml — Normal file (the second task file included above)

@@ -0,0 +1,51 @@
+#- name: make sure pyvmomi is installed
+#  pip:
+#    name: pyvmomi
+#    state: latest
+
+- name: store the vcenter container ip
+  set_fact:
+    vcsim: "{{ lookup('env', 'vcenter_host') }}"
+- debug: var=vcsim
+
+- name: kill vcsim
+  uri:
+    url: "{{ 'http://' + vcsim + ':5000/killall' }}"
+- name: start vcsim with folders
+  uri:
+    url: "{{ 'http://' + vcsim + ':5000/spawn?datacenter=1&cluster=1&folder=1' }}"
+  register: vcsim_instance
+
+- name: get a list of VMS from vcsim
+  uri:
+    url: "{{ 'http://' + vcsim + ':5000/govc_find?filter=VM' }}"
+  register: vmlist
+
+- debug: var=vcsim_instance
+- debug: var=vmlist
+
+# https://github.com/ansible/ansible/issues/25011
+# Sending "-folders 1" to vcsim nests the datacenter under
+# the folder so that the path prefix is no longer /vm
+#
+# /F0/DC0/vm/F0/DC0_H0_VM0
+
+- name: set state to poweredoff on all VMs
+  vmware_guest:
+    validate_certs: False
+    hostname: "{{ vcsim }}"
+    username: "{{ vcsim_instance['json']['username'] }}"
+    password: "{{ vcsim_instance['json']['password'] }}"
+    name: "{{ item|basename }}"
+    datacenter: "{{ (item|basename).split('_')[0] }}"
+    state: poweredoff
+    folder: "{{ item|dirname }}"
+  with_items: "{{ vmlist['json'] }}"
+  register: poweroff_d1_c1_f1
+
+- debug: var=poweroff_d1_c1_f1
+
+- name: make sure no changes were made
+  assert:
+    that:
+      - "poweroff_d1_c1_f1.results|map(attribute='changed')|unique|list == [False]"