inventory_plugins: Add kubevirt inventory plugin

Signed-off-by: Ondra Machacek <omachace@redhat.com>
This commit is contained in:
Ondra Machacek 2018-10-30 13:23:23 +01:00 committed by Gonéri Le Bouder
parent b41c6fcdd4
commit af528bed80
6 changed files with 614 additions and 0 deletions

View file

@@ -0,0 +1,263 @@
# Copyright (c) 2018 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# Plugin documentation consumed by ansible-doc. Two corrections vs. the draft:
# the default kubeconfig path is ~/.kube/config (not config.json), and the
# environment variable for key_file is K8S_AUTH_KEY_FILE (not K8S_AUTH_HOST).
DOCUMENTATION = '''
    name: kubevirt
    plugin_type: inventory
    author:
      - KubeVirt Team (@kubevirt)
    version_added: "2.8"
    short_description: KubeVirt inventory source
    extends_documentation_fragment:
      - inventory_cache
      - constructed
    description:
      - Fetch running VirtualMachines for one or more namespaces.
      - Groups by namespace, namespace_vms and labels.
      - Uses kubevirt.(yml|yaml) YAML configuration file to set parameter values.
    options:
      plugin:
        description: token that ensures this is a source file for the 'kubevirt' plugin.
        required: True
        choices: ['kubevirt']
        type: str
      host_format:
        description:
          - Specify the format of the host in the inventory group.
        default: "{namespace}-{name}-{uid}"
      connections:
        type: list
        description:
          - Optional list of cluster connection settings. If no connections are provided, the default
            I(~/.kube/config) and active context will be used, and objects will be returned for all namespaces
            the active user is authorized to access.
        suboptions:
          name:
            description:
              - Optional name to assign to the cluster. If not provided, a name is constructed from the server
                and port.
            type: str
          kubeconfig:
            description:
              - Path to an existing Kubernetes config file. If not provided, and no other connection
                options are provided, the OpenShift client will attempt to load the default
                configuration file from I(~/.kube/config). Can also be specified via K8S_AUTH_KUBECONFIG
                environment variable.
            type: str
          context:
            description:
              - The name of a context found in the config file. Can also be specified via K8S_AUTH_CONTEXT environment
                variable.
            type: str
          host:
            description:
              - Provide a URL for accessing the API. Can also be specified via K8S_AUTH_HOST environment variable.
            type: str
          api_key:
            description:
              - Token used to authenticate with the API. Can also be specified via K8S_AUTH_API_KEY environment
                variable.
            type: str
          username:
            description:
              - Provide a username for authenticating with the API. Can also be specified via K8S_AUTH_USERNAME
                environment variable.
            type: str
          password:
            description:
              - Provide a password for authenticating with the API. Can also be specified via K8S_AUTH_PASSWORD
                environment variable.
            type: str
          cert_file:
            description:
              - Path to a certificate used to authenticate with the API. Can also be specified via K8S_AUTH_CERT_FILE
                environment variable.
            type: str
          key_file:
            description:
              - Path to a key file used to authenticate with the API. Can also be specified via K8S_AUTH_KEY_FILE
                environment variable.
            type: str
          ssl_ca_cert:
            description:
              - Path to a CA certificate used to authenticate with the API. Can also be specified via
                K8S_AUTH_SSL_CA_CERT environment variable.
            type: str
          verify_ssl:
            description:
              - "Whether or not to verify the API server's SSL certificates. Can also be specified via
                K8S_AUTH_VERIFY_SSL environment variable."
            type: bool
          namespaces:
            description:
              - List of namespaces. If not specified, will fetch all virtual machines for all namespaces user is authorized
                to access.
            type: list
          network_name:
            description:
              - In case of multiple network attached to virtual machine, define which interface should be returned as primary IP
                address.
            type: str
          api_version:
            description:
              - "Specify the KubeVirt API version."
            type: str
          annotation_variable:
            description:
              - "Specify the name of the annotation which provides data, which should be used as inventory host variables."
              - "Note, that the value in ansible annotations should be json."
            type: str
            default: 'ansible'
    requirements:
      - "openshift >= 0.6"
      - "PyYAML >= 3.11"
'''
# Usage examples shown by ansible-doc. Fixed to use the connection option
# names actually documented above: 'api_key' (not 'token') and 'verify_ssl'
# (not 'ssl_verify') -- the draft names would be rejected by the plugin.
EXAMPLES = '''
# File must be named kubevirt.yaml or kubevirt.yml

# Authenticate with token, and return all virtual machines for all namespaces
plugin: kubevirt
connections:
  - host: https://kubevirt.io
    api_key: xxxxxxxxxxxxxxxx
    verify_ssl: false

# Use default config (~/.kube/config) file and active context, and return vms with interfaces
# connected to network myovsnetwork and from namespace vms
plugin: kubevirt
connections:
  - namespaces:
      - vms
    network_name: myovsnetwork
'''
import json

from ansible.plugins.inventory.k8s import K8sInventoryException, InventoryModule as K8sInventoryModule, format_dynamic_api_exc

try:
    # openshift is an optional runtime dependency; if it is missing the base
    # k8s inventory class reports the error when a client is actually needed.
    from openshift.dynamic.exceptions import DynamicApiError
except ImportError:
    pass

# Default KubeVirt API group/version, used when a connection does not set api_version.
API_VERSION = 'kubevirt.io/v1alpha3'
class InventoryModule(K8sInventoryModule):
    """KubeVirt dynamic inventory source.

    Builds inventory from running VirtualMachineInstances, grouped per
    connection name, per namespace and per VM label.
    """

    NAME = 'kubevirt'

    def setup(self, config_data, cache, cache_key):
        """Store the raw plugin config so fetch_objects() can read top-level
        options (e.g. host_format) before delegating to the k8s base class."""
        self.config_data = config_data
        super(InventoryModule, self).setup(config_data, cache, cache_key)

    def fetch_objects(self, connections):
        """Populate the inventory from each configured connection, or from the
        default kubeconfig/active context when no connections are given."""
        client = self.get_api_client()
        vm_format = self.config_data.get('host_format', '{namespace}-{name}-{uid}')
        if connections:
            for connection in connections:
                client = self.get_api_client(**connection)
                name = connection.get('name', self.get_default_host_name(client.configuration.host))
                if connection.get('namespaces'):
                    namespaces = connection['namespaces']
                else:
                    namespaces = self.get_available_namespaces(client)
                interface_name = connection.get('network_name')
                api_version = connection.get('api_version', API_VERSION)
                annotation_variable = connection.get('annotation_variable', 'ansible')
                for namespace in namespaces:
                    self.get_vms_for_namespace(client, name, namespace, vm_format, interface_name, api_version, annotation_variable)
        else:
            name = self.get_default_host_name(client.configuration.host)
            namespaces = self.get_available_namespaces(client)
            for namespace in namespaces:
                # BUG FIX: the original passed api_version/annotation_variable
                # here, but those names are only bound in the `if connections`
                # branch, causing a NameError on this path. Use the documented
                # defaults instead (no interface filter, default API version,
                # 'ansible' annotation).
                self.get_vms_for_namespace(client, name, namespace, vm_format, None, API_VERSION, 'ansible')

    def get_vms_for_namespace(self, client, name, namespace, name_format, interface_name=None, api_version=None, annotation_variable=None):
        """Add all VirtualMachineInstances of *namespace* to the inventory.

        :param client: openshift dynamic client for the target cluster
        :param name: sanitized top-level group name for this connection
        :param namespace: namespace to list VMIs in
        :param name_format: format string producing the inventory hostname
            (receives namespace/name/uid keyword arguments)
        :param interface_name: when set, use the interface with this name as
            the source of ansible_host; otherwise the first reported interface
        :param api_version: KubeVirt API group/version to query
        :param annotation_variable: annotation key whose JSON value is exposed
            as extra hostvars
        :raises K8sInventoryException: when the VMI list call fails
        """
        v1_vm = client.resources.get(api_version=api_version, kind='VirtualMachineInstance')
        try:
            obj = v1_vm.get(namespace=namespace)
        except DynamicApiError as exc:
            self.display.debug(exc)
            raise K8sInventoryException('Error fetching Virtual Machines list: %s' % format_dynamic_api_exc(exc))

        namespace_group = 'namespace_{0}'.format(namespace)
        namespace_vms_group = '{0}_vms'.format(namespace_group)

        name = self._sanitize_group_name(name)
        namespace_group = self._sanitize_group_name(namespace_group)
        namespace_vms_group = self._sanitize_group_name(namespace_vms_group)
        # Group hierarchy: <connection> -> namespace_<ns> -> namespace_<ns>_vms
        self.inventory.add_group(name)
        self.inventory.add_group(namespace_group)
        self.inventory.add_child(name, namespace_group)
        self.inventory.add_group(namespace_vms_group)
        self.inventory.add_child(namespace_group, namespace_vms_group)
        for vm in obj.items:
            # Skip VMIs that have not reported interface status yet:
            if not (vm.status and vm.status.interfaces):
                continue

            # Find interface by its name:
            if interface_name is None:
                interface = vm.status.interfaces[0]
            else:
                interface = next(
                    (i for i in vm.status.interfaces if i.name == interface_name),
                    None
                )

            # If interface is not found or IP address is not reported skip this VM:
            if interface is None or interface.ipAddress is None:
                continue

            vm_name = name_format.format(namespace=vm.metadata.namespace, name=vm.metadata.name, uid=vm.metadata.uid)
            vm_ip = interface.ipAddress
            vm_annotations = {} if not vm.metadata.annotations else dict(vm.metadata.annotations)

            self.inventory.add_host(vm_name)

            if vm.metadata.labels:
                # create a group for each label_value
                for key, value in vm.metadata.labels:
                    group_name = 'label_{0}_{1}'.format(key, value)
                    group_name = self._sanitize_group_name(group_name)
                    self.inventory.add_group(group_name)
                    self.inventory.add_child(group_name, vm_name)
                vm_labels = dict(vm.metadata.labels)
            else:
                vm_labels = {}

            self.inventory.add_child(namespace_vms_group, vm_name)

            # add hostvars
            self.inventory.set_variable(vm_name, 'ansible_host', vm_ip)
            self.inventory.set_variable(vm_name, 'labels', vm_labels)
            self.inventory.set_variable(vm_name, 'annotations', vm_annotations)
            self.inventory.set_variable(vm_name, 'object_type', 'vm')
            self.inventory.set_variable(vm_name, 'resource_version', vm.metadata.resourceVersion)
            self.inventory.set_variable(vm_name, 'uid', vm.metadata.uid)

            # Add all variables which are listed in 'ansible' annotation:
            annotations_data = json.loads(vm_annotations.get(annotation_variable, "{}"))
            for k, v in annotations_data.items():
                self.inventory.set_variable(vm_name, k, v)

    def verify_file(self, path):
        """Accept only inventory sources named kubevirt.yml / kubevirt.yaml."""
        if super(InventoryModule, self).verify_file(path):
            if path.endswith(('kubevirt.yml', 'kubevirt.yaml')):
                return True
        return False

    def _sanitize_group_name(self, name):
        """Replace characters unsupported in group names ('/', '.', '-') with underscores."""
        return name.replace('/', '_').replace('.', '_').replace('-', '_')

View file

@@ -0,0 +1 @@
shippable/posix/group2

View file

@@ -0,0 +1,67 @@
#!/usr/bin/env python
# Compare two ansible-inventory JSON dumps: the contrib script output
# (source of truth) against the inventory plugin output.
import json
import sys
def check_hosts(contrib, plugin):
    """Assert both inventories expose the same host set.

    Returns the two sorted host-name lists (contrib first).
    """
    expected = sorted(contrib['_meta']['hostvars'])
    actual = sorted(plugin['_meta']['hostvars'])
    assert expected == actual
    return expected, actual
def check_groups(contrib, plugin):
    """Assert every contrib group also exists in the plugin output.

    Extra plugin-only groups are allowed. Returns both group sets.
    """
    src_groups = set(contrib)
    dst_groups = set(plugin)
    missing_groups = src_groups - dst_groups
    if missing_groups:
        print("groups: %s are missing from the plugin" % missing_groups)
    assert not missing_groups
    return src_groups, dst_groups
def check_host_vars(key, value, plugin, host):
    """Assert one contrib hostvar is represented in the plugin output.

    ec2_tag_* keys are flattened in the contrib script but nested under a
    'tags' dict in the plugin, so they are matched through that dict.
    """
    host_vars = plugin['_meta']['hostvars'][host]
    if key.startswith('ec2_tag'):
        # tags are a dict in the plugin
        print('assert tag', key, value)
        assert 'tags' in host_vars, 'b file does not have tags in host'
        btags = host_vars['tags']
        tagkey = key.replace('ec2_tag_', '')
        assert tagkey in btags, '%s tag not in b file host tags' % tagkey
        assert value == btags[tagkey], '%s != %s' % (value, btags[tagkey])
    else:
        print('assert var', key, value, key in host_vars, host_vars.get(key))
        assert key in host_vars, "%s not in b's %s hostvars" % (key, host)
        assert value == host_vars[key], "%s != %s" % (value, host_vars[key])
def main():
    """Load both JSON dumps and verify the plugin reproduces the script.

    argv[1] is the contrib script output (source of truth), argv[2] is the
    plugin output to validate.
    """
    truth_path = sys.argv[1]
    candidate_path = sys.argv[2]
    with open(truth_path, 'r') as fh:
        truth = json.loads(fh.read())
    with open(candidate_path, 'r') as fh:
        candidate = json.loads(fh.read())

    print(truth)
    print(candidate)

    # Hosts and groups must match before comparing per-host variables.
    truth_hosts, _ = check_hosts(truth, candidate)
    check_groups(truth, candidate)

    # Every contrib hostvar must be reconstructable from the plugin output.
    for host in truth_hosts:
        for key, value in truth['_meta']['hostvars'][host].items():
            check_host_vars(key, value, candidate, host)


if __name__ == "__main__":
    main()

View file

@@ -0,0 +1,61 @@
#!/usr/bin/env bash
# Integration test driver for the kubevirt inventory plugin: start a mock
# Kubernetes API (server.py), point a kubeconfig at it, run ansible-inventory
# with the plugin, and diff the result against the expected test.out.

# The openshift client library does not support Python 2.6; skip the test.
if [[ $(python --version 2>&1) =~ 2\.6 ]]
then
    echo "Openshift client is not supported on Python 2.6"
    exit 0
fi

set -eux

source virtualenv.sh
pip install openshift

# Start the mock API server in the background (serves on localhost:12345).
./server.py &

# Fake auth file
mkdir -p ~/.kube/
cat <<EOF > ~/.kube/config
apiVersion: v1
clusters:
- cluster:
    insecure-skip-tls-verify: true
    server: http://localhost:12345
  name: development
contexts:
- context:
    cluster: development
    user: developer
  name: dev-frontend
current-context: dev-frontend
kind: Config
preferences: {}
users:
- name: developer
  user:
    token: ZDNg7LzSlp8a0u0fht_tRnPMTOjxqgJGCyi_iy0ecUw
EOF

#################################################
#   RUN THE PLUGIN
#################################################

# run the plugin second
export ANSIBLE_INVENTORY_ENABLED=kubevirt
export ANSIBLE_INVENTORY=test.kubevirt.yml

cat << EOF > "$OUTPUT_DIR/test.kubevirt.yml"
plugin: kubevirt
connections:
  - namespaces:
      - default
EOF

ANSIBLE_JINJA2_NATIVE=1 ansible-inventory -vvvv -i "$OUTPUT_DIR/test.kubevirt.yml" --list --output="$OUTPUT_DIR/plugin.out"

# Stop the background mock API server.
kill -9 "$(jobs -p)"

#################################################
#   DIFF THE RESULTS
#################################################
./inventory_diff.py "$(pwd)/test.out" "$OUTPUT_DIR/plugin.out"

View file

@@ -0,0 +1,161 @@
#!/usr/bin/env python
# Mock Kubernetes API server used by the kubevirt inventory plugin test.
import json
import os

try:
    # Python 3 module layout
    from http.server import HTTPServer
    from http.server import SimpleHTTPRequestHandler
except ImportError:
    # Python 2 fallback
    from BaseHTTPServer import HTTPServer
    from SimpleHTTPServer import SimpleHTTPRequestHandler

from threading import Thread

try:
    from urllib.parse import urlparse
except ImportError:
    from urlparse import urlparse
class TestHandler(SimpleHTTPRequestHandler):
# Path handlers:
handlers = {}
def log_message(self, format, *args):
"""
Empty method, so we don't mix output of HTTP server with tests
"""
pass
def do_GET(self):
params = urlparse(self.path)
if params.path in self.handlers:
self.handlers[params.path](self)
else:
SimpleHTTPRequestHandler.do_GET(self)
def do_POST(self):
params = urlparse(self.path)
if params.path in self.handlers:
self.handlers[params.path](self)
else:
SimpleHTTPRequestHandler.do_POST(self)
class TestServer(object):
    """Embedded HTTP server serving canned JSON responses for tests."""

    # The host and port and path used by the embedded tests web server:
    PORT = None

    # The embedded web server:
    _httpd = None

    # Thread for http server:
    _thread = None

    def set_json_response(self, path, code, body):
        """Register *body* (JSON-serializable) as the response for *path*."""
        def respond(handler):
            handler.send_response(code)
            handler.send_header('Content-Type', 'application/json')
            handler.end_headers()
            payload = json.dumps(body, ensure_ascii=False).encode('utf-8')
            handler.wfile.write(payload)
        TestHandler.handlers[path] = respond

    def start_server(self, host='localhost'):
        """Start serving on port 12345 in a daemonless background thread."""
        self._httpd = HTTPServer((host, 12345), TestHandler)
        self._thread = Thread(target=self._httpd.serve_forever)
        self._thread.start()

    def stop_server(self):
        """Shut the server down and wait for its thread to finish."""
        self._httpd.shutdown()
        self._thread.join()
if __name__ == '__main__':
    # Print our PID so the invoking shell script could terminate us if needed.
    print(os.getpid())
    server = TestServer()
    server.start_server()
    # Discovery endpoints queried by the openshift dynamic client on startup:
    # version probe, core API versions, core v1 resources and the group list.
    server.set_json_response(path="/version", code=200, body={})
    server.set_json_response(path="/api", code=200, body={
        "kind": "APIVersions", "versions": ["v1"], "serverAddressByClientCIDRs": [{"clientCIDR": "0.0.0.0/0", "serverAddress": "localhost:12345"}]
    })
    server.set_json_response(path="/api/v1", code=200, body={'resources': {}})
    server.set_json_response(path="/apis", code=200, body={
        "kind": "APIGroupList", "apiVersion": "v1",
        "groups": [{
            "name": "kubevirt.io", "versions": [{"groupVersion": "kubevirt.io/v1alpha3", "version": "v1alpha3"}],
            "preferredVersion": {"groupVersion": "kubevirt.io/v1alpha3", "version": "v1alpha3"}
        }]
    })
    # Resource list for the kubevirt.io/v1alpha3 group, advertising the
    # VirtualMachineInstance kind the inventory plugin looks up.
    server.set_json_response(
        path="/apis/kubevirt.io/v1alpha3",
        code=200,
        body={
            "kind": "APIResourceList", "apiVersion": "v1", "groupVersion": "kubevirt.io/v1alpha3",
            "resources": [{
                "name": "virtualmachineinstances", "singularName": "virtualmachineinstance",
                "namespaced": True, "kind": "VirtualMachineInstance",
                "verbs": ["delete", "deletecollection", "get", "list", "patch", "create", "update", "watch"],
                "shortNames":["vmi", "vmis"]
            }]
        }
    )
    # Fixture: one running VMI named 'myvm' in the 'default' namespace.
    # Its labels, annotations, uid and interface IP are what test.out expects
    # the inventory plugin to turn into groups and hostvars.
    server.set_json_response(
        path="/apis/kubevirt.io/v1alpha3/namespaces/default/virtualmachineinstances",
        code=200,
        body={'apiVersion': 'kubevirt.io/v1alpha3',
              'items': [{'apiVersion': 'kubevirt.io/v1alpha3',
                         'kind': 'VirtualMachineInstance',
                         'metadata': {'annotations': {'ansible': '{"data1": "yes", "data2": "no"}'},
                                      'creationTimestamp': '2019-04-05T14:17:02Z',
                                      'generateName': 'myvm',
                                      'generation': 1,
                                      'labels': {'kubevirt.io/nodeName': 'localhost',
                                                 'label': 'x',
                                                 'vm.cnv.io/name': 'myvm'},
                                      'name': 'myvm',
                                      'namespace': 'default',
                                      'ownerReferences': [{'apiVersion': 'kubevirt.io/v1alpha3',
                                                           'blockOwnerDeletion': True,
                                                           'controller': True,
                                                           'kind': 'VirtualMachine',
                                                           'name': 'myvm',
                                                           'uid': 'f78ebe62-5666-11e9-a214-0800279ffc6b'}],
                                      'resourceVersion': '1614085',
                                      'selfLink': '/apis/kubevirt.io/v1alpha3/namespaces/default/virtualmachineinstances/myvm',
                                      'uid': '7ba1b196-57ad-11e9-9e2e-0800279ffc6b'},
                         'spec': {'domain': {'devices': {'disks': [{'disk': {'bus': 'virtio'},
                                                                    'name': 'containerdisk'},
                                                                   {'disk': {'bus': 'virtio'}, 'name': 'ansiblecloudinitdisk'}],
                                                         'interfaces': [{'bridge': {}, 'name': 'default'}]},
                                             'firmware': {'uuid': 'cdf77e9e-871b-5acb-a707-80ef3d4b9849'},
                                             'machine': {'type': ''},
                                             'resources': {'requests': {'memory': '64M'}}},
                                  'networks': [{'name': 'default', 'pod': {}}],
                                  'volumes': [{'containerDisk': {'image': 'kubevirt/cirros-container-disk-demo:v0.11.0'},
                                               'name': 'containerdisk'},
                                              {'cloudInitNoCloud': {'userData': '#cloud-config\npassword: password\nchpasswd: { expire: False }'},
                                               'name': 'ansiblecloudinitdisk'}]},
                         'status': {'conditions': [{'lastProbeTime': None,
                                                    'lastTransitionTime': None,
                                                    'status': 'True',
                                                    'type': 'LiveMigratable'},
                                                   {'lastProbeTime': None,
                                                    'lastTransitionTime': '2019-04-05T14:17:27Z',
                                                    'status': 'True',
                                                    'type': 'Ready'}],
                                    'interfaces': [{'ipAddress': '172.17.0.19',
                                                    'mac': '02:42:ac:11:00:13',
                                                    'name': 'default'}],
                                    'migrationMethod': 'BlockMigration',
                                    'nodeName': 'localhost',
                                    'phase': 'Running'}}],
              'kind': 'VirtualMachineInstanceList',
              'metadata': {'continue': '',
                           'resourceVersion': '1614862',
                           'selfLink': '/apis/kubevirt.io/v1alpha3/namespaces/default/virtualmachineinstances'}}
    )

View file

@@ -0,0 +1,61 @@
{
"_meta": {
"hostvars": {
"default-myvm-7ba1b196-57ad-11e9-9e2e-0800279ffc6b": {
"annotations": {
"ansible": "{\"data1\": \"yes\", \"data2\": \"no\"}"
},
"ansible_host": "172.17.0.19",
"data1": "yes",
"data2": "no",
"labels": {
"kubevirt.io/nodeName": "localhost",
"label": "x",
"vm.cnv.io/name": "myvm"
},
"object_type": "vm",
"resource_version": "1614085",
"uid": "7ba1b196-57ad-11e9-9e2e-0800279ffc6b"
}
}
},
"all": {
"children": [
"label_kubevirt_io_nodeName_localhost",
"label_label_x",
"label_vm_cnv_io_name_myvm",
"localhost_12345",
"ungrouped"
]
},
"label_kubevirt_io_nodeName_localhost": {
"hosts": [
"default-myvm-7ba1b196-57ad-11e9-9e2e-0800279ffc6b"
]
},
"label_label_x": {
"hosts": [
"default-myvm-7ba1b196-57ad-11e9-9e2e-0800279ffc6b"
]
},
"label_vm_cnv_io_name_myvm": {
"hosts": [
"default-myvm-7ba1b196-57ad-11e9-9e2e-0800279ffc6b"
]
},
"localhost_12345": {
"children": [
"namespace_default"
]
},
"namespace_default": {
"children": [
"namespace_default_vms"
]
},
"namespace_default_vms": {
"hosts": [
"default-myvm-7ba1b196-57ad-11e9-9e2e-0800279ffc6b"
]
}
}