2012-02-24 09:25:09 +01:00
|
|
|
#!/usr/bin/python
|
2012-08-03 03:29:10 +02:00
|
|
|
# -*- coding: utf-8 -*-
|
2012-02-24 09:25:09 +01:00
|
|
|
|
2012-02-29 01:08:09 +01:00
|
|
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
|
|
|
|
#
|
|
|
|
# This file is part of Ansible
|
|
|
|
#
|
|
|
|
# Ansible is free software: you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU General Public License as published by
|
|
|
|
# the Free Software Foundation, either version 3 of the License, or
|
|
|
|
# (at your option) any later version.
|
|
|
|
#
|
|
|
|
# Ansible is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the GNU General Public License
|
|
|
|
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
|
|
|
|
|
2012-04-18 01:59:23 +02:00
|
|
|
import array
import datetime
import errno
import fcntl
import fnmatch
import getpass
import glob
import os
import platform
import re
import shlex
import socket
import struct
import time
|
2012-02-24 09:25:09 +01:00
|
|
|
|
2012-09-17 15:11:04 +02:00
|
|
|
DOCUMENTATION = '''
---
module: setup
short_description: Gathers facts about remote hosts
options:
    filter:
        version_added: 1.1
        description:
            - if supplied, only return facts that match this shell-style (fnmatch) wildcard.
        required: false
        default: '*'
description:
     - This module is automatically called by playbooks to gather useful
       variables about remote hosts that can be used in playbooks. It can also be
       executed directly by C(/usr/bin/ansible) to check what variables are
       available to a host. Ansible provides many I(facts) about the system,
       automatically.
notes:
    - More ansible facts will be added with successive releases. If I(facter) or
      I(ohai) are installed, variables from these programs will also be snapshotted
      into the JSON file for usage in templating. These variables are prefixed
      with C(facter_) and C(ohai_) so it's easy to tell their source. All variables are
      bubbled up to the caller. Using the ansible facts and choosing to not
      install I(facter) and I(ohai) means you can avoid Ruby-dependencies on your
      remote systems. (See also M(facter) and M(ohai).)
    - The filter option filters only the first level subkey below ansible_facts.
author: Michael DeHaan
'''
|
|
|
|
2013-02-23 21:11:15 +01:00
|
|
|
# The comment on the last example was a copy-paste of the facter example;
# it actually demonstrates filtering on interface facts, so say that.
EXAMPLES = """
# Display facts from all hosts and store them indexed by I(hostname) at C(/tmp/facts).
ansible all -m setup --tree /tmp/facts

# Display only facts regarding memory found by ansible on all hosts and output them.
ansible all -m setup -a 'filter=ansible_*_mb'

# Display only facts returned by facter.
ansible all -m setup -a 'filter=facter_*'

# Display only facts about the network interfaces eth0 through eth2.
ansible all -m setup -a 'filter=ansible_eth[0-2]'
"""
|
|
|
|
2012-05-30 17:42:05 +02:00
|
|
|
try:
|
|
|
|
import selinux
|
|
|
|
HAVE_SELINUX=True
|
|
|
|
except ImportError:
|
|
|
|
HAVE_SELINUX=False
|
|
|
|
|
2012-02-24 09:25:09 +01:00
|
|
|
try:
|
|
|
|
import json
|
|
|
|
except ImportError:
|
|
|
|
import simplejson as json
|
|
|
|
|
2012-07-18 23:46:28 +02:00
|
|
|
class Facts(object):
    """
    This class should only attempt to populate those facts that
    are mostly generic to all systems.  This includes platform facts,
    service facts (eg. ssh keys or selinux), and distribution facts.
    Anything that requires extensive code or may have more than one
    possible implementation to establish facts for a given topic should
    subclass Facts.
    """

    # Matches 32-bit Intel-style machine strings (i386/i486/i586/i686).
    _I386RE = re.compile(r'i[3456]86')

    # For the most part, we assume that platform.dist() will tell the truth.
    # This is the fallback to handle unknowns or exceptions.
    OSDIST_DICT = {
        '/etc/redhat-release':  'RedHat',
        '/etc/vmware-release':  'VMwareESX',
        '/etc/openwrt_release': 'OpenWrt',
        '/etc/system-release':  'OtherLinux',
    }

    # Maps selinux_getenforcemode() return codes to human-readable modes.
    SELINUX_MODE_DICT = {1: 'enforcing', 0: 'permissive', -1: 'disabled'}

    # A list of dicts.  If there is a platform with more than one
    # package manager, put the preferred one last.  If there is an
    # ansible module, use that as the value for the 'name' key.
    PKG_MGRS = [
        {'path': '/usr/bin/yum',           'name': 'yum'},
        {'path': '/usr/bin/apt-get',       'name': 'apt'},
        {'path': '/usr/bin/zypper',        'name': 'zypper'},
        {'path': '/usr/bin/pacman',        'name': 'pacman'},
        {'path': '/bin/opkg',              'name': 'opkg'},
        {'path': '/opt/local/bin/pkgin',   'name': 'pkgin'},
        {'path': '/opt/local/bin/port',    'name': 'macports'},
    ]
2012-09-14 20:25:23 +02:00
|
|
|
|
2012-07-19 01:22:43 +02:00
|
|
|
def __init__(self):
|
|
|
|
self.facts = {}
|
2012-07-18 23:46:28 +02:00
|
|
|
self.get_platform_facts()
|
|
|
|
self.get_distribution_facts()
|
2012-09-05 18:26:47 +02:00
|
|
|
self.get_cmdline()
|
2012-07-18 23:46:28 +02:00
|
|
|
self.get_public_ssh_host_keys()
|
|
|
|
self.get_selinux_facts()
|
2012-09-14 20:25:23 +02:00
|
|
|
self.get_pkg_mgr_facts()
|
2012-09-30 08:58:04 +02:00
|
|
|
self.get_lsb_facts()
|
2013-02-28 21:33:56 +01:00
|
|
|
self.get_date_time_facts()
|
2013-03-01 00:54:43 +01:00
|
|
|
self.get_user_facts()
|
2012-07-18 23:46:28 +02:00
|
|
|
|
2012-07-19 01:22:43 +02:00
|
|
|
def populate(self):
|
|
|
|
return self.facts
|
|
|
|
|
2012-07-18 23:46:28 +02:00
|
|
|
# Platform
|
|
|
|
# patform.system() can be Linux, Darwin, Java, or Windows
|
|
|
|
def get_platform_facts(self):
|
|
|
|
self.facts['system'] = platform.system()
|
|
|
|
self.facts['kernel'] = platform.release()
|
|
|
|
self.facts['machine'] = platform.machine()
|
|
|
|
self.facts['python_version'] = platform.python_version()
|
|
|
|
self.facts['fqdn'] = socket.getfqdn()
|
|
|
|
self.facts['hostname'] = self.facts['fqdn'].split('.')[0]
|
2012-10-31 01:07:12 +01:00
|
|
|
self.facts['domain'] = '.'.join(self.facts['fqdn'].split('.')[1:])
|
2012-07-18 23:46:28 +02:00
|
|
|
if self.facts['machine'] == 'x86_64':
|
|
|
|
self.facts['architecture'] = self.facts['machine']
|
|
|
|
elif Facts._I386RE.search(self.facts['machine']):
|
|
|
|
self.facts['architecture'] = 'i386'
|
|
|
|
else:
|
2012-08-14 15:57:54 +02:00
|
|
|
self.facts['architecture'] = self.facts['machine']
|
2012-07-18 23:46:28 +02:00
|
|
|
if self.facts['system'] == 'Linux':
|
|
|
|
self.get_distribution_facts()
|
2013-02-14 12:45:08 +01:00
|
|
|
elif self.facts['system'] == 'AIX':
|
|
|
|
rc, out, err = module.run_command("/usr/sbin/bootinfo -p")
|
|
|
|
data = out.split('\n')
|
|
|
|
self.facts['architecture'] = data[0]
|
|
|
|
|
2012-07-18 23:46:28 +02:00
|
|
|
|
|
|
|
# platform.dist() is deprecated in 2.6
|
|
|
|
# in 2.6 and newer, you should use platform.linux_distribution()
|
|
|
|
def get_distribution_facts(self):
|
2013-02-16 19:35:37 +01:00
|
|
|
|
2013-02-19 15:59:22 +01:00
|
|
|
# A list with OS Family members
|
2013-02-23 18:25:04 +01:00
|
|
|
OS_FAMILY = dict(
|
|
|
|
RedHat = 'RedHat', Fedora = 'RedHat', CentOS = 'RedHat', Scientific = 'RedHat',
|
|
|
|
SLC = 'RedHat', Ascendos = 'RedHat', CloudLinux = 'RedHat', PSBM = 'RedHat',
|
|
|
|
OracleLinux = 'RedHat', OVS = 'RedHat', OEL = 'RedHat', Amazon = 'RedHat',
|
|
|
|
XenServer = 'RedHat', Ubuntu = 'Debian', Debian = 'Debian', SLES = 'Suse',
|
|
|
|
SLED = 'Suse', OpenSuSE = 'Suse', SuSE = 'Suse', Gentoo = 'Gentoo',
|
|
|
|
Archlinux = 'Archlinux', Mandriva = 'Mandrake', Mandrake = 'Mandrake',
|
|
|
|
Solaris = 'Solaris', Nexenta = 'Solaris', OmniOS = 'Solaris', OpenIndiana = 'Solaris',
|
2013-03-01 17:49:12 +01:00
|
|
|
SmartOS = 'Solaris', AIX = 'AIX', MacOSX = 'Darwin'
|
2013-02-23 18:25:04 +01:00
|
|
|
)
|
2013-02-19 15:59:22 +01:00
|
|
|
|
2013-02-14 12:45:08 +01:00
|
|
|
if self.facts['system'] == 'AIX':
|
2013-02-16 19:35:37 +01:00
|
|
|
self.facts['distribution'] = 'AIX'
|
|
|
|
rc, out, err = module.run_command("/usr/bin/oslevel")
|
|
|
|
data = out.split('.')
|
|
|
|
self.facts['distribution_version'] = data[0]
|
|
|
|
self.facts['distribution_release'] = data[1]
|
2013-03-01 17:49:12 +01:00
|
|
|
elif self.facts['system'] == 'Darwin':
|
|
|
|
self.facts['distribution'] = 'MacOSX'
|
|
|
|
rc, out, err = module.run_command("/usr/bin/sw_vers -productVersion")
|
|
|
|
data = out.split()[-1]
|
|
|
|
self.facts['distribution_version'] = data
|
2013-02-14 12:45:08 +01:00
|
|
|
else:
|
2013-02-16 19:35:37 +01:00
|
|
|
dist = platform.dist()
|
|
|
|
self.facts['distribution'] = dist[0].capitalize() or 'NA'
|
|
|
|
self.facts['distribution_version'] = dist[1] or 'NA'
|
|
|
|
self.facts['distribution_release'] = dist[2] or 'NA'
|
|
|
|
# Try to handle the exceptions now ...
|
|
|
|
for (path, name) in Facts.OSDIST_DICT.items():
|
|
|
|
if os.path.exists(path):
|
|
|
|
if self.facts['distribution'] == 'Fedora':
|
|
|
|
pass
|
|
|
|
elif name == 'RedHat':
|
|
|
|
data = get_file_content(path)
|
|
|
|
if 'Red Hat' in data:
|
|
|
|
self.facts['distribution'] = name
|
|
|
|
else:
|
|
|
|
self.facts['distribution'] = data.split()[0]
|
|
|
|
elif name == 'OtherLinux':
|
|
|
|
data = get_file_content(path)
|
|
|
|
if 'Amazon' in data:
|
|
|
|
self.facts['distribution'] = 'Amazon'
|
|
|
|
self.facts['distribution_version'] = data.split()[-1]
|
2013-02-28 04:16:12 +01:00
|
|
|
elif name == 'OpenWrt':
|
|
|
|
data = get_file_content(path)
|
|
|
|
if 'OpenWrt' in data:
|
|
|
|
self.facts['distribution'] = name
|
|
|
|
version = re.search('DISTRIB_RELEASE="(.*)"', data)
|
|
|
|
if version:
|
|
|
|
self.facts['distribution_version'] = version.groups()[0]
|
|
|
|
release = re.search('DISTRIB_CODENAME="(.*)"', data)
|
|
|
|
if release:
|
|
|
|
self.facts['distribution_release'] = release.groups()[0]
|
2012-07-18 23:46:28 +02:00
|
|
|
else:
|
2013-02-16 19:35:37 +01:00
|
|
|
self.facts['distribution'] = name
|
2012-07-18 23:46:28 +02:00
|
|
|
|
2013-02-23 18:25:04 +01:00
|
|
|
self.facts['os_family'] = self.facts['distribution']
|
|
|
|
if self.facts['distribution'] in OS_FAMILY:
|
2013-02-27 02:30:33 +01:00
|
|
|
self.facts['os_family'] = OS_FAMILY[self.facts['distribution']]
|
2013-02-19 15:59:22 +01:00
|
|
|
|
2012-09-05 18:26:47 +02:00
|
|
|
def get_cmdline(self):
|
|
|
|
data = get_file_content('/proc/cmdline')
|
|
|
|
if data:
|
|
|
|
self.facts['cmdline'] = {}
|
|
|
|
for piece in shlex.split(data):
|
|
|
|
item = piece.split('=', 1)
|
|
|
|
if len(item) == 1:
|
|
|
|
self.facts['cmdline'][item[0]] = True
|
|
|
|
else:
|
|
|
|
self.facts['cmdline'][item[0]] = item[1]
|
|
|
|
|
2012-07-18 23:46:28 +02:00
|
|
|
def get_public_ssh_host_keys(self):
|
2012-09-06 20:21:05 +02:00
|
|
|
dsa_filename = '/etc/ssh/ssh_host_dsa_key.pub'
|
|
|
|
rsa_filename = '/etc/ssh/ssh_host_rsa_key.pub'
|
|
|
|
|
|
|
|
if self.facts['system'] == 'Darwin':
|
|
|
|
dsa_filename = '/etc/ssh_host_dsa_key.pub'
|
|
|
|
rsa_filename = '/etc/ssh_host_rsa_key.pub'
|
|
|
|
dsa = get_file_content(dsa_filename)
|
|
|
|
rsa = get_file_content(rsa_filename)
|
2012-07-18 23:46:28 +02:00
|
|
|
if dsa is None:
|
|
|
|
dsa = 'NA'
|
|
|
|
else:
|
|
|
|
self.facts['ssh_host_key_dsa_public'] = dsa.split()[1]
|
|
|
|
if rsa is None:
|
|
|
|
rsa = 'NA'
|
|
|
|
else:
|
|
|
|
self.facts['ssh_host_key_rsa_public'] = rsa.split()[1]
|
|
|
|
|
2012-09-14 20:25:23 +02:00
|
|
|
def get_pkg_mgr_facts(self):
|
|
|
|
self.facts['pkg_mgr'] = 'unknown'
|
2012-09-17 05:51:46 +02:00
|
|
|
for pkg in Facts.PKG_MGRS:
|
|
|
|
if os.path.exists(pkg['path']):
|
|
|
|
self.facts['pkg_mgr'] = pkg['name']
|
2012-09-14 20:25:23 +02:00
|
|
|
|
2012-09-30 08:58:04 +02:00
|
|
|
def get_lsb_facts(self):
|
|
|
|
lsb_path = module.get_bin_path('lsb_release')
|
|
|
|
if lsb_path is None:
|
|
|
|
return self.facts
|
Update modules to use run_command in module_common.py
This updates apt, apt_repository, command, cron, easy_install, facter,
fireball, git, group, mount, ohai, pip, service, setup, subversion,
supervisorctl, svr4pkg, user, and yum to take advantage of run_command
in module_common.py.
2013-01-12 07:10:21 +01:00
|
|
|
rc, out, err = module.run_command([lsb_path, "-a"])
|
|
|
|
if rc == 0:
|
2012-09-30 08:58:04 +02:00
|
|
|
self.facts['lsb'] = {}
|
|
|
|
for line in out.split('\n'):
|
|
|
|
if len(line) < 1:
|
|
|
|
continue
|
|
|
|
value = line.split(':', 1)[1].strip()
|
|
|
|
if 'LSB Version:' in line:
|
|
|
|
self.facts['lsb']['release'] = value
|
|
|
|
elif 'Distributor ID:' in line:
|
|
|
|
self.facts['lsb']['id'] = value
|
|
|
|
elif 'Description:' in line:
|
|
|
|
self.facts['lsb']['description'] = value
|
|
|
|
elif 'Release:' in line:
|
|
|
|
self.facts['lsb']['release'] = value
|
|
|
|
elif 'Codename:' in line:
|
|
|
|
self.facts['lsb']['codename'] = value
|
|
|
|
if 'lsb' in self.facts and 'release' in self.facts['lsb']:
|
|
|
|
self.facts['lsb']['major_release'] = self.facts['lsb']['release'].split('.')[0]
|
|
|
|
|
2012-07-18 23:46:28 +02:00
|
|
|
def get_selinux_facts(self):
|
|
|
|
if not HAVE_SELINUX:
|
|
|
|
self.facts['selinux'] = False
|
|
|
|
return
|
|
|
|
self.facts['selinux'] = {}
|
|
|
|
if not selinux.is_selinux_enabled():
|
|
|
|
self.facts['selinux']['status'] = 'disabled'
|
|
|
|
else:
|
|
|
|
self.facts['selinux']['status'] = 'enabled'
|
2012-08-08 07:57:44 +02:00
|
|
|
try:
|
|
|
|
self.facts['selinux']['policyvers'] = selinux.security_policyvers()
|
2012-08-08 17:56:12 +02:00
|
|
|
except OSError, e:
|
2012-08-08 07:57:44 +02:00
|
|
|
self.facts['selinux']['policyvers'] = 'unknown'
|
|
|
|
try:
|
|
|
|
(rc, configmode) = selinux.selinux_getenforcemode()
|
2012-08-11 18:38:05 +02:00
|
|
|
if rc == 0:
|
|
|
|
self.facts['selinux']['config_mode'] = Facts.SELINUX_MODE_DICT.get(configmode, 'unknown')
|
2012-08-08 07:57:44 +02:00
|
|
|
else:
|
|
|
|
self.facts['selinux']['config_mode'] = 'unknown'
|
|
|
|
except OSError, e:
|
|
|
|
self.facts['selinux']['config_mode'] = 'unknown'
|
|
|
|
try:
|
|
|
|
mode = selinux.security_getenforce()
|
2012-08-11 18:38:05 +02:00
|
|
|
self.facts['selinux']['mode'] = Facts.SELINUX_MODE_DICT.get(mode, 'unknown')
|
2012-08-08 07:57:44 +02:00
|
|
|
except OSError, e:
|
|
|
|
self.facts['selinux']['mode'] = 'unknown'
|
|
|
|
try:
|
|
|
|
(rc, policytype) = selinux.selinux_getpolicytype()
|
|
|
|
if rc == 0:
|
|
|
|
self.facts['selinux']['type'] = policytype
|
|
|
|
else:
|
|
|
|
self.facts['selinux']['type'] = 'unknown'
|
|
|
|
except OSError, e:
|
|
|
|
self.facts['selinux']['type'] = 'unknown'
|
2012-07-18 23:46:28 +02:00
|
|
|
|
2013-02-28 21:33:56 +01:00
|
|
|
|
|
|
|
def get_date_time_facts(self):
|
|
|
|
self.facts['date_time'] = {}
|
|
|
|
|
|
|
|
now = datetime.datetime.now()
|
|
|
|
self.facts['date_time']['year'] = now.strftime('%Y')
|
|
|
|
self.facts['date_time']['month'] = now.strftime('%m')
|
|
|
|
self.facts['date_time']['day'] = now.strftime('%d')
|
|
|
|
self.facts['date_time']['hour'] = now.strftime('%H')
|
|
|
|
self.facts['date_time']['minute'] = now.strftime('%M')
|
|
|
|
self.facts['date_time']['second'] = now.strftime('%S')
|
|
|
|
self.facts['date_time']['epoch'] = now.strftime('%s')
|
|
|
|
self.facts['date_time']['date'] = now.strftime('%Y-%m-%d')
|
|
|
|
self.facts['date_time']['time'] = now.strftime('%H:%M:%S')
|
2013-03-01 09:28:03 +01:00
|
|
|
self.facts['date_time']['iso8601_micro'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ")
|
|
|
|
self.facts['date_time']['iso8601'] = now.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")
|
|
|
|
self.facts['date_time']['tz'] = time.strftime("%Z")
|
2013-02-28 21:33:56 +01:00
|
|
|
|
|
|
|
|
2013-03-01 00:54:43 +01:00
|
|
|
# User
|
|
|
|
def get_user_facts(self):
|
|
|
|
self.facts['user_id'] = getpass.getuser()
|
|
|
|
|
|
|
|
|
2012-07-18 23:46:28 +02:00
|
|
|
class Hardware(Facts):
    """
    This is a generic Hardware subclass of Facts.  This should be further
    subclassed to implement per platform.  If you subclass this, it
    should define:
    - memfree_mb
    - memtotal_mb
    - swapfree_mb
    - swaptotal_mb
    - processor (a list)
    - processor_cores
    - processor_count

    All subclasses MUST define platform.
    """
    platform = 'Generic'

    def __new__(cls, *arguments, **keyword):
        # Dispatch to the subclass whose `platform` matches this host;
        # fall back to the generic class when none does.
        subclass = cls
        for sc in Hardware.__subclasses__():
            if sc.platform == platform.system():
                subclass = sc
        return super(cls, subclass).__new__(subclass, *arguments, **keyword)

    def __init__(self):
        Facts.__init__(self)

    def populate(self):
        """Return the gathered facts; subclasses add hardware facts first."""
        return self.facts
|
|
|
|
class LinuxHardware(Hardware):
    """
    Linux-specific subclass of Hardware.  Defines memory and CPU facts:
    - memfree_mb
    - memtotal_mb
    - swapfree_mb
    - swaptotal_mb
    - processor (a list)
    - processor_cores
    - processor_count

    In addition, it also defines number of DMI facts and device facts.
    """
    platform = 'Linux'

    # Keys of interest when scanning /proc/meminfo.
    MEMORY_FACTS = ['MemTotal', 'SwapTotal', 'MemFree', 'SwapFree']

    # DMI bits: fact name -> sysfs file that holds the value.
    DMI_DICT = dict(
        form_factor     = '/sys/devices/virtual/dmi/id/chassis_type',
        product_name    = '/sys/devices/virtual/dmi/id/product_name',
        product_serial  = '/sys/devices/virtual/dmi/id/product_serial',
        product_uuid    = '/sys/devices/virtual/dmi/id/product_uuid',
        product_version = '/sys/devices/virtual/dmi/id/product_version',
        system_vendor   = '/sys/devices/virtual/dmi/id/sys_vendor',
        bios_date       = '/sys/devices/virtual/dmi/id/bios_date',
        bios_version    = '/sys/devices/virtual/dmi/id/bios_version'
    )

    # DMI SPEC -- http://www.dmtf.org/sites/default/files/standards/documents/DSP0134_2.7.0.pdf
    # Indexed by the integer chassis_type reported in sysfs.
    FORM_FACTOR = [ "Unknown", "Other", "Unknown", "Desktop",
                    "Low Profile Desktop", "Pizza Box", "Mini Tower", "Tower",
                    "Portable", "Laptop", "Notebook", "Hand Held", "Docking Station",
                    "All In One", "Sub Notebook", "Space-saving", "Lunch Box",
                    "Main Server Chassis", "Expansion Chassis", "Sub Chassis",
                    "Bus Expansion Chassis", "Peripheral Chassis", "RAID Chassis",
                    "Rack Mount Chassis", "Sealed-case PC", "Multi-system",
                    "CompactPCI", "AdvancedTCA", "Blade" ]
|
2012-07-19 01:22:43 +02:00
|
|
|
def __init__(self):
|
|
|
|
Hardware.__init__(self)
|
2012-07-18 23:46:28 +02:00
|
|
|
|
|
|
|
def populate(self):
|
|
|
|
self.get_cpu_facts()
|
|
|
|
self.get_memory_facts()
|
|
|
|
self.get_dmi_facts()
|
2013-01-02 05:52:27 +01:00
|
|
|
self.get_device_facts()
|
|
|
|
self.get_mount_facts()
|
2012-07-19 01:22:43 +02:00
|
|
|
return self.facts
|
2012-07-18 23:46:28 +02:00
|
|
|
|
|
|
|
def get_memory_facts(self):
|
|
|
|
if not os.access("/proc/meminfo", os.R_OK):
|
|
|
|
return
|
|
|
|
for line in open("/proc/meminfo").readlines():
|
|
|
|
data = line.split(":", 1)
|
|
|
|
key = data[0]
|
|
|
|
if key in LinuxHardware.MEMORY_FACTS:
|
|
|
|
val = data[1].strip().split(' ')[0]
|
|
|
|
self.facts["%s_mb" % key.lower()] = long(val) / 1024
|
|
|
|
|
|
|
|
def get_cpu_facts(self):
|
|
|
|
i = 0
|
|
|
|
physid = 0
|
|
|
|
sockets = {}
|
|
|
|
if not os.access("/proc/cpuinfo", os.R_OK):
|
|
|
|
return
|
|
|
|
self.facts['processor'] = []
|
|
|
|
for line in open("/proc/cpuinfo").readlines():
|
|
|
|
data = line.split(":", 1)
|
|
|
|
key = data[0].strip()
|
2012-08-24 23:11:06 +02:00
|
|
|
# model name is for Intel arch, Processor (mind the uppercase P)
|
|
|
|
# works for some ARM devices, like the Sheevaplug.
|
|
|
|
if key == 'model name' or key == 'Processor':
|
2012-07-18 23:46:28 +02:00
|
|
|
if 'processor' not in self.facts:
|
|
|
|
self.facts['processor'] = []
|
|
|
|
self.facts['processor'].append(data[1].strip())
|
|
|
|
i += 1
|
|
|
|
elif key == 'physical id':
|
|
|
|
physid = data[1].strip()
|
|
|
|
if physid not in sockets:
|
|
|
|
sockets[physid] = 1
|
|
|
|
elif key == 'cpu cores':
|
|
|
|
sockets[physid] = int(data[1].strip())
|
|
|
|
if len(sockets) > 0:
|
|
|
|
self.facts['processor_count'] = len(sockets)
|
|
|
|
self.facts['processor_cores'] = reduce(lambda x, y: x + y, sockets.values())
|
2012-04-18 01:59:23 +02:00
|
|
|
else:
|
2012-07-18 23:46:28 +02:00
|
|
|
self.facts['processor_count'] = i
|
|
|
|
self.facts['processor_cores'] = 'NA'
|
|
|
|
|
|
|
|
def get_dmi_facts(self):
|
|
|
|
for (key,path) in LinuxHardware.DMI_DICT.items():
|
|
|
|
data = get_file_content(path)
|
|
|
|
if data is not None:
|
|
|
|
if key == 'form_factor':
|
2012-08-02 16:06:55 +02:00
|
|
|
try:
|
|
|
|
self.facts['form_factor'] = LinuxHardware.FORM_FACTOR[int(data)]
|
|
|
|
except IndexError, e:
|
|
|
|
self.facts['form_factor'] = 'unknown (%s)' % data
|
2012-07-18 23:46:28 +02:00
|
|
|
else:
|
|
|
|
self.facts[key] = data
|
|
|
|
else:
|
|
|
|
self.facts[key] = 'NA'
|
|
|
|
|
2013-01-02 05:52:27 +01:00
|
|
|
def get_mount_facts(self):
|
|
|
|
self.facts['mounts'] = []
|
2013-02-13 02:01:58 +01:00
|
|
|
mtab = get_file_content('/etc/mtab', '')
|
2013-01-02 05:52:27 +01:00
|
|
|
for line in mtab.split('\n'):
|
|
|
|
if line.startswith('/'):
|
|
|
|
fields = line.rstrip('\n').split()
|
|
|
|
self.facts['mounts'].append({'mount': fields[1], 'device':fields[0], 'fstype': fields[2], 'options': fields[3]})
|
|
|
|
|
|
|
|
def get_device_facts(self):
|
|
|
|
self.facts['devices'] = {}
|
|
|
|
lspci = module.get_bin_path('lspci')
|
|
|
|
if lspci:
|
|
|
|
rc, pcidata, err = module.run_command(lspci)
|
2013-02-12 18:45:48 +01:00
|
|
|
else:
|
|
|
|
pcidata = None
|
2013-01-02 05:52:27 +01:00
|
|
|
|
2013-02-11 23:24:47 +01:00
|
|
|
try:
|
|
|
|
block_devs = os.listdir("/sys/block")
|
|
|
|
except OSError:
|
|
|
|
return
|
|
|
|
|
|
|
|
for block in block_devs:
|
Fixing logical error in detecting virtual device.
So In my Centos 5.9 machine, if there is RAID mount ansible will crash, as it cannot find scheduler file. The reason being, this should be a virtual device as there is no "device" folder under e.g. /sys/block/md0/
Here is the crash:
[kk@u1 ansible]$ ansible q3 -m setup -k -u root --tree=/tmp/facts
SSH password:
q3 | FAILED => failed to parse: /sys/block/md0
Traceback (most recent call last):
File "/root/.ansible/tmp/ansible-1360629441.14-171498703486275/setup", line 1797, in ?
main()
File "/root/.ansible/tmp/ansible-1360629441.14-171498703486275/setup", line 1050, in main
data = run_setup(module)
File "/root/.ansible/tmp/ansible-1360629441.14-171498703486275/setup", line 1000, in run_setup
facts = ansible_facts()
File "/root/.ansible/tmp/ansible-1360629441.14-171498703486275/setup", line 990, in ansible_facts
facts.update(Hardware().populate())
File "/root/.ansible/tmp/ansible-1360629441.14-171498703486275/setup", line 312, in populate
self.get_device_facts()
File "/root/.ansible/tmp/ansible-1360629441.14-171498703486275/setup", line 439, in get_device_facts
m = re.match(".*?(\[(.*)\])", scheduler)
File "/usr/lib64/python2.4/sre.py", line 129, in match
return _compile(pattern, flags).match(string)
TypeError: expected string or buffer
2013-02-12 03:23:58 +01:00
|
|
|
virtual = 1
|
2013-01-02 05:52:27 +01:00
|
|
|
sysfs_no_links = 0
|
|
|
|
try:
|
|
|
|
path = os.readlink(os.path.join("/sys/block/", block))
|
|
|
|
except OSError, e:
|
|
|
|
if e.errno == errno.EINVAL:
|
|
|
|
path = block
|
|
|
|
sysfs_no_links = 1
|
|
|
|
else:
|
|
|
|
continue
|
2013-02-12 18:45:48 +01:00
|
|
|
if "virtual" in path:
|
2013-01-02 05:52:27 +01:00
|
|
|
continue
|
|
|
|
sysdir = os.path.join("/sys/block", path)
|
|
|
|
if sysfs_no_links == 1:
|
|
|
|
for folder in os.listdir(sysdir):
|
2013-02-12 18:45:48 +01:00
|
|
|
if "device" in folder:
|
Fixing logical error in detecting virtual device.
So In my Centos 5.9 machine, if there is RAID mount ansible will crash, as it cannot find scheduler file. The reason being, this should be a virtual device as there is no "device" folder under e.g. /sys/block/md0/
Here is the crash:
[kk@u1 ansible]$ ansible q3 -m setup -k -u root --tree=/tmp/facts
SSH password:
q3 | FAILED => failed to parse: /sys/block/md0
Traceback (most recent call last):
File "/root/.ansible/tmp/ansible-1360629441.14-171498703486275/setup", line 1797, in ?
main()
File "/root/.ansible/tmp/ansible-1360629441.14-171498703486275/setup", line 1050, in main
data = run_setup(module)
File "/root/.ansible/tmp/ansible-1360629441.14-171498703486275/setup", line 1000, in run_setup
facts = ansible_facts()
File "/root/.ansible/tmp/ansible-1360629441.14-171498703486275/setup", line 990, in ansible_facts
facts.update(Hardware().populate())
File "/root/.ansible/tmp/ansible-1360629441.14-171498703486275/setup", line 312, in populate
self.get_device_facts()
File "/root/.ansible/tmp/ansible-1360629441.14-171498703486275/setup", line 439, in get_device_facts
m = re.match(".*?(\[(.*)\])", scheduler)
File "/usr/lib64/python2.4/sre.py", line 129, in match
return _compile(pattern, flags).match(string)
TypeError: expected string or buffer
2013-02-12 03:23:58 +01:00
|
|
|
virtual = 0
|
2013-01-02 05:52:27 +01:00
|
|
|
break
|
|
|
|
if virtual:
|
|
|
|
continue
|
|
|
|
d = {}
|
2013-02-12 18:45:48 +01:00
|
|
|
diskname = os.path.basename(sysdir)
|
2013-01-02 05:52:27 +01:00
|
|
|
for key in ['vendor', 'model']:
|
|
|
|
d[key] = get_file_content(sysdir + "/device/" + key)
|
|
|
|
|
|
|
|
for key,test in [ ('removable','/removable'), \
|
|
|
|
('support_discard','/queue/discard_granularity'),
|
|
|
|
]:
|
|
|
|
d[key] = get_file_content(sysdir + test)
|
|
|
|
|
|
|
|
d['partitions'] = {}
|
|
|
|
for folder in os.listdir(sysdir):
|
|
|
|
m = re.search("(" + diskname + "\d+)", folder)
|
|
|
|
if m:
|
|
|
|
part = {}
|
|
|
|
partname = m.group(1)
|
|
|
|
part_sysdir = sysdir + "/" + partname
|
|
|
|
|
|
|
|
part['start'] = get_file_content(part_sysdir + "/start",0)
|
|
|
|
part['sectors'] = get_file_content(part_sysdir + "/size",0)
|
|
|
|
part['sectorsize'] = get_file_content(part_sysdir + "/queue/hw_sector_size",512)
|
|
|
|
part['size'] = module.pretty_bytes((float(part['sectors']) * float(part['sectorsize'])))
|
|
|
|
d['partitions'][partname] = part
|
|
|
|
|
|
|
|
d['rotational'] = get_file_content(sysdir + "/queue/rotational")
|
|
|
|
d['scheduler_mode'] = ""
|
|
|
|
scheduler = get_file_content(sysdir + "/queue/scheduler")
|
|
|
|
m = re.match(".*?(\[(.*)\])", scheduler)
|
|
|
|
if m:
|
|
|
|
d['scheduler_mode'] = m.group(2)
|
|
|
|
|
|
|
|
d['sectors'] = get_file_content(sysdir + "/size")
|
2013-02-11 23:23:14 +01:00
|
|
|
if not d['sectors']:
|
|
|
|
d['sectors'] = 0
|
2013-01-02 05:52:27 +01:00
|
|
|
d['sectorsize'] = get_file_content(sysdir + "/queue/hw_sector_size")
|
2013-02-11 23:23:14 +01:00
|
|
|
if not d['sectorsize']:
|
|
|
|
d['sectorsize'] = 512
|
2013-01-02 05:52:27 +01:00
|
|
|
d['size'] = module.pretty_bytes(float(d['sectors']) * float(d['sectorsize']))
|
|
|
|
|
|
|
|
d['host'] = ""
|
2013-02-12 18:45:48 +01:00
|
|
|
m = re.match(".+/[a-f0-9]+:([a-f0-9]+:[a-f0-9]+\.[a-f0-9]+)/host\d+/", sysdir)
|
|
|
|
if m and pcidata:
|
2013-01-02 05:52:27 +01:00
|
|
|
pciid = m.group(1)
|
|
|
|
did = re.escape(pciid)
|
|
|
|
m = re.search("^" + did + "\s(.*)$", pcidata, re.MULTILINE)
|
|
|
|
d['host'] = m.group(1)
|
|
|
|
|
|
|
|
d['holders'] = []
|
|
|
|
for folder in os.listdir(sysdir + "/holders"):
|
2013-02-12 18:45:48 +01:00
|
|
|
if not folder.startswith("dm-"):
|
|
|
|
continue
|
|
|
|
name = get_file_content(sysdir + "/holders/" + folder + "/dm/name")
|
2013-01-02 05:52:27 +01:00
|
|
|
if name:
|
|
|
|
d['holders'].append(name)
|
|
|
|
else:
|
|
|
|
d['holders'].append(folder)
|
|
|
|
|
|
|
|
self.facts['devices'][diskname] = d
|
|
|
|
|
|
|
|
|
2012-07-18 23:46:28 +02:00
|
|
|
class SunOSHardware(Hardware):
    """
    In addition to the generic memory and cpu facts, this also sets
    swap_reserved_mb and swap_allocated_mb that is available from *swap -s*.
    """
    platform = 'SunOS'
2012-07-19 01:22:43 +02:00
|
|
|
def __init__(self):
|
|
|
|
Hardware.__init__(self)
|
2012-07-18 23:46:28 +02:00
|
|
|
|
|
|
|
def populate(self):
|
|
|
|
self.get_cpu_facts()
|
|
|
|
self.get_memory_facts()
|
2012-07-19 01:22:43 +02:00
|
|
|
return self.facts
|
2012-07-18 23:46:28 +02:00
|
|
|
|
|
|
|
def get_cpu_facts(self):
|
Update modules to use run_command in module_common.py
This updates apt, apt_repository, command, cron, easy_install, facter,
fireball, git, group, mount, ohai, pip, service, setup, subversion,
supervisorctl, svr4pkg, user, and yum to take advantage of run_command
in module_common.py.
2013-01-12 07:10:21 +01:00
|
|
|
rc, out, err = module.run_command("/usr/sbin/psrinfo -v")
|
2012-07-18 23:46:28 +02:00
|
|
|
self.facts['processor'] = []
|
|
|
|
for line in out.split('\n'):
|
|
|
|
if 'processor operates' in line:
|
|
|
|
if 'processor' not in self.facts:
|
|
|
|
self.facts['processor'] = []
|
|
|
|
self.facts['processor'].append(line.strip())
|
|
|
|
self.facts['processor_cores'] = 'NA'
|
|
|
|
self.facts['processor_count'] = len(self.facts['processor'])
|
|
|
|
|
|
|
|
def get_memory_facts(self):
|
Update modules to use run_command in module_common.py
This updates apt, apt_repository, command, cron, easy_install, facter,
fireball, git, group, mount, ohai, pip, service, setup, subversion,
supervisorctl, svr4pkg, user, and yum to take advantage of run_command
in module_common.py.
2013-01-12 07:10:21 +01:00
|
|
|
rc, out, err = module.run_command(["/usr/sbin/prtconf"])
|
2012-07-18 23:46:28 +02:00
|
|
|
for line in out.split('\n'):
|
|
|
|
if 'Memory size' in line:
|
|
|
|
self.facts['memtotal_mb'] = line.split()[2]
|
Update modules to use run_command in module_common.py
This updates apt, apt_repository, command, cron, easy_install, facter,
fireball, git, group, mount, ohai, pip, service, setup, subversion,
supervisorctl, svr4pkg, user, and yum to take advantage of run_command
in module_common.py.
2013-01-12 07:10:21 +01:00
|
|
|
rc, out, err = module.run_command("/usr/sbin/swap -s")
|
2012-07-18 23:46:28 +02:00
|
|
|
allocated = long(out.split()[1][:-1])
|
|
|
|
reserved = long(out.split()[5][:-1])
|
|
|
|
used = long(out.split()[8][:-1])
|
|
|
|
free = long(out.split()[10][:-1])
|
|
|
|
self.facts['swapfree_mb'] = free / 1024
|
|
|
|
self.facts['swaptotal_mb'] = (free + used) / 1024
|
|
|
|
self.facts['swap_allocated_mb'] = allocated / 1024
|
|
|
|
self.facts['swap_reserved_mb'] = reserved / 1024
|
|
|
|
|
|
|
|
class FreeBSDHardware(Hardware):
    """
    FreeBSD-specific subclass of Hardware.  Defines memory and CPU facts:
    - memfree_mb
    - memtotal_mb
    - swapfree_mb
    - swaptotal_mb
    - processor (a list)
    - processor_cores
    - processor_count
    - devices
    """
    platform = 'FreeBSD'
    # Saved copy of the kernel boot message buffer; parsed for CPU details.
    DMESG_BOOT = '/var/run/dmesg.boot'

    def __init__(self):
        Hardware.__init__(self)

    def populate(self):
        """Gather CPU, memory, device and mount facts; return the facts dict."""
        self.get_cpu_facts()
        self.get_memory_facts()
        self.get_device_facts()
        self.get_mount_facts()
        return self.facts

    def get_cpu_facts(self):
        """Collect the CPU count from sysctl and model strings from dmesg."""
        self.facts['processor'] = []
        rc, out, err = module.run_command("/sbin/sysctl -n hw.ncpu")
        self.facts['processor_count'] = out.strip()

        # Prefer the boot-time message buffer; fall back to the live buffer,
        # which may already have scrolled past the CPU lines on a busy box.
        dmesg_boot = get_file_content(FreeBSDHardware.DMESG_BOOT)
        if not dmesg_boot:
            rc, dmesg_boot, err = module.run_command("/sbin/dmesg")
        for line in dmesg_boot.split('\n'):
            if 'CPU:' in line:
                model = re.sub(r'CPU:\s+', r"", line)
                self.facts['processor'].append(model.strip())
            if 'Logical CPUs per core' in line:
                self.facts['processor_cores'] = line.split()[4]

    def get_memory_facts(self):
        """Compute memory facts from VM page counters and swapinfo."""
        rc, out, err = module.run_command("/sbin/sysctl vm.stats")
        for line in out.split('\n'):
            fields = line.split()
            if 'vm.stats.vm.v_page_size' in line:
                pagesize = long(fields[1])
            if 'vm.stats.vm.v_page_count' in line:
                pagecount = long(fields[1])
            if 'vm.stats.vm.v_free_count' in line:
                freecount = long(fields[1])
        self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024
        self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024
        # Get swapinfo.  swapinfo output looks like:
        # Device          1M-blocks     Used    Avail Capacity
        # /dev/ada0p3        314368        0   314368     0%
        #
        rc, out, err = module.run_command("/usr/sbin/swapinfo -m")
        lines = out.split('\n')
        # drop a trailing empty line so the summary row is last
        if len(lines[-1]) == 0:
            lines.pop()
        fields = lines[-1].split()
        self.facts['swaptotal_mb'] = fields[1]
        self.facts['swapfree_mb'] = fields[3]

    def get_mount_facts(self):
        """Parse /etc/fstab into a list of mount dictionaries."""
        self.facts['mounts'] = []
        fstab = get_file_content('/etc/fstab')
        if fstab:
            for line in fstab.split('\n'):
                # skip comments and blank lines
                if line.startswith('#') or line.strip() == '':
                    continue
                fields = re.sub(r'\s+', ' ', line.rstrip('\n')).split()
                self.facts['mounts'].append({'mount': fields[1],
                                             'device': fields[0],
                                             'fstype': fields[2],
                                             'options': fields[3]})

    def get_device_facts(self):
        """Map disk drive names in /dev to the slices that live on them."""
        dev_dir = '/dev'
        self.facts['devices'] = {}
        drive_re = re.compile('(ada?\d+|da\d+|a?cd\d+)') #TODO: rc, disks, err = module.run_command("/sbin/sysctl kern.disks")
        slice_re = re.compile('(ada?\d+s\d+\w*|da\d+s\d+\w*)')
        if os.path.isdir(dev_dir):
            for entry in sorted(os.listdir(dev_dir)):
                drive = drive_re.match(entry)
                if drive:
                    self.facts['devices'][drive.group(1)] = []
                part = slice_re.match(entry)
                if part:
                    # every slice name begins with its drive name, so
                    # 'drive' matched above whenever 'part' matches here
                    self.facts['devices'][drive.group(1)].append(part.group(1))
class AIX(Hardware):
    """
    AIX-specific subclass of Hardware.  Defines memory and CPU facts:
    - memfree_mb
    - memtotal_mb
    - swapfree_mb
    - swaptotal_mb
    - processor (a list)
    - processor_cores
    - processor_count
    """
    platform = 'AIX'

    def __init__(self):
        Hardware.__init__(self)

    def populate(self):
        """Gather CPU, memory and firmware facts; return the facts dict."""
        self.get_cpu_facts()
        self.get_memory_facts()
        self.get_dmi_facts()
        return self.facts

    def get_cpu_facts(self):
        """Collect processor type, SMT thread count and processor count.

        NOTE(review): on this platform 'processor' ends up as a plain
        string (second field of lsattr output) rather than the list used
        on other platforms -- kept as-is for backward compatibility.
        """
        rc, out, err = module.run_command("/usr/sbin/lsattr -El proc0 -a type")
        data = out.split(' ')
        self.facts['processor'] = data[1]

        rc, out, err = module.run_command("/usr/sbin/lsattr -El proc0 -a smt_threads")
        data = out.split(' ')
        self.facts['processor_cores'] = int(data[1])

        # Count the processors lsdev reports as 'Available'.
        rc, out, err = module.run_command("/usr/sbin/lsdev -Cc processor")
        i = 0
        for line in out.split('\n'):
            if 'Available' in line:
                i += 1
        self.facts['processor_count'] = i

    def get_memory_facts(self):
        """Derive memory facts from vmstat page counters and lsps swap usage."""
        pagesize = 4096  # vmstat counters are in 4 KB pages
        rc, out, err = module.run_command("/usr/bin/vmstat -v")
        for line in out.split('\n'):
            data = line.split()
            if 'memory pages' in line:
                pagecount = long(data[0])
            if 'free pages' in line:
                freecount = long(data[0])
        self.facts['memtotal_mb'] = pagesize * pagecount / 1024 / 1024
        self.facts['memfree_mb'] = pagesize * freecount / 1024 / 1024
        # lsps -s prints a header row followed by e.g.:
        #   Total Paging Space   Percent Used
        #         512MB               1%
        rc, out, err = module.run_command("/usr/sbin/lsps -s")
        lines = out.split('\n')
        data = lines[1].split()
        swaptotal_mb = long(data[0].rstrip('MB'))
        percused = int(data[1].rstrip('%'))
        self.facts['swaptotal_mb'] = swaptotal_mb
        self.facts['swapfree_mb'] = long(swaptotal_mb * ( 100 - percused ) / 100)

    def get_dmi_facts(self):
        """Record the system firmware version reported by sys0."""
        rc, out, err = module.run_command("/usr/sbin/lsattr -El sys0 -a fwversion")
        data = out.split()
        # NOTE(review): str.strip('IBM,') strips any of the characters
        # 'I','B','M',',' from both ends rather than the literal prefix
        # "IBM," -- behavior kept as-is; verify against real lsattr output.
        self.facts['firmware_version'] = data[1].strip('IBM,')
class Darwin(Hardware):
    """
    Darwin-specific subclass of Hardware.  Defines memory and CPU facts:
    - processor_cores
    - memtotal_mb
    - memfree_mb
    """
    platform = 'Darwin'

    def __init__(self):
        Hardware.__init__(self)

    def populate(self):
        """Gather CPU and memory facts; return the facts dict."""
        self.get_cpu_facts()
        self.get_memory_facts()
        return self.facts

    def _sysctl(self, key):
        """Run sysctl for *key* and return the value part of 'key: value'."""
        rc, out, err = module.run_command("/usr/sbin/sysctl " + key)
        # drop the trailing newline, then split off the "key: " prefix
        return out[:-1].split(': ')[1]

    def get_cpu_facts(self):
        """Read the CPU brand string and core count via sysctl."""
        self.facts['processor'] = []
        self.facts['processor'] = self._sysctl('machdep.cpu.brand_string')
        self.facts['processor_cores'] = self._sysctl('machdep.cpu.core_count')

    def get_memory_facts(self):
        """Read total and free memory (bytes, converted to MB) via sysctl."""
        self.facts['memtotal_mb'] = int(self._sysctl('hw.memsize')) / 1024 / 1024
        self.facts['memfree_mb'] = int(self._sysctl('hw.usermem')) / 1024 / 1024
class Network(Facts):
    """
    This is a generic Network subclass of Facts.  This should be further
    subclassed to implement per platform.  If you subclass this,
    you must define:
    - interfaces (a list of interface names)
    - interface_<name> dictionary of ipv4, ipv6, and mac address information.

    All subclasses MUST define platform.
    """
    platform = 'Generic'

    # numeric IPv6 scope values mapped to human-readable names
    IPV6_SCOPE = { '0' : 'global',
                   '10' : 'host',
                   '20' : 'link',
                   '40' : 'admin',
                   '50' : 'site',
                   '80' : 'organization' }

    def __new__(cls, *arguments, **keyword):
        # Dispatch instantiation to the platform-specific subclass, if any.
        subclass = cls
        for candidate in Network.__subclasses__():
            if candidate.platform == platform.system():
                subclass = candidate
        return super(cls, subclass).__new__(subclass, *arguments, **keyword)

    def __init__(self):
        Facts.__init__(self)

    def populate(self):
        # The generic class gathers nothing itself.
        return self.facts
class LinuxNetwork(Network):
    """
    This is a Linux-specific subclass of Network.  It defines
    - interfaces (a list of interface names)
    - interface_<name> dictionary of ipv4, ipv6, and mac address information.
    - all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
    - ipv4_address and ipv6_address: the first non-local address for each family.
    """
    platform = 'Linux'

    def __init__(self):
        Network.__init__(self)

    def populate(self):
        """Gather network facts with the iproute2 'ip' tool."""
        ip_path = module.get_bin_path('ip')
        if ip_path is None:
            # without iproute2 we cannot gather anything
            return self.facts
        default_ipv4, default_ipv6 = self.get_default_interfaces(ip_path)
        interfaces, ips = self.get_interfaces_info(ip_path, default_ipv4, default_ipv6)
        self.facts['interfaces'] = interfaces.keys()
        for name in interfaces:
            self.facts[name] = interfaces[name]
        self.facts['default_ipv4'] = default_ipv4
        self.facts['default_ipv6'] = default_ipv6
        self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
        self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
        return self.facts

    def get_default_interfaces(self, ip_path):
        """Return (ipv4, ipv6) dicts describing the default route per family.

        Uses:
          ip -4 route get 8.8.8.8                   -> Google public DNS
          ip -6 route get 2404:6800:400a:800::1012  -> ipv6.google.com
        to find out the default outgoing interface, address, and gateway.
        """
        command = dict(
            v4 = [ip_path, '-4', 'route', 'get', '8.8.8.8'],
            v6 = [ip_path, '-6', 'route', 'get', '2404:6800:400a:800::1012']
        )
        interface = dict(v4 = {}, v6 = {})
        for family in 'v4', 'v6':
            if family == 'v6' and not socket.has_ipv6:
                continue
            rc, out, err = module.run_command(command[family])
            if not out:
                # v6 routing may result in
                #   RTNETLINK answers: Invalid argument
                continue
            words = out.split('\n')[0].split()
            # A valid reply starts with the queried address on the first line
            if words and words[0] == command[family][-1]:
                for idx in range(len(words) - 1):
                    if words[idx] == 'dev':
                        interface[family]['interface'] = words[idx+1]
                    elif words[idx] == 'src':
                        interface[family]['address'] = words[idx+1]
                    elif words[idx] == 'via' and words[idx+1] != command[family][-1]:
                        interface[family]['gateway'] = words[idx+1]
        return interface['v4'], interface['v6']

    def get_interfaces_info(self, ip_path, default_ipv4, default_ipv6):
        """Parse 'ip addr show' output into per-interface dicts plus address lists."""
        interfaces = {}
        ips = dict(
            all_ipv4_addresses = [],
            all_ipv6_addresses = [],
        )
        rc, out, err = module.run_command([ip_path, 'addr', 'show'])
        for line in out.split('\n'):
            if not line:
                continue
            words = line.split()
            if not line.startswith(' '):
                # interface header, e.g. "2: eth0: <FLAGS> mtu 1500 ..."
                device = words[1][0:-1]
                mtu = words[4]
            elif words[0].startswith('link/'):
                iface_type = words[0].split('/')[1]
                # tun interfaces can have any interface type, but won't have an address
                if iface_type in ('void', 'none') or len(words) == 1:
                    macaddress = 'unknown'
                else:
                    macaddress = words[1]
            elif words[0] == 'inet':
                if '/' in words[1]:
                    address, netmask_length = words[1].split('/')
                else:
                    # pointopoint interfaces do not have a prefix
                    address = words[1]
                    netmask_length = "32"
                address_bin = struct.unpack('!L', socket.inet_aton(address))[0]
                netmask_bin = (1<<32) - (1<<32>>int(netmask_length))
                netmask = socket.inet_ntoa(struct.pack('!L', netmask_bin))
                network = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
                iface = words[-1]
                # If an interface has multiple IPv4 addresses, make up an
                # interface name for each address
                if iface in interfaces:
                    suffix = 0
                    while str(iface) + "_" + str(suffix) in interfaces:
                        suffix += 1
                    iface = str(iface) + "_" + str(suffix)
                interfaces[iface] = {'macaddress': macaddress,
                                     'mtu': mtu,
                                     'type': iface_type,
                                     'device': device,
                                     'ipv4': {'address': address,
                                              'netmask': netmask,
                                              'network': network}}
                # If this is the default address, update default_ipv4
                if 'address' in default_ipv4 and default_ipv4['address'] == address:
                    default_ipv4['netmask'] = netmask
                    default_ipv4['network'] = network
                    default_ipv4['macaddress'] = macaddress
                    default_ipv4['mtu'] = mtu
                    default_ipv4['type'] = iface_type
                    default_ipv4['alias'] = words[-1]
                if not address.startswith('127.'):
                    ips['all_ipv4_addresses'].append(address)
            elif words[0] == 'inet6':
                address, prefix = words[1].split('/')
                scope = words[3]
                iface = device
                if iface not in interfaces:
                    interfaces[iface] = {'macaddress': macaddress,
                                         'mtu': mtu,
                                         'device': device}
                if 'ipv6' not in interfaces[iface]:
                    interfaces[iface]['ipv6'] = []
                interfaces[iface]['ipv6'].append({'address': address,
                                                  'prefix': prefix,
                                                  'scope': scope})
                # If this is the default address, update default_ipv6
                if 'address' in default_ipv6 and default_ipv6['address'] == address:
                    default_ipv6['prefix'] = prefix
                    default_ipv6['scope'] = scope
                    default_ipv6['macaddress'] = macaddress
                    default_ipv6['mtu'] = mtu
                    default_ipv6['type'] = iface_type
                if not address == '::1':
                    ips['all_ipv6_addresses'].append(address)
        return interfaces, ips
class GenericBsdIfconfigNetwork(Network):
    """
    This is a generic BSD subclass of Network using the ifconfig command.
    It defines
    - interfaces (a list of interface names)
    - interface_<name> dictionary of ipv4, ipv6, and mac address information.
    - all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
    It currently does not define
    - default_ipv4 and default_ipv6
    - type, mtu and network on interfaces
    """
    platform = 'Generic_BSD_Ifconfig'

    def __init__(self):
        Network.__init__(self)

    def populate(self):
        """Gather interface facts by parsing ifconfig and route output."""
        ifconfig_path = module.get_bin_path('ifconfig')
        if ifconfig_path is None:
            return self.facts
        route_path = module.get_bin_path('route')
        if route_path is None:
            return self.facts
        default_ipv4, default_ipv6 = self.get_default_interfaces(route_path)
        interfaces, ips = self.get_interfaces_info(ifconfig_path)
        self.merge_default_interface(default_ipv4, interfaces, 'ipv4')
        self.merge_default_interface(default_ipv6, interfaces, 'ipv6')
        self.facts['interfaces'] = interfaces.keys()
        for name in interfaces:
            self.facts[name] = interfaces[name]
        self.facts['default_ipv4'] = default_ipv4
        self.facts['default_ipv6'] = default_ipv6
        self.facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
        self.facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
        return self.facts

    def get_default_interfaces(self, route_path):
        """Return (ipv4, ipv6) dicts naming the default outgoing interface.

        Uses:
          route -n get 8.8.8.8                   -> Google public DNS
          route -n get 2404:6800:400a:800::1012  -> ipv6.google.com
        """
        command = dict(
            v4 = [route_path, '-n', 'get', '8.8.8.8'],
            v6 = [route_path, '-n', 'get', '2404:6800:400a:800::1012']
        )
        interface = dict(v4 = {}, v6 = {})
        for family in 'v4', 'v6':
            if family == 'v6' and not socket.has_ipv6:
                continue
            rc, out, err = module.run_command(command[family])
            if not out:
                # v6 routing may result in
                #   RTNETLINK answers: Invalid argument
                continue
            for line in out.split('\n'):
                words = line.split()
                # look for first word starting interface
                if words and words[0] == 'interface:':
                    interface[family]['interface'] = words[1]
        return interface['v4'], interface['v6']

    def get_interfaces_info(self, ifconfig_path):
        """Parse ifconfig output, dispatching each line by its first token."""
        interfaces = {}
        current_if = {}
        ips = dict(
            all_ipv4_addresses = [],
            all_ipv6_addresses = [],
        )
        rc, out, err = module.run_command([ifconfig_path])
        for line in out.split('\n'):
            if not line:
                continue
            words = line.split()
            # an unindented line with several fields starts a new interface
            if re.match('^\S', line) and len(words) > 3:
                current_if = self.parse_interface_line(words)
                interfaces[ current_if['device'] ] = current_if
            elif words[0].startswith('options='):
                self.parse_options_line(words, current_if, ips)
            elif words[0] == 'nd6':
                self.parse_nd6_line(words, current_if, ips)
            elif words[0] == 'ether':
                self.parse_ether_line(words, current_if, ips)
            elif words[0] == 'media:':
                self.parse_media_line(words, current_if, ips)
            elif words[0] == 'status:':
                self.parse_status_line(words, current_if, ips)
            elif words[0] == 'lladdr':
                self.parse_lladdr_line(words, current_if, ips)
            elif words[0] == 'inet':
                self.parse_inet_line(words, current_if, ips)
            elif words[0] == 'inet6':
                self.parse_inet6_line(words, current_if, ips)
            else:
                self.parse_unknown_line(words, current_if, ips)
        return interfaces, ips

    def parse_interface_line(self, words):
        """Start a fresh interface record from an 'em0: flags=...' line."""
        device = words[0][0:-1]
        current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
        current_if['flags'] = self.get_options(words[1])
        current_if['mtu'] = words[3]
        current_if['macaddress'] = 'unknown'    # will be overwritten later
        return current_if

    def parse_options_line(self, words, current_if, ips):
        # Mac has options like this...
        current_if['options'] = self.get_options(words[0])

    def parse_nd6_line(self, words, current_if, ips):
        # FreBSD has options like this...
        current_if['options'] = self.get_options(words[1])

    def parse_ether_line(self, words, current_if, ips):
        current_if['macaddress'] = words[1]

    def parse_media_line(self, words, current_if, ips):
        # not sure if this is useful - we also drop information
        current_if['media'] = words[1]
        if len(words) > 2:
            current_if['media_select'] = words[2]
        if len(words) > 3:
            current_if['media_type'] = words[3][1:]
        if len(words) > 4:
            current_if['media_options'] = self.get_options(words[4])

    def parse_status_line(self, words, current_if, ips):
        current_if['status'] = words[1]

    def parse_lladdr_line(self, words, current_if, ips):
        current_if['lladdr'] = words[1]

    def parse_inet_line(self, words, current_if, ips):
        """Record one IPv4 address with netmask, network and broadcast."""
        address = {'address': words[1]}
        # deal with hex netmask
        if words[3].startswith('0x'):
            address['netmask'] = socket.inet_ntoa(struct.pack('!L', int(words[3], base=16)))
        else:
            # otherwise assume this is a dotted quad
            address['netmask'] = words[3]
        # calculate the network
        address_bin = struct.unpack('!L', socket.inet_aton(address['address']))[0]
        netmask_bin = struct.unpack('!L', socket.inet_aton(address['netmask']))[0]
        address['network'] = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
        # broadcast may be given or we need to calculate
        if len(words) > 5:
            address['broadcast'] = words[5]
        else:
            address['broadcast'] = socket.inet_ntoa(struct.pack('!L', address_bin | (~netmask_bin & 0xffffffff)))
        # add to our list of addresses
        if not words[1].startswith('127.'):
            ips['all_ipv4_addresses'].append(address['address'])
        current_if['ipv4'].append(address)

    def parse_inet6_line(self, words, current_if, ips):
        """Record one IPv6 address with optional prefix length and scope."""
        address = {'address': words[1]}
        if (len(words) >= 4) and (words[2] == 'prefixlen'):
            address['prefix'] = words[3]
        if (len(words) >= 6) and (words[4] == 'scopeid'):
            address['scope'] = words[5]
        if not address['address'] == '::1' and not address['address'] == 'fe80::1%lo0':
            ips['all_ipv6_addresses'].append(address['address'])
        current_if['ipv6'].append(address)

    def parse_unknown_line(self, words, current_if, ips):
        # we are going to ignore unknown lines here - this may be
        # a bad idea - but you can override it in your subclass
        pass

    def get_options(self, option_string):
        """Split the comma-separated list inside '<...>' into a list."""
        start = option_string.find('<') + 1
        end = option_string.rfind('>')
        if (start > 0) and (end > 0) and (end > start + 1):
            return option_string[start:end].split(',')
        return []

    def merge_default_interface(self, defaults, interfaces, ip_type):
        """Copy the default interface's details into the defaults dict."""
        if not 'interface' in defaults.keys():
            return
        ifinfo = interfaces[defaults['interface']]
        # copy all the interface values across except addresses
        for key in ifinfo.keys():
            if key != 'ipv4' and key != 'ipv6':
                defaults[key] = ifinfo[key]
        # then fold in the first address of the requested family
        if len(ifinfo[ip_type]) > 0:
            for key in ifinfo[ip_type][0].keys():
                defaults[key] = ifinfo[ip_type][0][key]
class DarwinNetwork(GenericBsdIfconfigNetwork, Network):
    """
    This is the Mac OS X/Darwin Network Class.
    It uses the GenericBsdIfconfigNetwork unchanged
    """
    platform = 'Darwin'

    # media line is different to the default FreeBSD one
    def parse_media_line(self, words, current_if, ips):
        # not sure if this is useful - we also drop information
        current_if['media'] = 'Unknown' # Mac does not give us this
        current_if['media_select'] = words[1]
        if len(words) > 2:
            current_if['media_type'] = words[2][1:]
        if len(words) > 3:
            current_if['media_options'] = self.get_options(words[3])
class FreeBSDNetwork(GenericBsdIfconfigNetwork, Network):
    """
    This is the FreeBSD Network Class.
    It uses the GenericBsdIfconfigNetwork unchanged, so it only
    pins the platform name used for dispatch.
    """
    platform = 'FreeBSD'
class Virtual(Facts):
    """
    This is a generic Virtual subclass of Facts.  This should be further
    subclassed to implement per platform.  If you subclass this,
    you should define:
    - virtualization_type
    - virtualization_role
    - container (e.g. solaris zones, freebsd jails, linux containers)

    All subclasses MUST define platform.
    """

    def __new__(cls, *arguments, **keyword):
        # Dispatch instantiation to the platform-specific subclass, if any.
        subclass = cls
        for candidate in Virtual.__subclasses__():
            if candidate.platform == platform.system():
                subclass = candidate
        return super(cls, subclass).__new__(subclass, *arguments, **keyword)

    def __init__(self):
        Facts.__init__(self)

    def populate(self):
        # The generic class gathers nothing itself.
        return self.facts
class LinuxVirtual(Virtual):
    """
    This is a Linux-specific subclass of Virtual.  It defines
    - virtualization_type
    - virtualization_role
    """
    platform = 'Linux'

    def __init__(self):
        Virtual.__init__(self)

    def populate(self):
        self.get_virtual_facts()
        return self.facts

    # For more information, check: http://people.redhat.com/~rjones/virt-what/
    def get_virtual_facts(self):
        """Detect the virtualization technology and this host's role in it.

        Checks are ordered roughly from most to least specific; the first
        match wins and the method returns immediately.
        """
        # Xen: /proc/xen exists on both dom0 and domU; dom0 additionally
        # lists "control_d" in its capabilities file.
        if os.path.exists("/proc/xen"):
            self.facts['virtualization_type'] = 'xen'
            self.facts['virtualization_role'] = 'guest'
            try:
                for line in open('/proc/xen/capabilities'):
                    if "control_d" in line:
                        self.facts['virtualization_role'] = 'host'
            except IOError:
                pass
            return

        # OpenVZ / Virtuozzo: /proc/bc only exists on the hardware node.
        if os.path.exists('/proc/vz'):
            self.facts['virtualization_type'] = 'openvz'
            if os.path.exists('/proc/bc'):
                self.facts['virtualization_role'] = 'host'
            else:
                self.facts['virtualization_role'] = 'guest'
            return

        # DMI product name identifies several hypervisors directly.
        product_name = get_file_content('/sys/devices/virtual/dmi/id/product_name')

        if product_name in ['KVM', 'Bochs']:
            self.facts['virtualization_type'] = 'kvm'
            self.facts['virtualization_role'] = 'guest'
            return

        if product_name == 'VMware Virtual Platform':
            self.facts['virtualization_type'] = 'VMware'
            self.facts['virtualization_role'] = 'guest'
            return

        bios_vendor = get_file_content('/sys/devices/virtual/dmi/id/bios_vendor')

        if bios_vendor == 'Xen':
            self.facts['virtualization_type'] = 'xen'
            self.facts['virtualization_role'] = 'guest'
            return

        if bios_vendor == 'innotek GmbH':
            self.facts['virtualization_type'] = 'virtualbox'
            self.facts['virtualization_role'] = 'guest'
            return

        sys_vendor = get_file_content('/sys/devices/virtual/dmi/id/sys_vendor')

        # FIXME: This does also match hyperv
        if sys_vendor == 'Microsoft Corporation':
            self.facts['virtualization_type'] = 'VirtualPC'
            self.facts['virtualization_role'] = 'guest'
            return

        if sys_vendor == 'Parallels Software International Inc.':
            self.facts['virtualization_type'] = 'parallels'
            self.facts['virtualization_role'] = 'guest'
            return

        # Linux-VServer: context id 0 is the host context.
        if os.path.exists('/proc/self/status'):
            for line in open('/proc/self/status').readlines():
                if re.match('^VxID: \d+', line):
                    self.facts['virtualization_type'] = 'linux_vserver'
                    if re.match('^VxID: 0', line):
                        self.facts['virtualization_role'] = 'host'
                    else:
                        self.facts['virtualization_role'] = 'guest'
                    return

        # Guest technologies that leave fingerprints in /proc/cpuinfo.
        if os.path.exists('/proc/cpuinfo'):
            for line in open('/proc/cpuinfo').readlines():
                if re.match('^model name.*QEMU Virtual CPU', line):
                    self.facts['virtualization_type'] = 'kvm'
                elif re.match('^vendor_id.*User Mode Linux', line):
                    self.facts['virtualization_type'] = 'uml'
                elif re.match('^model name.*UML', line):
                    self.facts['virtualization_type'] = 'uml'
                elif re.match('^vendor_id.*PowerVM Lx86', line):
                    self.facts['virtualization_type'] = 'powervm_lx86'
                elif re.match('^vendor_id.*IBM/S390', line):
                    self.facts['virtualization_type'] = 'ibm_systemz'
                else:
                    continue
                self.facts['virtualization_role'] = 'guest'
                return

        # Beware that we can have both kvm and virtualbox running on a single system
        if os.path.exists("/proc/modules"):
            modules = []
            for line in open("/proc/modules").readlines():
                modules.append(line.split(" ", 1)[0])

            if 'kvm' in modules:
                self.facts['virtualization_type'] = 'kvm'
                self.facts['virtualization_role'] = 'host'
                return

            if 'vboxdrv' in modules:
                self.facts['virtualization_type'] = 'virtualbox'
                self.facts['virtualization_role'] = 'host'
                return
2012-09-17 05:58:16 +02:00
|
|
|
class SunOSVirtual(Virtual):
    """
    SunOS-specific subclass of Virtual.  It defines
    - virtualization_type
    - virtualization_role
    - container
    """
    platform = 'SunOS'

    def __init__(self):
        Virtual.__init__(self)

    def populate(self):
        self.get_virtual_facts()
        return self.facts

    def get_virtual_facts(self):
        # Markers in prtdiag output that identify the hypervisor we run on.
        # Scanned in order; if several match on one line, the last wins,
        # matching the original chain of independent `if` tests.
        prtdiag_markers = [
            ('VMware', 'vmware'),
            ('Parallels', 'parallels'),
            ('VirtualBox', 'virtualbox'),
            ('HVM domU', 'xen'),
        ]
        rc, out, err = module.run_command("/usr/sbin/prtdiag")
        for line in out.split('\n'):
            for marker, vtype in prtdiag_markers:
                if marker in line:
                    self.facts['virtualization_type'] = vtype
                    self.facts['virtualization_role'] = 'guest'

        # A zonename other than "global" means we are inside a Solaris zone.
        if os.path.exists("/usr/bin/zonename"):
            rc, out, err = module.run_command("/usr/bin/zonename")
            if out.rstrip() != "global":
                self.facts['container'] = 'zone'

        # Branded zones (i.e. Solaris 8/9 zones) carry a /.SUNWnative dir.
        if os.path.isdir('/.SUNWnative'):
            self.facts['container'] = 'zone'

        # If we are in a zone, check whether the global zone itself is
        # virtualized.  Relies on the "guest tools" (e.g. vmware tools)
        # kernel modules being installed and visible to modinfo.
        if self.facts.get('container') == 'zone':
            rc, out, err = module.run_command("/usr/sbin/modinfo")
            modinfo_markers = [
                ('VMware', 'vmware'),
                ('VirtualBox', 'virtualbox'),
            ]
            for line in out.split('\n'):
                for marker, vtype in modinfo_markers:
                    if marker in line:
                        self.facts['virtualization_type'] = vtype
                        self.facts['virtualization_role'] = 'guest'
|
|
|
|
|
2013-01-02 05:52:27 +01:00
|
|
|
def get_file_content(path, default=None):
    """Return the stripped contents of *path*.

    Returns *default* when the file does not exist, is not readable,
    or strips down to an empty string.
    """
    data = default
    if os.path.exists(path) and os.access(path, os.R_OK):
        # Context manager closes the handle promptly; the original
        # leaked the open file object until garbage collection.
        with open(path) as f:
            data = f.read().strip()
        if len(data) == 0:
            data = default
    return data
|
|
|
|
|
2012-04-18 01:59:23 +02:00
|
|
|
def ansible_facts():
    """Run every top-level fact collector and merge their results
    into a single flat dict."""
    facts = {}
    for collector in (Facts, Hardware, Network, Virtual):
        facts.update(collector().populate())
    return facts
|
2012-04-18 01:59:23 +02:00
|
|
|
|
2012-07-09 09:52:00 +02:00
|
|
|
# ===========================================
|
2012-06-26 19:49:26 +02:00
|
|
|
|
2012-07-28 23:14:23 +02:00
|
|
|
def run_setup(module):
    """Gather ansible facts (plus facter/ohai facts when those tools are
    installed) and return them as a module result dict.

    Keys are filtered by the module's shell-style 'filter' parameter and
    returned under 'ansible_facts'; 'verbose_override' is set so --verbose
    does not dump the full fact list.
    """
    setup_options = {}
    facts = ansible_facts()

    for (k, v) in facts.items():
        setup_options["ansible_%s" % k.replace('-', '_')] = v

    # if facter is installed, and we can use --json because
    # ruby-json is ALSO installed, include facter data in the JSON
    if os.path.exists("/usr/bin/facter"):
        rc, out, err = module.run_command("/usr/bin/facter --json")
        facter = True
        try:
            facter_ds = json.loads(out)
        except ValueError:
            # Not JSON (e.g. ruby-json missing): skip facter facts, as
            # before, but no longer swallow unrelated exceptions with a
            # bare except.
            facter = False
        if facter:
            for (k, v) in facter_ds.items():
                setup_options["facter_%s" % k] = v

    # ditto for ohai, but just top level string keys
    # because it contains a lot of nested stuff we can't use for
    # templating w/o making a nicer key for it (TODO)
    if os.path.exists("/usr/bin/ohai"):
        rc, out, err = module.run_command("/usr/bin/ohai")
        ohai = True
        try:
            ohai_ds = json.loads(out)
        except ValueError:
            ohai = False
        if ohai:
            for (k, v) in ohai_ds.items():
                # isinstance replaces the exact-type comparison
                # `type(v) == str or type(v) == unicode` (same accepted
                # values, idiomatic form; this file is Python 2).
                if isinstance(v, (str, unicode)):
                    k2 = "ohai_%s" % k.replace('-', '_')
                    setup_options[k2] = v

    # Apply the fnmatch-style 'filter' parameter to limit what we return.
    setup_result = {'ansible_facts': {}}

    for (k, v) in setup_options.items():
        if module.params['filter'] == '*' or fnmatch.fnmatch(k, module.params['filter']):
            setup_result['ansible_facts'][k] = v

    # hack to keep --verbose from showing all the setup module results
    setup_result['verbose_override'] = True

    return setup_result
|
|
|
|
|
|
|
|
def main():
    # Module handle is global so the fact-collector classes can reach
    # module.run_command().
    global module
    module = AnsibleModule(
        argument_spec = dict(
            filter=dict(default="*", required=False),
        ),
        supports_check_mode = True,
    )
    module.exit_json(**run_setup(module))
|
|
|
|
|
|
|
|
# this is magic, see lib/ansible/module_common.py
# NOTE(review): the marker line below appears to be textually replaced
# with the shared module boilerplate before the module is executed
# remotely -- do not edit or move it.
#<<INCLUDE_ANSIBLE_MODULE_COMMON>>
main()
|