pep8 fixes for contrib (#24344)

Matt Martz 2017-05-09 16:38:08 -05:00 committed by GitHub
parent c7ae6b9fd5
commit d3249e7875
37 changed files with 326 additions and 380 deletions


@@ -53,64 +53,68 @@ except ImportError:
from ansible.module_utils.urls import open_url
def api_get(link, config):
try:
if link is None:
url = config.get('api','uri') + config.get('api','login_path')
headers = {"Accept": config.get('api','login_type')}
url = config.get('api', 'uri') + config.get('api', 'login_path')
headers = {"Accept": config.get('api', 'login_type')}
else:
url = link['href'] + '?limit=0'
headers = {"Accept": link['type']}
result = open_url(url, headers=headers, url_username=config.get('auth','apiuser').replace('\n', ''),
url_password=config.get('auth','apipass').replace('\n', ''))
result = open_url(url, headers=headers, url_username=config.get('auth', 'apiuser').replace('\n', ''),
url_password=config.get('auth', 'apipass').replace('\n', ''))
return json.loads(result.read())
except:
return None
def save_cache(data, config):
''' saves item to cache '''
dpath = config.get('cache','cache_dir')
dpath = config.get('cache', 'cache_dir')
try:
cache = open('/'.join([dpath,'inventory']), 'w')
cache = open('/'.join([dpath, 'inventory']), 'w')
cache.write(json.dumps(data))
cache.close()
except IOError as e:
pass # not really sure what to do here
pass # not really sure what to do here
def get_cache(cache_item, config):
''' returns cached item '''
dpath = config.get('cache','cache_dir')
dpath = config.get('cache', 'cache_dir')
inv = {}
try:
cache = open('/'.join([dpath,'inventory']), 'r')
cache = open('/'.join([dpath, 'inventory']), 'r')
inv = cache.read()
cache.close()
except IOError as e:
pass # not really sure what to do here
pass # not really sure what to do here
return inv
def cache_available(config):
''' checks if we have a 'fresh' cache available for item requested '''
if config.has_option('cache','cache_dir'):
dpath = config.get('cache','cache_dir')
if config.has_option('cache', 'cache_dir'):
dpath = config.get('cache', 'cache_dir')
try:
existing = os.stat( '/'.join([dpath,'inventory']))
existing = os.stat('/'.join([dpath, 'inventory']))
except:
# cache doesn't exist or isn't accessible
return False
if config.has_option('cache', 'cache_max_age'):
maxage = config.get('cache', 'cache_max_age')
if ((int(time.time()) - int(existing.st_mtime)) <= int(maxage)):
if (int(time.time()) - int(existing.st_mtime)) <= int(maxage):
return True
return False
def generate_inv_from_api(enterprise_entity,config):
def generate_inv_from_api(enterprise_entity, config):
try:
inventory['all'] = {}
inventory['all']['children'] = []
@@ -118,22 +122,22 @@ def generate_inv_from_api(enterprise_entity,config):
inventory['_meta'] = {}
inventory['_meta']['hostvars'] = {}
enterprise = api_get(enterprise_entity,config)
vms_entity = next(link for link in (enterprise['links']) if (link['rel']=='virtualmachines'))
vms = api_get(vms_entity,config)
enterprise = api_get(enterprise_entity, config)
vms_entity = next(link for link in enterprise['links'] if link['rel'] == 'virtualmachines')
vms = api_get(vms_entity, config)
for vmcollection in vms['collection']:
for link in vmcollection['links']:
if link['rel'] == 'virtualappliance':
vm_vapp = link['title'].replace('[','').replace(']','').replace(' ','_')
vm_vapp = link['title'].replace('[', '').replace(']', '').replace(' ', '_')
elif link['rel'] == 'virtualdatacenter':
vm_vdc = link['title'].replace('[','').replace(']','').replace(' ','_')
vm_vdc = link['title'].replace('[', '').replace(']', '').replace(' ', '_')
elif link['rel'] == 'virtualmachinetemplate':
vm_template = link['title'].replace('[','').replace(']','').replace(' ','_')
vm_template = link['title'].replace('[', '').replace(']', '').replace(' ', '_')
# From abiquo.ini: Only adding to inventory VMs with public IP
if config.getboolean('defaults', 'public_ip_only') is True:
for link in vmcollection['links']:
if (link['type']=='application/vnd.abiquo.publicip+json' and link['rel']=='ip'):
if link['type'] == 'application/vnd.abiquo.publicip+json' and link['rel'] == 'ip':
vm_nic = link['title']
break
else:
@@ -166,10 +170,10 @@ def generate_inv_from_api(enterprise_entity,config):
inventory[vm_template]['children'] = []
inventory[vm_template]['hosts'] = []
if config.getboolean('defaults', 'get_metadata') is True:
meta_entity = next(link for link in (vmcollection['links']) if (link['rel']=='metadata'))
meta_entity = next(link for link in vmcollection['links'] if link['rel'] == 'metadata')
try:
metadata = api_get(meta_entity,config)
if (config.getfloat("api","version") >= 3.0):
metadata = api_get(meta_entity, config)
if (config.getfloat("api", "version") >= 3.0):
vm_metadata = metadata['metadata']
else:
vm_metadata = metadata['metadata']['metadata']
@@ -187,7 +191,8 @@ def generate_inv_from_api(enterprise_entity,config):
return inventory
except Exception as e:
# Return empty hosts output
return { 'all': {'hosts': []}, '_meta': { 'hostvars': {} } }
return {'all': {'hosts': []}, '_meta': {'hostvars': {}}}
def get_inventory(enterprise, config):
''' Reads the inventory from cache or Abiquo api '''
@@ -197,11 +202,12 @@ def get_inventory(enterprise, config):
else:
default_group = os.path.basename(sys.argv[0]).rstrip('.py')
# MAKE ABIQUO API CALLS #
inv = generate_inv_from_api(enterprise,config)
inv = generate_inv_from_api(enterprise, config)
save_cache(inv, config)
return json.dumps(inv)
if __name__ == '__main__':
inventory = {}
enterprise = {}
@@ -214,8 +220,8 @@ if __name__ == '__main__':
break
try:
login = api_get(None,config)
enterprise = next(link for link in (login['links']) if (link['rel']=='enterprise'))
login = api_get(None, config)
enterprise = next(link for link in login['links'] if link['rel'] == 'enterprise')
except Exception as e:
enterprise = None
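
Most of the hunks above are pycodestyle E231 fixes: a comma must be followed by a single space. A minimal standalone sketch of the rule (Python 3, with ConfigParser contents invented for illustration rather than taken from the commit):

import configparser

config = configparser.ConfigParser()
config.read_dict({'api': {'uri': 'https://example.invalid', 'login_path': '/login'}})

url = config.get('api','uri') + config.get('api','login_path')    # E231: missing whitespace after ','
url = config.get('api', 'uri') + config.get('api', 'login_path')  # compliant
print(url)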


@@ -82,7 +82,6 @@ class LibcloudInventory(object):
print(data_to_print)
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
@@ -95,7 +94,6 @@ class LibcloudInventory(object):
return False
def read_settings(self):
''' Reads the settings from the libcloud.ini file '''
@@ -108,17 +106,17 @@ class LibcloudInventory(object):
raise ValueError('libcloud.ini file must contain a [driver] section')
if config.has_option('driver', 'provider'):
self.provider = config.get('driver','provider')
self.provider = config.get('driver', 'provider')
else:
raise ValueError('libcloud.ini does not have a provider defined')
if config.has_option('driver', 'key'):
self.key = config.get('driver','key')
self.key = config.get('driver', 'key')
else:
raise ValueError('libcloud.ini does not have a key defined')
if config.has_option('driver', 'secret'):
self.secret = config.get('driver','secret')
self.secret = config.get('driver', 'secret')
else:
raise ValueError('libcloud.ini does not have a secret defined')
@@ -146,7 +144,6 @@ class LibcloudInventory(object):
self.cache_path_index = cache_path + "/ansible-libcloud.index"
self.cache_max_age = config.getint('cache', 'cache_max_age')
def parse_cli_args(self):
'''
Command line argument processing
@@ -154,14 +151,13 @@ class LibcloudInventory(object):
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on libcloud supported providers')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to libcloud supported providers (default: False - use cache files)')
help='Force refresh of cache by making API requests to libcloud supported providers (default: False - use cache files)')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
'''
Do API calls to a location, and save data in cache files
@@ -172,7 +168,6 @@ class LibcloudInventory(object):
self.write_to_cache(self.inventory, self.cache_path_cache)
self.write_to_cache(self.index, self.cache_path_index)
def get_nodes(self):
'''
Gets the list of all nodes
@@ -181,7 +176,6 @@ class LibcloudInventory(object):
for node in self.conn.list_nodes():
self.add_node(node)
def get_node(self, node_id):
'''
Gets details about a specific node
@@ -189,7 +183,6 @@ class LibcloudInventory(object):
return [node for node in self.conn.list_nodes() if node.id == node_id][0]
def add_node(self, node):
'''
Adds a node to the inventory and index, as long as it is
@@ -244,10 +237,10 @@ class LibcloudInventory(object):
# Need to load index from cache
self.load_index_from_cache()
if not self.args.host in self.index:
if self.args.host not in self.index:
# try updating the cache
self.do_api_calls_update_cache()
if not self.args.host in self.index:
if self.args.host not in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)
@@ -283,13 +276,12 @@ class LibcloudInventory(object):
else:
pass
# TODO Product codes if someone finds them useful
#print(key)
#print(type(value))
#print(value)
# print(key)
# print(type(value))
# print(value)
return self.json_format_dict(instance_vars, True)
def push(self, my_dict, key, element):
'''
Pushed an element onto an array that may not have been defined in
@@ -301,7 +293,6 @@ class LibcloudInventory(object):
else:
my_dict[key] = [element]
def get_inventory_from_cache(self):
'''
Reads the inventory from the cache file and returns it as a JSON
@@ -312,7 +303,6 @@ class LibcloudInventory(object):
json_inventory = cache.read()
return json_inventory
def load_index_from_cache(self):
'''
Reads the index from the cache file sets self.index
@@ -322,7 +312,6 @@ class LibcloudInventory(object):
json_index = cache.read()
self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
'''
Writes data in JSON format to a file
@@ -333,7 +322,6 @@ class LibcloudInventory(object):
cache.write(json_data)
cache.close()
def to_safe(self, word):
'''
Converts 'bad' characters in a string to underscores so they can be
@@ -342,7 +330,6 @@ class LibcloudInventory(object):
return re.sub("[^A-Za-z0-9\-]", "_", word)
def json_format_dict(self, data, pretty=False):
'''
Converts a dict to a JSON object and dumps it as a formatted
@@ -354,6 +341,7 @@ class LibcloudInventory(object):
else:
return json.dumps(data)
def main():
LibcloudInventory()
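
The `if not self.args.host in self.index:` rewrites above apply E713. Both spellings behave identically, since `not` binds looser than `in` and the old form already parsed as `not (host in index)`; the change is purely for readability. A sketch with invented data:

index = {'web1': {}, 'db1': {}}
host = 'web2'

if not host in index:   # E713: legal, but reads poorly
    print('miss, old spelling')
if host not in index:   # preferred membership test, same meaning
    print('miss, PEP 8 spelling')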


@@ -278,10 +278,12 @@ Expected output format in Device mode
}
"""
def fail(msg):
sys.stderr.write("%s\n" % msg)
sys.exit(1)
class AosInventory(object):
def __init__(self):
@@ -303,10 +305,10 @@ class AosInventory(object):
# ----------------------------------------------------
# Open session to AOS
# ----------------------------------------------------
aos = Session( server=self.aos_server,
port=self.aos_server_port,
user=self.aos_username,
passwd=self.aos_password)
aos = Session(server=self.aos_server,
port=self.aos_server_port,
user=self.aos_username,
passwd=self.aos_password)
aos.login()
@@ -314,10 +316,10 @@ class AosInventory(object):
self.add_var_to_group('all', 'aos_session', aos.session)
# Add the AOS server itself in the inventory
self.add_host_to_group("all", 'aos' )
self.add_var_to_host("aos", "ansible_ssh_host", self.aos_server )
self.add_var_to_host("aos", "ansible_ssh_pass", self.aos_password )
self.add_var_to_host("aos", "ansible_ssh_user", self.aos_username )
self.add_host_to_group("all", 'aos')
self.add_var_to_host("aos", "ansible_ssh_host", self.aos_server)
self.add_var_to_host("aos", "ansible_ssh_pass", self.aos_password)
self.add_var_to_host("aos", "ansible_ssh_user", self.aos_username)
# ----------------------------------------------------
# Build the inventory
@@ -336,7 +338,7 @@ class AosInventory(object):
for dev_name, dev_id in bp.params['devices'].value.items():
self.add_host_to_group('all', dev_name)
device = aos.Devices.find( uid=dev_id)
device = aos.Devices.find(uid=dev_id)
if 'facts' in device.value.keys():
self.add_device_facts_to_var(dev_name, device)
@@ -344,7 +346,7 @@ class AosInventory(object):
# Define admin State and Status
if 'user_config' in device.value.keys():
if 'admin_state' in device.value['user_config'].keys():
self.add_var_to_host(dev_name, 'admin_state', device.value['user_config']['admin_state'] )
self.add_var_to_host(dev_name, 'admin_state', device.value['user_config']['admin_state'])
self.add_device_status_to_var(dev_name, device)
@@ -496,7 +498,6 @@ class AosInventory(object):
except:
pass
def parse_cli_args(self):
""" Command line argument processing """
@@ -554,7 +555,7 @@ class AosInventory(object):
'ansible_ssh_host',
device.value['facts']['mgmt_ipaddr'])
self.add_var_to_host(device_name,'id', device.id)
self.add_var_to_host(device_name, 'id', device.id)
# self.add_host_to_group('all', device.name)
for key, value in device.value['facts'].items():
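
The hunks above strip stray whitespace just inside parentheses (pycodestyle E201/E202). A standalone sketch; add_host_to_group here is a hypothetical stand-in for the inventory helper used in the script:

def add_host_to_group(group, host):
    # Hypothetical stand-in, just to make the example runnable.
    print(group, host)

add_host_to_group("all", 'aos' )   # E202: whitespace before ')'
add_host_to_group("all", 'aos')    # compliant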


@@ -281,7 +281,7 @@ class AzureRM(object):
def log(self, msg):
if self.debug:
print (msg + u'\n')
print(msg + u'\n')
def fail(self, msg):
raise Exception(msg)
@@ -440,7 +440,7 @@ class AzureInventory(object):
self.include_powerstate = False
self.get_inventory()
print (self._json_format_dict(pretty=self._args.pretty))
print(self._json_format_dict(pretty=self._args.pretty))
sys.exit(0)
def _parse_cli_args(self):
@@ -448,13 +448,13 @@ class AzureInventory(object):
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file for an Azure subscription')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
help='List instances (default: True)')
parser.add_argument('--debug', action='store_true', default=False,
help='Send debug messages to STDOUT')
help='Send debug messages to STDOUT')
parser.add_argument('--host', action='store',
help='Get all information about an instance')
help='Get all information about an instance')
parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty print JSON output(default: False)')
help='Pretty print JSON output(default: False)')
parser.add_argument('--profile', action='store',
help='Azure profile contained in ~/.azure/credentials')
parser.add_argument('--subscription_id', action='store',
@@ -487,7 +487,7 @@ class AzureInventory(object):
virtual_machines = self._compute_client.virtual_machines.list(resource_group)
except Exception as exc:
sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group,
str(exc)))
str(exc)))
if self._args.host or self.tags:
selected_machines = self._selected_machines(virtual_machines)
self._load_machines(selected_machines)
@@ -510,7 +510,7 @@ class AzureInventory(object):
for machine in machines:
id_dict = azure_id_to_dict(machine.id)
#TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets
# TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets
# fixed, we should remove the .lower(). Opened Issue
# #574: https://github.com/Azure/azure-sdk-for-python/issues/574
resource_group = id_dict['resourceGroups'].lower()
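
The argparse re-indentation in this file (and in the docker, ec2, gce, linode and packet sections below) aligns each help= continuation line with the first token after the call's opening parenthesis, which is what pycodestyle's continuation-line checks (E127/E128) expect. A minimal sketch, not taken from any of the scripts:

import argparse

parser = argparse.ArgumentParser(description='illustrative parser')
parser.add_argument('--list', action='store_true', default=True,
                    help='List instances (default: True)')  # aligned under '--list'
args = parser.parse_args([])  # empty argv so the sketch runs anywhere
print(args.list)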


@@ -110,9 +110,9 @@ class CobblerInventory(object):
if self.args.host:
data_to_print += self.get_host_info()
else:
self.inventory['_meta'] = { 'hostvars': {} }
self.inventory['_meta'] = {'hostvars': {}}
for hostname in self.cache:
self.inventory['_meta']['hostvars'][hostname] = {'cobbler': self.cache[hostname] }
self.inventory['_meta']['hostvars'][hostname] = {'cobbler': self.cache[hostname]}
data_to_print += self.json_format_dict(self.inventory, True)
print(data_to_print)
@@ -179,7 +179,7 @@ class CobblerInventory(object):
for host in data:
# Get the FQDN for the host and add it to the right groups
dns_name = host['hostname'] #None
dns_name = host['hostname'] # None
ksmeta = None
interfaces = host['interfaces']
# hostname is often empty for non-static IP hosts
@@ -229,11 +229,11 @@ class CobblerInventory(object):
# Need to load index from cache
self.load_cache_from_cache()
if not self.args.host in self.cache:
if self.args.host not in self.cache:
# try updating the cache
self.update_cache()
if not self.args.host in self.cache:
if self.args.host not in self.cache:
# host might not exist anymore
return self.json_format_dict({}, True)


@@ -85,6 +85,7 @@ from six import iteritems
from ansible.module_utils.urls import open_url
class CollinsDefaults(object):
ASSETS_API_ENDPOINT = '%s/api/assets'
SPECIAL_ATTRIBUTES = set([
@@ -117,7 +118,7 @@ class CollinsInventory(object):
self.parse_cli_args()
logging.basicConfig(format=CollinsDefaults.LOG_FORMAT,
filename=self.log_location)
filename=self.log_location)
self.log = logging.getLogger('CollinsInventory')
def _asset_get_attribute(self, asset, attrib):
@@ -168,14 +169,13 @@ class CollinsInventory(object):
print(data_to_print)
return successful
def find_assets(self, attributes = {}, operation = 'AND'):
def find_assets(self, attributes={}, operation='AND'):
""" Obtains Collins assets matching the provided attributes. """
# Formats asset search query to locate assets matching attributes, using
# the CQL search feature as described here:
# http://tumblr.github.io/collins/recipes.html
attributes_query = [ '='.join(attr_pair)
for attr_pair in iteritems(attributes) ]
attributes_query = ['='.join(attr_pair) for attr_pair in iteritems(attributes)]
query_parameters = {
'details': ['True'],
'operation': [operation],
@@ -190,8 +190,7 @@ class CollinsInventory(object):
# Locates all assets matching the provided query, exhausting pagination.
while True:
if num_retries == self.collins_max_retries:
raise MaxRetriesError("Maximum of %s retries reached; giving up" % \
self.collins_max_retries)
raise MaxRetriesError("Maximum of %s retries reached; giving up" % self.collins_max_retries)
query_parameters['page'] = cur_page
query_url = "%s?%s" % (
(CollinsDefaults.ASSETS_API_ENDPOINT % self.collins_host),
@@ -199,10 +198,10 @@ class CollinsInventory(object):
)
try:
response = open_url(query_url,
timeout=self.collins_timeout_secs,
url_username=self.collins_username,
url_password=self.collins_password,
force_basic_auth=True)
timeout=self.collins_timeout_secs,
url_username=self.collins_username,
url_password=self.collins_password,
force_basic_auth=True)
json_response = json.loads(response.read())
# Adds any assets found to the array of assets.
assets += json_response['data']['Data']
@@ -212,8 +211,7 @@ class CollinsInventory(object):
cur_page += 1
num_retries = 0
except:
self.log.error("Error while communicating with Collins, retrying:\n%s",
traceback.format_exc())
self.log.error("Error while communicating with Collins, retrying:\n%s" % traceback.format_exc())
num_retries += 1
return assets
@@ -232,19 +230,15 @@ class CollinsInventory(object):
def read_settings(self):
""" Reads the settings from the collins.ini file """
config_loc = os.getenv('COLLINS_CONFIG',
os.path.dirname(os.path.realpath(__file__)) + '/collins.ini')
config_loc = os.getenv('COLLINS_CONFIG', os.path.dirname(os.path.realpath(__file__)) + '/collins.ini')
config = ConfigParser.SafeConfigParser()
config.read(os.path.dirname(os.path.realpath(__file__)) + '/collins.ini')
self.collins_host = config.get('collins', 'host')
self.collins_username = os.getenv('COLLINS_USERNAME',
config.get('collins', 'username'))
self.collins_password = os.getenv('COLLINS_PASSWORD',
config.get('collins', 'password'))
self.collins_asset_type = os.getenv('COLLINS_ASSET_TYPE',
config.get('collins', 'asset_type'))
self.collins_username = os.getenv('COLLINS_USERNAME', config.get('collins', 'username'))
self.collins_password = os.getenv('COLLINS_PASSWORD', config.get('collins', 'password'))
self.collins_asset_type = os.getenv('COLLINS_ASSET_TYPE', config.get('collins', 'asset_type'))
self.collins_timeout_secs = config.getint('collins', 'timeout_secs')
self.collins_max_retries = config.getint('collins', 'max_retries')
@@ -268,16 +262,12 @@ class CollinsInventory(object):
parser = argparse.ArgumentParser(
description='Produces an Ansible Inventory file based on Collins')
parser.add_argument('--list',
action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host',
action='store', help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache',
action='store_true', default=False,
help='Force refresh of cache by making API requests to Collins ' \
'(default: False - use cache files)')
parser.add_argument('--pretty',
action='store_true', default=False, help='Pretty print all JSON output')
parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to Collins '
'(default: False - use cache files)')
parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print all JSON output')
self.args = parser.parse_args()
def update_cache(self):
@@ -290,8 +280,7 @@ class CollinsInventory(object):
try:
server_assets = self.find_assets()
except:
self.log.error("Error while locating assets from Collins:\n%s",
traceback.format_exc())
self.log.error("Error while locating assets from Collins:\n%s" % traceback.format_exc())
return False
for asset in server_assets:
@@ -315,8 +304,7 @@ class CollinsInventory(object):
if self.prefer_hostnames and self._asset_has_attribute(asset, 'HOSTNAME'):
asset_identifier = self._asset_get_attribute(asset, 'HOSTNAME')
elif 'ADDRESSES' not in asset:
self.log.warning("No IP addresses found for asset '%s', skipping",
asset)
self.log.warning("No IP addresses found for asset '%s', skipping" % asset)
continue
elif len(asset['ADDRESSES']) < ip_index + 1:
self.log.warning(
@@ -384,11 +372,11 @@ class CollinsInventory(object):
# Need to load index from cache
self.load_cache_from_cache()
if not self.args.host in self.cache:
if self.args.host not in self.cache:
# try updating the cache
self.update_cache()
if not self.args.host in self.cache:
if self.args.host not in self.cache:
# host might not exist anymore
return self.json_format_dict({}, self.args.pretty)
@@ -404,7 +392,7 @@ class CollinsInventory(object):
return True
except:
self.log.error("Error while loading inventory:\n%s",
traceback.format_exc())
traceback.format_exc())
self.inventory = {}
return False
@@ -418,7 +406,7 @@ class CollinsInventory(object):
return True
except:
self.log.error("Error while loading host cache:\n%s",
traceback.format_exc())
traceback.format_exc())
self.cache = {}
return False
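
Two cleanups recur in this section: trailing-backslash continuations are dropped, since an open parenthesis already continues the statement and adjacent string literals concatenate implicitly, and wrapped calls are collapsed when they fit the line limit. A sketch with invented values:

collins_max_retries = 3
help_text = ('Force refresh of cache by making API requests to Collins '
             '(default: False - use cache files)')  # two literals, one string, no backslash
message = "Maximum of %s retries reached; giving up" % collins_max_retries
print(help_text)
print(message)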


@@ -239,7 +239,6 @@ class ConsulInventory(object):
self.current_dc = datacenter
self.load_data_for_datacenter(datacenter)
def load_availability_groups(self, node, datacenter):
'''check the health of each service on a node and add the node to either
an 'available' or 'unavailable' grouping. The suffix for each group can be
@@ -257,8 +256,7 @@ class ConsulInventory(object):
suffix = self.config.get_availability_suffix(
'unavailable_suffix', '_unavailable')
self.add_node_to_map(self.nodes_by_availability,
service_name + suffix, node['Node'])
service_name + suffix, node['Node'])
def load_data_for_datacenter(self, datacenter):
'''processes all the nodes in a particular datacenter'''
@@ -295,7 +293,7 @@ class ConsulInventory(object):
if metadata and metadata['Value']:
try:
metadata = json.loads(metadata['Value'])
for k,v in metadata.items():
for k, v in metadata.items():
self.add_metadata(node_data, k, v)
except:
pass
@@ -337,19 +335,19 @@ class ConsulInventory(object):
tags = service['Tags']
self.add_metadata(node_data, "consul_%s_tags" % service_name, tags)
for tag in service['Tags']:
tagname = service_name +'_'+tag
tagname = service_name + '_' + tag
self.add_node_to_map(self.nodes_by_tag, tagname, node_data['Node'])
def combine_all_results(self):
'''prunes and sorts all groupings for combination into the final map'''
self.inventory = {"_meta": { "hostvars" : self.node_metadata}}
self.inventory = {"_meta": {"hostvars": self.node_metadata}}
groupings = [self.nodes, self.nodes_by_datacenter, self.nodes_by_service,
self.nodes_by_tag, self.nodes_by_kv, self.nodes_by_availability]
self.nodes_by_tag, self.nodes_by_kv, self.nodes_by_availability]
for grouping in groupings:
for name, addresses in grouping.items():
self.inventory[name] = sorted(list(set(addresses)))
def add_metadata(self, node_data, key, value, is_list = False):
def add_metadata(self, node_data, key, value, is_list=False):
''' Pushed an element onto a metadata dict for the node, creating
the dict if it doesn't exist '''
key = self.to_safe(key)
@@ -371,16 +369,15 @@ class ConsulInventory(object):
if domain:
node_name = node_data['Node']
if self.current_dc:
return '%s.node.%s.%s' % ( node_name, self.current_dc, domain)
return '%s.node.%s.%s' % (node_name, self.current_dc, domain)
else:
return '%s.node.%s' % ( node_name, domain)
return '%s.node.%s' % (node_name, domain)
else:
return node_data['Address']
def add_node_to_map(self, map, name, node):
self.push(map, name, self.get_inventory_name(node))
def push(self, my_dict, key, element):
''' Pushed an element onto an array that may not have been defined in the
dict '''
@@ -439,16 +436,15 @@ class ConsulConfig(dict):
def read_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description=
'Produce an Ansible Inventory file based on nodes in a Consul cluster')
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on nodes in a Consul cluster')
parser.add_argument('--list', action='store_true',
help='Get all inventory variables from all nodes in the consul cluster')
help='Get all inventory variables from all nodes in the consul cluster')
parser.add_argument('--host', action='store',
help='Get all inventory variables about a specific consul node, \
requires datacenter set in consul.ini.')
help='Get all inventory variables about a specific consul node, '
'requires datacenter set in consul.ini.')
parser.add_argument('--datacenter', action='store',
help='Get all inventory about a specific consul datacenter')
help='Get all inventory about a specific consul datacenter')
args = parser.parse_args()
arg_names = ['host', 'datacenter']
@@ -462,11 +458,10 @@ class ConsulConfig(dict):
return self.has_config(suffix)
return default
def get_consul_api(self):
'''get an instance of the api based on the supplied configuration'''
host = 'localhost'
port = 8500
port = 8500
token = None
scheme = 'http'
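
The tagname change above is E225: binary operators take one space on each side. Sketch:

service_name = 'web'
tag = 'prod'
tagname = service_name +'_'+tag     # E225: missing whitespace around '+'
tagname = service_name + '_' + tag  # compliant
print(tagname)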


@@ -152,7 +152,6 @@ except ImportError as e:
sys.exit("failed=True msg='`dopy` library required for this script'")
class DigitalOceanInventory(object):
###########################################################################
@@ -163,8 +162,8 @@ class DigitalOceanInventory(object):
''' Main execution path '''
# DigitalOceanInventory data
self.data = {} # All DigitalOcean data
self.inventory = {} # Ansible Inventory
self.data = {} # All DigitalOcean data
self.inventory = {} # Ansible Inventory
# Define defaults
self.cache_path = '.'
@@ -240,7 +239,6 @@ or environment variables (DO_API_TOKEN)\n''')
print(json.dumps(json_data))
# That's all she wrote...
###########################################################################
# Script configuration
###########################################################################
@@ -276,7 +274,6 @@ or environment variables (DO_API_TOKEN)\n''')
if os.getenv("DO_API_KEY"):
self.api_token = os.getenv("DO_API_KEY")
def read_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials')
@@ -285,23 +282,23 @@ or environment variables (DO_API_TOKEN)\n''')
parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet')
parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON')
parser.add_argument('--droplets','-d', action='store_true', help='List Droplets as JSON')
parser.add_argument('--droplets', '-d', action='store_true', help='List Droplets as JSON')
parser.add_argument('--regions', action='store_true', help='List Regions as JSON')
parser.add_argument('--images', action='store_true', help='List Images as JSON')
parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON')
parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON')
parser.add_argument('--domains', action='store_true',help='List Domains as JSON')
parser.add_argument('--domains', action='store_true', help='List Domains as JSON')
parser.add_argument('--pretty','-p', action='store_true', help='Pretty-print results')
parser.add_argument('--pretty', '-p', action='store_true', help='Pretty-print results')
parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)')
parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)')
parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache')
parser.add_argument('--refresh-cache','-r', action='store_true', default=False,
parser.add_argument('--refresh-cache', '-r', action='store_true', default=False,
help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)')
parser.add_argument('--env','-e', action='store_true', help='Display DO_API_TOKEN')
parser.add_argument('--api-token','-a', action='store', help='DigitalOcean API Token')
parser.add_argument('--env', '-e', action='store_true', help='Display DO_API_TOKEN')
parser.add_argument('--api-token', '-a', action='store', help='DigitalOcean API Token')
self.args = parser.parse_args()
@@ -315,7 +312,6 @@ or environment variables (DO_API_TOKEN)\n''')
not self.args.all and not self.args.host):
self.args.list = True
###########################################################################
# Data Management
###########################################################################
@@ -325,10 +321,10 @@ or environment variables (DO_API_TOKEN)\n''')
if self.args.force_cache:
return
# We always get fresh droplets
if self.is_cache_valid() and not (resource=='droplets' or resource is None):
if self.is_cache_valid() and not (resource == 'droplets' or resource is None):
return
if self.args.refresh_cache:
resource=None
resource = None
if resource == 'droplets' or resource is None:
self.data['droplets'] = self.manager.all_active_droplets()
@@ -349,24 +345,23 @@ or environment variables (DO_API_TOKEN)\n''')
self.data['domains'] = self.manager.all_domains()
self.cache_refreshed = True
def build_inventory(self):
'''Build Ansible inventory of droplets'''
self.inventory = {
'all': {
'hosts': [],
'vars': self.group_variables
},
},
'_meta': {'hostvars': {}}
}
}
# add all droplets by id and name
for droplet in self.data['droplets']:
#when using private_networking, the API reports the private one in "ip_address".
# when using private_networking, the API reports the private one in "ip_address".
if 'private_networking' in droplet['features'] and not self.use_private_network:
for net in droplet['networks']['v4']:
if net['type']=='public':
dest=net['ip_address']
if net['type'] == 'public':
dest = net['ip_address']
else:
continue
else:
@@ -384,7 +379,7 @@ or environment variables (DO_API_TOKEN)\n''')
'distro_' + self.to_safe(droplet['image']['distribution']),
'status_' + droplet['status']):
if group not in self.inventory:
self.inventory[group] = { 'hosts': [ ], 'vars': {} }
self.inventory[group] = {'hosts': [], 'vars': {}}
self.inventory[group]['hosts'].append(dest)
# groups that are not always present
@@ -393,20 +388,19 @@ or environment variables (DO_API_TOKEN)\n''')
if group:
image = 'image_' + self.to_safe(group)
if image not in self.inventory:
self.inventory[image] = { 'hosts': [ ], 'vars': {} }
self.inventory[image] = {'hosts': [], 'vars': {}}
self.inventory[image]['hosts'].append(dest)
if droplet['tags']:
for tag in droplet['tags']:
if tag not in self.inventory:
self.inventory[tag] = { 'hosts': [ ], 'vars': {} }
self.inventory[tag] = {'hosts': [], 'vars': {}}
self.inventory[tag]['hosts'].append(dest)
# hostvars
info = self.do_namespace(droplet)
self.inventory['_meta']['hostvars'][dest] = info
def load_droplet_variables_for_host(self):
'''Generate a JSON response to a --host call'''
host = int(self.args.host)
@@ -414,8 +408,6 @@ or environment variables (DO_API_TOKEN)\n''')
info = self.do_namespace(droplet)
return {'droplet': info}
###########################################################################
# Cache Management
###########################################################################
@@ -429,7 +421,6 @@ or environment variables (DO_API_TOKEN)\n''')
return True
return False
def load_from_cache(self):
''' Reads the data from the cache file and assigns it to member variables as Python Objects'''
try:
@@ -443,17 +434,15 @@ or environment variables (DO_API_TOKEN)\n''')
self.data = data['data']
self.inventory = data['inventory']
def write_to_cache(self):
''' Writes data in JSON format to a file '''
data = { 'data': self.data, 'inventory': self.inventory }
data = {'data': self.data, 'inventory': self.inventory}
json_data = json.dumps(data, sort_keys=True, indent=2)
cache = open(self.cache_filename, 'w')
cache.write(json_data)
cache.close()
###########################################################################
# Utilities
###########################################################################
@@ -465,7 +454,6 @@ or environment variables (DO_API_TOKEN)\n''')
else:
my_dict[key] = [element]
def to_safe(self, word):
''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
return re.sub("[^A-Za-z0-9\-\.]", "_", word)
@@ -474,11 +462,10 @@ or environment variables (DO_API_TOKEN)\n''')
''' Returns a copy of the dictionary with all the keys put in a 'do_' namespace '''
info = {}
for k, v in data.items():
info['do_'+k] = v
info['do_' + k] = v
return info
###########################################################################
# Run the script
DigitalOceanInventory()
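
Among the smaller fixes in this file, the self.data / self.inventory lines pick up a second space before their trailing comments: pycodestyle E261 wants at least two spaces between code and an inline '#'. Sketch:

data = {} # All DigitalOcean data   <- E261: only one space before '#'
data = {}  # All DigitalOcean data  <- compliant
print(data)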


@@ -818,25 +818,25 @@ class DockerInventory(object):
parser = argparse.ArgumentParser(
description='Return Ansible inventory for one or more Docker hosts.')
parser.add_argument('--list', action='store_true', default=True,
help='List all containers (default: True)')
help='List all containers (default: True)')
parser.add_argument('--debug', action='store_true', default=False,
help='Send debug messages to STDOUT')
help='Send debug messages to STDOUT')
parser.add_argument('--host', action='store',
help='Only get information for a specific container.')
parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty print JSON output(default: False)')
help='Pretty print JSON output(default: False)')
parser.add_argument('--config-file', action='store', default=default_config,
help="Name of the config file to use. Default is %s" % (default_config))
parser.add_argument('--docker-host', action='store', default=None,
help="The base url or Unix sock path to connect to the docker daemon. Defaults to %s"
% (DEFAULT_DOCKER_HOST))
% (DEFAULT_DOCKER_HOST))
parser.add_argument('--tls-hostname', action='store', default='localhost',
help="Host name to expect in TLS certs. Defaults to 'localhost'")
parser.add_argument('--api-version', action='store', default=None,
help="Docker daemon API version. Defaults to %s" % (DEFAULT_DOCKER_API_VERSION))
parser.add_argument('--timeout', action='store', default=None,
help="Docker connection timeout in seconds. Defaults to %s"
% (DEFAULT_TIMEOUT_SECONDS))
% (DEFAULT_TIMEOUT_SECONDS))
parser.add_argument('--cacert-path', action='store', default=None,
help="Path to the TLS certificate authority pem file.")
parser.add_argument('--cert-path', action='store', default=None,


@@ -156,7 +156,7 @@ except ImportError:
class Ec2Inventory(object):
def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}}
return {"_meta": {"hostvars": {}}}
def __init__(self):
''' Main execution path '''
@@ -205,7 +205,6 @@ class Ec2Inventory(object):
print(data_to_print)
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
@@ -218,7 +217,6 @@ class Ec2Inventory(object):
return False
def read_settings(self):
''' Reads the settings from the ec2.ini file '''
@@ -226,8 +224,9 @@ class Ec2Inventory(object):
scriptbasename = os.path.basename(scriptbasename)
scriptbasename = scriptbasename.replace('.py', '')
defaults = {'ec2': {
'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename)
defaults = {
'ec2': {
'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename)
}
}
@@ -264,7 +263,7 @@ class Ec2Inventory(object):
env_region = os.environ.get('AWS_REGION')
if env_region is None:
env_region = os.environ.get('AWS_DEFAULT_REGION')
self.regions = [ env_region ]
self.regions = [env_region]
# Destination addresses
self.destination_variable = config.get('ec2', 'destination_variable')
@@ -502,16 +501,15 @@ class Ec2Inventory(object):
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
help='Get all the variables about a specific instance')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile',
help='Use boto profile for connections to EC2')
help='Use boto profile for connections to EC2')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
''' Do API calls to each region, and save data in cache files '''
@@ -580,10 +578,10 @@ class Ec2Inventory(object):
filters_dict = {}
for filter_key, filter_values in self.ec2_instance_filters.items():
filters_dict[filter_key] = filter_values
reservations.extend(conn.get_all_instances(filters = filters_dict))
reservations.extend(conn.get_all_instances(filters=filters_dict))
else:
for filter_key, filter_values in self.ec2_instance_filters.items():
reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))
reservations.extend(conn.get_all_instances(filters={filter_key: filter_values}))
else:
reservations = conn.get_all_instances()
@@ -597,7 +595,7 @@ class Ec2Inventory(object):
max_filter_value = 199
tags = []
for i in range(0, len(instance_ids), max_filter_value):
tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i+max_filter_value]}))
tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i + max_filter_value]}))
tags_by_instance_id = defaultdict(dict)
for tag in tags:
@@ -829,7 +827,7 @@ class Ec2Inventory(object):
# Select the best destination address
if self.destination_format and self.destination_format_tags:
dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags ])
dest = self.destination_format.format(*[getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags])
elif instance.subnet_id:
dest = getattr(instance, self.vpc_destination_variable, None)
if dest is None:
@@ -943,7 +941,7 @@ class Ec2Inventory(object):
self.push_group(self.inventory, 'security_groups', key)
except AttributeError:
self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.']))
'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by AWS account ID
if self.group_by_aws_account:
@@ -990,7 +988,6 @@ class Ec2Inventory(object):
self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
def add_rds_instance(self, instance, region):
''' Adds an RDS instance to the inventory and index, as long as it is
addressable '''
@@ -1068,8 +1065,7 @@ class Ec2Inventory(object):
except AttributeError:
self.fail_with_error('\n'.join(['Package boto seems a bit older.',
'Please upgrade boto >= 2.3.0.']))
'Please upgrade boto >= 2.3.0.']))
# Inventory: Group by engine
if self.group_by_rds_engine:
@@ -1338,8 +1334,7 @@ class Ec2Inventory(object):
r53_conn = route53.Route53Connection()
all_zones = r53_conn.get_zones()
route53_zones = [ zone for zone in all_zones if zone.name[:-1]
not in self.route53_excluded_zones ]
route53_zones = [zone for zone in all_zones if zone.name[:-1] not in self.route53_excluded_zones]
self.route53_records = {}
@@ -1356,14 +1351,13 @@ class Ec2Inventory(object):
self.route53_records.setdefault(resource, set())
self.route53_records[resource].add(record_name)
def get_instance_route53_names(self, instance):
''' Check if an instance is referenced in the records we have from
Route53. If it is, return the list of domain names pointing to said
instance. If nothing points to it, return an empty list. '''
instance_attributes = [ 'public_dns_name', 'private_dns_name',
'ip_address', 'private_ip_address' ]
instance_attributes = ['public_dns_name', 'private_dns_name',
'ip_address', 'private_ip_address']
name_list = set()
@@ -1419,13 +1413,13 @@ class Ec2Inventory(object):
elif key == 'ec2_block_device_mapping':
instance_vars["ec2_block_devices"] = {}
for k, v in value.items():
instance_vars["ec2_block_devices"][ os.path.basename(k) ] = v.volume_id
instance_vars["ec2_block_devices"][os.path.basename(k)] = v.volume_id
else:
pass
# TODO Product codes if someone finds them useful
#print key
#print type(value)
#print value
# print key
# print type(value)
# print value
instance_vars[self.to_safe('ec2_account_id')] = self.aws_account_id
@@ -1469,9 +1463,9 @@ class Ec2Inventory(object):
host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
elif node['CurrentRole'] == 'replica':
host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address']
host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port']
host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId']
host_info['ec2_replica_cluster_address_' + str(replica_count)] = node['ReadEndpoint']['Address']
host_info['ec2_replica_cluster_port_' + str(replica_count)] = node['ReadEndpoint']['Port']
host_info['ec2_replica_cluster_id_' + str(replica_count)] = node['CacheClusterId']
replica_count += 1
# Target: Redis Replication Groups
@@ -1523,10 +1517,10 @@ class Ec2Inventory(object):
# Need to load index from cache
self.load_index_from_cache()
if not self.args.host in self.index:
if self.args.host not in self.index:
# try updating the cache
self.do_api_calls_update_cache()
if not self.args.host in self.index:
if self.args.host not in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)
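
The `filters = filters_dict` changes above apply E251: no spaces around '=' when it introduces a keyword argument (the opposite of E225, which requires spaces around '=' in an ordinary assignment). A sketch; get_all_instances is a hypothetical stand-in for the boto call:

def get_all_instances(filters=None):
    # Hypothetical stand-in so the example runs without boto.
    return []

filters_dict = {'tag:env': 'prod'}
reservations = get_all_instances(filters = filters_dict)  # E251: flagged
reservations = get_all_instances(filters=filters_dict)    # compliant
print(reservations)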


@@ -35,7 +35,7 @@ except:
import simplejson as json
# Options
#------------------------------
# ------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true",
@@ -48,6 +48,7 @@ parser.add_option('--host', default=None, dest="host",
# helper functions
#
def get_ssh_config():
configs = []
for box in list_running_boxes():
@@ -55,7 +56,8 @@ def get_ssh_config():
configs.append(config)
return configs
#list all the running instances in the fleet
# list all the running instances in the fleet
def list_running_boxes():
boxes = []
for line in subprocess.check_output(["fleetctl", "list-machines"]).split('\n'):
@@ -65,6 +67,7 @@ def list_running_boxes():
return boxes
def get_a_ssh_config(box_name):
config = {}
config['Host'] = box_name
@@ -72,11 +75,12 @@ def get_a_ssh_config(box_name):
config['ansible_python_interpreter'] = '/opt/bin/python'
return config
# List out servers that vagrant has running
#------------------------------
# ------------------------------
if options.list:
ssh_config = get_ssh_config()
hosts = { 'coreos': []}
hosts = {'coreos': []}
for data in ssh_config:
hosts['coreos'].append(data['Host'])
@@ -85,14 +89,14 @@ if options.list:
sys.exit(1)
# Get out the host details
#------------------------------
# ------------------------------
elif options.host:
result = {}
ssh_config = get_ssh_config()
details = filter(lambda x: (x['Host'] == options.host), ssh_config)
if len(details) > 0:
#pass through the port, in case it's non standard.
# pass through the port, in case it's non standard.
result = details[0]
result
@@ -101,7 +105,7 @@ elif options.host:
# Print out help
#------------------------------
# ------------------------------
else:
parser.print_help()
sys.exit(1)
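
The hunks above are mostly comment hygiene: block comments start with '# ' (E265), including the '#---' divider lines. Sketch:

#list the running machines    <- E265: no space after '#'
# list the running machines   <- compliant
boxes = []
print(boxes)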


@@ -46,6 +46,7 @@ if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
from requests.auth import HTTPBasicAuth
def json_format_dict(data, pretty=False):
"""Converts a dict to a JSON object and dumps it as a formatted string"""
@@ -54,6 +55,7 @@ def json_format_dict(data, pretty=False):
else:
return json.dumps(data)
class ForemanInventory(object):
def __init__(self):
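
The hunks above only insert blank lines: PEP 8 expects two blank lines before a top-level function or class (E302/E305). Sketch:

import json


def json_format_dict(data, pretty=False):
    """Convert a dict to a JSON string."""
    return json.dumps(data, indent=2 if pretty else None)


class ForemanInventory(object):
    # Two blank lines above satisfy E302.
    pass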


@@ -84,8 +84,8 @@ except ImportError:
# library is used.
pass
USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"
USER_AGENT_VERSION="v2"
USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin"
USER_AGENT_VERSION = "v2"
import sys
import os
@@ -296,8 +296,8 @@ class GceInventory(object):
if not secrets_found:
args = [
self.config.get('gce','gce_service_account_email_address'),
self.config.get('gce','gce_service_account_pem_file_path')
self.config.get('gce', 'gce_service_account_email_address'),
self.config.get('gce', 'gce_service_account_pem_file_path')
]
kwargs = {'project': self.config.get('gce', 'gce_project_id'),
'datacenter': self.config.get('gce', 'gce_zone')}
@@ -320,7 +320,7 @@ class GceInventory(object):
'''returns a list of comma separated zones parsed from the GCE_ZONE environment variable.
If provided, this will be used to filter the results of the grouped_instances call'''
import csv
reader = csv.reader([os.environ.get('GCE_ZONE',"")], skipinitialspace=True)
reader = csv.reader([os.environ.get('GCE_ZONE', "")], skipinitialspace=True)
zones = [r for r in reader]
return [z for z in zones[0]]
@@ -330,17 +330,16 @@ class GceInventory(object):
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file based on GCE')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all information about an instance')
help='Get all information about an instance')
parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty format (default: False)')
help='Pretty format (default: False)')
parser.add_argument(
'--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests (default: False - use cache files)')
self.args = parser.parse_args()
def node_to_dict(self, inst):
md = {}
@@ -403,7 +402,7 @@ class GceInventory(object):
all_nodes = []
params, more_results = {'maxResults': 500}, True
while more_results:
self.driver.connection.gce_params=params
self.driver.connection.gce_params = params
all_nodes.extend(self.driver.list_nodes())
more_results = 'pageToken' in params
return all_nodes
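
The USER_AGENT_PRODUCT / USER_AGENT_VERSION lines gain spaces around '=': E225 covers ordinary assignment, the mirror image of the E251 keyword-argument rule shown earlier. Sketch:

USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin"    # E225: missing whitespace around '='
USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin"  # compliant
print(USER_AGENT_PRODUCT)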


@@ -17,7 +17,7 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen,PIPE
from subprocess import Popen, PIPE
import sys
import json


@@ -17,7 +17,7 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen,PIPE
from subprocess import Popen, PIPE
import sys
import json


@@ -113,6 +113,7 @@ load_chube_config()
# Imports for ansible
import ConfigParser
class LinodeInventory(object):
def __init__(self):
"""Main execution path."""
@@ -171,11 +172,11 @@ class LinodeInventory(object):
"""Command line argument processing"""
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Linode')
parser.add_argument('--list', action='store_true', default=True,
help='List nodes (default: True)')
help='List nodes (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific node')
help='Get all the variables about a specific node')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to Linode (default: False - use cache files)')
help='Force refresh of cache by making API requests to Linode (default: False - use cache files)')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
@@ -245,10 +246,10 @@ class LinodeInventory(object):
# Need to load index from cache
self.load_index_from_cache()
if not self.args.host in self.index:
if self.args.host not in self.index:
# try updating the cache
self.do_api_calls_update_cache()
if not self.args.host in self.index:
if self.args.host not in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)


@@ -35,6 +35,7 @@ import sys
import lxc
import json
def build_dict():
"""Returns a dictionary keyed to the defined LXC groups. All
containers, including the ones not in any group, are included in the
@@ -51,7 +52,8 @@ def build_dict():
# Create a dictionary for each group (including the 'all' group
return dict([(g, {'hosts': [k for k, v in containers.items() if g in v],
'vars': {'ansible_connection':'lxc'}}) for g in groups])
'vars': {'ansible_connection': 'lxc'}}) for g in groups])
def main(argv):
"""Returns a JSON dictionary as expected by Ansible"""


@@ -32,6 +32,7 @@ try:
except ImportError:
import ConfigParser as configparser
class MDTInventory(object):
def __init__(self):
@@ -95,7 +96,7 @@ class MDTInventory(object):
'''
Create empty inventory dictionary
'''
return {"_meta" : {"hostvars" : {}}}
return {"_meta": {"hostvars": {}}}
def read_settings(self):
'''
@@ -119,7 +120,6 @@ class MDTInventory(object):
if config.has_option('tower', 'groupname'):
self.mdt_groupname = config.get('tower', 'groupname')
def parse_cli_args(self):
'''
Command line argument processing


@@ -47,6 +47,7 @@ except ImportError:
print("Error: mk_livestatus is needed. Try something like: pip install python-mk-livestatus")
exit(1)
class NagiosLivestatusInventory(object):
def parse_ini_file(self):
@@ -80,19 +81,19 @@ class NagiosLivestatusInventory(object):
# Local unix socket
unix_match = re.match('unix:(.*)', livestatus_uri)
if unix_match is not None:
backend_definition = { 'connection': unix_match.group(1) }
backend_definition = {'connection': unix_match.group(1)}
# Remote tcp connection
tcp_match = re.match('tcp:(.*):([^:]*)', livestatus_uri)
if tcp_match is not None:
backend_definition = { 'connection': (tcp_match.group(1), int(tcp_match.group(2))) }
backend_definition = {'connection': (tcp_match.group(1), int(tcp_match.group(2)))}
# No valid livestatus_uri => exiting
if backend_definition is None:
raise Exception('livestatus_uri field is invalid (%s). Expected: unix:/path/to/live or tcp:host:port' % livestatus_uri)
# Updating backend_definition with current value
backend_definition['name'] = section
backend_definition['name'] = section
backend_definition['fields'] = fields_to_retrieve
for key, value in section_values.items():
backend_definition[key] = value
@@ -101,8 +102,8 @@ class NagiosLivestatusInventory(object):
def parse_options(self):
parser = argparse.ArgumentParser()
parser.add_argument('--host', nargs=1)
parser.add_argument('--list', action='store_true')
parser.add_argument('--host', nargs=1)
parser.add_argument('--list', action='store_true')
parser.add_argument('--pretty', action='store_true')
self.options = parser.parse_args()
@@ -113,7 +114,7 @@ class NagiosLivestatusInventory(object):
if hostname not in self.result[group]['hosts']:
self.result[group]['hosts'].append(hostname)
def query_backend(self, backend, host = None):
def query_backend(self, backend, host=None):
'''Query a livestatus backend'''
hosts_request = Socket(backend['connection']).hosts.columns(backend['host_field'], backend['group_field'])
@@ -127,10 +128,10 @@ class NagiosLivestatusInventory(object):
hosts = hosts_request.call()
for host in hosts:
hostname = host[backend['host_field']]
hostname = host[backend['host_field']]
hostgroups = host[backend['group_field']]
if not isinstance(hostgroups, list):
hostgroups = [ hostgroups ]
hostgroups = [hostgroups]
self.add_host(hostname, 'all')
self.add_host(hostname, backend['name'])
for group in hostgroups:
@@ -166,9 +167,9 @@ class NagiosLivestatusInventory(object):
self.query_backend(backend, self.options.host)
if self.options.host:
print(json.dumps(self.result['_meta']['hostvars'][self.options.host[0]], indent = self.json_indent))
print(json.dumps(self.result['_meta']['hostvars'][self.options.host[0]], indent=self.json_indent))
elif self.options.list:
print(json.dumps(self.result, indent = self.json_indent))
print(json.dumps(self.result, indent=self.json_indent))
else:
print("usage: --list or --host HOSTNAME [--pretty]")
exit(1)


@@ -42,6 +42,7 @@ except ImportError:
print("Error: SQLAlchemy is needed. Try something like: pip install sqlalchemy")
exit(1)
class NagiosNDOInventory(object):
def read_settings(self):


@@ -142,7 +142,7 @@ def get_metadata(server):
key = 'os_' + re.sub(r"[^A-Za-z0-9\-]", "_", key).lower()
# Att value to instance result (exclude manager class)
#TODO: maybe use value.__class__ or similar inside of key_name
# TODO: maybe use value.__class__ or similar inside of key_name
if key != 'os_manager':
results[key] = value
return results


@@ -150,6 +150,7 @@ from click.exceptions import UsageError
from six import string_types
def warning(*objs):
print("WARNING: ", *objs, file=sys.stderr)


@@ -26,30 +26,31 @@
# Groups are determined by the description field of openvz guests
# multiple groups can be separated by commas: webserver,dbserver
from subprocess import Popen,PIPE
from subprocess import Popen, PIPE
import sys
import json
#List openvz hosts
vzhosts = ['vzhost1','vzhost2','vzhost3']
#Add openvz hosts to the inventory and Add "_meta" trick
# List openvz hosts
vzhosts = ['vzhost1', 'vzhost2', 'vzhost3']
# Add openvz hosts to the inventory and Add "_meta" trick
inventory = {'vzhosts': {'hosts': vzhosts}, '_meta': {'hostvars': {}}}
#default group, when description not defined
# default group, when description not defined
default_group = ['vzguest']
def get_guests():
#Loop through vzhosts
for h in vzhosts:
#SSH to vzhost and get the list of guests in json
pipe = Popen(['ssh', h,'vzlist','-j'], stdout=PIPE, universal_newlines=True)
#Load Json info of guests
def get_guests():
# Loop through vzhosts
for h in vzhosts:
# SSH to vzhost and get the list of guests in json
pipe = Popen(['ssh', h, 'vzlist', '-j'], stdout=PIPE, universal_newlines=True)
# Load Json info of guests
json_data = json.loads(pipe.stdout.read())
#loop through guests
# loop through guests
for j in json_data:
#Add information to host vars
# Add information to host vars
inventory['_meta']['hostvars'][j['hostname']] = {
'ctid': j['ctid'],
'veid': j['veid'],
@@ -59,13 +60,13 @@ def get_guests():
'ip': j['ip']
}
#determine group from guest description
# determine group from guest description
if j['description'] is not None:
groups = j['description'].split(",")
else:
groups = default_group
#add guest to inventory
# add guest to inventory
for g in groups:
if g not in inventory:
inventory[g] = {'hosts': []}


@@ -230,7 +230,7 @@ class OVirtInventory(object):
"""
return [x.get_name() for x in inst.get_tags().list()]
def get_machine_type(self,inst):
def get_machine_type(self, inst):
inst_type = inst.get_instance_type()
if inst_type:
return self.driver.instancetypes.get(id=inst_type.id).name


@@ -63,10 +63,11 @@ except ImportError:
ini_section = 'packet'
class PacketInventory(object):
def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}}
return {"_meta": {"hostvars": {}}}
def __init__(self):
''' Main execution path '''
@@ -101,7 +102,6 @@ class PacketInventory(object):
print(data_to_print)
def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid '''
@ -224,14 +224,13 @@ class PacketInventory(object):
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on Packet')
parser.add_argument('--list', action='store_true', default=True,
help='List Devices (default: True)')
help='List Devices (default: True)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific device')
help='Get all the variables about a specific device')
parser.add_argument('--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests to Packet (default: False - use cache files)')
help='Force refresh of cache by making API requests to Packet (default: False - use cache files)')
self.args = parser.parse_args()
def do_api_calls_update_cache(self):
''' Do API calls to each region, and save data in cache files '''
@ -244,7 +243,7 @@ class PacketInventory(object):
def connect(self):
''' create connection to api server'''
token=os.environ.get('PACKET_API_TOKEN')
token = os.environ.get('PACKET_API_TOKEN')
if token is None:
raise Exception("Error reading token from environment (PACKET_API_TOKEN)!")
manager = packet.Manager(auth_token=token)
@ -270,7 +269,7 @@ class PacketInventory(object):
try:
manager = self.connect()
devices = manager.list_devices(project_id=project.id, params = params)
devices = manager.list_devices(project_id=project.id, params=params)
for device in devices:
self.add_device(device, project)
@ -307,7 +306,6 @@ class PacketInventory(object):
if ip_address['public'] is True and ip_address['address_family'] == 4:
dest = ip_address['address']
if not dest:
# Skip devices we cannot address (e.g. private VPC subnet)
return
@ -373,7 +371,6 @@ class PacketInventory(object):
self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_device(device)
def get_host_info_dict_from_device(self, device):
device_vars = {}
for key in vars(device):
@ -403,9 +400,9 @@ class PacketInventory(object):
device_vars[key] = k
else:
pass
#print key
#print type(value)
#print value
# print key
# print type(value)
# print value
return device_vars
@ -416,10 +413,10 @@ class PacketInventory(object):
# Need to load index from cache
self.load_index_from_cache()
if not self.args.host in self.index:
if self.args.host not in self.index:
# try updating the cache
self.do_api_calls_update_cache()
if not self.args.host in self.index:
if self.args.host not in self.index:
# host might not exist anymore
return self.json_format_dict({}, True)
@ -455,7 +452,6 @@ class PacketInventory(object):
json_inventory = cache.read()
return json_inventory
def load_index_from_cache(self):
''' Reads the index from the cache file sets self.index '''
@ -463,7 +459,6 @@ class PacketInventory(object):
json_index = cache.read()
self.index = json.loads(json_index)
def write_to_cache(self, data, filename):
''' Writes data in JSON format to a file '''
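The one non-whitespace change in this file is pycodestyle E713: a membership test reads 'x not in y' rather than 'not x in y'. Both forms evaluate identically; the rewrite only aids readability. A self-contained sketch with placeholder names:

index = {'device-a': 'packet.json'}
host = 'device-b'

if not host in index:    # E713: valid Python, but flagged
    print('miss (old style)')
if host not in index:    # compliant, reads as a single operator
    print('miss (new style)')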

View file

@ -38,10 +38,12 @@ from six import iteritems
from ansible.module_utils.urls import open_url
class ProxmoxNodeList(list):
def get_names(self):
return [node['node'] for node in self]
class ProxmoxVM(dict):
def get_variables(self):
variables = {}
@ -49,6 +51,7 @@ class ProxmoxVM(dict):
variables['proxmox_' + key] = value
return variables
class ProxmoxVMList(list):
def __init__(self, data=[]):
for item in data:
@ -68,14 +71,17 @@ class ProxmoxVMList(list):
return variables
class ProxmoxPoolList(list):
def get_names(self):
return [pool['poolid'] for pool in self]
class ProxmoxPool(dict):
def get_members_name(self):
return [member['name'] for member in self['members'] if member['template'] != 1]
class ProxmoxAPI(object):
def __init__(self, options):
self.options = options
@ -139,6 +145,7 @@ class ProxmoxAPI(object):
def pool(self, poolid):
return ProxmoxPool(self.get('api2/json/pools/{}'.format(poolid)))
def main_list(options):
results = {
'all': {
@ -199,6 +206,7 @@ def main_list(options):
return results
def main_host(options):
proxmox_api = ProxmoxAPI(options)
proxmox_api.auth()
@ -211,6 +219,7 @@ def main_host(options):
return {}
def main():
parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME')
parser.add_option('--list', action="store_true", default=False, dest="list")
@ -235,5 +244,6 @@ def main():
print(json.dumps(data, indent=indent))
if __name__ == '__main__':
main()
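The added lines in this file are blank lines only, satisfying pycodestyle E302/E305: two blank lines surround every top-level class and function. A minimal sketch reusing the class from the hunk above:

class ProxmoxNodeList(list):
    def get_names(self):
        return [node['node'] for node in self]


def main():  # E302/E305: two blank lines around top-level definitions
    return ProxmoxNodeList([{'node': 'pve1'}]).get_names()


if __name__ == '__main__':
    print(main())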

View file

@ -229,18 +229,18 @@ def _list_into_cache(regions):
try:
# Ansible 2.3+
networks = get_config(p, 'rax', 'access_network',
'RAX_ACCESS_NETWORK', 'public', value_type='list')
'RAX_ACCESS_NETWORK', 'public', value_type='list')
except TypeError:
# Ansible 2.2.x and below
networks = get_config(p, 'rax', 'access_network',
'RAX_ACCESS_NETWORK', 'public', islist=True)
'RAX_ACCESS_NETWORK', 'public', islist=True)
try:
try:
ip_versions = map(int, get_config(p, 'rax', 'access_ip_version',
'RAX_ACCESS_IP_VERSION', 4, value_type='list'))
'RAX_ACCESS_IP_VERSION', 4, value_type='list'))
except TypeError:
ip_versions = map(int, get_config(p, 'rax', 'access_ip_version',
'RAX_ACCESS_IP_VERSION', 4, islist=True))
'RAX_ACCESS_IP_VERSION', 4, islist=True))
except:
ip_versions = [4]
else:
@ -434,11 +434,11 @@ def setup():
try:
# Ansible 2.3+
region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all',
value_type='list')
value_type='list')
except TypeError:
# Ansible 2.2.x and below
region_list = get_config(p, 'rax', 'regions', 'RAX_REGION', 'all',
islist=True)
islist=True)
for region in region_list:
region = region.strip().upper()
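The rax.py edits re-indent continuation lines per pycodestyle E127/E128: wrapped arguments align under the first argument after the opening parenthesis. A sketch with a stand-in for get_config (its signature is assumed here for illustration only):

def get_config(parser, section, option, env_var, default, value_type=None):
    """Stand-in for the real config helper; signature assumed."""
    return default


networks = get_config(None, 'rax', 'access_network',
                      'RAX_ACCESS_NETWORK', 'public', value_type='list')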

View file

@ -41,6 +41,7 @@ try:
except:
import simplejson as json
class SoftLayerInventory(object):
common_items = [
'id',
@ -52,7 +53,7 @@ class SoftLayerInventory(object):
'primaryIpAddress',
'datacenter',
'tagReferences.tag.name',
]
]
vs_items = [
'lastKnownPowerState.name',
@ -61,16 +62,16 @@ class SoftLayerInventory(object):
'maxMemory',
'activeTransaction.transactionStatus[friendlyName,name]',
'status',
]
]
hw_items = [
'hardwareStatusId',
'processorPhysicalCoreAmount',
'memoryCapacity',
]
]
def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}}
return {"_meta": {"hostvars": {}}}
def __init__(self):
'''Main path'''
@ -104,9 +105,9 @@ class SoftLayerInventory(object):
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on SoftLayer')
parser.add_argument('--list', action='store_true', default=False,
help='List instances (default: False)')
help='List instances (default: False)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
help='Get all the variables about a specific instance')
self.args = parser.parse_args()
def json_format_dict(self, data, pretty=False):
@ -174,7 +175,7 @@ class SoftLayerInventory(object):
def get_virtual_servers(self):
'''Get all the CCI instances'''
vs = SoftLayer.VSManager(self.client)
mask = "mask[%s]" % ','.join(itertools.chain(self.common_items,self.vs_items))
mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.vs_items))
instances = vs.list_instances(mask=mask)
for instance in instances:
@ -183,7 +184,7 @@ class SoftLayerInventory(object):
def get_physical_servers(self):
'''Get all the hardware instances'''
hw = SoftLayer.HardwareManager(self.client)
mask = "mask[%s]" % ','.join(itertools.chain(self.common_items,self.hw_items))
mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.hw_items))
instances = hw.list_hardware(mask=mask)
for instance in instances:

View file

@ -56,12 +56,12 @@ try:
except:
import simplejson as json
base_dir = os.path.dirname(os.path.realpath(__file__))
base_dir = os.path.dirname(os.path.realpath(__file__))
default_ini_file = os.path.join(base_dir, "spacewalk.ini")
SW_REPORT = '/usr/bin/spacewalk-report'
CACHE_DIR = os.path.join(base_dir, ".spacewalk_reports")
CACHE_AGE = 300 # 5min
CACHE_AGE = 300 # 5min
INI_FILE = os.path.expanduser(os.path.expandvars(os.environ.get("SPACEWALK_INI_PATH", default_ini_file)))
@ -76,7 +76,8 @@ if not os.path.exists(CACHE_DIR):
os.chmod(CACHE_DIR, 0o2775)
# Helper functions
#------------------------------
# ------------------------------
def spacewalk_report(name):
"""Yield a dictionary form of each CSV output produced by the specified
@ -94,7 +95,7 @@ def spacewalk_report(name):
lines = open(cache_filename, 'r').readlines()
keys = lines[0].strip().split(',')
# add 'spacewalk_' prefix to the keys
keys = [ 'spacewalk_' + key for key in keys ]
keys = ['spacewalk_' + key for key in keys]
for line in lines[1:]:
values = line.strip().split(',')
if len(keys) == len(values):
@ -102,7 +103,7 @@ def spacewalk_report(name):
# Options
#------------------------------
# ------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true",
@ -120,20 +121,20 @@ parser.add_option('-p', default=False, dest="prefix_org_name", action="store_tru
# read spacewalk.ini if present
#------------------------------
# ------------------------------
if os.path.exists(INI_FILE):
config = ConfigParser.SafeConfigParser()
config.read(INI_FILE)
if config.has_option('spacewalk' , 'cache_age'):
CACHE_AGE = config.get('spacewalk' , 'cache_age')
if not options.org_number and config.has_option('spacewalk' , 'org_number'):
options.org_number = config.get('spacewalk' , 'org_number')
if not options.prefix_org_name and config.has_option('spacewalk' , 'prefix_org_name'):
options.prefix_org_name = config.getboolean('spacewalk' , 'prefix_org_name')
if config.has_option('spacewalk', 'cache_age'):
CACHE_AGE = config.get('spacewalk', 'cache_age')
if not options.org_number and config.has_option('spacewalk', 'org_number'):
options.org_number = config.get('spacewalk', 'org_number')
if not options.prefix_org_name and config.has_option('spacewalk', 'prefix_org_name'):
options.prefix_org_name = config.getboolean('spacewalk', 'prefix_org_name')
# Generate dictionary for mapping group_id to org_id
#------------------------------
# ------------------------------
org_groups = {}
try:
for group in spacewalk_report('system-groups'):
@ -146,14 +147,14 @@ except (OSError) as e:
# List out the known servers from Spacewalk
#------------------------------
# ------------------------------
if options.list:
# to build the "_meta"-Group with hostvars first create dictionary for later use
host_vars = {}
try:
for item in spacewalk_report('inventory'):
host_vars[ item['spacewalk_profile_name'] ] = dict( ( key, ( value.split(';') if ';' in value else value) ) for key, value in item.items() )
host_vars[item['spacewalk_profile_name']] = dict((key, (value.split(';') if ';' in value else value)) for key, value in item.items())
except (OSError) as e:
print('Problem executing the command "%s inventory": %s' %
@ -161,11 +162,11 @@ if options.list:
sys.exit(2)
groups = {}
meta = { "hostvars" : {} }
meta = {"hostvars": {}}
try:
for system in spacewalk_report('system-groups-systems'):
# first get org_id of system
org_id = org_groups[ system['spacewalk_group_id'] ]
org_id = org_groups[system['spacewalk_group_id']]
# shall we add the org_id as prefix to the group name:
if options.prefix_org_name:
@ -181,16 +182,16 @@ if options.list:
groups[group_name] = set()
groups[group_name].add(system['spacewalk_server_name'])
if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta[ "hostvars" ]:
meta[ "hostvars" ][ system['spacewalk_server_name'] ] = host_vars[ system['spacewalk_server_name'] ]
if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta["hostvars"]:
meta["hostvars"][system['spacewalk_server_name']] = host_vars[system['spacewalk_server_name']]
# or we list all groups and systems:
else:
if group_name not in groups:
groups[group_name] = set()
groups[group_name].add(system['spacewalk_server_name'])
if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta[ "hostvars" ]:
meta[ "hostvars" ][ system['spacewalk_server_name'] ] = host_vars[ system['spacewalk_server_name'] ]
if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta["hostvars"]:
meta["hostvars"][system['spacewalk_server_name']] = host_vars[system['spacewalk_server_name']]
except (OSError) as e:
print('Problem executing the command "%s system-groups-systems": %s' %
@ -201,15 +202,15 @@ if options.list:
for group, systems in iteritems(groups):
print('[%s]\n%s\n' % (group, '\n'.join(systems)))
else:
final = dict( [ (k, list(s)) for k, s in iteritems(groups) ] )
final = dict([(k, list(s)) for k, s in iteritems(groups)])
final["_meta"] = meta
print(json.dumps( final ))
#print(json.dumps(groups))
print(json.dumps(final))
# print(json.dumps(groups))
sys.exit(0)
# Return detailed information concerning the spacewalk server
#------------------------------
# ------------------------------
elif options.host:
host_details = {}
@ -229,7 +230,7 @@ elif options.host:
for k, v in iteritems(host_details):
print(' %s: %s' % (k, '\n '.join(v.split(';'))))
else:
print( json.dumps( dict( ( key, ( value.split(';') if ';' in value else value) ) for key, value in host_details.items() ) ) )
print(json.dumps(dict((key, (value.split(';') if ';' in value else value)) for key, value in host_details.items())))
sys.exit(0)
else:
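Most spacewalk.py changes drop the padding just inside brackets, per pycodestyle E201/E202, plus E203 for whitespace before ':' or ','. A brief sketch with placeholder data:

host_vars = {'web1': {'os': 'el7'}}

meta = { "hostvars" : {} }  # E201/E202: padding inside brackets; E203: space before ':'
meta = {"hostvars": {}}     # compliant

meta["hostvars"]["web1"] = host_vars["web1"]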

View file

@ -58,9 +58,9 @@ SSH_CONF = '~/.ssh/config'
_key = 'ssh_config'
_ssh_to_ansible = [('user', 'ansible_ssh_user'),
('hostname', 'ansible_ssh_host'),
('identityfile', 'ansible_ssh_private_key_file'),
('port', 'ansible_ssh_port')]
('hostname', 'ansible_ssh_host'),
('identityfile', 'ansible_ssh_private_key_file'),
('port', 'ansible_ssh_port')]
def get_config():

View file

@ -64,9 +64,9 @@ CONFIG_FILES = ['/etc/stacki/stacki.yml', '/etc/ansible/stacki.yml']
def stack_auth(params):
endpoint = params['stacki_endpoint']
auth_creds = {'USERNAME': params['stacki_user'],
'PASSWORD': params['stacki_password']}
endpoint = params['stacki_endpoint']
auth_creds = {'USERNAME': params['stacki_user'],
'PASSWORD': params['stacki_password']}
client = requests.session()
client.get(endpoint)
@ -99,17 +99,18 @@ def stack_build_header(auth_creds):
def stack_host_list(endpoint, header, client):
stack_r = client.post(endpoint, data=json.dumps({ "cmd": "list host"}),
stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host"}),
headers=header)
return json.loads(stack_r.json())
def stack_net_list(endpoint, header, client):
stack_r = client.post(endpoint, data=json.dumps({ "cmd": "list host interface"}),
stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host interface"}),
headers=header)
return json.loads(stack_r.json())
def format_meta(hostdata, intfdata, config):
use_hostnames = config['use_hostnames']
meta = dict(all=dict(hosts=list()),
@ -159,7 +160,6 @@ def parse_args():
def main():
args = parse_args()
if StrictVersion(requests.__version__) < StrictVersion("2.4.3"):
sys.exit('requests>=2.4.3 is required for this inventory script')

View file

@ -16,20 +16,21 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import sys
from subprocess import Popen,PIPE
from subprocess import Popen, PIPE
try:
import json
except ImportError:
import simplejson as json
class SetEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
return json.JSONEncoder.default(self, obj)
VBOX="VBoxManage"
VBOX = "VBoxManage"
def get_hosts(host=None):
@ -39,7 +40,7 @@ def get_hosts(host=None):
if host:
p = Popen([VBOX, 'showvminfo', host], stdout=PIPE)
else:
returned = { 'all': set(), '_metadata': {} }
returned = {'all': set(), '_metadata': {}}
p = Popen([VBOX, 'list', '-l', 'vms'], stdout=PIPE)
except:
sys.exit(1)
@ -50,7 +51,7 @@ def get_hosts(host=None):
for line in p.stdout.readlines():
try:
k,v = line.split(':',1)
k, v = line.split(':', 1)
except:
continue
@ -62,11 +63,11 @@ def get_hosts(host=None):
if v not in hostvars:
curname = v
hostvars[curname] = {}
try: # try to get network info
x = Popen([VBOX, 'guestproperty', 'get', curname,"/VirtualBox/GuestInfo/Net/0/V4/IP"],stdout=PIPE)
try: # try to get network info
x = Popen([VBOX, 'guestproperty', 'get', curname, "/VirtualBox/GuestInfo/Net/0/V4/IP"], stdout=PIPE)
ipinfo = x.stdout.read()
if 'Value' in ipinfo:
a,ip = ipinfo.split(':',1)
a, ip = ipinfo.split(':', 1)
hostvars[curname]['ansible_ssh_host'] = ip.strip()
except:
pass
@ -83,11 +84,11 @@ def get_hosts(host=None):
returned['all'].add(curname)
continue
pref_k = 'vbox_' + k.strip().replace(' ','_')
pref_k = 'vbox_' + k.strip().replace(' ', '_')
if k.startswith(' '):
if prevkey not in hostvars[curname]:
hostvars[curname][prevkey] = {}
hostvars[curname][prevkey][pref_k]= v
hostvars[curname][prevkey][pref_k] = v
else:
if v != '':
hostvars[curname][pref_k] = v
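vbox.py combines the comma rule with pycodestyle E225: assignment and comparison operators take one space on each side. An illustrative sketch:

VBOX="VBoxManage"    # E225: missing whitespace around '='
VBOX = "VBoxManage"  # compliant

k,v = 'Name: box1'.split(':',1)    # E231 again, in unpacking and arguments
k, v = 'Name: box1'.split(':', 1)  # compliant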

View file

@ -47,9 +47,11 @@ try:
from logging import NullHandler
except ImportError:
from logging import Handler
class NullHandler(Handler):
def emit(self, record):
pass
logging.getLogger('psphere').addHandler(NullHandler())
logging.getLogger('suds').addHandler(NullHandler())
@ -66,7 +68,7 @@ class VMwareInventory(object):
if os.environ.get('VMWARE_INI', ''):
config_files = [os.environ['VMWARE_INI']]
else:
config_files = [os.path.abspath(sys.argv[0]).rstrip('.py') + '.ini', 'vmware.ini']
config_files = [os.path.abspath(sys.argv[0]).rstrip('.py') + '.ini', 'vmware.ini']
for config_file in config_files:
if os.path.exists(config_file):
self.config.read(config_file)
@ -362,7 +364,7 @@ class VMwareInventory(object):
# Loop through all VMs on physical host.
for vm in host.vm:
if prefix_filter:
if vm.name.startswith( prefix_filter ):
if vm.name.startswith(prefix_filter):
continue
self._add_host(inv, 'all', vm.name)
self._add_host(inv, vm_group, vm.name)

View file

@ -54,6 +54,7 @@ except ImportError as e:
# Imports for ansible
import ConfigParser
class AzureInventory(object):
def __init__(self):
"""Main execution path."""
@ -171,10 +172,9 @@ class AzureInventory(object):
parser.add_argument('--list-images', action='store',
help='Get all available images.')
parser.add_argument('--refresh-cache',
action='store_true', default=False,
help='Force refresh of the cache by making API requests to Azure '
'(default: False - use cache files)',
)
action='store_true', default=False,
help='Force refresh of the cache by making API requests to Azure '
'(default: False - use cache files)')
parser.add_argument('--host', action='store',
help='Get all information about an instance.')
self.args = parser.parse_args()
@ -198,7 +198,7 @@ class AzureInventory(object):
associated with a cloud service.
"""
try:
for deployment in self.sms.get_hosted_service_properties(cloud_service.service_name,embed_detail=True).deployments.deployments:
for deployment in self.sms.get_hosted_service_properties(cloud_service.service_name, embed_detail=True).deployments.deployments:
self.add_deployment(cloud_service, deployment)
except Exception as e:
sys.exit("Error: Failed to access deployments - {0}".format(str(e)))

View file

@ -49,6 +49,7 @@ try:
except:
import simplejson as json
class ZabbixInventory(object):
def read_settings(self):
@ -96,7 +97,7 @@ class ZabbixInventory(object):
for group in host['groups']:
groupname = group['name']
if not groupname in data:
if groupname not in data:
data[groupname] = self.hoststub()
data[groupname]['hosts'].append(hostname)

View file

@ -17,7 +17,7 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen,PIPE
from subprocess import Popen, PIPE
import sys
import json

View file

@ -1,42 +1,6 @@
contrib/inventory/abiquo.py
contrib/inventory/apache-libcloud.py
contrib/inventory/apstra_aos.py
contrib/inventory/azure_rm.py
contrib/inventory/cobbler.py
contrib/inventory/collins.py
contrib/inventory/consul_io.py
contrib/inventory/digital_ocean.py
contrib/inventory/docker.py
contrib/inventory/ec2.py
contrib/inventory/fleet.py
contrib/inventory/foreman.py
contrib/inventory/freeipa.py
contrib/inventory/gce.py
contrib/inventory/jail.py
contrib/inventory/libvirt_lxc.py
contrib/inventory/linode.py
contrib/inventory/lxc_inventory.py
contrib/inventory/mdt_dynamic_inventory.py
contrib/inventory/nagios_livestatus.py
contrib/inventory/nagios_ndo.py
contrib/inventory/nova.py
contrib/inventory/nsot.py
contrib/inventory/openvz.py
contrib/inventory/ovirt.py
contrib/inventory/packet_net.py
contrib/inventory/proxmox.py
contrib/inventory/rackhd.py
contrib/inventory/rax.py
contrib/inventory/softlayer.py
contrib/inventory/spacewalk.py
contrib/inventory/ssh_config.py
contrib/inventory/stacki.py
contrib/inventory/vbox.py
contrib/inventory/vmware.py
contrib/inventory/vmware_inventory.py
contrib/inventory/windows_azure.py
contrib/inventory/zabbix.py
contrib/inventory/zone.py
docs/api/conf.py
docs/bin/dump_keywords.py
docs/bin/plugin_formatter.py