pep8 fixes for contrib (#24344)

Matt Martz, 2017-05-09 16:38:08 -05:00, committed by GitHub
parent c7ae6b9fd5
commit d3249e7875
37 changed files with 326 additions and 380 deletions
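The changes are almost entirely mechanical PEP 8 cleanups applied across the contrib inventory scripts: whitespace after commas and around operators (E231/E225), `not X in Y` rewritten as `X not in Y` (E713), block comments starting with `# ` (E265), long wrapped statements collapsed, and blank lines around definitions normalized (E30x). A minimal before/after sketch of the recurring patterns — the names below are made up for illustration, not taken from the diff:

import configparser

cfg = configparser.ConfigParser()
cfg.read_dict({'api': {'uri': 'https://example.invalid'}})
host, index = 'web01', {}

# Flagged by pep8:
url = cfg.get('api','uri')     # E231 missing whitespace after ','
if not host in index:          # E713 test for membership should be 'not in'
    index[host] = {}
#print(url)                    # E265 block comment should start with '# '

# As rewritten throughout this commit:
url = cfg.get('api', 'uri')
if host not in index:
    index[host] = {}
# print(url)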


@@ -53,25 +53,27 @@ except ImportError:
 from ansible.module_utils.urls import open_url
+
 def api_get(link, config):
     try:
         if link is None:
-            url = config.get('api','uri') + config.get('api','login_path')
-            headers = {"Accept": config.get('api','login_type')}
+            url = config.get('api', 'uri') + config.get('api', 'login_path')
+            headers = {"Accept": config.get('api', 'login_type')}
         else:
             url = link['href'] + '?limit=0'
             headers = {"Accept": link['type']}
-        result = open_url(url, headers=headers, url_username=config.get('auth','apiuser').replace('\n', ''),
-                          url_password=config.get('auth','apipass').replace('\n', ''))
+        result = open_url(url, headers=headers, url_username=config.get('auth', 'apiuser').replace('\n', ''),
+                          url_password=config.get('auth', 'apipass').replace('\n', ''))
         return json.loads(result.read())
     except:
         return None
+
 def save_cache(data, config):
     ''' saves item to cache '''
-    dpath = config.get('cache','cache_dir')
+    dpath = config.get('cache', 'cache_dir')
     try:
-        cache = open('/'.join([dpath,'inventory']), 'w')
+        cache = open('/'.join([dpath, 'inventory']), 'w')
         cache.write(json.dumps(data))
         cache.close()
     except IOError as e:

@@ -80,10 +82,10 @@ def save_cache(data, config):
 def get_cache(cache_item, config):
     ''' returns cached item '''
-    dpath = config.get('cache','cache_dir')
+    dpath = config.get('cache', 'cache_dir')
     inv = {}
     try:
-        cache = open('/'.join([dpath,'inventory']), 'r')
+        cache = open('/'.join([dpath, 'inventory']), 'r')
         inv = cache.read()
         cache.close()
     except IOError as e:

@@ -91,26 +93,28 @@ def get_cache(cache_item, config):
     return inv
+
 def cache_available(config):
     ''' checks if we have a 'fresh' cache available for item requested '''
-    if config.has_option('cache','cache_dir'):
-        dpath = config.get('cache','cache_dir')
+    if config.has_option('cache', 'cache_dir'):
+        dpath = config.get('cache', 'cache_dir')
         try:
-            existing = os.stat( '/'.join([dpath,'inventory']))
+            existing = os.stat('/'.join([dpath, 'inventory']))
         except:
             # cache doesn't exist or isn't accessible
             return False
         if config.has_option('cache', 'cache_max_age'):
             maxage = config.get('cache', 'cache_max_age')
-            if ((int(time.time()) - int(existing.st_mtime)) <= int(maxage)):
+            if (int(time.time()) - int(existing.st_mtime)) <= int(maxage):
                 return True
     return False

-def generate_inv_from_api(enterprise_entity,config):
+
+def generate_inv_from_api(enterprise_entity, config):
     try:
         inventory['all'] = {}
         inventory['all']['children'] = []

@@ -118,22 +122,22 @@ def generate_inv_from_api(enterprise_entity,config):
         inventory['_meta'] = {}
         inventory['_meta']['hostvars'] = {}
-        enterprise = api_get(enterprise_entity,config)
-        vms_entity = next(link for link in (enterprise['links']) if (link['rel']=='virtualmachines'))
-        vms = api_get(vms_entity,config)
+        enterprise = api_get(enterprise_entity, config)
+        vms_entity = next(link for link in enterprise['links'] if link['rel'] == 'virtualmachines')
+        vms = api_get(vms_entity, config)
         for vmcollection in vms['collection']:
             for link in vmcollection['links']:
                 if link['rel'] == 'virtualappliance':
-                    vm_vapp = link['title'].replace('[','').replace(']','').replace(' ','_')
+                    vm_vapp = link['title'].replace('[', '').replace(']', '').replace(' ', '_')
                 elif link['rel'] == 'virtualdatacenter':
-                    vm_vdc = link['title'].replace('[','').replace(']','').replace(' ','_')
+                    vm_vdc = link['title'].replace('[', '').replace(']', '').replace(' ', '_')
                 elif link['rel'] == 'virtualmachinetemplate':
-                    vm_template = link['title'].replace('[','').replace(']','').replace(' ','_')
+                    vm_template = link['title'].replace('[', '').replace(']', '').replace(' ', '_')
             # From abiquo.ini: Only adding to inventory VMs with public IP
             if config.getboolean('defaults', 'public_ip_only') is True:
                 for link in vmcollection['links']:
-                    if (link['type']=='application/vnd.abiquo.publicip+json' and link['rel']=='ip'):
+                    if link['type'] == 'application/vnd.abiquo.publicip+json' and link['rel'] == 'ip':
                         vm_nic = link['title']
                         break
                     else:

@@ -166,10 +170,10 @@ def generate_inv_from_api(enterprise_entity,config):
                 inventory[vm_template]['children'] = []
                 inventory[vm_template]['hosts'] = []
             if config.getboolean('defaults', 'get_metadata') is True:
-                meta_entity = next(link for link in (vmcollection['links']) if (link['rel']=='metadata'))
+                meta_entity = next(link for link in vmcollection['links'] if link['rel'] == 'metadata')
                 try:
-                    metadata = api_get(meta_entity,config)
-                    if (config.getfloat("api","version") >= 3.0):
+                    metadata = api_get(meta_entity, config)
+                    if (config.getfloat("api", "version") >= 3.0):
                         vm_metadata = metadata['metadata']
                     else:
                         vm_metadata = metadata['metadata']['metadata']

@@ -187,7 +191,8 @@ def generate_inv_from_api(enterprise_entity,config):
         return inventory
     except Exception as e:
         # Return empty hosts output
-        return { 'all': {'hosts': []}, '_meta': { 'hostvars': {} } }
+        return {'all': {'hosts': []}, '_meta': {'hostvars': {}}}
+
 def get_inventory(enterprise, config):
     ''' Reads the inventory from cache or Abiquo api '''

@@ -197,11 +202,12 @@ def get_inventory(enterprise, config):
     else:
         default_group = os.path.basename(sys.argv[0]).rstrip('.py')
     # MAKE ABIQUO API CALLS #
-    inv = generate_inv_from_api(enterprise,config)
+    inv = generate_inv_from_api(enterprise, config)
     save_cache(inv, config)
     return json.dumps(inv)
+
 if __name__ == '__main__':
     inventory = {}
     enterprise = {}

@@ -214,8 +220,8 @@ if __name__ == '__main__':
             break
     try:
-        login = api_get(None,config)
-        enterprise = next(link for link in (login['links']) if (link['rel']=='enterprise'))
+        login = api_get(None, config)
+        enterprise = next(link for link in login['links'] if link['rel'] == 'enterprise')
     except Exception as e:
         enterprise = None


@@ -82,7 +82,6 @@ class LibcloudInventory(object):
         print(data_to_print)
-
     def is_cache_valid(self):
         ''' Determines if the cache files have expired, or if it is still valid '''

@@ -95,7 +94,6 @@ class LibcloudInventory(object):
         return False
-
     def read_settings(self):
         ''' Reads the settings from the libcloud.ini file '''

@@ -108,17 +106,17 @@ class LibcloudInventory(object):
             raise ValueError('libcloud.ini file must contain a [driver] section')
         if config.has_option('driver', 'provider'):
-            self.provider = config.get('driver','provider')
+            self.provider = config.get('driver', 'provider')
         else:
             raise ValueError('libcloud.ini does not have a provider defined')
         if config.has_option('driver', 'key'):
-            self.key = config.get('driver','key')
+            self.key = config.get('driver', 'key')
         else:
             raise ValueError('libcloud.ini does not have a key defined')
         if config.has_option('driver', 'secret'):
-            self.secret = config.get('driver','secret')
+            self.secret = config.get('driver', 'secret')
         else:
             raise ValueError('libcloud.ini does not have a secret defined')

@@ -146,7 +144,6 @@ class LibcloudInventory(object):
         self.cache_path_index = cache_path + "/ansible-libcloud.index"
         self.cache_max_age = config.getint('cache', 'cache_max_age')
-
    def parse_cli_args(self):
        '''
        Command line argument processing

@@ -161,7 +158,6 @@ class LibcloudInventory(object):
                            help='Force refresh of cache by making API requests to libcloud supported providers (default: False - use cache files)')
        self.args = parser.parse_args()
-
    def do_api_calls_update_cache(self):
        '''
        Do API calls to a location, and save data in cache files

@@ -172,7 +168,6 @@ class LibcloudInventory(object):
        self.write_to_cache(self.inventory, self.cache_path_cache)
        self.write_to_cache(self.index, self.cache_path_index)
-
    def get_nodes(self):
        '''
        Gets the list of all nodes

@@ -181,7 +176,6 @@ class LibcloudInventory(object):
        for node in self.conn.list_nodes():
            self.add_node(node)
-
    def get_node(self, node_id):
        '''
        Gets details about a specific node

@@ -189,7 +183,6 @@ class LibcloudInventory(object):
        return [node for node in self.conn.list_nodes() if node.id == node_id][0]
-
    def add_node(self, node):
        '''
        Adds a node to the inventory and index, as long as it is

@@ -244,10 +237,10 @@ class LibcloudInventory(object):
            # Need to load index from cache
            self.load_index_from_cache()
-        if not self.args.host in self.index:
+        if self.args.host not in self.index:
            # try updating the cache
            self.do_api_calls_update_cache()
-            if not self.args.host in self.index:
+            if self.args.host not in self.index:
                # host migh not exist anymore
                return self.json_format_dict({}, True)

@@ -283,13 +276,12 @@ class LibcloudInventory(object):
            else:
                pass
            # TODO Product codes if someone finds them useful
-            #print(key)
-            #print(type(value))
-            #print(value)
+            # print(key)
+            # print(type(value))
+            # print(value)
        return self.json_format_dict(instance_vars, True)
-
    def push(self, my_dict, key, element):
        '''
        Pushed an element onto an array that may not have been defined in

@@ -301,7 +293,6 @@ class LibcloudInventory(object):
        else:
            my_dict[key] = [element]
-
    def get_inventory_from_cache(self):
        '''
        Reads the inventory from the cache file and returns it as a JSON

@@ -312,7 +303,6 @@ class LibcloudInventory(object):
        json_inventory = cache.read()
        return json_inventory
-
    def load_index_from_cache(self):
        '''
        Reads the index from the cache file sets self.index

@@ -322,7 +312,6 @@ class LibcloudInventory(object):
        json_index = cache.read()
        self.index = json.loads(json_index)
-
    def write_to_cache(self, data, filename):
        '''
        Writes data in JSON format to a file

@@ -333,7 +322,6 @@ class LibcloudInventory(object):
        cache.write(json_data)
        cache.close()
-
    def to_safe(self, word):
        '''
        Converts 'bad' characters in a string to underscores so they can be

@@ -342,7 +330,6 @@ class LibcloudInventory(object):
        return re.sub("[^A-Za-z0-9\-]", "_", word)
-
    def json_format_dict(self, data, pretty=False):
        '''
        Converts a dict to a JSON object and dumps it as a formatted

@@ -354,6 +341,7 @@ class LibcloudInventory(object):
        else:
            return json.dumps(data)
+
def main():
    LibcloudInventory()


@@ -278,10 +278,12 @@ Expected output format in Device mode
 }
 """
+
 def fail(msg):
     sys.stderr.write("%s\n" % msg)
     sys.exit(1)
+
 class AosInventory(object):
     def __init__(self):

@@ -303,7 +305,7 @@ class AosInventory(object):
         # ----------------------------------------------------
         # Open session to AOS
         # ----------------------------------------------------
-        aos = Session( server=self.aos_server,
+        aos = Session(server=self.aos_server,
                      port=self.aos_server_port,
                      user=self.aos_username,
                      passwd=self.aos_password)

@@ -314,10 +316,10 @@ class AosInventory(object):
         self.add_var_to_group('all', 'aos_session', aos.session)
         # Add the AOS server itself in the inventory
-        self.add_host_to_group("all", 'aos' )
-        self.add_var_to_host("aos", "ansible_ssh_host", self.aos_server )
-        self.add_var_to_host("aos", "ansible_ssh_pass", self.aos_password )
-        self.add_var_to_host("aos", "ansible_ssh_user", self.aos_username )
+        self.add_host_to_group("all", 'aos')
+        self.add_var_to_host("aos", "ansible_ssh_host", self.aos_server)
+        self.add_var_to_host("aos", "ansible_ssh_pass", self.aos_password)
+        self.add_var_to_host("aos", "ansible_ssh_user", self.aos_username)
         # ----------------------------------------------------
         # Build the inventory

@@ -336,7 +338,7 @@ class AosInventory(object):
             for dev_name, dev_id in bp.params['devices'].value.items():
                 self.add_host_to_group('all', dev_name)
-                device = aos.Devices.find( uid=dev_id)
+                device = aos.Devices.find(uid=dev_id)
                 if 'facts' in device.value.keys():
                     self.add_device_facts_to_var(dev_name, device)

@@ -344,7 +346,7 @@ class AosInventory(object):
                 # Define admin State and Status
                 if 'user_config' in device.value.keys():
                     if 'admin_state' in device.value['user_config'].keys():
-                        self.add_var_to_host(dev_name, 'admin_state', device.value['user_config']['admin_state'] )
+                        self.add_var_to_host(dev_name, 'admin_state', device.value['user_config']['admin_state'])
                 self.add_device_status_to_var(dev_name, device)

@@ -496,7 +498,6 @@ class AosInventory(object):
         except:
             pass
-
     def parse_cli_args(self):
         """ Command line argument processing """

@@ -554,7 +555,7 @@ class AosInventory(object):
                              'ansible_ssh_host',
                              device.value['facts']['mgmt_ipaddr'])
-        self.add_var_to_host(device_name,'id', device.id)
+        self.add_var_to_host(device_name, 'id', device.id)
         # self.add_host_to_group('all', device.name)
         for key, value in device.value['facts'].items():


@@ -281,7 +281,7 @@ class AzureRM(object):
     def log(self, msg):
         if self.debug:
-            print (msg + u'\n')
+            print(msg + u'\n')
     def fail(self, msg):
         raise Exception(msg)

@@ -440,7 +440,7 @@ class AzureInventory(object):
             self.include_powerstate = False
         self.get_inventory()
-        print (self._json_format_dict(pretty=self._args.pretty))
+        print(self._json_format_dict(pretty=self._args.pretty))
         sys.exit(0)
     def _parse_cli_args(self):

@@ -510,7 +510,7 @@ class AzureInventory(object):
         for machine in machines:
             id_dict = azure_id_to_dict(machine.id)
-            #TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets
+            # TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets
             # fixed, we should remove the .lower(). Opened Issue
             # #574: https://github.com/Azure/azure-sdk-for-python/issues/574
             resource_group = id_dict['resourceGroups'].lower()


@@ -110,9 +110,9 @@ class CobblerInventory(object):
         if self.args.host:
             data_to_print += self.get_host_info()
         else:
-            self.inventory['_meta'] = { 'hostvars': {} }
+            self.inventory['_meta'] = {'hostvars': {}}
             for hostname in self.cache:
-                self.inventory['_meta']['hostvars'][hostname] = {'cobbler': self.cache[hostname] }
+                self.inventory['_meta']['hostvars'][hostname] = {'cobbler': self.cache[hostname]}
             data_to_print += self.json_format_dict(self.inventory, True)
         print(data_to_print)

@@ -179,7 +179,7 @@ class CobblerInventory(object):
         for host in data:
             # Get the FQDN for the host and add it to the right groups
-            dns_name = host['hostname'] #None
+            dns_name = host['hostname']  # None
             ksmeta = None
             interfaces = host['interfaces']
             # hostname is often empty for non-static IP hosts

@@ -229,11 +229,11 @@ class CobblerInventory(object):
             # Need to load index from cache
             self.load_cache_from_cache()
-        if not self.args.host in self.cache:
+        if self.args.host not in self.cache:
             # try updating the cache
             self.update_cache()
-            if not self.args.host in self.cache:
+            if self.args.host not in self.cache:
                 # host might not exist anymore
                 return self.json_format_dict({}, True)


@@ -85,6 +85,7 @@ from six import iteritems
 from ansible.module_utils.urls import open_url
+
 class CollinsDefaults(object):
     ASSETS_API_ENDPOINT = '%s/api/assets'
     SPECIAL_ATTRIBUTES = set([

@@ -168,14 +169,13 @@ class CollinsInventory(object):
         print(data_to_print)
         return successful
-    def find_assets(self, attributes = {}, operation = 'AND'):
+    def find_assets(self, attributes={}, operation='AND'):
         """ Obtains Collins assets matching the provided attributes. """
         # Formats asset search query to locate assets matching attributes, using
         # the CQL search feature as described here:
         # http://tumblr.github.io/collins/recipes.html
-        attributes_query = [ '='.join(attr_pair)
-                             for attr_pair in iteritems(attributes) ]
+        attributes_query = ['='.join(attr_pair) for attr_pair in iteritems(attributes)]
         query_parameters = {
             'details': ['True'],
             'operation': [operation],

@@ -190,8 +190,7 @@ class CollinsInventory(object):
         # Locates all assets matching the provided query, exhausting pagination.
         while True:
             if num_retries == self.collins_max_retries:
-                raise MaxRetriesError("Maximum of %s retries reached; giving up" % \
-                                      self.collins_max_retries)
+                raise MaxRetriesError("Maximum of %s retries reached; giving up" % self.collins_max_retries)
             query_parameters['page'] = cur_page
             query_url = "%s?%s" % (
                 (CollinsDefaults.ASSETS_API_ENDPOINT % self.collins_host),

@@ -212,8 +211,7 @@ class CollinsInventory(object):
                 cur_page += 1
                 num_retries = 0
             except:
-                self.log.error("Error while communicating with Collins, retrying:\n%s",
-                               traceback.format_exc())
+                self.log.error("Error while communicating with Collins, retrying:\n%s" % traceback.format_exc())
                 num_retries += 1
         return assets

@@ -232,19 +230,15 @@ class CollinsInventory(object):
     def read_settings(self):
         """ Reads the settings from the collins.ini file """
-        config_loc = os.getenv('COLLINS_CONFIG',
-                               os.path.dirname(os.path.realpath(__file__)) + '/collins.ini')
+        config_loc = os.getenv('COLLINS_CONFIG', os.path.dirname(os.path.realpath(__file__)) + '/collins.ini')
         config = ConfigParser.SafeConfigParser()
         config.read(os.path.dirname(os.path.realpath(__file__)) + '/collins.ini')
         self.collins_host = config.get('collins', 'host')
-        self.collins_username = os.getenv('COLLINS_USERNAME',
-                                          config.get('collins', 'username'))
-        self.collins_password = os.getenv('COLLINS_PASSWORD',
-                                          config.get('collins', 'password'))
-        self.collins_asset_type = os.getenv('COLLINS_ASSET_TYPE',
-                                            config.get('collins', 'asset_type'))
+        self.collins_username = os.getenv('COLLINS_USERNAME', config.get('collins', 'username'))
+        self.collins_password = os.getenv('COLLINS_PASSWORD', config.get('collins', 'password'))
+        self.collins_asset_type = os.getenv('COLLINS_ASSET_TYPE', config.get('collins', 'asset_type'))
         self.collins_timeout_secs = config.getint('collins', 'timeout_secs')
         self.collins_max_retries = config.getint('collins', 'max_retries')

@@ -268,16 +262,12 @@ class CollinsInventory(object):
         parser = argparse.ArgumentParser(
             description='Produces an Ansible Inventory file based on Collins')
-        parser.add_argument('--list',
-                            action='store_true', default=True, help='List instances (default: True)')
-        parser.add_argument('--host',
-                            action='store', help='Get all the variables about a specific instance')
-        parser.add_argument('--refresh-cache',
-                            action='store_true', default=False,
-                            help='Force refresh of cache by making API requests to Collins ' \
-                                 '(default: False - use cache files)')
-        parser.add_argument('--pretty',
-                            action='store_true', default=False, help='Pretty print all JSON output')
+        parser.add_argument('--list', action='store_true', default=True, help='List instances (default: True)')
+        parser.add_argument('--host', action='store', help='Get all the variables about a specific instance')
+        parser.add_argument('--refresh-cache', action='store_true', default=False,
+                            help='Force refresh of cache by making API requests to Collins '
+                                 '(default: False - use cache files)')
+        parser.add_argument('--pretty', action='store_true', default=False, help='Pretty print all JSON output')
         self.args = parser.parse_args()
     def update_cache(self):

@@ -290,8 +280,7 @@ class CollinsInventory(object):
         try:
             server_assets = self.find_assets()
         except:
-            self.log.error("Error while locating assets from Collins:\n%s",
-                           traceback.format_exc())
+            self.log.error("Error while locating assets from Collins:\n%s" % traceback.format_exc())
             return False
         for asset in server_assets:

@@ -315,8 +304,7 @@ class CollinsInventory(object):
         if self.prefer_hostnames and self._asset_has_attribute(asset, 'HOSTNAME'):
             asset_identifier = self._asset_get_attribute(asset, 'HOSTNAME')
         elif 'ADDRESSES' not in asset:
-            self.log.warning("No IP addresses found for asset '%s', skipping",
-                             asset)
+            self.log.warning("No IP addresses found for asset '%s', skipping" % asset)
             continue
         elif len(asset['ADDRESSES']) < ip_index + 1:
             self.log.warning(

@@ -384,11 +372,11 @@ class CollinsInventory(object):
             # Need to load index from cache
             self.load_cache_from_cache()
-        if not self.args.host in self.cache:
+        if self.args.host not in self.cache:
             # try updating the cache
             self.update_cache()
-            if not self.args.host in self.cache:
+            if self.args.host not in self.cache:
                 # host might not exist anymore
                 return self.json_format_dict({}, self.args.pretty)
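One behavioral nuance in the Collins hunks above: the logging calls move from the logging module's deferred interpolation (`log.error(fmt, arg)`) to eager %-formatting (`log.error(fmt % arg)`). Both emit the same message; with eager formatting the string is built even when the logger would discard the record. A minimal sketch of the two forms, with a hypothetical logger name:

import logging
import traceback

log = logging.getLogger('collins_inventory')  # hypothetical logger name
logging.basicConfig(level=logging.ERROR)

try:
    raise RuntimeError('simulated Collins API failure')
except Exception:
    # Deferred: the logging module interpolates args only if the record is emitted.
    log.error("Error while communicating with Collins, retrying:\n%s", traceback.format_exc())
    # Eager (the form this commit standardizes on): the string is built up front.
    log.error("Error while communicating with Collins, retrying:\n%s" % traceback.format_exc())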


@@ -239,7 +239,6 @@ class ConsulInventory(object):
             self.current_dc = datacenter
             self.load_data_for_datacenter(datacenter)
-
     def load_availability_groups(self, node, datacenter):
         '''check the health of each service on a node and add add the node to either
         an 'available' or 'unavailable' grouping. The suffix for each group can be

@@ -259,7 +258,6 @@ class ConsulInventory(object):
                 self.add_node_to_map(self.nodes_by_availability,
                                      service_name + suffix, node['Node'])
-
     def load_data_for_datacenter(self, datacenter):
         '''processes all the nodes in a particular datacenter'''
         index, nodes = self.consul_api.catalog.nodes(dc=datacenter)

@@ -295,7 +293,7 @@ class ConsulInventory(object):
         if metadata and metadata['Value']:
             try:
                 metadata = json.loads(metadata['Value'])
-                for k,v in metadata.items():
+                for k, v in metadata.items():
                     self.add_metadata(node_data, k, v)
             except:
                 pass

@@ -337,19 +335,19 @@ class ConsulInventory(object):
             tags = service['Tags']
             self.add_metadata(node_data, "consul_%s_tags" % service_name, tags)
             for tag in service['Tags']:
-                tagname = service_name +'_'+tag
+                tagname = service_name + '_' + tag
                 self.add_node_to_map(self.nodes_by_tag, tagname, node_data['Node'])
     def combine_all_results(self):
         '''prunes and sorts all groupings for combination into the final map'''
-        self.inventory = {"_meta": { "hostvars" : self.node_metadata}}
+        self.inventory = {"_meta": {"hostvars": self.node_metadata}}
         groupings = [self.nodes, self.nodes_by_datacenter, self.nodes_by_service,
                      self.nodes_by_tag, self.nodes_by_kv, self.nodes_by_availability]
         for grouping in groupings:
             for name, addresses in grouping.items():
                 self.inventory[name] = sorted(list(set(addresses)))
-    def add_metadata(self, node_data, key, value, is_list = False):
+    def add_metadata(self, node_data, key, value, is_list=False):
         ''' Pushed an element onto a metadata dict for the node, creating
         the dict if it doesn't exist '''
         key = self.to_safe(key)

@@ -371,16 +369,15 @@ class ConsulInventory(object):
         if domain:
             node_name = node_data['Node']
             if self.current_dc:
-                return '%s.node.%s.%s' % ( node_name, self.current_dc, domain)
+                return '%s.node.%s.%s' % (node_name, self.current_dc, domain)
             else:
-                return '%s.node.%s' % ( node_name, domain)
+                return '%s.node.%s' % (node_name, domain)
         else:
             return node_data['Address']
     def add_node_to_map(self, map, name, node):
         self.push(map, name, self.get_inventory_name(node))
-
     def push(self, my_dict, key, element):
         ''' Pushed an element onto an array that may not have been defined in the
         dict '''

@@ -439,14 +436,13 @@ class ConsulConfig(dict):
     def read_cli_args(self):
         ''' Command line argument processing '''
-        parser = argparse.ArgumentParser(description=
-                                         'Produce an Ansible Inventory file based nodes in a Consul cluster')
+        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based nodes in a Consul cluster')
         parser.add_argument('--list', action='store_true',
                             help='Get all inventory variables from all nodes in the consul cluster')
         parser.add_argument('--host', action='store',
-                            help='Get all inventory variables about a specific consul node, \
-                            requires datacenter set in consul.ini.')
+                            help='Get all inventory variables about a specific consul node,'
+                                 'requires datacenter set in consul.ini.')
         parser.add_argument('--datacenter', action='store',
                             help='Get all inventory about a specific consul datacenter')

@@ -462,7 +458,6 @@ class ConsulConfig(dict):
             return self.has_config(suffix)
         return default
-
     def get_consul_api(self):
         '''get an instance of the api based on the supplied configuration'''
         host = 'localhost'


@@ -152,7 +152,6 @@ except ImportError as e:
     sys.exit("failed=True msg='`dopy` library required for this script'")
-
 class DigitalOceanInventory(object):
     ###########################################################################

@@ -240,7 +239,6 @@ or environment variables (DO_API_TOKEN)\n''')
             print(json.dumps(json_data))
         # That's all she wrote...
-
     ###########################################################################
     # Script configuration
     ###########################################################################

@@ -276,7 +274,6 @@ or environment variables (DO_API_TOKEN)\n''')
         if os.getenv("DO_API_KEY"):
             self.api_token = os.getenv("DO_API_KEY")
-
     def read_cli_args(self):
         ''' Command line argument processing '''
         parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials')

@@ -285,23 +282,23 @@ or environment variables (DO_API_TOKEN)\n''')
         parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet')
         parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON')
-        parser.add_argument('--droplets','-d', action='store_true', help='List Droplets as JSON')
+        parser.add_argument('--droplets', '-d', action='store_true', help='List Droplets as JSON')
         parser.add_argument('--regions', action='store_true', help='List Regions as JSON')
         parser.add_argument('--images', action='store_true', help='List Images as JSON')
         parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON')
         parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON')
-        parser.add_argument('--domains', action='store_true',help='List Domains as JSON')
-        parser.add_argument('--pretty','-p', action='store_true', help='Pretty-print results')
+        parser.add_argument('--domains', action='store_true', help='List Domains as JSON')
+        parser.add_argument('--pretty', '-p', action='store_true', help='Pretty-print results')
         parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)')
         parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)')
         parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache')
-        parser.add_argument('--refresh-cache','-r', action='store_true', default=False,
+        parser.add_argument('--refresh-cache', '-r', action='store_true', default=False,
                             help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)')
-        parser.add_argument('--env','-e', action='store_true', help='Display DO_API_TOKEN')
-        parser.add_argument('--api-token','-a', action='store', help='DigitalOcean API Token')
+        parser.add_argument('--env', '-e', action='store_true', help='Display DO_API_TOKEN')
+        parser.add_argument('--api-token', '-a', action='store', help='DigitalOcean API Token')
         self.args = parser.parse_args()

@@ -315,7 +312,6 @@ or environment variables (DO_API_TOKEN)\n''')
                 not self.args.all and not self.args.host):
             self.args.list = True
-
     ###########################################################################
     # Data Management
     ###########################################################################

@@ -325,10 +321,10 @@ or environment variables (DO_API_TOKEN)\n''')
         if self.args.force_cache:
             return
         # We always get fresh droplets
-        if self.is_cache_valid() and not (resource=='droplets' or resource is None):
+        if self.is_cache_valid() and not (resource == 'droplets' or resource is None):
             return
         if self.args.refresh_cache:
-            resource=None
+            resource = None
         if resource == 'droplets' or resource is None:
             self.data['droplets'] = self.manager.all_active_droplets()

@@ -349,7 +345,6 @@ or environment variables (DO_API_TOKEN)\n''')
             self.data['domains'] = self.manager.all_domains()
             self.cache_refreshed = True
-
     def build_inventory(self):
         '''Build Ansible inventory of droplets'''
         self.inventory = {

@@ -362,11 +357,11 @@ or environment variables (DO_API_TOKEN)\n''')
         # add all droplets by id and name
         for droplet in self.data['droplets']:
-            #when using private_networking, the API reports the private one in "ip_address".
+            # when using private_networking, the API reports the private one in "ip_address".
             if 'private_networking' in droplet['features'] and not self.use_private_network:
                 for net in droplet['networks']['v4']:
-                    if net['type']=='public':
-                        dest=net['ip_address']
+                    if net['type'] == 'public':
+                        dest = net['ip_address']
                     else:
                         continue
             else:

@@ -384,7 +379,7 @@ or environment variables (DO_API_TOKEN)\n''')
                       'distro_' + self.to_safe(droplet['image']['distribution']),
                       'status_' + droplet['status']):
             if group not in self.inventory:
-                self.inventory[group] = { 'hosts': [ ], 'vars': {} }
+                self.inventory[group] = {'hosts': [], 'vars': {}}
             self.inventory[group]['hosts'].append(dest)
         # groups that are not always present

@@ -393,20 +388,19 @@ or environment variables (DO_API_TOKEN)\n''')
             if group:
                 image = 'image_' + self.to_safe(group)
                 if image not in self.inventory:
-                    self.inventory[image] = { 'hosts': [ ], 'vars': {} }
+                    self.inventory[image] = {'hosts': [], 'vars': {}}
                 self.inventory[image]['hosts'].append(dest)
         if droplet['tags']:
             for tag in droplet['tags']:
                 if tag not in self.inventory:
-                    self.inventory[tag] = { 'hosts': [ ], 'vars': {} }
+                    self.inventory[tag] = {'hosts': [], 'vars': {}}
                 self.inventory[tag]['hosts'].append(dest)
         # hostvars
         info = self.do_namespace(droplet)
         self.inventory['_meta']['hostvars'][dest] = info
-
     def load_droplet_variables_for_host(self):
         '''Generate a JSON response to a --host call'''
         host = int(self.args.host)

@@ -414,8 +408,6 @@ or environment variables (DO_API_TOKEN)\n''')
         info = self.do_namespace(droplet)
         return {'droplet': info}
-
-
     ###########################################################################
     # Cache Management
     ###########################################################################

@@ -429,7 +421,6 @@ or environment variables (DO_API_TOKEN)\n''')
                 return True
         return False
-
     def load_from_cache(self):
         ''' Reads the data from the cache file and assigns it to member variables as Python Objects'''
         try:

@@ -443,17 +434,15 @@ or environment variables (DO_API_TOKEN)\n''')
             self.data = data['data']
             self.inventory = data['inventory']
-
     def write_to_cache(self):
         ''' Writes data in JSON format to a file '''
-        data = { 'data': self.data, 'inventory': self.inventory }
+        data = {'data': self.data, 'inventory': self.inventory}
         json_data = json.dumps(data, sort_keys=True, indent=2)
         cache = open(self.cache_filename, 'w')
         cache.write(json_data)
         cache.close()
-
     ###########################################################################
     # Utilities
     ###########################################################################

@@ -465,7 +454,6 @@ or environment variables (DO_API_TOKEN)\n''')
         else:
             my_dict[key] = [element]
-
     def to_safe(self, word):
         ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups '''
         return re.sub("[^A-Za-z0-9\-\.]", "_", word)

@@ -474,11 +462,10 @@ or environment variables (DO_API_TOKEN)\n''')
         ''' Returns a copy of the dictionary with all the keys put in a 'do_' namespace '''
         info = {}
         for k, v in data.items():
-            info['do_'+k] = v
+            info['do_' + k] = v
         return info
-
 ###########################################################################
 # Run the script
 DigitalOceanInventory()


@@ -156,7 +156,7 @@ except ImportError:
 class Ec2Inventory(object):
     def _empty_inventory(self):
-        return {"_meta" : {"hostvars" : {}}}
+        return {"_meta": {"hostvars": {}}}
     def __init__(self):
         ''' Main execution path '''

@@ -205,7 +205,6 @@ class Ec2Inventory(object):
         print(data_to_print)
-
     def is_cache_valid(self):
         ''' Determines if the cache files have expired, or if it is still valid '''

@@ -218,7 +217,6 @@ class Ec2Inventory(object):
         return False
-
     def read_settings(self):
         ''' Reads the settings from the ec2.ini file '''

@@ -226,7 +224,8 @@ class Ec2Inventory(object):
         scriptbasename = os.path.basename(scriptbasename)
         scriptbasename = scriptbasename.replace('.py', '')
-        defaults = {'ec2': {
+        defaults = {
+            'ec2': {
                 'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename)
             }
         }

@@ -264,7 +263,7 @@ class Ec2Inventory(object):
             env_region = os.environ.get('AWS_REGION')
             if env_region is None:
                 env_region = os.environ.get('AWS_DEFAULT_REGION')
-            self.regions = [ env_region ]
+            self.regions = [env_region]
         # Destination addresses
         self.destination_variable = config.get('ec2', 'destination_variable')

@@ -511,7 +510,6 @@ class Ec2Inventory(object):
                             help='Use boto profile for connections to EC2')
         self.args = parser.parse_args()
-
     def do_api_calls_update_cache(self):
         ''' Do API calls to each region, and save data in cache files '''

@@ -580,10 +578,10 @@ class Ec2Inventory(object):
                    filters_dict = {}
                    for filter_key, filter_values in self.ec2_instance_filters.items():
                        filters_dict[filter_key] = filter_values
-                    reservations.extend(conn.get_all_instances(filters = filters_dict))
+                    reservations.extend(conn.get_all_instances(filters=filters_dict))
                else:
                    for filter_key, filter_values in self.ec2_instance_filters.items():
-                        reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))
+                        reservations.extend(conn.get_all_instances(filters={filter_key: filter_values}))
            else:
                reservations = conn.get_all_instances()

@@ -597,7 +595,7 @@ class Ec2Inventory(object):
             max_filter_value = 199
             tags = []
             for i in range(0, len(instance_ids), max_filter_value):
-                tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i+max_filter_value]}))
+                tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i + max_filter_value]}))
             tags_by_instance_id = defaultdict(dict)
             for tag in tags:

@@ -829,7 +827,7 @@ class Ec2Inventory(object):
         # Select the best destination address
         if self.destination_format and self.destination_format_tags:
-            dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags ])
+            dest = self.destination_format.format(*[getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags])
         elif instance.subnet_id:
             dest = getattr(instance, self.vpc_destination_variable, None)
             if dest is None:

@@ -990,7 +988,6 @@ class Ec2Inventory(object):
         self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance)
         self.inventory["_meta"]["hostvars"][hostname]['ansible_ssh_host'] = dest
-
     def add_rds_instance(self, instance, region):
         ''' Adds an RDS instance to the inventory and index, as long as it is
         addressable '''

@@ -1070,7 +1067,6 @@ class Ec2Inventory(object):
                 self.fail_with_error('\n'.join(['Package boto seems a bit older.',
                                                 'Please upgrade boto >= 2.3.0.']))
-
             # Inventory: Group by engine
             if self.group_by_rds_engine:
                 self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname)

@@ -1338,8 +1334,7 @@ class Ec2Inventory(object):
         r53_conn = route53.Route53Connection()
         all_zones = r53_conn.get_zones()
-        route53_zones = [ zone for zone in all_zones if zone.name[:-1]
-                          not in self.route53_excluded_zones ]
+        route53_zones = [zone for zone in all_zones if zone.name[:-1] not in self.route53_excluded_zones]
         self.route53_records = {}

@@ -1356,14 +1351,13 @@ class Ec2Inventory(object):
                 self.route53_records.setdefault(resource, set())
                 self.route53_records[resource].add(record_name)
-
     def get_instance_route53_names(self, instance):
         ''' Check if an instance is referenced in the records we have from
         Route53. If it is, return the list of domain names pointing to said
         instance. If nothing points to it, return an empty list. '''
-        instance_attributes = [ 'public_dns_name', 'private_dns_name',
-                                'ip_address', 'private_ip_address' ]
+        instance_attributes = ['public_dns_name', 'private_dns_name',
+                               'ip_address', 'private_ip_address']
         name_list = set()

@@ -1419,13 +1413,13 @@ class Ec2Inventory(object):
             elif key == 'ec2_block_device_mapping':
                 instance_vars["ec2_block_devices"] = {}
                 for k, v in value.items():
-                    instance_vars["ec2_block_devices"][ os.path.basename(k) ] = v.volume_id
+                    instance_vars["ec2_block_devices"][os.path.basename(k)] = v.volume_id
             else:
                 pass
                 # TODO Product codes if someone finds them useful
-                #print key
-                #print type(value)
-                #print value
+                # print key
+                # print type(value)
+                # print value
         instance_vars[self.to_safe('ec2_account_id')] = self.aws_account_id

@@ -1469,9 +1463,9 @@ class Ec2Inventory(object):
                 host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
                 host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
             elif node['CurrentRole'] == 'replica':
-                host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address']
-                host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port']
-                host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId']
+                host_info['ec2_replica_cluster_address_' + str(replica_count)] = node['ReadEndpoint']['Address']
+                host_info['ec2_replica_cluster_port_' + str(replica_count)] = node['ReadEndpoint']['Port']
+                host_info['ec2_replica_cluster_id_' + str(replica_count)] = node['CacheClusterId']
                 replica_count += 1
         # Target: Redis Replication Groups

@@ -1523,10 +1517,10 @@ class Ec2Inventory(object):
             # Need to load index from cache
             self.load_index_from_cache()
-        if not self.args.host in self.index:
+        if self.args.host not in self.index:
             # try updating the cache
             self.do_api_calls_update_cache()
-            if not self.args.host in self.index:
+            if self.args.host not in self.index:
                 # host might not exist anymore
                 return self.json_format_dict({}, True)


@ -35,7 +35,7 @@ except:
import simplejson as json import simplejson as json
# Options # Options
#------------------------------ # ------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>") parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true", parser.add_option('--list', default=False, dest="list", action="store_true",
@ -48,6 +48,7 @@ parser.add_option('--host', default=None, dest="host",
# helper functions # helper functions
# #
def get_ssh_config(): def get_ssh_config():
configs = [] configs = []
for box in list_running_boxes(): for box in list_running_boxes():
@ -55,7 +56,8 @@ def get_ssh_config():
configs.append(config) configs.append(config)
return configs return configs
#list all the running instances in the fleet
# list all the running instances in the fleet
def list_running_boxes(): def list_running_boxes():
boxes = [] boxes = []
for line in subprocess.check_output(["fleetctl", "list-machines"]).split('\n'): for line in subprocess.check_output(["fleetctl", "list-machines"]).split('\n'):
@ -65,6 +67,7 @@ def list_running_boxes():
return boxes return boxes
def get_a_ssh_config(box_name): def get_a_ssh_config(box_name):
config = {} config = {}
config['Host'] = box_name config['Host'] = box_name
@ -72,11 +75,12 @@ def get_a_ssh_config(box_name):
config['ansible_python_interpreter'] = '/opt/bin/python' config['ansible_python_interpreter'] = '/opt/bin/python'
return config return config
# List out servers that vagrant has running # List out servers that vagrant has running
#------------------------------ # ------------------------------
if options.list: if options.list:
ssh_config = get_ssh_config() ssh_config = get_ssh_config()
hosts = { 'coreos': []} hosts = {'coreos': []}
for data in ssh_config: for data in ssh_config:
hosts['coreos'].append(data['Host']) hosts['coreos'].append(data['Host'])
@ -85,14 +89,14 @@ if options.list:
sys.exit(1) sys.exit(1)
# Get out the host details # Get out the host details
#------------------------------ # ------------------------------
elif options.host: elif options.host:
result = {} result = {}
ssh_config = get_ssh_config() ssh_config = get_ssh_config()
details = filter(lambda x: (x['Host'] == options.host), ssh_config) details = filter(lambda x: (x['Host'] == options.host), ssh_config)
if len(details) > 0: if len(details) > 0:
#pass through the port, in case it's non standard. # pass through the port, in case it's non standard.
result = details[0] result = details[0]
result result
@ -101,7 +105,7 @@ elif options.host:
# Print out help # Print out help
#------------------------------ # ------------------------------
else: else:
parser.print_help() parser.print_help()
sys.exit(1) sys.exit(1)
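For orientation, a minimal sketch of the JSON this fleet script emits in its two modes, assuming two hypothetical machines core-01 and core-02:

import json

hosts = {'coreos': ['core-01', 'core-02']}    # shape printed for --list
host_vars = {'Host': 'core-01',               # shape printed for --host,
             'ansible_python_interpreter': '/opt/bin/python'}  # per get_a_ssh_config
print(json.dumps(hosts))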

View file

@ -46,6 +46,7 @@ if LooseVersion(requests.__version__) < LooseVersion('1.1.0'):
from requests.auth import HTTPBasicAuth from requests.auth import HTTPBasicAuth
def json_format_dict(data, pretty=False): def json_format_dict(data, pretty=False):
"""Converts a dict to a JSON object and dumps it as a formatted string""" """Converts a dict to a JSON object and dumps it as a formatted string"""
@ -54,6 +55,7 @@ def json_format_dict(data, pretty=False):
else: else:
return json.dumps(data) return json.dumps(data)
class ForemanInventory(object): class ForemanInventory(object):
def __init__(self): def __init__(self):

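Several hunks here and below add nothing but vertical whitespace: pycodestyle E302 wants two blank lines before a top-level def or class (E305 after one). A condensed sketch of the spacing, with the bodies elided:

import json


def json_format_dict(data, pretty=False):
    """Converts a dict to a JSON object and dumps it as a formatted string"""
    return json.dumps(data)  # pretty branch elided in this sketch


class ForemanInventory(object):
    pass  # real class body unchanged by this commit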
View file

@ -84,8 +84,8 @@ except ImportError:
# library is used. # library is used.
pass pass
USER_AGENT_PRODUCT="Ansible-gce_inventory_plugin" USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin"
USER_AGENT_VERSION="v2" USER_AGENT_VERSION = "v2"
import sys import sys
import os import os
@ -296,8 +296,8 @@ class GceInventory(object):
if not secrets_found: if not secrets_found:
args = [ args = [
self.config.get('gce','gce_service_account_email_address'), self.config.get('gce', 'gce_service_account_email_address'),
self.config.get('gce','gce_service_account_pem_file_path') self.config.get('gce', 'gce_service_account_pem_file_path')
] ]
kwargs = {'project': self.config.get('gce', 'gce_project_id'), kwargs = {'project': self.config.get('gce', 'gce_project_id'),
'datacenter': self.config.get('gce', 'gce_zone')} 'datacenter': self.config.get('gce', 'gce_zone')}
@ -320,7 +320,7 @@ class GceInventory(object):
'''returns a list of comma separated zones parsed from the GCE_ZONE environment variable. '''returns a list of comma separated zones parsed from the GCE_ZONE environment variable.
If provided, this will be used to filter the results of the grouped_instances call''' If provided, this will be used to filter the results of the grouped_instances call'''
import csv import csv
reader = csv.reader([os.environ.get('GCE_ZONE',"")], skipinitialspace=True) reader = csv.reader([os.environ.get('GCE_ZONE', "")], skipinitialspace=True)
zones = [r for r in reader] zones = [r for r in reader]
return [z for z in zones[0]] return [z for z in zones[0]]
@ -340,7 +340,6 @@ class GceInventory(object):
help='Force refresh of cache by making API requests (default: False - use cache files)') help='Force refresh of cache by making API requests (default: False - use cache files)')
self.args = parser.parse_args() self.args = parser.parse_args()
def node_to_dict(self, inst): def node_to_dict(self, inst):
md = {} md = {}
@ -403,7 +402,7 @@ class GceInventory(object):
all_nodes = [] all_nodes = []
params, more_results = {'maxResults': 500}, True params, more_results = {'maxResults': 500}, True
while more_results: while more_results:
self.driver.connection.gce_params=params self.driver.connection.gce_params = params
all_nodes.extend(self.driver.list_nodes()) all_nodes.extend(self.driver.list_nodes())
more_results = 'pageToken' in params more_results = 'pageToken' in params
return all_nodes return all_nodes

View file

@ -17,7 +17,7 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>. # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen,PIPE from subprocess import Popen, PIPE
import sys import sys
import json import json

View file

@ -17,7 +17,7 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>. # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen,PIPE from subprocess import Popen, PIPE
import sys import sys
import json import json

View file

@ -113,6 +113,7 @@ load_chube_config()
# Imports for ansible # Imports for ansible
import ConfigParser import ConfigParser
class LinodeInventory(object): class LinodeInventory(object):
def __init__(self): def __init__(self):
"""Main execution path.""" """Main execution path."""
@ -245,10 +246,10 @@ class LinodeInventory(object):
# Need to load index from cache # Need to load index from cache
self.load_index_from_cache() self.load_index_from_cache()
if not self.args.host in self.index: if self.args.host not in self.index:
# try updating the cache # try updating the cache
self.do_api_calls_update_cache() self.do_api_calls_update_cache()
if not self.args.host in self.index: if self.args.host not in self.index:
# host might not exist anymore # host might not exist anymore
return self.json_format_dict({}, True) return self.json_format_dict({}, True)

View file

@ -35,6 +35,7 @@ import sys
import lxc import lxc
import json import json
def build_dict(): def build_dict():
"""Returns a dictionary keyed to the defined LXC groups. All """Returns a dictionary keyed to the defined LXC groups. All
containers, including the ones not in any group, are included in the containers, including the ones not in any group, are included in the
@ -51,7 +52,8 @@ def build_dict():
# Create a dictionary for each group (including the 'all' group # Create a dictionary for each group (including the 'all' group
return dict([(g, {'hosts': [k for k, v in containers.items() if g in v], return dict([(g, {'hosts': [k for k, v in containers.items() if g in v],
'vars': {'ansible_connection':'lxc'}}) for g in groups]) 'vars': {'ansible_connection': 'lxc'}}) for g in groups])
def main(argv): def main(argv):
"""Returns a JSON dictionary as expected by Ansible""" """Returns a JSON dictionary as expected by Ansible"""

View file

@ -32,6 +32,7 @@ try:
except ImportError: except ImportError:
import ConfigParser as configparser import ConfigParser as configparser
class MDTInventory(object): class MDTInventory(object):
def __init__(self): def __init__(self):
@ -95,7 +96,7 @@ class MDTInventory(object):
''' '''
Create empty inventory dictionary Create empty inventory dictionary
''' '''
return {"_meta" : {"hostvars" : {}}} return {"_meta": {"hostvars": {}}}
def read_settings(self): def read_settings(self):
''' '''
@ -119,7 +120,6 @@ class MDTInventory(object):
if config.has_option('tower', 'groupname'): if config.has_option('tower', 'groupname'):
self.mdt_groupname = config.get('tower', 'groupname') self.mdt_groupname = config.get('tower', 'groupname')
def parse_cli_args(self): def parse_cli_args(self):
''' '''
Command line argument processing Command line argument processing

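The {"_meta": {"hostvars": {}}} stub reformatted above is the standard dynamic-inventory shortcut: when --list output already carries _meta.hostvars, Ansible does not call the script again with --host for every host. Condensed to a plain function for illustration:

def empty_inventory():
    # Shipping "_meta" (even empty) in --list output spares Ansible a
    # separate --host invocation per discovered host.
    return {"_meta": {"hostvars": {}}}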
View file

@ -47,6 +47,7 @@ except ImportError:
print("Error: mk_livestatus is needed. Try something like: pip install python-mk-livestatus") print("Error: mk_livestatus is needed. Try something like: pip install python-mk-livestatus")
exit(1) exit(1)
class NagiosLivestatusInventory(object): class NagiosLivestatusInventory(object):
def parse_ini_file(self): def parse_ini_file(self):
@ -80,12 +81,12 @@ class NagiosLivestatusInventory(object):
# Local unix socket # Local unix socket
unix_match = re.match('unix:(.*)', livestatus_uri) unix_match = re.match('unix:(.*)', livestatus_uri)
if unix_match is not None: if unix_match is not None:
backend_definition = { 'connection': unix_match.group(1) } backend_definition = {'connection': unix_match.group(1)}
# Remote tcp connection # Remote tcp connection
tcp_match = re.match('tcp:(.*):([^:]*)', livestatus_uri) tcp_match = re.match('tcp:(.*):([^:]*)', livestatus_uri)
if tcp_match is not None: if tcp_match is not None:
backend_definition = { 'connection': (tcp_match.group(1), int(tcp_match.group(2))) } backend_definition = {'connection': (tcp_match.group(1), int(tcp_match.group(2)))}
# No valid livestatus_uri => exiting # No valid livestatus_uri => exiting
if backend_definition is None: if backend_definition is None:
@ -113,7 +114,7 @@ class NagiosLivestatusInventory(object):
if hostname not in self.result[group]['hosts']: if hostname not in self.result[group]['hosts']:
self.result[group]['hosts'].append(hostname) self.result[group]['hosts'].append(hostname)
def query_backend(self, backend, host = None): def query_backend(self, backend, host=None):
'''Query a livestatus backend''' '''Query a livestatus backend'''
hosts_request = Socket(backend['connection']).hosts.columns(backend['host_field'], backend['group_field']) hosts_request = Socket(backend['connection']).hosts.columns(backend['host_field'], backend['group_field'])
@ -130,7 +131,7 @@ class NagiosLivestatusInventory(object):
hostname = host[backend['host_field']] hostname = host[backend['host_field']]
hostgroups = host[backend['group_field']] hostgroups = host[backend['group_field']]
if not isinstance(hostgroups, list): if not isinstance(hostgroups, list):
hostgroups = [ hostgroups ] hostgroups = [hostgroups]
self.add_host(hostname, 'all') self.add_host(hostname, 'all')
self.add_host(hostname, backend['name']) self.add_host(hostname, backend['name'])
for group in hostgroups: for group in hostgroups:
@ -166,9 +167,9 @@ class NagiosLivestatusInventory(object):
self.query_backend(backend, self.options.host) self.query_backend(backend, self.options.host)
if self.options.host: if self.options.host:
print(json.dumps(self.result['_meta']['hostvars'][self.options.host[0]], indent = self.json_indent)) print(json.dumps(self.result['_meta']['hostvars'][self.options.host[0]], indent=self.json_indent))
elif self.options.list: elif self.options.list:
print(json.dumps(self.result, indent = self.json_indent)) print(json.dumps(self.result, indent=self.json_indent))
else: else:
print("usage: --list or --host HOSTNAME [--pretty]") print("usage: --list or --host HOSTNAME [--pretty]")
exit(1) exit(1)
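The indent= and host= changes above are pycodestyle E251: no spaces around '=' when it binds a keyword argument or a default value. For example, with a hypothetical inventory payload:

import json

result = {'all': {'hosts': []}}
print(json.dumps(result, indent=4))      # E251-clean
# print(json.dumps(result, indent = 4))  # old style, flagged as E251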

View file

@ -42,6 +42,7 @@ except ImportError:
print("Error: SQLAlchemy is needed. Try something like: pip install sqlalchemy") print("Error: SQLAlchemy is needed. Try something like: pip install sqlalchemy")
exit(1) exit(1)
class NagiosNDOInventory(object): class NagiosNDOInventory(object):
def read_settings(self): def read_settings(self):

View file

@ -142,7 +142,7 @@ def get_metadata(server):
key = 'os_' + re.sub(r"[^A-Za-z0-9\-]", "_", key).lower() key = 'os_' + re.sub(r"[^A-Za-z0-9\-]", "_", key).lower()
# Att value to instance result (exclude manager class) # Att value to instance result (exclude manager class)
#TODO: maybe use value.__class__ or similar inside of key_name # TODO: maybe use value.__class__ or similar inside of key_name
if key != 'os_manager': if key != 'os_manager':
results[key] = value results[key] = value
return results return results

View file

@ -150,6 +150,7 @@ from click.exceptions import UsageError
from six import string_types from six import string_types
def warning(*objs): def warning(*objs):
print("WARNING: ", *objs, file=sys.stderr) print("WARNING: ", *objs, file=sys.stderr)

View file

@ -26,30 +26,31 @@
# Groups are determined by the description field of openvz guests # Groups are determined by the description field of openvz guests
# multiple groups can be separated by commas: webserver,dbserver # multiple groups can be separated by commas: webserver,dbserver
from subprocess import Popen,PIPE from subprocess import Popen, PIPE
import sys import sys
import json import json
#List openvz hosts # List openvz hosts
vzhosts = ['vzhost1','vzhost2','vzhost3'] vzhosts = ['vzhost1', 'vzhost2', 'vzhost3']
#Add openvz hosts to the inventory and Add "_meta" trick # Add openvz hosts to the inventory and Add "_meta" trick
inventory = {'vzhosts': {'hosts': vzhosts}, '_meta': {'hostvars': {}}} inventory = {'vzhosts': {'hosts': vzhosts}, '_meta': {'hostvars': {}}}
#default group, when description not defined # default group, when description not defined
default_group = ['vzguest'] default_group = ['vzguest']
def get_guests(): def get_guests():
#Loop through vzhosts # Loop through vzhosts
for h in vzhosts: for h in vzhosts:
#SSH to vzhost and get the list of guests in json # SSH to vzhost and get the list of guests in json
pipe = Popen(['ssh', h,'vzlist','-j'], stdout=PIPE, universal_newlines=True) pipe = Popen(['ssh', h, 'vzlist', '-j'], stdout=PIPE, universal_newlines=True)
#Load Json info of guests # Load Json info of guests
json_data = json.loads(pipe.stdout.read()) json_data = json.loads(pipe.stdout.read())
#loop through guests # loop through guests
for j in json_data: for j in json_data:
#Add information to host vars # Add information to host vars
inventory['_meta']['hostvars'][j['hostname']] = { inventory['_meta']['hostvars'][j['hostname']] = {
'ctid': j['ctid'], 'ctid': j['ctid'],
'veid': j['veid'], 'veid': j['veid'],
@ -59,13 +60,13 @@ def get_guests():
'ip': j['ip'] 'ip': j['ip']
} }
#determine group from guest description # determine group from guest description
if j['description'] is not None: if j['description'] is not None:
groups = j['description'].split(",") groups = j['description'].split(",")
else: else:
groups = default_group groups = default_group
#add guest to inventory # add guest to inventory
for g in groups: for g in groups:
if g not in inventory: if g not in inventory:
inventory[g] = {'hosts': []} inventory[g] = {'hosts': []}

View file

@ -230,7 +230,7 @@ class OVirtInventory(object):
""" """
return [x.get_name() for x in inst.get_tags().list()] return [x.get_name() for x in inst.get_tags().list()]
def get_machine_type(self,inst): def get_machine_type(self, inst):
inst_type = inst.get_instance_type() inst_type = inst.get_instance_type()
if inst_type: if inst_type:
return self.driver.instancetypes.get(id=inst_type.id).name return self.driver.instancetypes.get(id=inst_type.id).name

View file

@ -63,10 +63,11 @@ except ImportError:
ini_section = 'packet' ini_section = 'packet'
class PacketInventory(object): class PacketInventory(object):
def _empty_inventory(self): def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}} return {"_meta": {"hostvars": {}}}
def __init__(self): def __init__(self):
''' Main execution path ''' ''' Main execution path '''
@ -101,7 +102,6 @@ class PacketInventory(object):
print(data_to_print) print(data_to_print)
def is_cache_valid(self): def is_cache_valid(self):
''' Determines if the cache files have expired, or if it is still valid ''' ''' Determines if the cache files have expired, or if it is still valid '''
@ -231,7 +231,6 @@ class PacketInventory(object):
help='Force refresh of cache by making API requests to Packet (default: False - use cache files)') help='Force refresh of cache by making API requests to Packet (default: False - use cache files)')
self.args = parser.parse_args() self.args = parser.parse_args()
def do_api_calls_update_cache(self): def do_api_calls_update_cache(self):
''' Do API calls to each region, and save data in cache files ''' ''' Do API calls to each region, and save data in cache files '''
@ -244,7 +243,7 @@ class PacketInventory(object):
def connect(self): def connect(self):
''' create connection to api server''' ''' create connection to api server'''
token=os.environ.get('PACKET_API_TOKEN') token = os.environ.get('PACKET_API_TOKEN')
if token is None: if token is None:
raise Exception("Error reading token from environment (PACKET_API_TOKEN)!") raise Exception("Error reading token from environment (PACKET_API_TOKEN)!")
manager = packet.Manager(auth_token=token) manager = packet.Manager(auth_token=token)
@ -270,7 +269,7 @@ class PacketInventory(object):
try: try:
manager = self.connect() manager = self.connect()
devices = manager.list_devices(project_id=project.id, params = params) devices = manager.list_devices(project_id=project.id, params=params)
for device in devices: for device in devices:
self.add_device(device, project) self.add_device(device, project)
@ -307,7 +306,6 @@ class PacketInventory(object):
if ip_address['public'] is True and ip_address['address_family'] == 4: if ip_address['public'] is True and ip_address['address_family'] == 4:
dest = ip_address['address'] dest = ip_address['address']
if not dest: if not dest:
# Skip devices we cannot address (e.g. private VPC subnet) # Skip devices we cannot address (e.g. private VPC subnet)
return return
@ -373,7 +371,6 @@ class PacketInventory(object):
self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_device(device) self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_device(device)
def get_host_info_dict_from_device(self, device): def get_host_info_dict_from_device(self, device):
device_vars = {} device_vars = {}
for key in vars(device): for key in vars(device):
@ -403,9 +400,9 @@ class PacketInventory(object):
device_vars[key] = k device_vars[key] = k
else: else:
pass pass
#print key # print key
#print type(value) # print type(value)
#print value # print value
return device_vars return device_vars
@ -416,10 +413,10 @@ class PacketInventory(object):
# Need to load index from cache # Need to load index from cache
self.load_index_from_cache() self.load_index_from_cache()
if not self.args.host in self.index: if self.args.host not in self.index:
# try updating the cache # try updating the cache
self.do_api_calls_update_cache() self.do_api_calls_update_cache()
if not self.args.host in self.index: if self.args.host not in self.index:
# host might not exist anymore # host might not exist anymore
return self.json_format_dict({}, True) return self.json_format_dict({}, True)
@ -455,7 +452,6 @@ class PacketInventory(object):
json_inventory = cache.read() json_inventory = cache.read()
return json_inventory return json_inventory
def load_index_from_cache(self): def load_index_from_cache(self):
''' Reads the index from the cache file sets self.index ''' ''' Reads the index from the cache file sets self.index '''
@ -463,7 +459,6 @@ class PacketInventory(object):
json_index = cache.read() json_index = cache.read()
self.index = json.loads(json_index) self.index = json.loads(json_index)
def write_to_cache(self, data, filename): def write_to_cache(self, data, filename):
''' Writes data in JSON format to a file ''' ''' Writes data in JSON format to a file '''

View file

@ -38,10 +38,12 @@ from six import iteritems
from ansible.module_utils.urls import open_url from ansible.module_utils.urls import open_url
class ProxmoxNodeList(list): class ProxmoxNodeList(list):
def get_names(self): def get_names(self):
return [node['node'] for node in self] return [node['node'] for node in self]
class ProxmoxVM(dict): class ProxmoxVM(dict):
def get_variables(self): def get_variables(self):
variables = {} variables = {}
@ -49,6 +51,7 @@ class ProxmoxVM(dict):
variables['proxmox_' + key] = value variables['proxmox_' + key] = value
return variables return variables
class ProxmoxVMList(list): class ProxmoxVMList(list):
def __init__(self, data=[]): def __init__(self, data=[]):
for item in data: for item in data:
@ -68,14 +71,17 @@ class ProxmoxVMList(list):
return variables return variables
class ProxmoxPoolList(list): class ProxmoxPoolList(list):
def get_names(self): def get_names(self):
return [pool['poolid'] for pool in self] return [pool['poolid'] for pool in self]
class ProxmoxPool(dict): class ProxmoxPool(dict):
def get_members_name(self): def get_members_name(self):
return [member['name'] for member in self['members'] if member['template'] != 1] return [member['name'] for member in self['members'] if member['template'] != 1]
class ProxmoxAPI(object): class ProxmoxAPI(object):
def __init__(self, options): def __init__(self, options):
self.options = options self.options = options
@ -139,6 +145,7 @@ class ProxmoxAPI(object):
def pool(self, poolid): def pool(self, poolid):
return ProxmoxPool(self.get('api2/json/pools/{}'.format(poolid))) return ProxmoxPool(self.get('api2/json/pools/{}'.format(poolid)))
def main_list(options): def main_list(options):
results = { results = {
'all': { 'all': {
@ -199,6 +206,7 @@ def main_list(options):
return results return results
def main_host(options): def main_host(options):
proxmox_api = ProxmoxAPI(options) proxmox_api = ProxmoxAPI(options)
proxmox_api.auth() proxmox_api.auth()
@ -211,6 +219,7 @@ def main_host(options):
return {} return {}
def main(): def main():
parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME') parser = OptionParser(usage='%prog [options] --list | --host HOSTNAME')
parser.add_option('--list', action="store_true", default=False, dest="list") parser.add_option('--list', action="store_true", default=False, dest="list")
@ -235,5 +244,6 @@ def main():
print(json.dumps(data, indent=indent)) print(json.dumps(data, indent=indent))
if __name__ == '__main__': if __name__ == '__main__':
main() main()

View file

@ -41,6 +41,7 @@ try:
except: except:
import simplejson as json import simplejson as json
class SoftLayerInventory(object): class SoftLayerInventory(object):
common_items = [ common_items = [
'id', 'id',
@ -70,7 +71,7 @@ class SoftLayerInventory(object):
] ]
def _empty_inventory(self): def _empty_inventory(self):
return {"_meta" : {"hostvars" : {}}} return {"_meta": {"hostvars": {}}}
def __init__(self): def __init__(self):
'''Main path''' '''Main path'''
@ -174,7 +175,7 @@ class SoftLayerInventory(object):
def get_virtual_servers(self): def get_virtual_servers(self):
'''Get all the CCI instances''' '''Get all the CCI instances'''
vs = SoftLayer.VSManager(self.client) vs = SoftLayer.VSManager(self.client)
mask = "mask[%s]" % ','.join(itertools.chain(self.common_items,self.vs_items)) mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.vs_items))
instances = vs.list_instances(mask=mask) instances = vs.list_instances(mask=mask)
for instance in instances: for instance in instances:
@ -183,7 +184,7 @@ class SoftLayerInventory(object):
def get_physical_servers(self): def get_physical_servers(self):
'''Get all the hardware instances''' '''Get all the hardware instances'''
hw = SoftLayer.HardwareManager(self.client) hw = SoftLayer.HardwareManager(self.client)
mask = "mask[%s]" % ','.join(itertools.chain(self.common_items,self.hw_items)) mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.hw_items))
instances = hw.list_hardware(mask=mask) instances = hw.list_hardware(mask=mask)
for instance in instances: for instance in instances:

View file

@ -76,7 +76,8 @@ if not os.path.exists(CACHE_DIR):
os.chmod(CACHE_DIR, 0o2775) os.chmod(CACHE_DIR, 0o2775)
# Helper functions # Helper functions
#------------------------------ # ------------------------------
def spacewalk_report(name): def spacewalk_report(name):
"""Yield a dictionary form of each CSV output produced by the specified """Yield a dictionary form of each CSV output produced by the specified
@ -94,7 +95,7 @@ def spacewalk_report(name):
lines = open(cache_filename, 'r').readlines() lines = open(cache_filename, 'r').readlines()
keys = lines[0].strip().split(',') keys = lines[0].strip().split(',')
# add 'spacewalk_' prefix to the keys # add 'spacewalk_' prefix to the keys
keys = [ 'spacewalk_' + key for key in keys ] keys = ['spacewalk_' + key for key in keys]
for line in lines[1:]: for line in lines[1:]:
values = line.strip().split(',') values = line.strip().split(',')
if len(keys) == len(values): if len(keys) == len(values):
@ -102,7 +103,7 @@ def spacewalk_report(name):
# Options # Options
#------------------------------ # ------------------------------
parser = OptionParser(usage="%prog [options] --list | --host <machine>") parser = OptionParser(usage="%prog [options] --list | --host <machine>")
parser.add_option('--list', default=False, dest="list", action="store_true", parser.add_option('--list', default=False, dest="list", action="store_true",
@ -120,20 +121,20 @@ parser.add_option('-p', default=False, dest="prefix_org_name", action="store_tru
# read spacewalk.ini if present # read spacewalk.ini if present
#------------------------------ # ------------------------------
if os.path.exists(INI_FILE): if os.path.exists(INI_FILE):
config = ConfigParser.SafeConfigParser() config = ConfigParser.SafeConfigParser()
config.read(INI_FILE) config.read(INI_FILE)
if config.has_option('spacewalk' , 'cache_age'): if config.has_option('spacewalk', 'cache_age'):
CACHE_AGE = config.get('spacewalk' , 'cache_age') CACHE_AGE = config.get('spacewalk', 'cache_age')
if not options.org_number and config.has_option('spacewalk' , 'org_number'): if not options.org_number and config.has_option('spacewalk', 'org_number'):
options.org_number = config.get('spacewalk' , 'org_number') options.org_number = config.get('spacewalk', 'org_number')
if not options.prefix_org_name and config.has_option('spacewalk' , 'prefix_org_name'): if not options.prefix_org_name and config.has_option('spacewalk', 'prefix_org_name'):
options.prefix_org_name = config.getboolean('spacewalk' , 'prefix_org_name') options.prefix_org_name = config.getboolean('spacewalk', 'prefix_org_name')
# Generate dictionary for mapping group_id to org_id # Generate dictionary for mapping group_id to org_id
#------------------------------ # ------------------------------
org_groups = {} org_groups = {}
try: try:
for group in spacewalk_report('system-groups'): for group in spacewalk_report('system-groups'):
@ -146,14 +147,14 @@ except (OSError) as e:
# List out the known server from Spacewalk # List out the known server from Spacewalk
#------------------------------ # ------------------------------
if options.list: if options.list:
# to build the "_meta"-Group with hostvars first create dictionary for later use # to build the "_meta"-Group with hostvars first create dictionary for later use
host_vars = {} host_vars = {}
try: try:
for item in spacewalk_report('inventory'): for item in spacewalk_report('inventory'):
host_vars[ item['spacewalk_profile_name'] ] = dict( ( key, ( value.split(';') if ';' in value else value) ) for key, value in item.items() ) host_vars[item['spacewalk_profile_name']] = dict((key, (value.split(';') if ';' in value else value)) for key, value in item.items())
except (OSError) as e: except (OSError) as e:
print('Problem executing the command "%s inventory": %s' % print('Problem executing the command "%s inventory": %s' %
@ -161,11 +162,11 @@ if options.list:
sys.exit(2) sys.exit(2)
groups = {} groups = {}
meta = { "hostvars" : {} } meta = {"hostvars": {}}
try: try:
for system in spacewalk_report('system-groups-systems'): for system in spacewalk_report('system-groups-systems'):
# first get org_id of system # first get org_id of system
org_id = org_groups[ system['spacewalk_group_id'] ] org_id = org_groups[system['spacewalk_group_id']]
# shall we add the org_id as prefix to the group name: # shall we add the org_id as prefix to the group name:
if options.prefix_org_name: if options.prefix_org_name:
@ -181,16 +182,16 @@ if options.list:
groups[group_name] = set() groups[group_name] = set()
groups[group_name].add(system['spacewalk_server_name']) groups[group_name].add(system['spacewalk_server_name'])
if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta[ "hostvars" ]: if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta["hostvars"]:
meta[ "hostvars" ][ system['spacewalk_server_name'] ] = host_vars[ system['spacewalk_server_name'] ] meta["hostvars"][system['spacewalk_server_name']] = host_vars[system['spacewalk_server_name']]
# or we list all groups and systems: # or we list all groups and systems:
else: else:
if group_name not in groups: if group_name not in groups:
groups[group_name] = set() groups[group_name] = set()
groups[group_name].add(system['spacewalk_server_name']) groups[group_name].add(system['spacewalk_server_name'])
if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta[ "hostvars" ]: if system['spacewalk_server_name'] in host_vars and not system['spacewalk_server_name'] in meta["hostvars"]:
meta[ "hostvars" ][ system['spacewalk_server_name'] ] = host_vars[ system['spacewalk_server_name'] ] meta["hostvars"][system['spacewalk_server_name']] = host_vars[system['spacewalk_server_name']]
except (OSError) as e: except (OSError) as e:
print('Problem executing the command "%s system-groups-systems": %s' % print('Problem executing the command "%s system-groups-systems": %s' %
@ -201,15 +202,15 @@ if options.list:
for group, systems in iteritems(groups): for group, systems in iteritems(groups):
print('[%s]\n%s\n' % (group, '\n'.join(systems))) print('[%s]\n%s\n' % (group, '\n'.join(systems)))
else: else:
final = dict( [ (k, list(s)) for k, s in iteritems(groups) ] ) final = dict([(k, list(s)) for k, s in iteritems(groups)])
final["_meta"] = meta final["_meta"] = meta
print(json.dumps( final )) print(json.dumps(final))
#print(json.dumps(groups)) # print(json.dumps(groups))
sys.exit(0) sys.exit(0)
# Return a details information concerning the spacewalk server # Return a details information concerning the spacewalk server
#------------------------------ # ------------------------------
elif options.host: elif options.host:
host_details = {} host_details = {}
@ -229,7 +230,7 @@ elif options.host:
for k, v in iteritems(host_details): for k, v in iteritems(host_details):
print(' %s: %s' % (k, '\n '.join(v.split(';')))) print(' %s: %s' % (k, '\n '.join(v.split(';'))))
else: else:
print( json.dumps( dict( ( key, ( value.split(';') if ';' in value else value) ) for key, value in host_details.items() ) ) ) print(json.dumps(dict((key, (value.split(';') if ';' in value else value)) for key, value in host_details.items())))
sys.exit(0) sys.exit(0)
else: else:

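The rewritten hostvars expression above normalizes Spacewalk's CSV report fields: any value containing ';' becomes a list, everything else passes through unchanged. A sketch with hypothetical values:

item = {'spacewalk_profile_name': 'web01', 'spacewalk_ip': '10.0.0.5;10.0.0.6'}
host_vars = dict((key, value.split(';') if ';' in value else value)
                 for key, value in item.items())
# host_vars == {'spacewalk_profile_name': 'web01',
#               'spacewalk_ip': ['10.0.0.5', '10.0.0.6']}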
View file

@ -99,17 +99,18 @@ def stack_build_header(auth_creds):
def stack_host_list(endpoint, header, client): def stack_host_list(endpoint, header, client):
stack_r = client.post(endpoint, data=json.dumps({ "cmd": "list host"}), stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host"}),
headers=header) headers=header)
return json.loads(stack_r.json()) return json.loads(stack_r.json())
def stack_net_list(endpoint, header, client): def stack_net_list(endpoint, header, client):
stack_r = client.post(endpoint, data=json.dumps({ "cmd": "list host interface"}), stack_r = client.post(endpoint, data=json.dumps({"cmd": "list host interface"}),
headers=header) headers=header)
return json.loads(stack_r.json()) return json.loads(stack_r.json())
def format_meta(hostdata, intfdata, config): def format_meta(hostdata, intfdata, config):
use_hostnames = config['use_hostnames'] use_hostnames = config['use_hostnames']
meta = dict(all=dict(hosts=list()), meta = dict(all=dict(hosts=list()),
@ -159,7 +160,6 @@ def parse_args():
def main(): def main():
args = parse_args() args = parse_args()
if StrictVersion(requests.__version__) < StrictVersion("2.4.3"): if StrictVersion(requests.__version__) < StrictVersion("2.4.3"):
sys.exit('requests>=2.4.3 is required for this inventory script') sys.exit('requests>=2.4.3 is required for this inventory script')

View file

@ -16,20 +16,21 @@
# along with Ansible. If not, see <http://www.gnu.org/licenses/>. # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import sys import sys
from subprocess import Popen,PIPE from subprocess import Popen, PIPE
try: try:
import json import json
except ImportError: except ImportError:
import simplejson as json import simplejson as json
class SetEncoder(json.JSONEncoder): class SetEncoder(json.JSONEncoder):
def default(self, obj): def default(self, obj):
if isinstance(obj, set): if isinstance(obj, set):
return list(obj) return list(obj)
return json.JSONEncoder.default(self, obj) return json.JSONEncoder.default(self, obj)
VBOX="VBoxManage" VBOX = "VBoxManage"
def get_hosts(host=None): def get_hosts(host=None):
@ -39,7 +40,7 @@ def get_hosts(host=None):
if host: if host:
p = Popen([VBOX, 'showvminfo', host], stdout=PIPE) p = Popen([VBOX, 'showvminfo', host], stdout=PIPE)
else: else:
returned = { 'all': set(), '_metadata': {} } returned = {'all': set(), '_metadata': {}}
p = Popen([VBOX, 'list', '-l', 'vms'], stdout=PIPE) p = Popen([VBOX, 'list', '-l', 'vms'], stdout=PIPE)
except: except:
sys.exit(1) sys.exit(1)
@ -50,7 +51,7 @@ def get_hosts(host=None):
for line in p.stdout.readlines(): for line in p.stdout.readlines():
try: try:
k,v = line.split(':',1) k, v = line.split(':', 1)
except: except:
continue continue
@ -63,10 +64,10 @@ def get_hosts(host=None):
curname = v curname = v
hostvars[curname] = {} hostvars[curname] = {}
try: # try to get network info try: # try to get network info
x = Popen([VBOX, 'guestproperty', 'get', curname,"/VirtualBox/GuestInfo/Net/0/V4/IP"],stdout=PIPE) x = Popen([VBOX, 'guestproperty', 'get', curname, "/VirtualBox/GuestInfo/Net/0/V4/IP"], stdout=PIPE)
ipinfo = x.stdout.read() ipinfo = x.stdout.read()
if 'Value' in ipinfo: if 'Value' in ipinfo:
a,ip = ipinfo.split(':',1) a, ip = ipinfo.split(':', 1)
hostvars[curname]['ansible_ssh_host'] = ip.strip() hostvars[curname]['ansible_ssh_host'] = ip.strip()
except: except:
pass pass
@ -83,11 +84,11 @@ def get_hosts(host=None):
returned['all'].add(curname) returned['all'].add(curname)
continue continue
pref_k = 'vbox_' + k.strip().replace(' ','_') pref_k = 'vbox_' + k.strip().replace(' ', '_')
if k.startswith(' '): if k.startswith(' '):
if prevkey not in hostvars[curname]: if prevkey not in hostvars[curname]:
hostvars[curname][prevkey] = {} hostvars[curname][prevkey] = {}
hostvars[curname][prevkey][pref_k]= v hostvars[curname][prevkey][pref_k] = v
else: else:
if v != '': if v != '':
hostvars[curname][pref_k] = v hostvars[curname][pref_k] = v
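The split() fixes above are pycodestyle E231, the single most frequent change in this commit: a space after every comma. With a hypothetical VBoxManage output row:

line = 'Name:            core-01'
k, v = line.split(':', 1)      # E231-clean
# k,v = line.split(':',1)      # old style, flagged as E231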

View file

@ -47,9 +47,11 @@ try:
from logging import NullHandler from logging import NullHandler
except ImportError: except ImportError:
from logging import Handler from logging import Handler
class NullHandler(Handler): class NullHandler(Handler):
def emit(self, record): def emit(self, record):
pass pass
logging.getLogger('psphere').addHandler(NullHandler()) logging.getLogger('psphere').addHandler(NullHandler())
logging.getLogger('suds').addHandler(NullHandler()) logging.getLogger('suds').addHandler(NullHandler())
@ -362,7 +364,7 @@ class VMwareInventory(object):
# Loop through all VMs on physical host. # Loop through all VMs on physical host.
for vm in host.vm: for vm in host.vm:
if prefix_filter: if prefix_filter:
if vm.name.startswith( prefix_filter ): if vm.name.startswith(prefix_filter):
continue continue
self._add_host(inv, 'all', vm.name) self._add_host(inv, 'all', vm.name)
self._add_host(inv, vm_group, vm.name) self._add_host(inv, vm_group, vm.name)

View file

@ -54,6 +54,7 @@ except ImportError as e:
# Imports for ansible # Imports for ansible
import ConfigParser import ConfigParser
class AzureInventory(object): class AzureInventory(object):
def __init__(self): def __init__(self):
"""Main execution path.""" """Main execution path."""
@ -173,8 +174,7 @@ class AzureInventory(object):
parser.add_argument('--refresh-cache', parser.add_argument('--refresh-cache',
action='store_true', default=False, action='store_true', default=False,
help='Force refresh of thecache by making API requests to Azure ' help='Force refresh of thecache by making API requests to Azure '
'(default: False - use cache files)', '(default: False - use cache files)')
)
parser.add_argument('--host', action='store', parser.add_argument('--host', action='store',
help='Get all information about an instance.') help='Get all information about an instance.')
self.args = parser.parse_args() self.args = parser.parse_args()
@ -198,7 +198,7 @@ class AzureInventory(object):
associated with a cloud service. associated with a cloud service.
""" """
try: try:
for deployment in self.sms.get_hosted_service_properties(cloud_service.service_name,embed_detail=True).deployments.deployments: for deployment in self.sms.get_hosted_service_properties(cloud_service.service_name, embed_detail=True).deployments.deployments:
self.add_deployment(cloud_service, deployment) self.add_deployment(cloud_service, deployment)
except Exception as e: except Exception as e:
sys.exit("Error: Failed to access deployments - {0}".format(str(e))) sys.exit("Error: Failed to access deployments - {0}".format(str(e)))

View file

@ -49,6 +49,7 @@ try:
except: except:
import simplejson as json import simplejson as json
class ZabbixInventory(object): class ZabbixInventory(object):
def read_settings(self): def read_settings(self):
@ -96,7 +97,7 @@ class ZabbixInventory(object):
for group in host['groups']: for group in host['groups']:
groupname = group['name'] groupname = group['name']
if not groupname in data: if groupname not in data:
data[groupname] = self.hoststub() data[groupname] = self.hoststub()
data[groupname]['hosts'].append(hostname) data[groupname]['hosts'].append(hostname)

View file

@ -17,7 +17,7 @@
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>. # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from subprocess import Popen,PIPE from subprocess import Popen, PIPE
import sys import sys
import json import json

View file

@ -1,42 +1,6 @@
contrib/inventory/abiquo.py
contrib/inventory/apache-libcloud.py
contrib/inventory/apstra_aos.py
contrib/inventory/azure_rm.py
contrib/inventory/cobbler.py
contrib/inventory/collins.py
contrib/inventory/consul_io.py
contrib/inventory/digital_ocean.py
contrib/inventory/docker.py
contrib/inventory/ec2.py
contrib/inventory/fleet.py
contrib/inventory/foreman.py
contrib/inventory/freeipa.py contrib/inventory/freeipa.py
contrib/inventory/gce.py
contrib/inventory/jail.py
contrib/inventory/libvirt_lxc.py
contrib/inventory/linode.py
contrib/inventory/lxc_inventory.py
contrib/inventory/mdt_dynamic_inventory.py
contrib/inventory/nagios_livestatus.py
contrib/inventory/nagios_ndo.py
contrib/inventory/nova.py
contrib/inventory/nsot.py
contrib/inventory/openvz.py
contrib/inventory/ovirt.py
contrib/inventory/packet_net.py
contrib/inventory/proxmox.py
contrib/inventory/rackhd.py contrib/inventory/rackhd.py
contrib/inventory/rax.py
contrib/inventory/softlayer.py
contrib/inventory/spacewalk.py
contrib/inventory/ssh_config.py
contrib/inventory/stacki.py
contrib/inventory/vbox.py
contrib/inventory/vmware.py
contrib/inventory/vmware_inventory.py contrib/inventory/vmware_inventory.py
contrib/inventory/windows_azure.py
contrib/inventory/zabbix.py
contrib/inventory/zone.py
docs/api/conf.py docs/api/conf.py
docs/bin/dump_keywords.py docs/bin/dump_keywords.py
docs/bin/plugin_formatter.py docs/bin/plugin_formatter.py