[Monitoring] Migrate server to NP (#56675)

* First pass

* First pass

* Add new routes

* Getting closer

* Remove legacy server code, and other fixes

* Register the plugin with xpack

* Pass a legacy client to telemetry

* Support callWithInternalUser

* Remove this

* More NP work

* Fix some tests

* Fix broken test

* Move over new telemetry changes, and fix other issues

* Fix TODO item

* Reuse the same schema as elasticsearch module

* Use a singular config definition here

* Disable this for now

* Use the right method

* Use custom config again

* Tweak the config to make this optional

* Remove these

* Remove these unnecessary files

* Fix jest test

* Fix some linting issues

* Fix type issue

* Fix localization issues

* Use the elasticsearch config

* Remove todos

* Fix this check

* Move kibana alerting over

* PR feedback

* Use new metrics core service

* Change config for xpack_api_polling_frequency_millis

* Make sure this is disabled for now

* Disable both

* Update this to the new function

* Tighten up legacy api needs

* Check for existence

* Fix jest tests

* Cleaning up the plugin definition

* Create custom type in our plugin

* Revert this change

* Fix CI issues

* Add these tests back

* Just use a different collector type

* Handle errors better

* Use custom type

* PR feedback

* Fix type issues

* PR feedback
Chris Roberson, 2020-03-20 14:02:15 -04:00, committed by GitHub
parent da2ec4bf40
commit 3a396027f6
387 changed files with 2234 additions and 2134 deletions

View file

@ -20,6 +20,7 @@
import { encryptTelemetry } from './collectors';
import { CallCluster } from '../../elasticsearch';
import { UsageCollectionSetup } from '../../../../plugins/usage_collection/server';
import { Cluster } from '../../elasticsearch';
import { ESLicense } from './telemetry_collection/get_local_license';
export type EncryptedStatsGetterConfig = { unencrypted: false } & {
@ -70,7 +71,7 @@ export type LicenseGetter = (
interface CollectionConfig<T extends BasicStatsPayload> {
title: string;
priority: number;
esCluster: string;
esCluster: string | Cluster;
statsGetter: StatsGetter<T>;
clusterDetailsGetter: ClusterDetailsGetter;
licenseGetter: LicenseGetter;
@ -79,7 +80,7 @@ interface Collection {
statsGetter: StatsGetter;
licenseGetter: LicenseGetter;
clusterDetailsGetter: ClusterDetailsGetter;
esCluster: string;
esCluster: string | Cluster;
title: string;
}
@ -135,9 +136,10 @@ export class TelemetryCollectionManager {
): Promise<StatsCollectionConfig> => {
const { start, end } = config;
const server = config.unencrypted ? config.req.server : config.server;
const { callWithRequest, callWithInternalUser } = server.plugins.elasticsearch.getCluster(
collection.esCluster
);
const { callWithRequest, callWithInternalUser } =
typeof collection.esCluster === 'string'
? server.plugins.elasticsearch.getCluster(collection.esCluster)
: collection.esCluster;
const callCluster = config.unencrypted
? (...args: any[]) => callWithRequest(config.req, ...args)
: callWithInternalUser;
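
For callers, the practical effect of widening esCluster is that a telemetry collection can now be registered with a legacy Cluster object directly instead of a named cluster string. A minimal TypeScript sketch, assuming the caller already holds such a cluster; only setCollection and its fields come from this diff, the parameter shape and import paths are illustrative (they mirror the legacy registerMonitoringCollection removed later in this commit):

import { telemetryCollectionManager } from '../../../../src/legacy/core_plugins/telemetry/server';
// Getter imports mirror the legacy registerMonitoringCollection shown later in this diff.
import { getAllStats } from './get_all_stats';
import { getClusterUuids } from './get_cluster_uuids';
import { getLicenses } from './get_licenses';

// `cluster` stands in for a legacy elasticsearch Cluster exposing callWithRequest/callWithInternalUser.
export function registerMonitoringCollection(cluster: {
  callWithRequest: (req: unknown, endpoint: string, params?: unknown) => Promise<unknown>;
  callWithInternalUser: (endpoint: string, params?: unknown) => Promise<unknown>;
}) {
  telemetryCollectionManager.setCollection({
    esCluster: cluster, // previously only a cluster name string such as 'monitoring' was accepted
    title: 'monitoring',
    priority: 2,
    statsGetter: getAllStats,
    clusterDetailsGetter: getClusterUuids,
    licenseGetter: getLicenses,
  });
}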

View file

@ -17,4 +17,4 @@
* under the License.
*/
export const KIBANA_STATS_TYPE = 'kibana_stats'; // kibana stats per 5s intervals
export const KIBANA_STATS_TYPE = 'oss_kibana_stats'; // kibana stats per 5s intervals

View file

@ -28,7 +28,7 @@
"xpack.main": "legacy/plugins/xpack_main",
"xpack.maps": ["plugins/maps", "legacy/plugins/maps"],
"xpack.ml": ["plugins/ml", "legacy/plugins/ml"],
"xpack.monitoring": "legacy/plugins/monitoring",
"xpack.monitoring": ["plugins/monitoring", "legacy/plugins/monitoring"],
"xpack.remoteClusters": "plugins/remote_clusters",
"xpack.reporting": ["plugins/reporting", "legacy/plugins/reporting"],
"xpack.rollupJobs": "legacy/plugins/rollup",

View file

@ -4,8 +4,6 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { XPACK_INFO_API_DEFAULT_POLL_FREQUENCY_IN_MILLIS } from '../../server/lib/constants';
/**
* User-configurable settings for xpack.monitoring via configuration schema
* @param {Object} Joi - HapiJS Joi module that allows for schema validation
@ -132,9 +130,9 @@ export const config = Joi => {
email_address: Joi.string().email(),
}).default(),
}).default(),
xpack_api_polling_frequency_millis: Joi.number().default(
XPACK_INFO_API_DEFAULT_POLL_FREQUENCY_IN_MILLIS
),
licensing: Joi.object({
api_polling_frequency: Joi.number().default(30001),
}),
agent: Joi.object({
interval: Joi.string()
.regex(/[\d\.]+[yMwdhms]/)

View file

@ -0,0 +1,59 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { get } from 'lodash';
import { resolve } from 'path';
import { config } from './config';
import { getUiExports } from './ui_exports';
import { KIBANA_ALERTING_ENABLED } from './common/constants';
import { telemetryCollectionManager } from '../../../../src/legacy/core_plugins/telemetry/server';
/**
* Invokes plugin modules to instantiate the Monitoring plugin for Kibana
* @param kibana {Object} Kibana plugin instance
* @return {Object} Monitoring UI Kibana plugin object
*/
const deps = ['kibana', 'elasticsearch', 'xpack_main'];
if (KIBANA_ALERTING_ENABLED) {
deps.push(...['alerting', 'actions']);
}
export const monitoring = kibana => {
return new kibana.Plugin({
require: deps,
id: 'monitoring',
configPrefix: 'monitoring',
publicDir: resolve(__dirname, 'public'),
init(server) {
const serverConfig = server.config();
const npMonitoring = server.newPlatform.setup.plugins.monitoring;
if (npMonitoring) {
const kbnServerStatus = this.kbnServer.status;
npMonitoring.registerLegacyAPI({
telemetryCollectionManager,
getServerStatus: () => {
const status = kbnServerStatus.toJSON();
return get(status, 'overall.state');
},
});
}
server.injectUiAppVars('monitoring', () => {
return {
maxBucketSize: serverConfig.get('monitoring.ui.max_bucket_size'),
minIntervalSeconds: serverConfig.get('monitoring.ui.min_interval_seconds'),
kbnIndex: serverConfig.get('kibana.index'),
showLicenseExpiration: serverConfig.get('monitoring.ui.show_license_expiration'),
showCgroupMetricsElasticsearch: serverConfig.get(
'monitoring.ui.container.elasticsearch.enabled'
),
showCgroupMetricsLogstash: serverConfig.get('monitoring.ui.container.logstash.enabled'), // Note, not currently used, but see https://github.com/elastic/x-pack-kibana/issues/1559 part 2
};
});
},
config,
uiExports: getUiExports(),
});
};
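
The legacy shim above now forwards only a thin "legacy API" surface to the new-platform plugin. A hedged sketch of the contract implied by that registerLegacyAPI() call; the interface names below are assumptions for illustration, and only the two properties passed in the file above are taken from the diff:

// Illustrative types only; not the plugin's actual declarations.
interface MonitoringLegacyAPI {
  telemetryCollectionManager: unknown;       // legacy telemetry collection manager singleton
  getServerStatus: () => string | undefined; // overall Kibana status, e.g. 'green'
}

interface MonitoringPluginSetup {
  registerLegacyAPI(legacyAPI: MonitoringLegacyAPI): void;
}

// Usage mirrors init() above:
//   const npMonitoring = server.newPlatform.setup.plugins.monitoring as MonitoringPluginSetup;
//   npMonitoring.registerLegacyAPI({ telemetryCollectionManager, getServerStatus: () => '...' });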

View file

@ -1,141 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { resolve } from 'path';
import KbnServer, { Server } from 'src/legacy/server/kbn_server';
import {
LegacyPluginApi,
LegacyPluginSpec,
LegacyPluginOptions,
} from 'src/legacy/plugin_discovery/types';
import { KIBANA_ALERTING_ENABLED } from './common/constants';
// @ts-ignore
import { getUiExports } from './ui_exports';
// @ts-ignore
import { config as configDefaults } from './config';
// @ts-ignore
import { deprecations } from './deprecations';
// @ts-ignore
import { Plugin } from './server/plugin';
// @ts-ignore
import { initInfraSource } from './server/lib/logs/init_infra_source';
type InfraPlugin = any; // TODO
type PluginsSetup = any; // TODO
type LegacySetup = any; // TODO
const deps = ['kibana', 'elasticsearch', 'xpack_main'];
if (KIBANA_ALERTING_ENABLED) {
deps.push(...['alerting', 'actions']);
}
const validConfigOptions: string[] = [
'monitoring.ui.enabled',
'monitoring.kibana.collection.enabled',
'monitoring.ui.max_bucket_size',
'monitoring.ui.min_interval_seconds',
'kibana.index',
'monitoring.ui.show_license_expiration',
'monitoring.ui.container.elasticsearch.enabled',
'monitoring.ui.container.logstash.enabled',
'monitoring.tests.cloud_detector.enabled',
'monitoring.kibana.collection.interval',
'monitoring.elasticsearch.hosts',
'monitoring.elasticsearch',
'monitoring.ui.elasticsearch.hosts',
'monitoring.ui.elasticsearch',
'monitoring.xpack_api_polling_frequency_millis',
'server.uuid',
'server.name',
'server.host',
'server.port',
'monitoring.cluster_alerts.email_notifications.enabled',
'monitoring.cluster_alerts.email_notifications.email_address',
'monitoring.ui.ccs.enabled',
'monitoring.ui.elasticsearch.logFetchCount',
'monitoring.ui.logs.index',
];
interface LegacyPluginOptionsWithKbnServer extends LegacyPluginOptions {
kbnServer?: KbnServer;
}
/**
* Invokes plugin modules to instantiate the Monitoring plugin for Kibana
* @param kibana {Object} Kibana plugin instance
* @return {Object} Monitoring UI Kibana plugin object
*/
export const monitoring = (kibana: LegacyPluginApi): LegacyPluginSpec => {
return new kibana.Plugin({
require: deps,
id: 'monitoring',
configPrefix: 'monitoring',
publicDir: resolve(__dirname, 'public'),
config: configDefaults,
uiExports: getUiExports(),
deprecations,
async init(server: Server) {
const serverConfig = server.config();
const { getOSInfo, plugins, injectUiAppVars } = server as typeof server & { getOSInfo?: any };
const log = (...args: Parameters<typeof server.log>) => server.log(...args);
const route = (...args: Parameters<typeof server.route>) => server.route(...args);
const expose = (...args: Parameters<typeof server.expose>) => server.expose(...args);
const serverFacade = {
config: () => ({
get: (key: string) => {
if (validConfigOptions.includes(key)) {
return serverConfig.get(key);
}
throw new Error(`Unknown key '${key}'`);
},
}),
injectUiAppVars,
log,
logger: server.newPlatform.coreContext.logger,
getOSInfo,
events: {
on: (...args: Parameters<typeof server.events.on>) => server.events.on(...args),
},
route,
expose,
_hapi: server,
_kbnServer: this.kbnServer,
};
const legacyPlugins = plugins as Partial<typeof plugins> & { infra?: InfraPlugin };
const { xpack_main, elasticsearch, infra } = legacyPlugins;
const {
core: coreSetup,
plugins: { usageCollection, licensing, alerting },
} = server.newPlatform.setup;
const pluginsSetup: PluginsSetup = {
usageCollection,
licensing,
alerting,
};
const __LEGACY: LegacySetup = {
...serverFacade,
plugins: {
xpack_main,
elasticsearch,
infra,
},
};
const plugin = new Plugin();
await plugin.setup(coreSetup, pluginsSetup, __LEGACY);
},
postInit(server: Server) {
const { infra } = server.plugins as Partial<typeof server.plugins> & { infra?: InfraPlugin };
initInfraSource(server.config(), infra);
},
} as Partial<LegacyPluginOptionsWithKbnServer>);
};

View file

@ -56,6 +56,7 @@ uiRoutes
const globalState = $injector.get('globalState');
const storage = $injector.get('localStorage');
const showLicenseExpiration = $injector.get('showLicenseExpiration');
this.data = $route.current.locals.clusters;
$scope.$watch(

View file

@ -1,87 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { BehaviorSubject } from 'rxjs';
import expect from '@kbn/expect';
import sinon from 'sinon';
import { XPackInfo } from '../../../xpack_main/server/lib/xpack_info';
import { licensingMock } from '../../../../../plugins/licensing/server/mocks';
const createLicense = (type = 'basic') => {
return licensingMock.createLicense({
license: {
uid: 'custom-uid',
type,
mode: 'basic',
status: 'active',
expiryDateInMillis: 1286575200000,
},
features: {
monitoring: {
description: '...',
isAvailable: true,
isEnabled: true,
},
},
});
};
describe('XPackInfo', () => {
let mockServer;
let mockElasticsearchPlugin;
beforeEach(() => {
mockServer = sinon.stub({
plugins: { elasticsearch: mockElasticsearchPlugin },
events: { on() {} },
newPlatform: {
setup: {
plugins: {
licensing: {},
},
},
},
});
});
describe('refreshNow()', () => {
it('check new platform licensing plugin', async () => {
const refresh = sinon.spy();
const license$ = new BehaviorSubject(createLicense());
const xPackInfo = new XPackInfo(mockServer, {
licensing: {
license$,
refresh,
},
});
let changed = false;
license$.subscribe(() => (changed = true));
await xPackInfo.refreshNow();
expect(changed).to.be(true);
sinon.assert.calledOnce(refresh);
});
});
describe('Change type', () => {
it('trigger event when license type changes', async () => {
const license$ = new BehaviorSubject(createLicense());
const refresh = () => void 0;
const xPackInfo = new XPackInfo(mockServer, {
licensing: {
license$,
refresh,
},
});
let changed = false;
license$.subscribe(() => (changed = true));
await license$.next(createLicense('gold'));
expect(xPackInfo.license.getType()).to.be('gold');
expect(changed).to.be(true);
});
});
});

View file

@ -1,145 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import expect from '@kbn/expect';
import sinon from 'sinon';
import { noop } from 'lodash';
import { exposeClient, hasMonitoringCluster } from '../instantiate_client';
function getMockServerFromConnectionUrl(monitoringClusterUrl) {
const server = {
monitoring: {
ui: {
elasticsearch: {
hosts: monitoringClusterUrl ? [monitoringClusterUrl] : [],
username: 'monitoring-user-internal-test',
password: 'monitoring-p@ssw0rd!-internal-test',
ssl: {},
customHeaders: {
'x-custom-headers-test': 'connection-monitoring',
},
},
},
},
};
return {
elasticsearchConfig: server.monitoring.ui.elasticsearch,
elasticsearchPlugin: {
getCluster: sinon
.stub()
.withArgs('admin')
.returns({
config: sinon.stub().returns(server.elasticsearch),
}),
createCluster: sinon.stub(),
},
events: {
on: noop,
},
expose: sinon.stub(),
log: sinon.stub(),
};
}
describe('Instantiate Client', () => {
describe('Logging', () => {
it('logs that the config was sourced from the production options', () => {
const server = getMockServerFromConnectionUrl(null); // pass null for URL to create the client using prod config
exposeClient(server);
expect(server.log.getCall(0).args).to.eql([
['monitoring', 'es-client'],
'config sourced from: production cluster',
]);
});
it('logs that the config was sourced from the monitoring options', () => {
const server = getMockServerFromConnectionUrl('monitoring-cluster.test:9200');
exposeClient(server);
expect(server.log.getCall(0).args).to.eql([
['monitoring', 'es-client'],
'config sourced from: monitoring cluster',
]);
});
});
describe('Custom Headers Configuration', () => {
it('Does not add xpack.monitoring.elasticsearch.customHeaders if connected to production cluster', () => {
const server = getMockServerFromConnectionUrl(null); // pass null for URL to create the client using prod config
exposeClient(server);
const createCluster = server.elasticsearchPlugin.createCluster;
const createClusterCall = createCluster.getCall(0);
sinon.assert.calledOnce(createCluster);
expect(createClusterCall.args[0]).to.be('monitoring');
expect(createClusterCall.args[1].customHeaders).to.eql(undefined);
});
it('Adds xpack.monitoring.elasticsearch.customHeaders if connected to monitoring cluster', () => {
const server = getMockServerFromConnectionUrl('http://monitoring-cluster.test:9200'); // pass null for URL to create the client using prod config
exposeClient(server);
const createCluster = server.elasticsearchPlugin.createCluster;
const createClusterCall = createCluster.getCall(0);
sinon.assert.calledOnce(createCluster);
expect(createClusterCall.args[0]).to.be('monitoring');
expect(createClusterCall.args[1].customHeaders).to.eql({
'x-custom-headers-test': 'connection-monitoring',
});
});
});
describe('Use a connection to production cluster', () => {
it('exposes an authenticated client using production host settings', () => {
const server = getMockServerFromConnectionUrl(null); // pass null for URL to create the client using prod config
exposeClient(server);
const createCluster = server.elasticsearchPlugin.createCluster;
const createClusterCall = createCluster.getCall(0);
const createClientOptions = createClusterCall.args[1];
sinon.assert.calledOnce(createCluster);
expect(createClusterCall.args[0]).to.be('monitoring');
expect(createClientOptions.hosts).to.eql(undefined);
});
});
describe('Use a connection to monitoring cluster', () => {
it('exposes an authenticated client using monitoring host settings', () => {
const server = getMockServerFromConnectionUrl('http://monitoring-cluster.test:9200');
exposeClient(server);
const createCluster = server.elasticsearchPlugin.createCluster;
const createClusterCall = createCluster.getCall(0);
const createClientOptions = createClusterCall.args[1];
sinon.assert.calledOnce(createCluster);
expect(createClusterCall.args[0]).to.be('monitoring');
expect(createClientOptions.hosts[0]).to.eql('http://monitoring-cluster.test:9200');
expect(createClientOptions.username).to.eql('monitoring-user-internal-test');
expect(createClientOptions.password).to.eql('monitoring-p@ssw0rd!-internal-test');
});
});
describe('hasMonitoringCluster', () => {
it('returns true if monitoring is configured', () => {
const server = getMockServerFromConnectionUrl('http://monitoring-cluster.test:9200'); // pass null for URL to create the client using prod config
expect(hasMonitoringCluster(server.elasticsearchConfig)).to.be(true);
});
it('returns false if monitoring is not configured', () => {
const server = getMockServerFromConnectionUrl(null);
expect(hasMonitoringCluster(server.elasticsearchConfig)).to.be(false);
});
});
});

View file

@ -1,15 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
export const mockReadFileSync = jest.fn();
jest.mock('fs', () => ({ readFileSync: mockReadFileSync }));
export const mockReadPkcs12Keystore = jest.fn();
export const mockReadPkcs12Truststore = jest.fn();
jest.mock('../../../../../../src/core/utils', () => ({
readPkcs12Keystore: mockReadPkcs12Keystore,
readPkcs12Truststore: mockReadPkcs12Truststore,
}));

View file

@ -1,181 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import {
mockReadFileSync,
mockReadPkcs12Keystore,
mockReadPkcs12Truststore,
} from './parse_elasticsearch_config.test.mocks';
import { parseElasticsearchConfig } from './parse_elasticsearch_config';
const parse = (config: any) => {
return parseElasticsearchConfig({
get: () => config,
});
};
describe('reads files', () => {
beforeEach(() => {
mockReadFileSync.mockReset();
mockReadFileSync.mockImplementation((path: string) => `content-of-${path}`);
mockReadPkcs12Keystore.mockReset();
mockReadPkcs12Keystore.mockImplementation((path: string) => ({
key: `content-of-${path}.key`,
cert: `content-of-${path}.cert`,
ca: [`content-of-${path}.ca`],
}));
mockReadPkcs12Truststore.mockReset();
mockReadPkcs12Truststore.mockImplementation((path: string) => [`content-of-${path}`]);
});
it('reads certificate authorities when ssl.keystore.path is specified', () => {
const configValue = parse({ ssl: { keystore: { path: 'some-path' } } });
expect(mockReadPkcs12Keystore).toHaveBeenCalledTimes(1);
expect(configValue.ssl.certificateAuthorities).toEqual(['content-of-some-path.ca']);
});
it('reads certificate authorities when ssl.truststore.path is specified', () => {
const configValue = parse({ ssl: { truststore: { path: 'some-path' } } });
expect(mockReadPkcs12Truststore).toHaveBeenCalledTimes(1);
expect(configValue.ssl.certificateAuthorities).toEqual(['content-of-some-path']);
});
it('reads certificate authorities when ssl.certificateAuthorities is specified', () => {
let configValue = parse({ ssl: { certificateAuthorities: 'some-path' } });
expect(mockReadFileSync).toHaveBeenCalledTimes(1);
expect(configValue.ssl.certificateAuthorities).toEqual(['content-of-some-path']);
mockReadFileSync.mockClear();
configValue = parse({ ssl: { certificateAuthorities: ['some-path'] } });
expect(mockReadFileSync).toHaveBeenCalledTimes(1);
expect(configValue.ssl.certificateAuthorities).toEqual(['content-of-some-path']);
mockReadFileSync.mockClear();
configValue = parse({ ssl: { certificateAuthorities: ['some-path', 'another-path'] } });
expect(mockReadFileSync).toHaveBeenCalledTimes(2);
expect(configValue.ssl.certificateAuthorities).toEqual([
'content-of-some-path',
'content-of-another-path',
]);
});
it('reads certificate authorities when ssl.keystore.path, ssl.truststore.path, and ssl.certificateAuthorities are specified', () => {
const configValue = parse({
ssl: {
keystore: { path: 'some-path' },
truststore: { path: 'another-path' },
certificateAuthorities: 'yet-another-path',
},
});
expect(mockReadPkcs12Keystore).toHaveBeenCalledTimes(1);
expect(mockReadPkcs12Truststore).toHaveBeenCalledTimes(1);
expect(mockReadFileSync).toHaveBeenCalledTimes(1);
expect(configValue.ssl.certificateAuthorities).toEqual([
'content-of-some-path.ca',
'content-of-another-path',
'content-of-yet-another-path',
]);
});
it('reads a private key and certificate when ssl.keystore.path is specified', () => {
const configValue = parse({ ssl: { keystore: { path: 'some-path' } } });
expect(mockReadPkcs12Keystore).toHaveBeenCalledTimes(1);
expect(configValue.ssl.key).toEqual('content-of-some-path.key');
expect(configValue.ssl.certificate).toEqual('content-of-some-path.cert');
});
it('reads a private key when ssl.key is specified', () => {
const configValue = parse({ ssl: { key: 'some-path' } });
expect(mockReadFileSync).toHaveBeenCalledTimes(1);
expect(configValue.ssl.key).toEqual('content-of-some-path');
});
it('reads a certificate when ssl.certificate is specified', () => {
const configValue = parse({ ssl: { certificate: 'some-path' } });
expect(mockReadFileSync).toHaveBeenCalledTimes(1);
expect(configValue.ssl.certificate).toEqual('content-of-some-path');
});
});
describe('throws when config is invalid', () => {
beforeAll(() => {
const realFs = jest.requireActual('fs');
mockReadFileSync.mockImplementation((path: string) => realFs.readFileSync(path));
const utils = jest.requireActual('../../../../../../src/core/utils');
mockReadPkcs12Keystore.mockImplementation((path: string, password?: string) =>
utils.readPkcs12Keystore(path, password)
);
mockReadPkcs12Truststore.mockImplementation((path: string, password?: string) =>
utils.readPkcs12Truststore(path, password)
);
});
it('throws if key is invalid', () => {
const value = { ssl: { key: '/invalid/key' } };
expect(() => parse(value)).toThrowErrorMatchingInlineSnapshot(
`"ENOENT: no such file or directory, open '/invalid/key'"`
);
});
it('throws if certificate is invalid', () => {
const value = { ssl: { certificate: '/invalid/cert' } };
expect(() => parse(value)).toThrowErrorMatchingInlineSnapshot(
`"ENOENT: no such file or directory, open '/invalid/cert'"`
);
});
it('throws if certificateAuthorities is invalid', () => {
const value = { ssl: { certificateAuthorities: '/invalid/ca' } };
expect(() => parse(value)).toThrowErrorMatchingInlineSnapshot(
`"ENOENT: no such file or directory, open '/invalid/ca'"`
);
});
it('throws if keystore path is invalid', () => {
const value = { ssl: { keystore: { path: '/invalid/keystore' } } };
expect(() => parse(value)).toThrowErrorMatchingInlineSnapshot(
`"ENOENT: no such file or directory, open '/invalid/keystore'"`
);
});
it('throws if keystore does not contain a key', () => {
mockReadPkcs12Keystore.mockReturnValueOnce({});
const value = { ssl: { keystore: { path: 'some-path' } } };
expect(() => parse(value)).toThrowErrorMatchingInlineSnapshot(
`"Did not find key in Elasticsearch keystore."`
);
});
it('throws if keystore does not contain a certificate', () => {
mockReadPkcs12Keystore.mockReturnValueOnce({ key: 'foo' });
const value = { ssl: { keystore: { path: 'some-path' } } };
expect(() => parse(value)).toThrowErrorMatchingInlineSnapshot(
`"Did not find certificate in Elasticsearch keystore."`
);
});
it('throws if truststore path is invalid', () => {
const value = { ssl: { keystore: { path: '/invalid/truststore' } } };
expect(() => parse(value)).toThrowErrorMatchingInlineSnapshot(
`"ENOENT: no such file or directory, open '/invalid/truststore'"`
);
});
it('throws if key and keystore.path are both specified', () => {
const value = { ssl: { key: 'foo', keystore: { path: 'bar' } } };
expect(() => parse(value)).toThrowErrorMatchingInlineSnapshot(
`"[config validation of [monitoring.ui.elasticsearch].ssl]: cannot use [key] when [keystore.path] is specified"`
);
});
it('throws if certificate and keystore.path are both specified', () => {
const value = { ssl: { certificate: 'foo', keystore: { path: 'bar' } } };
expect(() => parse(value)).toThrowErrorMatchingInlineSnapshot(
`"[config validation of [monitoring.ui.elasticsearch].ssl]: cannot use [certificate] when [keystore.path] is specified"`
);
});
});

View file

@ -1,114 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { readFileSync } from 'fs';
import { readPkcs12Truststore, readPkcs12Keystore } from '../../../../../../src/core/utils';
const KEY = 'monitoring.ui.elasticsearch';
/*
* Parse a config object's Elasticsearch configuration, reading any
* certificates/keys from the filesystem
*
* TODO: this code can be removed when this plugin is migrated to the Kibana Platform,
* at that point the ElasticsearchClient and ElasticsearchConfig should be used instead
*/
export const parseElasticsearchConfig = (config: any, configKey: string = KEY) => {
const es = config.get(configKey);
if (!es) {
return {};
}
const errorPrefix = `[config validation of [${configKey}].ssl]`;
if (es.ssl?.key && es.ssl?.keystore?.path) {
throw new Error(`${errorPrefix}: cannot use [key] when [keystore.path] is specified`);
}
if (es.ssl?.certificate && es.ssl?.keystore?.path) {
throw new Error(`${errorPrefix}: cannot use [certificate] when [keystore.path] is specified`);
}
const { alwaysPresentCertificate, verificationMode } = es.ssl;
const { key, keyPassphrase, certificate, certificateAuthorities } = readKeyAndCerts(es);
return {
...es,
ssl: {
alwaysPresentCertificate,
key,
keyPassphrase,
certificate,
certificateAuthorities,
verificationMode,
},
};
};
const readKeyAndCerts = (rawConfig: any) => {
let key: string | undefined;
let keyPassphrase: string | undefined;
let certificate: string | undefined;
let certificateAuthorities: string[] | undefined;
const addCAs = (ca: string[] | undefined) => {
if (ca && ca.length) {
certificateAuthorities = [...(certificateAuthorities || []), ...ca];
}
};
if (rawConfig.ssl.keystore?.path) {
const keystore = readPkcs12Keystore(
rawConfig.ssl.keystore.path,
rawConfig.ssl.keystore.password
);
if (!keystore.key) {
throw new Error(`Did not find key in Elasticsearch keystore.`);
} else if (!keystore.cert) {
throw new Error(`Did not find certificate in Elasticsearch keystore.`);
}
key = keystore.key;
certificate = keystore.cert;
addCAs(keystore.ca);
} else {
if (rawConfig.ssl.key) {
key = readFile(rawConfig.ssl.key);
keyPassphrase = rawConfig.ssl.keyPassphrase;
}
if (rawConfig.ssl.certificate) {
certificate = readFile(rawConfig.ssl.certificate);
}
}
if (rawConfig.ssl.truststore?.path) {
const ca = readPkcs12Truststore(
rawConfig.ssl.truststore.path,
rawConfig.ssl.truststore.password
);
addCAs(ca);
}
const ca = rawConfig.ssl.certificateAuthorities;
if (ca) {
const parsed: string[] = [];
const paths = Array.isArray(ca) ? ca : [ca];
if (paths.length > 0) {
for (const path of paths) {
parsed.push(readFile(path));
}
addCAs(parsed);
}
}
return {
key,
keyPassphrase,
certificate,
certificateAuthorities,
};
};
const readFile = (file: string) => {
return readFileSync(file, 'utf8');
};

View file

@ -1,44 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { checkLicenseGenerator } from './cluster_alerts/check_license';
import { hasMonitoringCluster } from './es_client/instantiate_client';
import { LOGGING_TAG } from '../common/constants';
import { XPackInfo } from '../../xpack_main/server/lib/xpack_info';
/*
* Expose xpackInfo for the Monitoring cluster as server.plugins.monitoring.info
*/
export const initMonitoringXpackInfo = async ({
config,
server,
client,
xpackMainPlugin,
licensing,
expose,
log,
}) => {
const xpackInfo = hasMonitoringCluster(config)
? new XPackInfo(server, {
licensing: licensing.createLicensePoller(
client,
config.get('monitoring.xpack_api_polling_frequency_millis')
),
})
: xpackMainPlugin.info;
xpackInfo.feature('monitoring').registerLicenseCheckResultsGenerator(checkLicenseGenerator);
expose('info', xpackInfo);
// check if X-Pack is installed on Monitoring Cluster
const xpackInfoTest = await xpackInfo.refreshNow();
if (!xpackInfoTest.isAvailable()) {
log(
[LOGGING_TAG, 'warning'],
`X-Pack Monitoring Cluster Alerts will not be available: ${xpackInfoTest.unavailableReason()}`
);
}
};

View file

@ -1,95 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import {
LOGGING_TAG,
KIBANA_MONITORING_LOGGING_TAG,
KIBANA_STATS_TYPE_MONITORING,
} from '../../../common/constants';
import { opsBuffer } from './ops_buffer';
import Oppsy from 'oppsy';
import { cloneDeep } from 'lodash';
let bufferHadEvents = false;
class OpsMonitor {
constructor(hapiServer, buffer, interval) {
this._buffer = buffer;
this._interval = interval;
this._oppsy = new Oppsy(hapiServer);
this._server = hapiServer;
}
start = () => {
this._oppsy.on('ops', event => {
// Oppsy has a bad race condition that will modify this data before
// we ship it off to the buffer. Let's create our copy first.
event = cloneDeep(event);
// Oppsy used to provide this, but doesn't anymore. Grab it ourselves.
this._server.listener.getConnections((_, count) => {
event.concurrent_connections = count;
this._buffer.push(event);
});
});
this._oppsy.on('error', console.log);
this._oppsy.start(this._interval);
};
stop = () => {
this._oppsy.stop();
this._oppsy.removeAllListeners();
};
}
/*
* Initialize a collector for Kibana Ops Stats
*/
export function getOpsStatsCollector(
usageCollection,
{ elasticsearchPlugin, kbnServerConfig, log, config, getOSInfo, hapiServer }
) {
const buffer = opsBuffer({ log, config, getOSInfo });
const interval = kbnServerConfig.get('ops.interval');
const opsMonitor = new OpsMonitor(hapiServer, buffer, interval);
/* Handle stopping / restarting the event listener if Elasticsearch stops and restarts
* NOTE it is possible for the plugin status to go from red to red and
* trigger handlers twice
*/
elasticsearchPlugin.status.on('red', opsMonitor.stop);
elasticsearchPlugin.status.on('green', opsMonitor.start);
// `process` is a NodeJS global, and is always available without using require/import
process.on('SIGHUP', () => {
log(
['info', LOGGING_TAG, KIBANA_MONITORING_LOGGING_TAG],
'Re-initializing Kibana Monitoring due to SIGHUP'
);
setTimeout(() => {
opsMonitor.stop();
opsMonitor.start();
log(
['info', LOGGING_TAG, KIBANA_MONITORING_LOGGING_TAG],
'Re-initializing Kibana Monitoring due to SIGHUP'
);
}, 5 * 1000); // wait 5 seconds to avoid race condition with reloading logging configuration
});
return usageCollection.makeStatsCollector({
type: KIBANA_STATS_TYPE_MONITORING,
init: opsMonitor.start,
isReady: () => {
if (!bufferHadEvents) {
bufferHadEvents = buffer.hasEvents();
}
return bufferHadEvents;
},
fetch: async () => {
return await buffer.flush();
},
});
}

View file

@ -1,124 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { EventRoller } from '../event_roller';
import expect from '@kbn/expect';
const events = [
{
requests: {
'5601': {
total: 103,
disconnects: 0,
statusCodes: { '200': 15, '304': 88 },
},
},
responseTimes: { '5601': { avg: 5.213592233009709, max: 36 } },
osload: [1.90380859375, 1.84033203125, 1.82666015625],
osmem: { total: 17179869184, free: 613638144 },
osup: 4615,
psup: 62.388,
psmem: {
rss: 518164480,
heapTotal: 474275840,
heapUsed: 318428400,
external: 5172252,
},
concurrent_connections: 6,
psdelay: 0.4091129992157221,
},
{
requests: {
'5601': {
total: 35,
disconnects: 0,
statusCodes: { '200': 5, '304': 30 },
},
},
responseTimes: { '5601': { avg: 4.6, max: 29 } },
sockets: {
http: { total: 1, '169.254.169.254:80:': 1 },
https: { total: 0 },
},
osload: [1.9111328125, 1.8427734375, 1.82763671875],
osmem: { total: 17179869184, free: 641744896 },
osup: 4620,
psup: 67.39,
psmem: {
rss: 518193152,
heapTotal: 474275840,
heapUsed: 315669840,
external: 5083177,
},
concurrent_connections: 6,
psdelay: 0.6715770000591874,
},
];
describe('Event Roller', () => {
it('constructs an event roller object', () => {
const eventRoller = new EventRoller();
expect(eventRoller.rollup).to.be(null);
expect(eventRoller.getFromRollup()).to.be(undefined);
expect(eventRoller.getFromRollup('concurrent_connections')).to.be(undefined);
});
it('adds events and rolls them up', () => {
const eventRoller = new EventRoller();
const [event1, event2] = events;
eventRoller.addEvent(event1);
eventRoller.addEvent(event2);
const flush = eventRoller.flush();
// delete unpredictable fields
delete flush.timestamp;
delete flush.process.memory.heap.size_limit;
expect(flush).to.eql({
concurrent_connections: 12, // 6 + 6
os: {
load: { '1m': 1.9111328125, '5m': 1.8427734375, '15m': 1.82763671875 }, // just the latest
memory: {
total_in_bytes: 17179869184,
free_in_bytes: 641744896,
used_in_bytes: 16538124288, // just the latest
},
uptime_in_millis: 4620000, // converted from latest osup
},
process: {
event_loop_delay: 1.0806899992749095, // 0.4091129992157221 + 0.6715770000591874
memory: {
heap: {
total_in_bytes: 474275840,
used_in_bytes: 315669840,
},
resident_set_size_in_bytes: 518193152, // just the latest
},
uptime_in_millis: 67390, // latest from psup
},
requests: {
disconnects: 0,
total: 138, // 103 + 35
},
response_times: {
average: 5.213592233009709, // max of 5.213592233009709, 4.6
max: 36, // max of 36, 29
},
});
});
it('forgets the rollup after flush', () => {
const eventRoller = new EventRoller();
const [event1, event2] = events;
eventRoller.addEvent(event1);
eventRoller.addEvent(event2);
const flush1 = eventRoller.flush(); // eslint-disable-line no-unused-vars
const flush2 = eventRoller.flush();
expect(flush2).to.be(null);
});
});

View file

@ -1,43 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import _ from 'lodash';
import { mapRequests } from '../map_requests';
import expect from '@kbn/expect';
describe('Map requests', () => {
it('flatten ports', () => {
const requests = { '5603': { total: 1, disconnects: 0, statusCodes: {} } };
const expected = { total: 1, disconnects: 0 };
expect(_.isEqual(mapRequests(requests), expected)).to.be(true);
});
it('combine values', () => {
const requests = {
'5603': { total: 1, disconnects: 0, statusCodes: {} },
'5604': {
total: 1,
disconnects: 44,
statusCodes: {
'200': 2,
'201': 4,
},
},
'5605': {
total: 1,
disconnects: 0,
statusCodes: {
'200': 20,
},
},
};
const expected = {
total: 3,
disconnects: 44,
};
expect(_.isEqual(mapRequests(requests), expected)).to.be(true);
});
});

View file

@ -1,57 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { isEqual } from 'lodash';
import { mapResponseTimes } from '../map_response_times';
import expect from '@kbn/expect';
describe('Map response times', () => {
it('flatten ports', () => {
const responseTimes = { '5603': { avg: 30, max: 250 } };
const expected = { average: 30, max: 250 };
expect(isEqual(mapResponseTimes(responseTimes), expected)).to.be(true);
});
it('combine empty', () => {
const responseTimes = {};
const expected = { average: 0, max: 0 };
expect(isEqual(mapResponseTimes(responseTimes), expected)).to.be(true);
});
it('combine results', () => {
const responseTimes = {
'5600': {
avg: 1,
max: 10,
},
'5602': {
avg: 3,
max: 200,
},
};
const expected = { average: 3, max: 200 };
expect(isEqual(mapResponseTimes(responseTimes), expected)).to.be(true);
});
it('combine results with different maximums for average and max value', () => {
const responseTimes = {
'5600': {
avg: 5,
max: 10,
},
'5602': {
avg: 3,
max: 200,
},
'5604': {
// no average
max: 105,
},
};
const expected = { average: 5, max: 200 };
expect(isEqual(mapResponseTimes(responseTimes), expected)).to.be(true);
});
});

View file

@ -1,87 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { get, partialRight, assign, max, sum } from 'lodash';
import moment from 'moment';
import v8 from 'v8';
import { mapRequests } from './map_requests';
import { mapResponseTimes } from './map_response_times';
// rollup functions are for objects with unpredictable keys (e.g., {'200': 1, '201': 2} + {'200':2} = {'200': 3, '201': 2})
const maxRollup = partialRight(assign, (latest, prev) => max([latest, prev]));
export class EventRoller {
constructor() {
this.rollup = null;
}
getFromRollup(path) {
return get(this.rollup, path);
}
hasEvents() {
return this.rollup !== null;
}
rollupEvent(event) {
const heapStats = v8.getHeapStatistics();
const requests = mapRequests(event.requests);
return {
concurrent_connections: sum([
event.concurrent_connections,
this.getFromRollup('concurrent_connections'),
]),
// memory/os stats use the latest event's details
os: {
load: {
'1m': get(event, 'osload[0]'),
'5m': get(event, 'osload[1]'),
'15m': get(event, 'osload[2]'),
},
memory: {
total_in_bytes: get(event, 'osmem.total'),
free_in_bytes: get(event, 'osmem.free'),
used_in_bytes: get(event, 'osmem.total') - get(event, 'osmem.free'),
},
uptime_in_millis: event.osup * 1000, // seconds to milliseconds
},
process: {
event_loop_delay: sum([event.psdelay, this.getFromRollup('process.event_loop_delay')]),
memory: {
heap: {
total_in_bytes: get(event, 'psmem.heapTotal'),
used_in_bytes: get(event, 'psmem.heapUsed'),
size_limit: heapStats.heap_size_limit,
},
resident_set_size_in_bytes: get(event, 'psmem.rss'),
},
uptime_in_millis: event.psup * 1000, // seconds to milliseconds
},
requests: {
disconnects: sum([requests.disconnects, this.getFromRollup('requests.disconnects')]),
total: sum([requests.total, this.getFromRollup('requests.total')]),
},
response_times: maxRollup(
mapResponseTimes(event.responseTimes),
this.getFromRollup('response_times')
),
timestamp: moment.utc().toISOString(),
};
}
addEvent(event) {
// update internal state with new event data
this.rollup = this.rollupEvent(event);
}
flush() {
// reset the internal state and return it
const rollup = this.rollup;
this.rollup = null;
return rollup;
}
}

View file

@ -1,19 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import _ from 'lodash';
export function mapRequests(requests) {
return _.reduce(
_.values(requests),
(result, value) => {
result.total += value.total;
result.disconnects += value.disconnects;
return result;
},
{ total: 0, disconnects: 0 }
);
}

View file

@ -1,25 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import _ from 'lodash';
export function mapResponseTimes(times) {
const responseTimes = _.reduce(
_.values(times),
(result, value) => {
if (value.avg) {
result.avg = Math.max(result.avg, value.avg);
}
result.max = Math.max(result.max, value.max);
return result;
},
{ avg: 0, max: 0 }
);
return {
average: responseTimes.avg,
max: responseTimes.max,
};
}

View file

@ -1,55 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { EventRoller } from './event_roller';
import { CloudDetector } from '../../../cloud';
/**
* Manage the buffer of Kibana Ops events
* @param {Object} server HapiJS server instance
* @return {Object} the revealed `push` and `flush` modules
*/
export function opsBuffer({ config, getOSInfo }) {
// determine the cloud service in the background
const cloudDetector = new CloudDetector();
if (config.get('monitoring.tests.cloud_detector.enabled')) {
cloudDetector.detectCloudService();
}
const eventRoller = new EventRoller();
return {
push(event) {
eventRoller.addEvent(event);
},
hasEvents() {
return eventRoller.hasEvents();
},
async flush() {
let cloud; // a property that will be left out of the result if the details are undefined
const cloudDetails = cloudDetector.getCloudDetails();
if (cloudDetails != null) {
cloud = { cloud: cloudDetails };
}
const eventRollup = eventRoller.flush();
if (eventRollup && eventRollup.os) {
eventRollup.os = {
...eventRollup.os,
...(await getOSInfo()),
};
}
return {
...cloud,
...eventRollup,
};
},
};
}

View file

@ -1,33 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { get } from 'lodash';
const snapshotRegex = /-snapshot/i;
/**
* This provides a common structure to apply to all Kibana monitoring documents so that they can be commonly
* searched, field-collapsed, and aggregated against.
*
* @param {Object} kbnServer manager of Kibana services - see `src/legacy/server/kbn_server` in Kibana core
* @param {Object} config Server config
* @param {String} host Kibana host
* @return {Object} The object containing a "kibana" field and source instance details.
*/
export function getKibanaInfoForStats({ kbnServerStatus, kbnServerVersion, config }) {
const status = kbnServerStatus.toJSON();
return {
uuid: config.get('server.uuid'),
name: config.get('server.name'),
index: config.get('kibana.index'),
host: config.get('server.host'),
transport_address: `${config.get('server.host')}:${config.get('server.port')}`,
version: kbnServerVersion.replace(snapshotRegex, ''),
snapshot: snapshotRegex.test(kbnServerVersion),
status: get(status, 'overall.state'),
};
}

View file

@ -1,10 +0,0 @@
/*
* Not using named imports, because the resources are JSON files
*/
import shardStatsFixture from './shard_stats';
import clusterFixture from './cluster';
export {
shardStatsFixture,
clusterFixture
};

View file

@ -1,189 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { i18n } from '@kbn/i18n';
import {
LOGGING_TAG,
KIBANA_MONITORING_LOGGING_TAG,
KIBANA_ALERTING_ENABLED,
} from '../common/constants';
import { requireUIRoutes } from './routes';
import { instantiateClient } from './es_client/instantiate_client';
import { initMonitoringXpackInfo } from './init_monitoring_xpack_info';
import { initBulkUploader, registerCollectors } from './kibana_monitoring';
import { registerMonitoringCollection } from './telemetry_collection';
import { getLicenseExpiration } from './alerts/license_expiration';
import { parseElasticsearchConfig } from './es_client/parse_elasticsearch_config';
export class Plugin {
async setup(_coreSetup, pluginsSetup, __LEGACY) {
const {
plugins,
_kbnServer: kbnServer,
log,
logger,
getOSInfo,
_hapi: hapiServer,
events,
expose,
config: monitoringConfig,
injectUiAppVars,
} = __LEGACY;
const config = monitoringConfig();
const { usageCollection, licensing, alerting } = pluginsSetup;
registerMonitoringCollection();
/*
* Register collector objects for stats to show up in the APIs
*/
registerCollectors(usageCollection, {
elasticsearchPlugin: plugins.elasticsearch,
kbnServerConfig: kbnServer.config,
log,
config,
getOSInfo,
hapiServer,
});
/*
* Instantiate and start the internal background task that calls collector
* fetch methods and uploads to the ES monitoring bulk endpoint
*/
const xpackMainPlugin = plugins.xpack_main;
/*
* Parse the Elasticsearch config and read any certificates/keys if necessary
*/
const elasticsearchConfig = parseElasticsearchConfig(config);
// Create the dedicated client
const client = await instantiateClient({
log,
events,
elasticsearchConfig,
elasticsearchPlugin: plugins.elasticsearch,
});
xpackMainPlugin.status.once('green', async () => {
// first time xpack_main turns green
/*
* End-user-facing services
*/
const uiEnabled = config.get('monitoring.ui.enabled');
if (uiEnabled) {
await initMonitoringXpackInfo({
config,
server: hapiServer,
client,
log,
xpackMainPlugin: plugins.xpack_main,
expose,
}); // Route handlers depend on this for xpackInfo
await requireUIRoutes(__LEGACY);
}
});
xpackMainPlugin.registerFeature({
id: 'monitoring',
name: i18n.translate('xpack.monitoring.featureRegistry.monitoringFeatureName', {
defaultMessage: 'Stack Monitoring',
}),
icon: 'monitoringApp',
navLinkId: 'monitoring',
app: ['monitoring', 'kibana'],
catalogue: ['monitoring'],
privileges: {},
reserved: {
privilege: {
savedObject: {
all: [],
read: [],
},
ui: [],
},
description: i18n.translate('xpack.monitoring.feature.reserved.description', {
defaultMessage: 'To grant users access, you should also assign the monitoring_user role.',
}),
},
});
const bulkUploader = initBulkUploader({
elasticsearchPlugin: plugins.elasticsearch,
config,
log,
kbnServerStatus: kbnServer.status,
kbnServerVersion: kbnServer.version,
});
const kibanaCollectionEnabled = config.get('monitoring.kibana.collection.enabled');
if (kibanaCollectionEnabled) {
/*
* Bulk uploading of Kibana stats
*/
licensing.license$.subscribe(license => {
// use updated xpack license info to start/stop bulk upload
const mainMonitoring = license.getFeature('monitoring');
const monitoringBulkEnabled =
mainMonitoring && mainMonitoring.isAvailable && mainMonitoring.isEnabled;
if (monitoringBulkEnabled) {
bulkUploader.start(usageCollection);
} else {
bulkUploader.handleNotEnabled();
}
});
} else if (!kibanaCollectionEnabled) {
log(
['info', LOGGING_TAG, KIBANA_MONITORING_LOGGING_TAG],
'Internal collection for Kibana monitoring is disabled per configuration.'
);
}
injectUiAppVars('monitoring', () => {
return {
maxBucketSize: config.get('monitoring.ui.max_bucket_size'),
minIntervalSeconds: config.get('monitoring.ui.min_interval_seconds'),
kbnIndex: config.get('kibana.index'),
monitoringUiEnabled: config.get('monitoring.ui.enabled'),
showLicenseExpiration: config.get('monitoring.ui.show_license_expiration'),
showCgroupMetricsElasticsearch: config.get('monitoring.ui.container.elasticsearch.enabled'),
showCgroupMetricsLogstash: config.get('monitoring.ui.container.logstash.enabled'), // Note, not currently used, but see https://github.com/elastic/x-pack-kibana/issues/1559 part 2
};
});
if (KIBANA_ALERTING_ENABLED && alerting) {
// this is not ready right away but we need to register alerts right away
async function getMonitoringCluster() {
const configs = config.get('xpack.monitoring.elasticsearch');
if (configs.hosts) {
const monitoringCluster = plugins.elasticsearch.getCluster('monitoring');
const { username, password } = configs;
const fakeRequest = {
headers: {
authorization: `Basic ${Buffer.from(`${username}:${password}`).toString('base64')}`,
},
};
return {
callCluster: (...args) => monitoringCluster.callWithRequest(fakeRequest, ...args),
};
}
return null;
}
function getLogger(contexts) {
return logger.get('plugins', LOGGING_TAG, ...contexts);
}
alerting.registerType(
getLicenseExpiration(
hapiServer,
getMonitoringCluster,
getLogger,
config.get('xpack.monitoring.ccs.enabled')
)
);
}
}
}

View file

@ -1,21 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { telemetryCollectionManager } from '../../../../../../src/legacy/core_plugins/telemetry/server';
import { getAllStats } from './get_all_stats';
import { getClusterUuids } from './get_cluster_uuids';
import { getLicenses } from './get_licenses';
export function registerMonitoringCollection() {
telemetryCollectionManager.setCollection({
esCluster: 'monitoring',
title: 'monitoring',
priority: 2,
statsGetter: getAllStats,
clusterDetailsGetter: getClusterUuids,
licenseGetter: getLicenses,
});
}

View file

@ -5,8 +5,8 @@
*/
import { boomify } from 'boom';
import { get } from 'lodash';
import { KIBANA_SETTINGS_TYPE } from '../../../../../monitoring/common/constants';
import { getKibanaInfoForStats } from '../../../../../monitoring/server/kibana_monitoring/lib';
const getClusterUuid = async callCluster => {
const { cluster_uuid: uuid } = await callCluster('info', { filterPath: 'cluster_uuid' });
@ -32,11 +32,21 @@ export function settingsRoute(server, kbnServer) {
}
const uuid = await getClusterUuid(callCluster);
const kibana = getKibanaInfoForStats({
kbnServerStatus: kbnServer.status,
kbnServerVersion: kbnServer.version,
config: server.config(),
});
const snapshotRegex = /-snapshot/i;
const config = server.config();
const status = kbnServer.status.toJSON();
const kibana = {
uuid: config.get('server.uuid'),
name: config.get('server.name'),
index: config.get('kibana.index'),
host: config.get('server.host'),
port: config.get('server.port'),
locale: config.get('i18n.locale'),
transport_address: `${config.get('server.host')}:${config.get('server.port')}`,
version: kbnServer.version.replace(snapshotRegex, ''),
snapshot: snapshotRegex.test(kbnServer.version),
status: get(status, 'overall.state'),
};
return {
cluster_uuid: uuid,

View file

@ -5,10 +5,10 @@
*/
import { PluginInitializerContext } from 'src/core/server';
import { config, InfraConfig, InfraServerPlugin } from './plugin';
import { config, InfraConfig, InfraServerPlugin, InfraPluginSetup } from './plugin';
import { savedObjectMappings } from './saved_objects';
export { config, InfraConfig, savedObjectMappings };
export { config, InfraConfig, savedObjectMappings, InfraPluginSetup };
export function plugin(context: PluginInitializerContext) {
return new InfraServerPlugin(context);

View file

@ -0,0 +1,128 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import expect from '@kbn/expect';
import moment from 'moment';
import { formatTimestampToDuration } from '../format_timestamp_to_duration';
import { CALCULATE_DURATION_SINCE, CALCULATE_DURATION_UNTIL } from '../constants';
const testTime = moment('2010-05-01'); // pick a date where adding/subtracting 2 months formats roundly to '2 months 0 days'
const getTestTime = () => moment(testTime); // clones the obj so it's not mutated with .adds and .subtracts
/**
* Test the moment-duration-format template
*/
describe('formatTimestampToDuration', () => {
describe('format timestamp to duration - time since', () => {
it('should format timestamp to human-readable duration', () => {
// time inputs are a few "moments" extra from the time advertised by name
const fiftyNineSeconds = getTestTime().subtract(59, 'seconds');
expect(
formatTimestampToDuration(fiftyNineSeconds, CALCULATE_DURATION_SINCE, getTestTime())
).to.be('59 seconds');
const fiveMins = getTestTime()
.subtract(5, 'minutes')
.subtract(30, 'seconds');
expect(formatTimestampToDuration(fiveMins, CALCULATE_DURATION_SINCE, getTestTime())).to.be(
'6 mins'
);
const sixHours = getTestTime()
.subtract(6, 'hours')
.subtract(30, 'minutes');
expect(formatTimestampToDuration(sixHours, CALCULATE_DURATION_SINCE, getTestTime())).to.be(
'6 hrs 30 mins'
);
const sevenDays = getTestTime()
.subtract(7, 'days')
.subtract(6, 'hours')
.subtract(18, 'minutes');
expect(formatTimestampToDuration(sevenDays, CALCULATE_DURATION_SINCE, getTestTime())).to.be(
'7 days 6 hrs 18 mins'
);
const eightWeeks = getTestTime()
.subtract(8, 'weeks')
.subtract(7, 'days')
.subtract(6, 'hours')
.subtract(18, 'minutes');
expect(formatTimestampToDuration(eightWeeks, CALCULATE_DURATION_SINCE, getTestTime())).to.be(
'2 months 2 days'
);
const oneHour = getTestTime().subtract(1, 'hour'); // should trim 0 min
expect(formatTimestampToDuration(oneHour, CALCULATE_DURATION_SINCE, getTestTime())).to.be(
'1 hr'
);
const oneDay = getTestTime().subtract(1, 'day'); // should trim 0 hrs
expect(formatTimestampToDuration(oneDay, CALCULATE_DURATION_SINCE, getTestTime())).to.be(
'1 day'
);
const twoMonths = getTestTime().subtract(2, 'month'); // should trim 0 days
expect(formatTimestampToDuration(twoMonths, CALCULATE_DURATION_SINCE, getTestTime())).to.be(
'2 months'
);
});
});
describe('format timestamp to duration - time until', () => {
it('should format timestamp to human-readable duration', () => {
// time inputs are a few "moments" extra from the time advertised by name
const fiftyNineSeconds = getTestTime().add(59, 'seconds');
expect(
formatTimestampToDuration(fiftyNineSeconds, CALCULATE_DURATION_UNTIL, getTestTime())
).to.be('59 seconds');
const fiveMins = getTestTime().add(10, 'minutes');
expect(formatTimestampToDuration(fiveMins, CALCULATE_DURATION_UNTIL, getTestTime())).to.be(
'10 mins'
);
const sixHours = getTestTime()
.add(6, 'hours')
.add(30, 'minutes');
expect(formatTimestampToDuration(sixHours, CALCULATE_DURATION_UNTIL, getTestTime())).to.be(
'6 hrs 30 mins'
);
const sevenDays = getTestTime()
.add(7, 'days')
.add(6, 'hours')
.add(18, 'minutes');
expect(formatTimestampToDuration(sevenDays, CALCULATE_DURATION_UNTIL, getTestTime())).to.be(
'7 days 6 hrs 18 mins'
);
const eightWeeks = getTestTime()
.add(8, 'weeks')
.add(7, 'days')
.add(6, 'hours')
.add(18, 'minutes');
expect(formatTimestampToDuration(eightWeeks, CALCULATE_DURATION_UNTIL, getTestTime())).to.be(
'2 months 2 days'
);
const oneHour = getTestTime().add(1, 'hour'); // should trim 0 min
expect(formatTimestampToDuration(oneHour, CALCULATE_DURATION_UNTIL, getTestTime())).to.be(
'1 hr'
);
const oneDay = getTestTime().add(1, 'day'); // should trim 0 hrs
expect(formatTimestampToDuration(oneDay, CALCULATE_DURATION_UNTIL, getTestTime())).to.be(
'1 day'
);
const twoMonths = getTestTime().add(2, 'month'); // should trim 0 days
expect(formatTimestampToDuration(twoMonths, CALCULATE_DURATION_UNTIL, getTestTime())).to.be(
'2 months'
);
});
});
});

View file

@ -0,0 +1,70 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
export enum Status {
Canceled,
Failed,
Resolved,
Awaiting,
Idle,
}
/**
* Simple [PromiseWithCancel] factory
*/
export class PromiseWithCancel {
private _promise: Promise<any>;
private _status: Status = Status.Idle;
/**
* @param {Promise} promise Promise you want to cancel / track
*/
constructor(promise: Promise<any>) {
this._promise = promise;
}
/**
* Cancel the promise in any state
*/
public cancel = (): void => {
this._status = Status.Canceled;
};
/**
* @returns status based on [Status]
*/
public status = (): Status => {
return this._status;
};
/**
* @returns promise passed in [constructor]
* This sets the state to Status.Awaiting
*/
public promise = (): Promise<any> => {
if (this._status === Status.Canceled) {
throw Error('Getting a canceled promise is not allowed');
} else if (this._status !== Status.Idle) {
return this._promise;
}
return new Promise((resolve, reject) => {
this._status = Status.Awaiting;
return this._promise
.then(response => {
if (this._status !== Status.Canceled) {
this._status = Status.Resolved;
return resolve(response);
}
})
.catch(error => {
if (this._status !== Status.Canceled) {
this._status = Status.Failed;
return reject(error);
}
});
});
};
}
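
As a side note, here is a minimal usage sketch of the wrapper above, using the exports it defines; fetchClusterStats and the logging are illustrative placeholders, not part of this change.

// Illustrative only: a stand-in for any async call you may want to make cancelable.
const fetchClusterStats = (): Promise<{ nodes: number }> => Promise.resolve({ nodes: 3 });

const request = new PromiseWithCancel(fetchClusterStats());

// Awaiting the wrapped promise moves the wrapper into Status.Awaiting.
request
  .promise()
  .then(stats => console.log('got stats', stats))
  .catch(error => console.error('request failed', error));

// If the caller goes away (e.g. a poll is superseded), cancel the wrapper.
// A canceled wrapper never resolves or rejects the promise returned above,
// and calling promise() again would throw.
request.cancel();
console.log(request.status() === Status.Canceled); // true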

View file

@ -0,0 +1,264 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
/**
* Helper string to add as a tag in every logging call
*/
export const LOGGING_TAG = 'monitoring';
/**
* Helper string to add as a tag in every logging call related to Kibana monitoring
*/
export const KIBANA_MONITORING_LOGGING_TAG = 'kibana-monitoring';
/**
* The Monitoring API version is the expected API format that we export and expect to import.
* @type {string}
*/
export const MONITORING_SYSTEM_API_VERSION = '7';
/**
* The type name used within the Monitoring index to publish Kibana ops stats.
* @type {string}
*/
export const KIBANA_STATS_TYPE_MONITORING = 'kibana_stats'; // similar to KIBANA_STATS_TYPE but rolled up into 10s stats from 5s intervals through ops_buffer
/**
* The type name used within the Monitoring index to publish Kibana stats.
* @type {string}
*/
export const KIBANA_SETTINGS_TYPE = 'kibana_settings';
/**
* The type name used within the Monitoring index to publish Kibana usage stats.
* NOTE: this string shows as-is in the stats API as a field name for the kibana usage stats
* @type {string}
*/
export const KIBANA_USAGE_TYPE = 'kibana';
/*
* Key for the localStorage service
*/
export const STORAGE_KEY = 'xpack.monitoring.data';
/**
* Units for derivative metric values
*/
export const NORMALIZED_DERIVATIVE_UNIT = '1s';
/*
* Values for column sorting in table options
* @type {string|number} 'asc'/'desc' for EUI tables, 1 or -1 otherwise
*/
export const EUI_SORT_ASCENDING = 'asc';
export const EUI_SORT_DESCENDING = 'desc';
export const SORT_ASCENDING = 1;
export const SORT_DESCENDING = -1;
/*
* Chart colors
* @type {string}
*/
export const CHART_LINE_COLOR = '#d2d2d2';
export const CHART_TEXT_COLOR = '#9c9c9c';
/*
* Number of cluster alerts to show on overview page
* @type {number}
*/
export const CLUSTER_ALERTS_SEARCH_SIZE = 3;
/*
* Format for moment-duration-format timestamp-to-duration template if the time diffs are gte 1 month
* @type {string}
*/
export const FORMAT_DURATION_TEMPLATE_LONG = 'M [months] d [days]';
/*
* Format for moment-duration-format timestamp-to-duration template if the time diffs are lt 1 month but gt 1 minute
* @type {string}
*/
export const FORMAT_DURATION_TEMPLATE_SHORT = ' d [days] h [hrs] m [min]';
/*
* Format for moment-duration-format timestamp-to-duration template if the time diffs are lt 1 minute
* @type {string}
*/
export const FORMAT_DURATION_TEMPLATE_TINY = ' s [seconds]';
/*
* Simple unique values for Timestamp to duration flags. These are used for
* determining if calculation should be formatted as "time until" (now to
* timestamp) or "time since" (timestamp to now)
*/
export const CALCULATE_DURATION_SINCE = 'since';
export const CALCULATE_DURATION_UNTIL = 'until';
/**
* In order to show ML Jobs tab in the Elasticsearch section / tab navigation, license must be supported
*/
export const ML_SUPPORTED_LICENSES = ['trial', 'platinum', 'enterprise'];
/**
* Metadata service URLs for the cloud services that expose a constant URL (unlike GCP, which only has a constant URL prefix).
*
* @type {Object}
*/
export const CLOUD_METADATA_SERVICES = {
// We explicitly call out the version, 2016-09-02, rather than 'latest' to avoid unexpected changes
AWS_URL: 'http://169.254.169.254/2016-09-02/dynamic/instance-identity/document',
// 2017-04-02 is the first GA release of this API
AZURE_URL: 'http://169.254.169.254/metadata/instance?api-version=2017-04-02',
// GCP documentation shows both 'metadata.google.internal' (mostly) and '169.254.169.254' (sometimes)
// To bypass potential DNS changes, the IP was used because it's shared with other cloud services
GCP_URL_PREFIX: 'http://169.254.169.254/computeMetadata/v1/instance',
};
/**
* Constants used by Logstash monitoring code
*/
export const LOGSTASH = {
MAJOR_VER_REQD_FOR_PIPELINES: 6,
/*
* Names of the ES keys for the different Logstash pipeline queue types.
* @type {string}
*/
QUEUE_TYPES: {
MEMORY: 'memory',
PERSISTED: 'persisted',
},
};
export const DEBOUNCE_SLOW_MS = 17; // roughly how long it takes to render a frame at 60fps
export const DEBOUNCE_FAST_MS = 10; // roughly how long it takes to render a frame at 100fps
/**
* Configuration key for setting the email address used for cluster alert notifications.
*/
export const CLUSTER_ALERTS_ADDRESS_CONFIG_KEY = 'cluster_alerts.email_notifications.email_address';
export const STANDALONE_CLUSTER_CLUSTER_UUID = '__standalone_cluster__';
export const INDEX_PATTERN = '.monitoring-*-6-*,.monitoring-*-7-*';
export const INDEX_PATTERN_KIBANA = '.monitoring-kibana-6-*,.monitoring-kibana-7-*';
export const INDEX_PATTERN_LOGSTASH = '.monitoring-logstash-6-*,.monitoring-logstash-7-*';
export const INDEX_PATTERN_BEATS = '.monitoring-beats-6-*,.monitoring-beats-7-*';
export const INDEX_ALERTS = '.monitoring-alerts-6,.monitoring-alerts-7';
export const INDEX_PATTERN_ELASTICSEARCH = '.monitoring-es-6-*,.monitoring-es-7-*';
// This is the unique token that exists in monitoring indices collected by metricbeat
export const METRICBEAT_INDEX_NAME_UNIQUE_TOKEN = '-mb-';
// We use this for metricbeat migration to identify specific products that we do not have constants for
export const ELASTICSEARCH_SYSTEM_ID = 'elasticsearch';
/**
* The id of the infra source owned by the monitoring plugin.
*/
export const INFRA_SOURCE_ID = 'internal-stack-monitoring';
/*
* These constants represent code paths within `getClustersFromRequest`
* that an api call wants to invoke. This is meant as an optimization to
* avoid unnecessary ES queries (looking at you logstash) when the data
* is not used. In the long term, it'd be nice to have separate api calls
* instead of this path logic.
*/
export const CODE_PATH_ALL = 'all';
export const CODE_PATH_ALERTS = 'alerts';
export const CODE_PATH_KIBANA = 'kibana';
export const CODE_PATH_ELASTICSEARCH = 'elasticsearch';
export const CODE_PATH_ML = 'ml';
export const CODE_PATH_BEATS = 'beats';
export const CODE_PATH_LOGSTASH = 'logstash';
export const CODE_PATH_APM = 'apm';
export const CODE_PATH_LICENSE = 'license';
export const CODE_PATH_LOGS = 'logs';
/**
* The header sent by telemetry service when hitting Elasticsearch to identify query source
* @type {string}
*/
export const TELEMETRY_QUERY_SOURCE = 'TELEMETRY';
/**
* The name of the Kibana System ID used to publish and look up Kibana stats through the Monitoring system.
* @type {string}
*/
export const KIBANA_SYSTEM_ID = 'kibana';
/**
* The name of the Beats System ID used to publish and look up Beats stats through the Monitoring system.
* @type {string}
*/
export const BEATS_SYSTEM_ID = 'beats';
/**
* The name of the Apm System ID used to publish and look up Apm stats through the Monitoring system.
* @type {string}
*/
export const APM_SYSTEM_ID = 'apm';
/**
* The name of the Logstash System ID used to look up Logstash stats through the Monitoring system.
* @type {string}
*/
export const LOGSTASH_SYSTEM_ID = 'logstash';
/**
* The name of the Reporting System ID used to look up Reporting stats through the Monitoring system.
* @type {string}
*/
export const REPORTING_SYSTEM_ID = 'reporting';
/**
* The amount of time, in milliseconds, to wait between collecting kibana stats from es.
*
* Currently 24 hours, kept in sync with the reporting interval.
* @type {Number}
*/
export const TELEMETRY_COLLECTION_INTERVAL = 86400000;
/**
* We want to slowly roll out the migration from watcher-based cluster alerts to
* Kibana alerts, and we only want to enable the Kibana alerts once all
* watcher-based cluster alerts have been migrated. This flag is therefore the
* only way to see the new UI and actually run Kibana alerts. It will remain
* false until all alerts have been migrated, and then it will be removed.
*/
export const KIBANA_ALERTING_ENABLED = false;
/**
* The prefix for all alert types used by monitoring
*/
export const ALERT_TYPE_PREFIX = 'monitoring_';
/**
* This is the alert type id for the license expiration alert
*/
export const ALERT_TYPE_LICENSE_EXPIRATION = `${ALERT_TYPE_PREFIX}alert_type_license_expiration`;
/**
* A listing of all alert types
*/
export const ALERT_TYPES = [ALERT_TYPE_LICENSE_EXPIRATION];
/**
* Matches the id of the built-in email action type
* See x-pack/legacy/plugins/actions/server/builtin_action_types/email.ts
*/
export const ALERT_ACTION_TYPE_EMAIL = '.email';
/**
* The number of alerts that have been migrated
*/
export const NUMBER_OF_MIGRATED_ALERTS = 1;
/**
* The advanced settings config name for the email address
*/
export const MONITORING_CONFIG_ALERTING_EMAIL_ADDRESS = 'monitoring:alertingEmailAddress';
export const ALERT_EMAIL_SERVICES = ['gmail', 'hotmail', 'icloud', 'outlook365', 'ses', 'yahoo'];

View file

@ -0,0 +1,54 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import moment from 'moment';
import 'moment-duration-format';
import {
FORMAT_DURATION_TEMPLATE_TINY,
FORMAT_DURATION_TEMPLATE_SHORT,
FORMAT_DURATION_TEMPLATE_LONG,
CALCULATE_DURATION_SINCE,
CALCULATE_DURATION_UNTIL,
} from './constants';
/*
* Formats a timestamp string
* @param timestamp: ISO time string
* @param calculationFlag: control "since" or "until" logic
* @param initialTime {Object} moment object (not required)
* @return string
*/
export function formatTimestampToDuration(timestamp, calculationFlag, initialTime) {
initialTime = initialTime || moment();
let timeDuration;
if (calculationFlag === CALCULATE_DURATION_SINCE) {
timeDuration = moment.duration(initialTime - moment(timestamp)); // since: now - timestamp
} else if (calculationFlag === CALCULATE_DURATION_UNTIL) {
timeDuration = moment.duration(moment(timestamp) - initialTime); // until: timestamp - now
} else {
throw new Error(
'[formatTimestampToDuration] requires a [calculationFlag] parameter to specify format as "since" or "until" the given time.'
);
}
// See https://github.com/elastic/x-pack-kibana/issues/3554
let duration;
if (Math.abs(initialTime.diff(timestamp, 'months')) >= 1) {
// time diff is greater than 1 month, show months / days
duration = moment.duration(timeDuration).format(FORMAT_DURATION_TEMPLATE_LONG);
} else if (Math.abs(initialTime.diff(timestamp, 'minutes')) >= 1) {
// time diff is less than 1 month but greater than a minute, show days / hours / minutes
duration = moment.duration(timeDuration).format(FORMAT_DURATION_TEMPLATE_SHORT);
} else {
// time diff is less than a minute, show seconds
duration = moment.duration(timeDuration).format(FORMAT_DURATION_TEMPLATE_TINY);
}
return duration
.replace(/ 0 mins$/, '')
.replace(/ 0 hrs$/, '')
.replace(/ 0 days$/, ''); // See https://github.com/jsmreese/moment-duration-format/issues/64
}
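
As a quick illustration, this is how the helper above behaves for the two calculation flags; the expected strings mirror the test expectations earlier in this diff, and the relative import paths match the exports shown elsewhere in this change.

import moment from 'moment';
import { formatTimestampToDuration } from './format_timestamp_to_duration';
import { CALCULATE_DURATION_SINCE, CALCULATE_DURATION_UNTIL } from './constants';

// "since": how long ago the timestamp was (now - timestamp)
formatTimestampToDuration(
  moment().subtract(6, 'hours').subtract(30, 'minutes'),
  CALCULATE_DURATION_SINCE
); // => '6 hrs 30 mins'

// "until": how far away the timestamp is (timestamp - now), e.g. a license expiration
formatTimestampToDuration(
  moment().add(7, 'days').add(6, 'hours').add(18, 'minutes'),
  CALCULATE_DURATION_UNTIL
); // => '7 days 6 hrs 18 mins'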

View file

@ -0,0 +1,33 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import moment from 'moment-timezone';
export const LARGE_FLOAT = '0,0.[00]';
export const SMALL_FLOAT = '0.[00]';
export const LARGE_BYTES = '0,0.0 b';
export const SMALL_BYTES = '0.0 b';
export const LARGE_ABBREVIATED = '0,0.[0]a';
/**
* Format the {@code date} in the user's expected date/time format using their <em>guessed</em> local time zone.
* @param date Either a numeric Unix timestamp or a {@code Date} object
* @param useUTC Set to true to format the date in UTC instead of the guessed local time zone
* @returns The date formatted using 'LL LTS'
*/
export function formatDateTimeLocal(date, useUTC = false) {
return useUTC
? moment.utc(date).format('LL LTS')
: moment.tz(date, moment.tz.guess()).format('LL LTS');
}
/**
* Shorten a Logstash Pipeline's hash for display purposes
* @param {string} hash The complete hash
* @return {string} The shortened hash
*/
export function shortenPipelineHash(hash) {
return hash.substr(0, 6);
}
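
For reference, a small hypothetical sketch of the two display helpers above; the timestamp and pipeline hash are made-up values.

// Format a timestamp for display ('LL LTS', e.g. "March 20, 2020 2:02:15 PM", locale-dependent).
formatDateTimeLocal(1584727335000);       // rendered in the guessed local time zone
formatDateTimeLocal(1584727335000, true); // same instant rendered in UTC

// Shorten a (made-up) Logstash pipeline hash for display.
shortenPipelineHash('a1b2c3d4e5f6a7b8'); // => 'a1b2c3'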

View file

@ -4,4 +4,4 @@
* you may not use this file except in compliance with the Elastic License.
*/
export { opsBuffer } from './ops_buffer';
export { formatTimestampToDuration } from './format_timestamp_to_duration';

View file

@ -0,0 +1,10 @@
{
"id": "monitoring",
"version": "8.0.0",
"kibanaVersion": "kibana",
"configPath": ["monitoring"],
"requiredPlugins": ["usageCollection", "licensing", "features"],
"optionalPlugins": ["alerting", "actions", "infra"],
"server": true,
"ui": false
}

View file

@ -11,11 +11,13 @@ import sinon from 'sinon';
describe('monitoring plugin deprecations', function() {
let transformDeprecations;
const rename = sinon.stub().returns(() => {});
const fromPath = 'monitoring';
before(function() {
const deprecations = deprecationsModule();
transformDeprecations = (settings, log = noop) => {
deprecations.forEach(deprecation => deprecation(settings, log));
const deprecations = deprecationsModule({ rename });
transformDeprecations = (settings, fromPath, log = noop) => {
deprecations.forEach(deprecation => deprecation(settings, fromPath, log));
};
});
@ -30,7 +32,7 @@ describe('monitoring plugin deprecations', function() {
};
const log = sinon.spy();
transformDeprecations(settings, log);
transformDeprecations(settings, fromPath, log);
expect(log.called).to.be(false);
});
@ -45,7 +47,7 @@ describe('monitoring plugin deprecations', function() {
};
const log = sinon.spy();
transformDeprecations(settings, log);
transformDeprecations(settings, fromPath, log);
expect(log.called).to.be(false);
});
@ -61,7 +63,7 @@ describe('monitoring plugin deprecations', function() {
};
const log = sinon.spy();
transformDeprecations(settings, log);
transformDeprecations(settings, fromPath, log);
expect(log.called).to.be(false);
});
@ -76,7 +78,7 @@ describe('monitoring plugin deprecations', function() {
};
const log = sinon.spy();
transformDeprecations(settings, log);
transformDeprecations(settings, fromPath, log);
expect(log.called).to.be(true);
});
});
@ -86,7 +88,7 @@ describe('monitoring plugin deprecations', function() {
const settings = { elasticsearch: { username: 'elastic' } };
const log = sinon.spy();
transformDeprecations(settings, log);
transformDeprecations(settings, fromPath, log);
expect(log.called).to.be(true);
});
@ -94,7 +96,7 @@ describe('monitoring plugin deprecations', function() {
const settings = { elasticsearch: { username: 'otheruser' } };
const log = sinon.spy();
transformDeprecations(settings, log);
transformDeprecations(settings, fromPath, log);
expect(log.called).to.be(false);
});
@ -102,7 +104,7 @@ describe('monitoring plugin deprecations', function() {
const settings = { elasticsearch: { username: undefined } };
const log = sinon.spy();
transformDeprecations(settings, log);
transformDeprecations(settings, fromPath, log);
expect(log.called).to.be(false);
});
@ -110,7 +112,7 @@ describe('monitoring plugin deprecations', function() {
const settings = { elasticsearch: { ssl: { key: '' } } };
const log = sinon.spy();
transformDeprecations(settings, log);
transformDeprecations(settings, fromPath, log);
expect(log.called).to.be(true);
});
@ -118,7 +120,7 @@ describe('monitoring plugin deprecations', function() {
const settings = { elasticsearch: { ssl: { certificate: '' } } };
const log = sinon.spy();
transformDeprecations(settings, log);
transformDeprecations(settings, fromPath, log);
expect(log.called).to.be(true);
});
@ -126,8 +128,17 @@ describe('monitoring plugin deprecations', function() {
const settings = { elasticsearch: { ssl: { key: '', certificate: '' } } };
const log = sinon.spy();
transformDeprecations(settings, log);
transformDeprecations(settings, fromPath, log);
expect(log.called).to.be(false);
});
});
describe('xpack_api_polling_frequency_millis', () => {
it('should call rename for this renamed config key', () => {
const settings = { xpack_api_polling_frequency_millis: 30000 };
const log = sinon.spy();
transformDeprecations(settings, fromPath, log);
expect(rename.called).to.be(true);
});
});
});

View file

@ -11,7 +11,7 @@ import {
MONITORING_CONFIG_ALERTING_EMAIL_ADDRESS,
} from '../../common/constants';
import { Logger } from 'src/core/server';
import { AlertServices, AlertInstance } from '../../../../../plugins/alerting/server';
import { AlertServices, AlertInstance } from '../../../alerting/server';
import { savedObjectsClientMock } from 'src/core/server/mocks';
import {
AlertState,
@ -69,18 +69,12 @@ const alertExecutorOptions: LicenseExpirationAlertExecutorOptions = {
describe('getLicenseExpiration', () => {
const emailAddress = 'foo@foo.com';
const server: any = {
newPlatform: {
__internals: {
uiSettings: {
asScopedToClient: (): any => ({
get: () => new Promise(resolve => resolve(emailAddress)),
}),
},
},
},
};
const getMonitoringCluster: () => void = jest.fn();
const getUiSettingsService: any = () => ({
asScopedToClient: (): any => ({
get: () => new Promise(resolve => resolve(emailAddress)),
}),
});
const monitoringCluster: any = null;
const logger: Logger = {
warn: jest.fn(),
log: jest.fn(),
@ -99,13 +93,23 @@ describe('getLicenseExpiration', () => {
});
it('should have the right id and actionGroups', () => {
const alert = getLicenseExpiration(server, getMonitoringCluster, getLogger, ccrEnabled);
const alert = getLicenseExpiration(
getUiSettingsService,
monitoringCluster,
getLogger,
ccrEnabled
);
expect(alert.id).toBe(ALERT_TYPE_LICENSE_EXPIRATION);
expect(alert.actionGroups).toEqual([{ id: 'default', name: 'Default' }]);
});
it('should return the state if no license is provided', async () => {
const alert = getLicenseExpiration(server, getMonitoringCluster, getLogger, ccrEnabled);
const alert = getLicenseExpiration(
getUiSettingsService,
monitoringCluster,
getLogger,
ccrEnabled
);
const services: MockServices | AlertServices = {
callCluster: jest.fn(),
@ -125,18 +129,17 @@ describe('getLicenseExpiration', () => {
});
it('should log a warning if no email is provided', async () => {
const customServer: any = {
newPlatform: {
__internals: {
uiSettings: {
asScopedToClient: () => ({
get: () => null,
}),
},
},
},
};
const alert = getLicenseExpiration(customServer, getMonitoringCluster, getLogger, ccrEnabled);
const customGetUiSettingsService: any = () => ({
asScopedToClient: () => ({
get: () => null,
}),
});
const alert = getLicenseExpiration(
customGetUiSettingsService,
monitoringCluster,
getLogger,
ccrEnabled
);
const services = {
callCluster: jest.fn(
@ -186,7 +189,12 @@ describe('getLicenseExpiration', () => {
}
);
const alert = getLicenseExpiration(server, getMonitoringCluster, getLogger, ccrEnabled);
const alert = getLicenseExpiration(
getUiSettingsService,
monitoringCluster,
getLogger,
ccrEnabled
);
const savedObjectsClient = savedObjectsClientMock.create();
savedObjectsClient.get.mockReturnValue(
@ -256,7 +264,12 @@ describe('getLicenseExpiration', () => {
return instance;
}
);
const alert = getLicenseExpiration(server, getMonitoringCluster, getLogger, ccrEnabled);
const alert = getLicenseExpiration(
getUiSettingsService,
monitoringCluster,
getLogger,
ccrEnabled
);
const savedObjectsClient = savedObjectsClientMock.create();
savedObjectsClient.get.mockReturnValue(
@ -332,7 +345,12 @@ describe('getLicenseExpiration', () => {
return instance;
}
);
const alert = getLicenseExpiration(server, getMonitoringCluster, getLogger, ccrEnabled);
const alert = getLicenseExpiration(
getUiSettingsService,
monitoringCluster,
getLogger,
ccrEnabled
);
const savedObjectsClient = savedObjectsClientMock.create();
savedObjectsClient.get.mockReturnValue(
@ -396,7 +414,12 @@ describe('getLicenseExpiration', () => {
return instance;
}
);
const alert = getLicenseExpiration(server, getMonitoringCluster, getLogger, ccrEnabled);
const alert = getLicenseExpiration(
getUiSettingsService,
monitoringCluster,
getLogger,
ccrEnabled
);
const savedObjectsClient = savedObjectsClientMock.create();
savedObjectsClient.get.mockReturnValue(

View file

@ -6,11 +6,10 @@
import moment from 'moment-timezone';
import { get } from 'lodash';
import { Legacy } from 'kibana';
import { Logger } from 'src/core/server';
import { Logger, ICustomClusterClient, UiSettingsServiceStart } from 'src/core/server';
import { i18n } from '@kbn/i18n';
import { ALERT_TYPE_LICENSE_EXPIRATION, INDEX_PATTERN_ELASTICSEARCH } from '../../common/constants';
import { AlertType } from '../../../../../plugins/alerting/server';
import { AlertType } from '../../../../plugins/alerting/server';
import { fetchLicenses } from '../lib/alerts/fetch_licenses';
import { fetchDefaultEmailAddress } from '../lib/alerts/fetch_default_email_address';
import { fetchClusters } from '../lib/alerts/fetch_clusters';
@ -28,21 +27,20 @@ import { executeActions, getUiMessage } from '../lib/alerts/license_expiration.l
const EXPIRES_DAYS = [60, 30, 14, 7];
export const getLicenseExpiration = (
server: Legacy.Server,
getMonitoringCluster: any,
getLogger: (contexts: string[]) => Logger,
getUiSettingsService: () => Promise<UiSettingsServiceStart>,
monitoringCluster: ICustomClusterClient,
getLogger: (...scopes: string[]) => Logger,
ccsEnabled: boolean
): AlertType => {
async function getCallCluster(services: any): Promise<any> {
const monitoringCluster = await getMonitoringCluster();
if (!monitoringCluster) {
return services.callCluster;
}
return monitoringCluster.callCluster;
return monitoringCluster.callAsInternalUser;
}
const logger = getLogger([ALERT_TYPE_LICENSE_EXPIRATION]);
const logger = getLogger(ALERT_TYPE_LICENSE_EXPIRATION);
return {
id: ALERT_TYPE_LICENSE_EXPIRATION,
name: 'Monitoring Alert - License Expiration',
@ -85,7 +83,7 @@ export const getLicenseExpiration = (
return state;
}
const uiSettings = server.newPlatform.__internals.uiSettings.asScopedToClient(
const uiSettings = (await getUiSettingsService()).asScopedToClient(
services.savedObjectsClient
);
const dateFormat: string = await uiSettings.get<string>('dateFormat');

View file

@ -4,7 +4,7 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { Moment } from 'moment';
import { AlertExecutorOptions } from '../../../../../plugins/alerting/server';
import { AlertExecutorOptions } from '../../../alerting/server';
export interface AlertLicense {
status: string;

View file

@ -97,6 +97,7 @@ describe('Alerts Cluster Search', () => {
expect(alerts[1]).to.eql({
metadata: {
severity: 0,
cluster_uuid: cluster.cluster_uuid,
link: 'https://www.elastic.co/guide/en/x-pack/6.1/ssl-tls.html',
},

View file

@ -1,10 +1,19 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import sinon from 'sinon';
export function createStubs(mockQueryResult, featureStub) {
const callWithRequestStub = sinon.stub().returns(Promise.resolve(mockQueryResult));
const getClusterStub = sinon.stub().returns({ callWithRequest: callWithRequestStub });
const configStub = sinon.stub().returns({
get: sinon.stub().withArgs('xpack.monitoring.cluster_alerts.enabled').returns(true)
get: sinon
.stub()
.withArgs('xpack.monitoring.cluster_alerts.enabled')
.returns(true),
});
return {
callWithRequestStub,
@ -14,14 +23,14 @@ export function createStubs(mockQueryResult, featureStub) {
plugins: {
monitoring: {
info: {
feature: featureStub
}
feature: featureStub,
},
},
elasticsearch: {
getCluster: getClusterStub
}
}
}
}
getCluster: getClusterStub,
},
},
},
},
};
};
}

View file

@ -22,11 +22,10 @@ export function verifyMonitoringLicense(server) {
if (config.get('monitoring.cluster_alerts.enabled')) {
const xpackInfo = get(server.plugins.monitoring, 'info');
if (xpackInfo) {
const monitoringCluster = xpackInfo.feature('monitoring').getLicenseCheckResults();
const watcherFeature = xpackInfo.getWatcherFeature();
return {
enabled: monitoringCluster.clusterAlerts.enabled,
message: monitoringCluster.message,
enabled: watcherFeature.isEnabled,
message: xpackInfo.getMessage(),
};
}

View file

@ -0,0 +1,230 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { schema, TypeOf } from '@kbn/config-schema';
const hostURISchema = schema.uri({ scheme: ['http', 'https'] });
const DEFAULT_API_VERSION = 'master';
export const configSchema = schema.object({
enabled: schema.boolean({ defaultValue: true }),
elasticsearch: schema.object({
logFetchCount: schema.number({ defaultValue: 10 }),
sniffOnStart: schema.boolean({ defaultValue: false }),
sniffInterval: schema.oneOf([schema.duration(), schema.literal(false)], {
defaultValue: false,
}),
sniffOnConnectionFault: schema.boolean({ defaultValue: false }),
hosts: schema.maybe(
schema.oneOf([hostURISchema, schema.arrayOf(hostURISchema, { minSize: 1 })])
),
preserveHost: schema.boolean({ defaultValue: true }),
username: schema.maybe(
schema.conditional(
schema.contextRef('dist'),
false,
schema.string({
validate: () => {},
}),
schema.string()
)
),
password: schema.maybe(schema.string()),
requestHeadersWhitelist: schema.oneOf([schema.string(), schema.arrayOf(schema.string())], {
defaultValue: ['authorization'],
}),
customHeaders: schema.recordOf(schema.string(), schema.string(), { defaultValue: {} }),
shardTimeout: schema.duration({ defaultValue: '30s' }),
requestTimeout: schema.duration({ defaultValue: '30s' }),
pingTimeout: schema.duration({ defaultValue: schema.siblingRef('requestTimeout') }),
startupTimeout: schema.duration({ defaultValue: '5s' }),
logQueries: schema.boolean({ defaultValue: false }),
ssl: schema.object(
{
verificationMode: schema.oneOf(
[schema.literal('none'), schema.literal('certificate'), schema.literal('full')],
{ defaultValue: 'full' }
),
certificateAuthorities: schema.maybe(
schema.oneOf([schema.string(), schema.arrayOf(schema.string(), { minSize: 1 })])
),
certificate: schema.maybe(schema.string()),
key: schema.maybe(schema.string()),
keyPassphrase: schema.maybe(schema.string()),
keystore: schema.object({
path: schema.maybe(schema.string()),
password: schema.maybe(schema.string()),
}),
truststore: schema.object({
path: schema.maybe(schema.string()),
password: schema.maybe(schema.string()),
}),
alwaysPresentCertificate: schema.boolean({ defaultValue: false }),
},
{
validate: rawConfig => {
if (rawConfig.key && rawConfig.keystore.path) {
return 'cannot use [key] when [keystore.path] is specified';
}
if (rawConfig.certificate && rawConfig.keystore.path) {
return 'cannot use [certificate] when [keystore.path] is specified';
}
},
}
),
apiVersion: schema.string({ defaultValue: DEFAULT_API_VERSION }),
healthCheck: schema.object({ delay: schema.duration({ defaultValue: 2500 }) }),
ignoreVersionMismatch: schema.conditional(
schema.contextRef('dev'),
false,
schema.boolean({
validate: rawValue => {
if (rawValue === true) {
return '"ignoreVersionMismatch" can only be set to true in development mode';
}
},
defaultValue: false,
}),
schema.boolean({ defaultValue: false })
),
}),
ui: schema.object({
enabled: schema.boolean({ defaultValue: true }),
ccs: schema.object({
enabled: schema.boolean({ defaultValue: true }),
}),
logs: schema.object({
index: schema.string({ defaultValue: 'filebeat-*' }),
}),
max_bucket_size: schema.number({ defaultValue: 10000 }),
elasticsearch: schema.object({
logFetchCount: schema.number({ defaultValue: 10 }),
sniffOnStart: schema.boolean({ defaultValue: false }),
sniffInterval: schema.oneOf([schema.duration(), schema.literal(false)], {
defaultValue: false,
}),
sniffOnConnectionFault: schema.boolean({ defaultValue: false }),
hosts: schema.maybe(
schema.oneOf([hostURISchema, schema.arrayOf(hostURISchema, { minSize: 1 })])
),
preserveHost: schema.boolean({ defaultValue: true }),
username: schema.maybe(
schema.conditional(
schema.contextRef('dist'),
false,
schema.string({
validate: rawConfig => {
if (rawConfig === 'elastic') {
return (
'value of "elastic" is forbidden. This is a superuser account that can obfuscate ' +
'privilege-related issues. You should use the "kibana" user instead.'
);
}
},
}),
schema.string()
)
),
password: schema.maybe(schema.string()),
requestHeadersWhitelist: schema.oneOf([schema.string(), schema.arrayOf(schema.string())], {
defaultValue: ['authorization'],
}),
customHeaders: schema.recordOf(schema.string(), schema.string(), { defaultValue: {} }),
shardTimeout: schema.duration({ defaultValue: '30s' }),
requestTimeout: schema.duration({ defaultValue: '30s' }),
pingTimeout: schema.duration({ defaultValue: schema.siblingRef('requestTimeout') }),
startupTimeout: schema.duration({ defaultValue: '5s' }),
logQueries: schema.boolean({ defaultValue: false }),
ssl: schema.object(
{
verificationMode: schema.oneOf(
[schema.literal('none'), schema.literal('certificate'), schema.literal('full')],
{ defaultValue: 'full' }
),
certificateAuthorities: schema.maybe(
schema.oneOf([schema.string(), schema.arrayOf(schema.string(), { minSize: 1 })])
),
certificate: schema.maybe(schema.string()),
key: schema.maybe(schema.string()),
keyPassphrase: schema.maybe(schema.string()),
keystore: schema.object({
path: schema.maybe(schema.string()),
password: schema.maybe(schema.string()),
}),
truststore: schema.object({
path: schema.maybe(schema.string()),
password: schema.maybe(schema.string()),
}),
alwaysPresentCertificate: schema.boolean({ defaultValue: false }),
},
{
validate: rawConfig => {
if (rawConfig.key && rawConfig.keystore.path) {
return 'cannot use [key] when [keystore.path] is specified';
}
if (rawConfig.certificate && rawConfig.keystore.path) {
return 'cannot use [certificate] when [keystore.path] is specified';
}
},
}
),
apiVersion: schema.string({ defaultValue: DEFAULT_API_VERSION }),
healthCheck: schema.object({ delay: schema.duration({ defaultValue: 2500 }) }),
ignoreVersionMismatch: schema.conditional(
schema.contextRef('dev'),
false,
schema.boolean({
validate: rawValue => {
if (rawValue === true) {
return '"ignoreVersionMismatch" can only be set to true in development mode';
}
},
defaultValue: false,
}),
schema.boolean({ defaultValue: false })
),
}),
container: schema.object({
elasticsearch: schema.object({
enabled: schema.boolean({ defaultValue: false }),
}),
logstash: schema.object({
enabled: schema.boolean({ defaultValue: false }),
}),
}),
min_interval_seconds: schema.number({ defaultValue: 10 }),
show_license_expiration: schema.boolean({ defaultValue: true }),
}),
kibana: schema.object({
collection: schema.object({
enabled: schema.boolean({ defaultValue: true }),
interval: schema.number({ defaultValue: 10000 }), // op status metrics get buffered at `ops.interval` and flushed to the bulk endpoint at this interval
}),
}),
cluster_alerts: schema.object({
enabled: schema.boolean({ defaultValue: true }),
email_notifications: schema.object({
enabled: schema.boolean({ defaultValue: true }),
email_address: schema.string({ defaultValue: '' }),
}),
}),
licensing: schema.object({
api_polling_frequency: schema.duration({
defaultValue: '30s',
}),
}),
agent: schema.object({
interval: schema.string({ defaultValue: '10s' }),
// TODO: NP
// .regex(/[\d\.]+[yMwdhms]/)
}),
tests: schema.object({
cloud_detector: schema.object({
enabled: schema.boolean({ defaultValue: true }),
}),
}),
});
export type MonitoringConfig = TypeOf<typeof configSchema>;
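
To make the shape above concrete, a minimal sketch of validating a config against this schema; it assumes @kbn/config-schema fills in nested defaults for omitted keys, and the example host URL is illustrative.

import { configSchema } from './config';

// Materialize the declared defaults (assumes nested defaults apply when keys are omitted).
const config = configSchema.validate({});

config.ui.ccs.enabled;                                     // true
config.ui.max_bucket_size;                                 // 10000
config.kibana.collection.interval;                         // 10000 (ms)
config.cluster_alerts.email_notifications.email_address;   // ''
config.licensing.api_polling_frequency.asMilliseconds();   // 30000

// Overrides are validated against the same schema, e.g. pointing the UI at a
// dedicated monitoring cluster (host URL is illustrative):
configSchema.validate({
  ui: { elasticsearch: { hosts: ['http://monitoring-cluster.test:9200'] } },
});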

View file

@ -5,7 +5,8 @@
*/
import { get } from 'lodash';
import { CLUSTER_ALERTS_ADDRESS_CONFIG_KEY, KIBANA_ALERTING_ENABLED } from './common/constants';
import { ConfigDeprecationFactory, ConfigDeprecation } from 'kibana/server';
import { CLUSTER_ALERTS_ADDRESS_CONFIG_KEY } from '../common/constants';
/**
* Re-writes deprecated user-defined config settings and logs warnings as a
@ -15,53 +16,45 @@ import { CLUSTER_ALERTS_ADDRESS_CONFIG_KEY, KIBANA_ALERTING_ENABLED } from './co
* major version!
* @return {Array} array of rename operations and callback function for rename logging
*/
export const deprecations = () => {
export const deprecations = ({ rename }: ConfigDeprecationFactory): ConfigDeprecation[] => {
return [
(settings, log) => {
const clusterAlertsEnabled = get(settings, 'cluster_alerts.enabled');
(config, fromPath, logger) => {
const clusterAlertsEnabled = get(config, 'cluster_alerts.enabled');
const emailNotificationsEnabled =
clusterAlertsEnabled && get(settings, 'cluster_alerts.email_notifications.enabled');
if (emailNotificationsEnabled) {
if (KIBANA_ALERTING_ENABLED) {
if (get(settings, CLUSTER_ALERTS_ADDRESS_CONFIG_KEY)) {
log(
`Config key "${CLUSTER_ALERTS_ADDRESS_CONFIG_KEY}" is deprecated. Please configure the email address through the Stack Monitoring UI instead.`
);
}
} else {
if (!get(settings, CLUSTER_ALERTS_ADDRESS_CONFIG_KEY)) {
log(
`Config key "${CLUSTER_ALERTS_ADDRESS_CONFIG_KEY}" will be required for email notifications to work in 7.0."`
);
}
}
clusterAlertsEnabled && get(config, 'cluster_alerts.email_notifications.enabled');
if (emailNotificationsEnabled && !get(config, CLUSTER_ALERTS_ADDRESS_CONFIG_KEY)) {
logger(
`Config key [${fromPath}.${CLUSTER_ALERTS_ADDRESS_CONFIG_KEY}] will be required for email notifications to work in 7.0.`
);
}
return config;
},
(settings, log) => {
const fromPath = 'monitoring.elasticsearch';
const es = get(settings, 'elasticsearch');
(config, fromPath, logger) => {
const es: Record<string, any> = get(config, 'elasticsearch');
if (es) {
if (es.username === 'elastic') {
log(
logger(
`Setting [${fromPath}.username] to "elastic" is deprecated. You should use the "kibana" user instead.`
);
}
}
return config;
},
(settings, log) => {
const fromPath = 'monitoring.elasticsearch.ssl';
const ssl = get(settings, 'elasticsearch.ssl');
(config, fromPath, logger) => {
const ssl: Record<string, any> = get(config, 'elasticsearch.ssl');
if (ssl) {
if (ssl.key !== undefined && ssl.certificate === undefined) {
log(
logger(
`Setting [${fromPath}.key] without [${fromPath}.certificate] is deprecated. This has no effect, you should use both settings to enable TLS client authentication to Elasticsearch.`
);
} else if (ssl.certificate !== undefined && ssl.key === undefined) {
log(
logger(
`Setting [${fromPath}.certificate] without [${fromPath}.key] is deprecated. This has no effect, you should use both settings to enable TLS client authentication to Elasticsearch.`
);
}
}
return config;
},
rename('xpack_api_polling_frequency_millis', 'licensing.api_polling_frequency'),
];
};
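
A sketch of exercising these handlers directly, in the same way the updated deprecation tests earlier in this diff do; the stubbed rename factory, logger, and sample settings are illustrative.

import { deprecations } from './deprecations';

// Stand-ins for what core normally provides (illustrative only).
const rename = (from: string, to: string) => (config: any) => {
  console.log(`would rename [monitoring.${from}] to [monitoring.${to}]`);
  return config;
};
const logger = (message: string) => console.warn(message);

const settings = {
  cluster_alerts: { enabled: true, email_notifications: { enabled: true } },
  elasticsearch: { username: 'elastic' },
  xpack_api_polling_frequency_millis: 30000,
};

// Each handler receives (config, fromPath, logger), mirroring the test setup above.
deprecations({ rename } as any).forEach(handler => handler(settings, 'monitoring', logger));
// Expected warnings: the missing cluster-alert email address, the "elastic" username,
// plus the rename of xpack_api_polling_frequency_millis to licensing.api_polling_frequency.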

View file

@ -0,0 +1,125 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import expect from '@kbn/expect';
import sinon from 'sinon';
import { instantiateClient, hasMonitoringCluster } from '../instantiate_client';
const server = {
monitoring: {
ui: {
elasticsearch: {
hosts: [],
username: 'monitoring-user-internal-test',
password: 'monitoring-p@ssw0rd!-internal-test',
ssl: {},
customHeaders: {
'x-custom-headers-test': 'connection-monitoring',
},
},
},
},
};
const serverWithUrl = {
monitoring: {
ui: {
elasticsearch: {
hosts: ['http://monitoring-cluster.test:9200'],
username: 'monitoring-user-internal-test',
password: 'monitoring-p@ssw0rd!-internal-test',
ssl: {},
customHeaders: {
'x-custom-headers-test': 'connection-monitoring',
},
},
},
},
};
const createClient = sinon.stub();
const log = { info: sinon.stub() };
describe('Instantiate Client', () => {
afterEach(() => {
createClient.resetHistory();
log.info.resetHistory();
});
describe('Logging', () => {
it('logs that the config was sourced from the production options', () => {
instantiateClient(server.monitoring.ui.elasticsearch, log, createClient);
expect(log.info.getCall(0).args).to.eql(['config sourced from: production cluster']);
});
it('logs that the config was sourced from the monitoring options', () => {
instantiateClient(serverWithUrl.monitoring.ui.elasticsearch, log, createClient);
expect(log.info.getCall(0).args).to.eql(['config sourced from: monitoring cluster']);
});
});
describe('Custom Headers Configuration', () => {
it('Does not add xpack.monitoring.elasticsearch.customHeaders if connected to production cluster', () => {
instantiateClient(server.monitoring.ui.elasticsearch, log, createClient);
const createClusterCall = createClient.getCall(0);
sinon.assert.calledOnce(createClient);
expect(createClusterCall.args[0]).to.be('monitoring');
expect(createClusterCall.args[1].customHeaders).to.eql(undefined);
});
it('Adds xpack.monitoring.elasticsearch.customHeaders if connected to monitoring cluster', () => {
instantiateClient(serverWithUrl.monitoring.ui.elasticsearch, log, createClient);
const createClusterCall = createClient.getCall(0);
sinon.assert.calledOnce(createClient);
expect(createClusterCall.args[0]).to.be('monitoring');
expect(createClusterCall.args[1].customHeaders).to.eql({
'x-custom-headers-test': 'connection-monitoring',
});
});
});
describe('Use a connection to production cluster', () => {
it('exposes an authenticated client using production host settings', () => {
instantiateClient(server.monitoring.ui.elasticsearch, log, createClient);
const createClusterCall = createClient.getCall(0);
const createClientOptions = createClusterCall.args[1];
sinon.assert.calledOnce(createClient);
expect(createClusterCall.args[0]).to.be('monitoring');
expect(createClientOptions.hosts).to.eql(undefined);
});
});
describe('Use a connection to monitoring cluster', () => {
it('exposes an authenticated client using monitoring host settings', () => {
instantiateClient(serverWithUrl.monitoring.ui.elasticsearch, log, createClient);
const createClusterCall = createClient.getCall(0);
const createClientOptions = createClusterCall.args[1];
sinon.assert.calledOnce(createClient);
expect(createClusterCall.args[0]).to.be('monitoring');
expect(createClientOptions.hosts[0]).to.eql('http://monitoring-cluster.test:9200');
expect(createClientOptions.username).to.eql('monitoring-user-internal-test');
expect(createClientOptions.password).to.eql('monitoring-p@ssw0rd!-internal-test');
});
});
describe('hasMonitoringCluster', () => {
it('returns true if monitoring is configured', () => {
expect(hasMonitoringCluster(serverWithUrl.monitoring.ui.elasticsearch)).to.be(true);
});
it('returns false if monitoring is not configured', () => {
expect(hasMonitoringCluster(server.monitoring.ui.elasticsearch)).to.be(false);
});
});
});

View file

@ -4,9 +4,10 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { bindKey, once } from 'lodash';
import { Logger, ElasticsearchClientConfig, ICustomClusterClient } from 'kibana/server';
// @ts-ignore
import { monitoringBulk } from '../kibana_monitoring/lib/monitoring_bulk';
import { LOGGING_TAG } from '../../common/constants';
import { MonitoringElasticsearchConfig } from '../types';
/* Provide a dedicated Elasticsearch client for Monitoring
* The connection options can be customized for the Monitoring application
@ -14,22 +15,26 @@ import { LOGGING_TAG } from '../../common/constants';
* Kibana itself is connected to a production cluster.
*/
export function exposeClient({ elasticsearchConfig, events, log, elasticsearchPlugin }) {
export function instantiateClient(
elasticsearchConfig: any,
log: Logger,
createClient: (
type: string,
clientConfig?: Partial<ElasticsearchClientConfig>
) => ICustomClusterClient
) {
const isMonitoringCluster = hasMonitoringCluster(elasticsearchConfig);
const cluster = elasticsearchPlugin.createCluster('monitoring', {
const cluster = createClient('monitoring', {
...(isMonitoringCluster ? elasticsearchConfig : {}),
plugins: [monitoringBulk],
logQueries: Boolean(elasticsearchConfig.logQueries),
});
events.on('stop', bindKey(cluster, 'close'));
const configSource = isMonitoringCluster ? 'monitoring' : 'production';
log([LOGGING_TAG, 'es-client'], `config sourced from: ${configSource} cluster`);
log.info(`config sourced from: ${configSource} cluster`);
return cluster;
}
export function hasMonitoringCluster(config) {
return Boolean(config.hosts && config.hosts.length);
export function hasMonitoringCluster(config: MonitoringElasticsearchConfig) {
return Boolean(config.hosts && config.hosts[0]);
}
export const instantiateClient = once(exposeClient);

View file

@ -0,0 +1,17 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { TypeOf } from '@kbn/config-schema';
import { PluginInitializerContext, PluginConfigDescriptor } from '../../../../src/core/server';
import { Plugin } from './plugin';
import { configSchema } from './config';
import { deprecations } from './deprecations';
export const plugin = (initContext: PluginInitializerContext) => new Plugin(initContext);
export const config: PluginConfigDescriptor<TypeOf<typeof configSchema>> = {
schema: configSchema,
deprecations,
};

View file

@ -5,20 +5,12 @@
*/
import { defaultsDeep, uniq, compact, get } from 'lodash';
import { callClusterFactory } from '../../../xpack_main';
import {
LOGGING_TAG,
KIBANA_MONITORING_LOGGING_TAG,
TELEMETRY_COLLECTION_INTERVAL,
} from '../../common/constants';
import { TELEMETRY_COLLECTION_INTERVAL } from '../../common/constants';
import { sendBulkPayload, monitoringBulk, getKibanaInfoForStats } from './lib';
import { parseElasticsearchConfig } from '../es_client/parse_elasticsearch_config';
import { sendBulkPayload, monitoringBulk } from './lib';
import { hasMonitoringCluster } from '../es_client/instantiate_client';
const LOGGING_TAGS = [LOGGING_TAG, KIBANA_MONITORING_LOGGING_TAG];
/*
* Handles internal Kibana stats collection and uploading data to Monitoring
* bulk endpoint.
@ -36,7 +28,7 @@ const LOGGING_TAGS = [LOGGING_TAG, KIBANA_MONITORING_LOGGING_TAG];
* @param {Object} xpackInfo server.plugins.xpack_main.info object
*/
export class BulkUploader {
constructor({ config, log, interval, elasticsearchPlugin, kbnServerStatus, kbnServerVersion }) {
constructor({ config, log, interval, elasticsearch, kibanaStats }) {
if (typeof interval !== 'number') {
throw new Error('interval number of milliseconds is required');
}
@ -53,39 +45,27 @@ export class BulkUploader {
// Limit sending and fetching usage to once per day once usage is successfully stored
// into the monitoring indices.
this._usageInterval = TELEMETRY_COLLECTION_INTERVAL;
this._log = log;
this._log = {
debug: message => log(['debug', ...LOGGING_TAGS], message),
info: message => log(['info', ...LOGGING_TAGS], message),
warn: message => log(['warning', ...LOGGING_TAGS], message),
};
this._cluster = elasticsearchPlugin.createCluster('admin', {
this._cluster = elasticsearch.createClient('admin', {
plugins: [monitoringBulk],
});
const directConfig = parseElasticsearchConfig(config, 'monitoring.elasticsearch');
if (hasMonitoringCluster(directConfig)) {
if (hasMonitoringCluster(config.elasticsearch)) {
this._log.info(`Detected direct connection to monitoring cluster`);
this._hasDirectConnectionToMonitoringCluster = true;
this._cluster = elasticsearchPlugin.createCluster('monitoring-direct', directConfig);
elasticsearchPlugin
.getCluster('admin')
.callWithInternalUser('info')
.then(data => {
this._productionClusterUuid = get(data, 'cluster_uuid');
});
this._cluster = elasticsearch.createClient('monitoring-direct', config.elasticsearch);
elasticsearch.adminClient.callAsInternalUser('info').then(data => {
this._productionClusterUuid = get(data, 'cluster_uuid');
});
}
this._callClusterWithInternalUser = callClusterFactory({
plugins: { elasticsearch: elasticsearchPlugin },
}).getCallClusterInternal();
this._getKibanaInfoForStats = () =>
getKibanaInfoForStats({
kbnServerStatus,
kbnServerVersion,
config,
});
this.kibanaStats = kibanaStats;
this.kibanaStatusGetter = null;
}
setKibanaStatusGetter(getter) {
this.kibanaStatusGetter = getter;
}
filterCollectorSet(usageCollection) {
@ -166,7 +146,7 @@ export class BulkUploader {
return;
}
const data = await usageCollection.bulkFetch(this._callClusterWithInternalUser);
const data = await usageCollection.bulkFetch(this._cluster.callAsInternalUser);
const payload = this.toBulkUploadFormat(compact(data), usageCollection);
if (payload) {
try {
@ -208,6 +188,13 @@ export class BulkUploader {
);
}
getKibanaStats() {
return {
...this.kibanaStats,
status: this.kibanaStatusGetter(),
};
}
/*
* Bulk stats are transformed into a bulk upload format
* Non-legacy transformation is done in CollectorSet.toApiStats
@ -265,7 +252,7 @@ export class BulkUploader {
...accum,
{ index: { _type: type } },
{
kibana: this._getKibanaInfoForStats(),
kibana: this.getKibanaStats(),
...typesNested[type],
},
];

View file

@ -5,6 +5,7 @@
*/
import { get, snakeCase } from 'lodash';
import { CallCluster } from 'src/legacy/core_plugins/elasticsearch';
import { KIBANA_USAGE_TYPE, KIBANA_STATS_TYPE_MONITORING } from '../../../common/constants';
const TYPES = [
@ -19,14 +20,13 @@ const TYPES = [
/**
* Fetches saved object counts by querying the .kibana index
*/
export function getKibanaUsageCollector(usageCollection, config) {
export function getKibanaUsageCollector(usageCollection: any, kibanaIndex: string) {
return usageCollection.makeUsageCollector({
type: KIBANA_USAGE_TYPE,
isReady: () => true,
async fetch(callCluster) {
const index = config.get('kibana.index');
async fetch(callCluster: CallCluster) {
const savedObjectCountSearchParams = {
index,
index: kibanaIndex,
ignoreUnavailable: true,
filterPath: 'aggregations.types.buckets',
body: {
@ -43,11 +43,11 @@ export function getKibanaUsageCollector(usageCollection, config) {
};
const resp = await callCluster('search', savedObjectCountSearchParams);
const buckets = get(resp, 'aggregations.types.buckets', []);
const buckets: any = get(resp, 'aggregations.types.buckets', []);
// get the doc_count from each bucket
const bucketCounts = buckets.reduce(
(acc, bucket) => ({
(acc: any, bucket: any) => ({
...acc,
[bucket.key]: bucket.doc_count,
}),
@ -55,7 +55,7 @@ export function getKibanaUsageCollector(usageCollection, config) {
);
return {
index,
index: kibanaIndex,
...TYPES.reduce(
(acc, type) => ({
// combine the bucketCounts and 0s for types that don't have documents
@ -74,7 +74,7 @@ export function getKibanaUsageCollector(usageCollection, config) {
* 1. Make this data part of the "kibana_stats" type
* 2. Organize the payload in the usage namespace of the data payload (usage.index, etc)
*/
formatForBulkUpload: result => {
formatForBulkUpload: (result: any) => {
return {
type: KIBANA_STATS_TYPE_MONITORING,
payload: {

View file

@ -0,0 +1,37 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { Observable } from 'rxjs';
import moment from 'moment';
import { OpsMetrics } from 'kibana/server';
import { UsageCollectionSetup } from 'src/plugins/usage_collection/server';
import { KIBANA_STATS_TYPE_MONITORING } from '../../../common/constants';
interface MonitoringOpsMetrics extends OpsMetrics {
timestamp: string;
}
/*
* Initialize a collector for Kibana Ops Stats
*/
export function getOpsStatsCollector(
usageCollection: UsageCollectionSetup,
metrics$: Observable<OpsMetrics>
) {
let lastMetrics: MonitoringOpsMetrics | null = null;
metrics$.subscribe(metrics => {
lastMetrics = {
...metrics,
timestamp: moment.utc().toISOString(),
};
});
return usageCollection.makeStatsCollector({
type: KIBANA_STATS_TYPE_MONITORING,
isReady: () => !!lastMetrics,
fetch: () => lastMetrics,
});
}

View file

@ -4,34 +4,29 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { CLUSTER_ALERTS_ADDRESS_CONFIG_KEY, KIBANA_SETTINGS_TYPE } from '../../../common/constants';
import { KIBANA_SETTINGS_TYPE } from '../../../common/constants';
import { MonitoringConfig } from '../../config';
/*
* Check if Cluster Alert email notifications is enabled in config
* If so, get email from kibana.yml
*/
export async function getDefaultAdminEmail(config) {
if (!config.get('monitoring.cluster_alerts.email_notifications.enabled')) {
export async function getDefaultAdminEmail(config: MonitoringConfig) {
if (!config.cluster_alerts.email_notifications.enabled) {
return null;
}
const emailAddressConfigKey = `monitoring.${CLUSTER_ALERTS_ADDRESS_CONFIG_KEY}`;
const configuredEmailAddress = config.get(emailAddressConfigKey);
return configuredEmailAddress || null;
return config.cluster_alerts.email_notifications.email_address || null;
}
// we use shouldUseNull to determine if we need to send nulls; we only send nulls if the last email wasn't null
let shouldUseNull = true;
export async function checkForEmailValue(
config,
callCluster,
log,
config: MonitoringConfig,
_shouldUseNull = shouldUseNull,
_getDefaultAdminEmail = getDefaultAdminEmail
) {
const defaultAdminEmail = await _getDefaultAdminEmail(config, callCluster, log);
const defaultAdminEmail = await _getDefaultAdminEmail(config);
// Allow null so clearing the advanced setting will be reflected in the data
const isAcceptableNull = defaultAdminEmail === null && _shouldUseNull;
@ -46,13 +41,13 @@ export async function checkForEmailValue(
}
}
export function getSettingsCollector(usageCollection, config) {
export function getSettingsCollector(usageCollection: any, config: MonitoringConfig) {
return usageCollection.makeStatsCollector({
type: KIBANA_SETTINGS_TYPE,
isReady: () => true,
async fetch(callCluster) {
async fetch() {
let kibanaSettingsData;
const defaultAdminEmail = await checkForEmailValue(config, callCluster, this.log);
const defaultAdminEmail = await checkForEmailValue(config);
// skip everything if defaultAdminEmail === undefined
if (defaultAdminEmail || (defaultAdminEmail === null && shouldUseNull)) {
@ -72,7 +67,7 @@ export function getSettingsCollector(usageCollection, config) {
// returns undefined if there was no result
return kibanaSettingsData;
},
getEmailValueStructure(email) {
getEmailValueStructure(email: string) {
return {
xpack: {
default_admin_email: email,

View file

@ -3,15 +3,20 @@
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { Observable } from 'rxjs';
import { OpsMetrics } from 'kibana/server';
import { getKibanaUsageCollector } from './get_kibana_usage_collector';
import { getOpsStatsCollector } from './get_ops_stats_collector';
import { getSettingsCollector } from './get_settings_collector';
import { MonitoringConfig } from '../../config';
export function registerCollectors(usageCollection, collectorsConfigs) {
const { config } = collectorsConfigs;
usageCollection.registerCollector(getOpsStatsCollector(usageCollection, collectorsConfigs));
usageCollection.registerCollector(getKibanaUsageCollector(usageCollection, config));
export function registerCollectors(
usageCollection: any,
config: MonitoringConfig,
opsMetrics$: Observable<OpsMetrics>,
kibanaIndex: string
) {
usageCollection.registerCollector(getOpsStatsCollector(usageCollection, opsMetrics$));
usageCollection.registerCollector(getKibanaUsageCollector(usageCollection, kibanaIndex));
usageCollection.registerCollector(getSettingsCollector(usageCollection, config));
}

View file

@ -16,7 +16,7 @@ import { BulkUploader } from './bulk_uploader';
* @param {Object} server HapiJS server instance
*/
export function initBulkUploader({ config, ...params }) {
const interval = config.get('monitoring.kibana.collection.interval');
const interval = config.kibana.collection.interval;
return new BulkUploader({
interval,
config,

View file

@ -6,4 +6,3 @@
export { sendBulkPayload } from './send_bulk_payload';
export { monitoringBulk } from './monitoring_bulk';
export { getKibanaInfoForStats } from './get_kibana_info_for_stats';

View file

@ -56,12 +56,12 @@ export async function sendBulkPayload(
);
}
const formattedPayload = formatForNormalBulkEndpoint(payload, productionClusterUuid);
return await cluster.callWithInternalUser('bulk', {
return await cluster.callAsInternalUser('bulk', {
body: formattedPayload,
});
}
return cluster.callWithInternalUser('monitoring.bulk', {
return cluster.callAsInternalUser('monitoring.bulk', {
system_id: KIBANA_SYSTEM_ID,
system_api_version: MONITORING_SYSTEM_API_VERSION,
interval: interval + 'ms',

View file

@ -53,9 +53,11 @@ describe('ccs_utils', () => {
const abcPattern = prefixIndexPattern(config, indexPattern, 'aBc');
const underscorePattern = prefixIndexPattern(config, indexPattern, 'cluster_one');
expect(abcPattern).to.eql('aBc:.monitoring-xyz-1-*,aBc:.monitoring-xyz-2-*');
expect(abcPattern).to.eql(
'aBc:.monitoring-xyz-1-*,aBc:.monitoring-xyz-2-*,aBc:monitoring-xyz-1-*,aBc:monitoring-xyz-2-*'
);
expect(underscorePattern).to.eql(
'cluster_one:.monitoring-xyz-1-*,cluster_one:.monitoring-xyz-2-*'
'cluster_one:.monitoring-xyz-1-*,cluster_one:.monitoring-xyz-2-*,cluster_one:monitoring-xyz-1-*,cluster_one:monitoring-xyz-2-*'
);
expect(get.callCount).to.eql(2);
});
@ -69,7 +71,11 @@ describe('ccs_utils', () => {
const pattern = prefixIndexPattern(config, indexPattern, '*');
// it should have BOTH patterns so that it searches all CCS clusters and the local cluster
expect(pattern).to.eql('*:.monitoring-xyz-1-*,*:.monitoring-xyz-2-*' + ',' + indexPattern);
expect(pattern).to.eql(
'*:.monitoring-xyz-1-*,*:.monitoring-xyz-2-*,*:monitoring-xyz-1-*,*:monitoring-xyz-2-*' +
',' +
indexPattern
);
expect(get.callCount).to.eql(1);
});
});
@ -77,18 +83,25 @@ describe('ccs_utils', () => {
describe('parseCrossClusterPrefix', () => {
it('returns ccs prefix for index with one', () => {
expect(parseCrossClusterPrefix('abc:.monitoring-es-6-2017.07.28')).to.eql('abc');
expect(parseCrossClusterPrefix('abc:monitoring-es-6-2017.07.28')).to.eql('abc');
expect(parseCrossClusterPrefix('abc_123:.monitoring-es-6-2017.07.28')).to.eql('abc_123');
expect(parseCrossClusterPrefix('abc_123:monitoring-es-6-2017.07.28')).to.eql('abc_123');
expect(parseCrossClusterPrefix('broken:example:.monitoring-es-6-2017.07.28')).to.eql(
'broken'
);
expect(parseCrossClusterPrefix('broken:example:monitoring-es-6-2017.07.28')).to.eql('broken');
expect(parseCrossClusterPrefix('with-a-dash:.monitoring-es-6-2017.07.28')).to.eql(
'with-a-dash'
);
expect(parseCrossClusterPrefix('with-a-dash:monitoring-es-6-2017.07.28')).to.eql(
'with-a-dash'
);
expect(parseCrossClusterPrefix('something:not-monitoring')).to.eql('something');
});
it('returns null when no prefix exists', () => {
expect(parseCrossClusterPrefix('.monitoring-es-6-2017.07.28')).to.be(null);
expect(parseCrossClusterPrefix('monitoring-es-6-2017.07.28')).to.be(null);
expect(parseCrossClusterPrefix('random')).to.be(null);
});
});

View file

@ -4,7 +4,7 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { fetchDefaultEmailAddress } from './fetch_default_email_address';
import { uiSettingsServiceMock } from '../../../../../../../src/core/server/mocks';
import { uiSettingsServiceMock } from '../../../../../../src/core/server/mocks';
describe('fetchDefaultEmailAddress', () => {
it('get the email address', async () => {

Some files were not shown because too many files have changed in this diff.