kibana/x-pack/plugins/ml/server/plugin.ts


/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License;
 * you may not use this file except in compliance with the Elastic License.
 */

import { i18n } from '@kbn/i18n';
import {
  CoreSetup,
  CoreStart,
  Plugin,
  IScopedClusterClient,
  KibanaRequest,
  Logger,
  PluginInitializerContext,
  ICustomClusterClient,
  CapabilitiesStart,
} from 'kibana/server';
import { PluginsSetup, RouteInitialization } from './types';
import { PLUGIN_ID, PLUGIN_ICON } from '../common/constants/app';
import { MlCapabilities } from '../common/types/capabilities';
import { elasticsearchJsPlugin } from './client/elasticsearch_ml';
import { initMlTelemetry } from './lib/telemetry';
import { initMlServerLog } from './client/log';
import { initSampleDataSets } from './lib/sample_data_sets';
import { annotationRoutes } from './routes/annotations';
import { calendars } from './routes/calendars';
import { dataFeedRoutes } from './routes/datafeeds';
import { dataFrameAnalyticsRoutes } from './routes/data_frame_analytics';
import { dataRecognizer } from './routes/modules';
import { dataVisualizerRoutes } from './routes/data_visualizer';
import { fieldsService } from './routes/fields_service';
import { fileDataVisualizerRoutes } from './routes/file_data_visualizer';
import { filtersRoutes } from './routes/filters';
import { indicesRoutes } from './routes/indices';
import { jobAuditMessagesRoutes } from './routes/job_audit_messages';
import { jobRoutes } from './routes/anomaly_detectors';
import { jobServiceRoutes } from './routes/job_service';
import { jobValidationRoutes } from './routes/job_validation';
import { notificationRoutes } from './routes/notification_settings';
import { resultsServiceRoutes } from './routes/results_service';
import { systemRoutes } from './routes/system';
import { MlLicense } from '../common/license';
import { MlServerLicense } from './lib/license';
import { createSharedServices, SharedServices } from './shared_services';
import { getPluginPrivileges } from '../common/types/capabilities';
import { setupCapabilitiesSwitcher } from './lib/capabilities';
import { registerKibanaSettings } from './lib/register_settings';

declare module 'kibana/server' {
  interface RequestHandlerContext {
    ml?: {
      mlClient: IScopedClusterClient;
    };
  }
}
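
// Illustrative sketch (not part of this file): with the module augmentation
// above, a route handler created from coreSetup.http.createRouter() can reach
// the scoped ML client through its 'context' parameter. The 'ml.jobs'
// endpoint name assumes the custom endpoints registered by
// elasticsearchJsPlugin below.
//
//   router.get({ path: '/example/jobs', validate: false }, async (context, request, response) => {
//     const { jobs } = await context.ml!.mlClient.callAsCurrentUser('ml.jobs');
//     return response.ok({ body: { jobs } });
//   });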

export interface MlPluginSetup extends SharedServices {
  mlClient: ICustomClusterClient;
}
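
// Illustrative sketch: a plugin that lists "ml" as a dependency receives this
// contract in its own setup(). The jobServiceProvider name stands in for one
// of the SharedServices spread into the contract below and is an assumption
// here, not a guaranteed member.
//
//   public setup(core: CoreSetup, plugins: { ml: MlPluginSetup }) {
//     this.mlJobService = plugins.ml.jobServiceProvider;
//   }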
export type MlPluginStart = void;

export class MlServerPlugin implements Plugin<MlPluginSetup, MlPluginStart, PluginsSetup> {
  private log: Logger;
  private version: string;
  private mlLicense: MlServerLicense;
  private capabilities: CapabilitiesStart | null = null;

  constructor(ctx: PluginInitializerContext) {
    this.log = ctx.logger.get();
    this.version = ctx.env.packageInfo.branch;
    this.mlLicense = new MlServerLicense();
  }

  public setup(coreSetup: CoreSetup, plugins: PluginsSetup): MlPluginSetup {
    const { user, admin } = getPluginPrivileges();

    plugins.features.registerFeature({
      id: PLUGIN_ID,
      name: i18n.translate('xpack.ml.featureRegistry.mlFeatureName', {
        defaultMessage: 'Machine Learning',
      }),
      icon: PLUGIN_ICON,
      order: 500,
      navLinkId: PLUGIN_ID,
      app: [PLUGIN_ID, 'kibana'],
      catalogue: [PLUGIN_ID],
      privileges: null,
      reserved: {
        description: i18n.translate('xpack.ml.feature.reserved.description', {
          defaultMessage:
            'To grant users access, you should also assign either the machine_learning_user or machine_learning_admin role.',
        }),
        privileges: [
          {
            id: 'ml_user',
            privilege: {
              api: user.api,
              app: [PLUGIN_ID, 'kibana'],
              catalogue: [PLUGIN_ID],
              savedObject: {
                all: [],
                read: [],
              },
              ui: user.ui,
            },
          },
          {
            id: 'ml_admin',
            privilege: {
              api: admin.api,
              app: [PLUGIN_ID, 'kibana'],
              catalogue: [PLUGIN_ID],
              savedObject: {
                all: [],
                read: [],
              },
              ui: admin.ui,
            },
          },
        ],
      },
    });

    registerKibanaSettings(coreSetup);

    this.mlLicense.setup(plugins.licensing.license$, [
      (mlLicense: MlLicense) => initSampleDataSets(mlLicense, plugins),
    ]);

    // initialize capabilities switcher to add license filter to ml capabilities
    setupCapabilitiesSwitcher(coreSetup, plugins.licensing.license$, this.log);
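
    // Illustrative sketch of the switcher pattern (an assumption — see
    // ./lib/capabilities for the actual implementation): a capabilities
    // switcher can zero out ML capability flags when the license is invalid.
    // isValidMlLicense and disabledMlCapabilities are hypothetical helpers.
    //
    //   coreSetup.capabilities.registerSwitcher(async (request, capabilities) => {
    //     const license = await license$.pipe(first()).toPromise();
    //     return isValidMlLicense(license) ? {} : disabledMlCapabilities(capabilities);
    //   });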

    // The scoped ML client is exposed to route handlers via the 'context'
    // parameter, as context.ml.mlClient.
    const mlClient = coreSetup.elasticsearch.legacy.createClient(PLUGIN_ID, {
      plugins: [elasticsearchJsPlugin],
    });
    coreSetup.http.registerRouteHandlerContext(PLUGIN_ID, (context, request) => {
      return {
        mlClient: mlClient.asScoped(request),
      };
    });

    const routeInit: RouteInitialization = {
      router: coreSetup.http.createRouter(),
      mlLicense: this.mlLicense,
    };

    const resolveMlCapabilities = async (request: KibanaRequest) => {
      if (this.capabilities === null) {
        return null;
      }
      const capabilities = await this.capabilities.resolveCapabilities(request);
      return capabilities.ml as MlCapabilities;
    };
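
    // Illustrative sketch (hypothetical caller): consumers such as the system
    // routes can gate behavior on the resolved capabilities. The canGetJobs
    // flag is an assumed field of MlCapabilities.
    //
    //   const mlCapabilities = await resolveMlCapabilities(request);
    //   if (mlCapabilities === null || mlCapabilities.canGetJobs !== true) {
    //     return response.forbidden();
    //   }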

    annotationRoutes(routeInit, plugins.security);
    calendars(routeInit);
    dataFeedRoutes(routeInit);
    dataFrameAnalyticsRoutes(routeInit);
    dataRecognizer(routeInit);
    dataVisualizerRoutes(routeInit);
    fieldsService(routeInit);
    fileDataVisualizerRoutes(routeInit);
    filtersRoutes(routeInit);
    indicesRoutes(routeInit);
    jobAuditMessagesRoutes(routeInit);
    jobRoutes(routeInit);
    jobServiceRoutes(routeInit);
    notificationRoutes(routeInit);
    resultsServiceRoutes(routeInit);
    jobValidationRoutes(routeInit, this.version);
    systemRoutes(routeInit, {
      spaces: plugins.spaces,
      cloud: plugins.cloud,
      resolveMlCapabilities,
    });
    initMlServerLog({ log: this.log });
    initMlTelemetry(coreSetup, plugins.usageCollection);

    return {
      ...createSharedServices(this.mlLicense, plugins.spaces, plugins.cloud, resolveMlCapabilities),
      mlClient,
    };
  }

  public start(coreStart: CoreStart): MlPluginStart {
    this.capabilities = coreStart.capabilities;
  }

  public stop() {
    this.mlLicense.unsubscribe();
  }
}
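
// Illustrative sketch (not part of this file): Kibana instantiates the plugin
// through an initializer function, which the plugin's index.ts typically
// exports, e.g.:
//
//   export const plugin = (ctx: PluginInitializerContext) => new MlServerPlugin(ctx);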