[ML] Changing all calls to ML endpoints to use internal user (#70487)

* [ML] Changing all calls to ML endpoints to use internal user

* updating alerting

* updating documentation

* fixing missed types

* adding authorization headers to endpoint calls

* correcting has privileges call

* updating security tests

* odd eslint error

* adding auth header to module setup

* fixing missing auth argument

* fixing delete DFA job permission checks

* removing debug test tag

* removing additional ml privilege checks

* adding authorization header to _evaluate

* updating alerting cluster client name

* code clean up

* changing authorizationHeader name

* updating alerting documentation

* fixing secondary credentials

* adding management links

* updating SIEM telemetry

* fixing merge conflicts

* granting access to index patterns

Co-authored-by: Elastic Machine <elasticmachine@users.noreply.github.com>
James Gowdy 2020-07-14 15:48:24 +01:00 committed by GitHub
parent 67f466aab6
commit a1e511a727
99 changed files with 968 additions and 975 deletions


@ -160,7 +160,7 @@ This is the primary function for an action type. Whenever the action needs to ex
| config | The decrypted configuration given to an action. This comes from the action saved object that is partially or fully encrypted within the data store. If you would like to validate the config before being passed to the executor, define `validate.config` within the action type. |
| params | Parameters for the execution. These will be given at execution time by either an alert or manually provided when calling the plugin provided execute function. |
| services.callCluster(path, opts) | Use this to do Elasticsearch queries on the cluster Kibana connects to. This function is the same as any other `callCluster` in Kibana but runs in the context of the user who is calling the action when security is enabled. |
| services.getScopedCallCluster | This function scopes an instance of CallCluster by returning a `callCluster(path, opts)` function that runs in the context of the user who is calling the action when security is enabled. This must only be called with instances of CallCluster provided by core. |
| services.getLegacyScopedClusterClient | This function returns an instance of the LegacyScopedClusterClient scoped to the user who is calling the action when security is enabled. |
| services.savedObjectsClient | This is an instance of the saved objects client. This provides the ability to do CRUD on any saved objects within the same space the alert lives in.<br><br>The scope of the saved objects client is tied to the user in context calling the execute API or the API key provided to the execute plugin function (only when security is enabled). |
| services.log(tags, [data], [timestamp]) | Use this to create server logs. (This is the same function as server.log) |
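
As illustration only, here is a minimal executor sketch for a hypothetical action type (imports and type annotations omitted; `clusterClient` is assumed to be an `ILegacyClusterClient` captured from core during plugin setup) showing the new `services.getLegacyScopedClusterClient` in place of the old `getScopedCallCluster`:

```typescript
// Hypothetical executor sketch, not part of this commit.
async function executor({ actionId, config, params, services }) {
  // Scope the core-provided legacy client to the calling user...
  const scopedClient = services.getLegacyScopedClusterClient(clusterClient);
  // ...then run the Elasticsearch query as that user.
  const result = await scopedClient.callAsCurrentUser('search', {
    index: config.index,
    body: { query: { match_all: {} } },
  });
  services.log(['info', 'example-action'], `search returned ${result.hits.hits.length} hits`);
  return { status: 'ok', actionId };
}
```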


@ -38,7 +38,7 @@ const createServicesMock = () => {
}
> = {
callCluster: elasticsearchServiceMock.createLegacyScopedClusterClient().callAsCurrentUser,
getScopedCallCluster: jest.fn(),
getLegacyScopedClusterClient: jest.fn(),
savedObjectsClient: savedObjectsClientMock.create(),
};
return mock;


@ -307,8 +307,8 @@ export class ActionsPlugin implements Plugin<Promise<PluginSetupContract>, Plugi
return (request) => ({
callCluster: elasticsearch.legacy.client.asScoped(request).callAsCurrentUser,
savedObjectsClient: getScopedClient(request),
getScopedCallCluster(clusterClient: ILegacyClusterClient) {
return clusterClient.asScoped(request).callAsCurrentUser;
getLegacyScopedClusterClient(clusterClient: ILegacyClusterClient) {
return clusterClient.asScoped(request);
},
});
}


@ -25,9 +25,7 @@ export type SpaceIdToNamespaceFunction = (spaceId?: string) => string | undefine
export interface Services {
callCluster: ILegacyScopedClusterClient['callAsCurrentUser'];
savedObjectsClient: SavedObjectsClientContract;
getScopedCallCluster(
clusterClient: ILegacyClusterClient
): ILegacyScopedClusterClient['callAsCurrentUser'];
getLegacyScopedClusterClient(clusterClient: ILegacyClusterClient): ILegacyScopedClusterClient;
}
declare module 'src/core/server' {


@ -103,7 +103,7 @@ This is the primary function for an alert type. Whenever the alert needs to exec
|---|---|
|services.callCluster(path, opts)|Use this to do Elasticsearch queries on the cluster Kibana connects to. This function is the same as any other `callCluster` in Kibana but in the context of the user who created the alert when security is enabled.|
|services.savedObjectsClient|This is an instance of the saved objects client. This provides the ability to do CRUD on any saved objects within the same space the alert lives in.<br><br>The scope of the saved objects client is tied to the user who created the alert (only when security is enabled).|
|services.getScopedCallCluster|This function scopes an instance of CallCluster by returning a `callCluster(path, opts)` function that runs in the context of the user who created the alert when security is enabled. This must only be called with instances of CallCluster provided by core.|
|services.getLegacyScopedClusterClient|This function returns an instance of the LegacyScopedClusterClient scoped to the user who created the alert when security is enabled.|
|services.alertInstanceFactory(id)|This [alert instance factory](#alert-instance-factory) creates instances of alerts and must be used in order to execute actions. The id you give to the alert instance factory is a unique identifier to the alert instance.|
|services.log(tags, [data], [timestamp])|Use this to create server logs. (This is the same function as server.log)|
|startedAt|The date and time the alert type started execution.|
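
For comparison, a hedged sketch of an alert executor for a hypothetical alert type (again with imports omitted and `clusterClient` assumed to come from core at setup time), combining the scoped cluster client with `services.alertInstanceFactory`:

```typescript
// Hypothetical alert executor sketch, not part of this commit.
async function alertExecutor({ services, params }) {
  const scopedClient = services.getLegacyScopedClusterClient(clusterClient);
  const { count } = await scopedClient.callAsCurrentUser('count', { index: params.index });
  if (count > params.threshold) {
    // Create (or retrieve) an alert instance and schedule its default action group.
    services.alertInstanceFactory(`${params.index}-above-threshold`).scheduleActions('default', { count });
  }
}
```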


@ -59,7 +59,7 @@ const createAlertServicesMock = () => {
.fn<jest.Mocked<AlertInstance>, [string]>()
.mockReturnValue(alertInstanceFactoryMock),
callCluster: elasticsearchServiceMock.createLegacyScopedClusterClient().callAsCurrentUser,
getScopedCallCluster: jest.fn(),
getLegacyScopedClusterClient: jest.fn(),
savedObjectsClient: savedObjectsClientMock.create(),
};
};


@ -273,8 +273,8 @@ export class AlertingPlugin {
return (request) => ({
callCluster: elasticsearch.legacy.client.asScoped(request).callAsCurrentUser,
savedObjectsClient: this.getScopedClientWithAlertSavedObjectType(savedObjects, request),
getScopedCallCluster(clusterClient: ILegacyClusterClient) {
return clusterClient.asScoped(request).callAsCurrentUser;
getLegacyScopedClusterClient(clusterClient: ILegacyClusterClient) {
return clusterClient.asScoped(request);
},
});
}


@ -40,9 +40,7 @@ declare module 'src/core/server' {
export interface Services {
callCluster: ILegacyScopedClusterClient['callAsCurrentUser'];
savedObjectsClient: SavedObjectsClientContract;
getScopedCallCluster(
clusterClient: ILegacyClusterClient
): ILegacyScopedClusterClient['callAsCurrentUser'];
getLegacyScopedClusterClient(clusterClient: ILegacyClusterClient): ILegacyScopedClusterClient;
}
export interface AlertServices extends Services {


@ -112,7 +112,7 @@ function getMlSetup(context: APMRequestHandlerContext, request: KibanaRequest) {
return;
}
const ml = context.plugins.ml;
const mlClient = ml.mlClient.asScoped(request).callAsCurrentUser;
const mlClient = ml.mlClient.asScoped(request);
return {
mlSystem: ml.mlSystemProvider(mlClient, request),
anomalyDetectors: ml.anomalyDetectorsProvider(mlClient, request),


@ -152,12 +152,9 @@ export class InfraServerPlugin {
core.http.registerRouteHandlerContext(
'infra',
(context, request): InfraRequestHandlerContext => {
const mlSystem =
context.ml &&
plugins.ml?.mlSystemProvider(context.ml?.mlClient.callAsCurrentUser, request);
const mlSystem = context.ml && plugins.ml?.mlSystemProvider(context.ml?.mlClient, request);
const mlAnomalyDetectors =
context.ml &&
plugins.ml?.anomalyDetectorsProvider(context.ml?.mlClient.callAsCurrentUser, request);
context.ml && plugins.ml?.anomalyDetectorsProvider(context.ml?.mlClient, request);
const spaceId = plugins.spaces?.spacesService.getSpaceId(request) || 'default';
return {


@ -5,6 +5,7 @@
*/
import { KibanaRequest } from 'kibana/server';
import { PLUGIN_ID } from '../constants/app';
export const userMlCapabilities = {
canAccessML: false,
@ -69,16 +70,31 @@ export function getDefaultCapabilities(): MlCapabilities {
export function getPluginPrivileges() {
const userMlCapabilitiesKeys = Object.keys(userMlCapabilities);
const adminMlCapabilitiesKeys = Object.keys(adminMlCapabilities);
const allMlCapabilities = [...adminMlCapabilitiesKeys, ...userMlCapabilitiesKeys];
const allMlCapabilitiesKeys = [...adminMlCapabilitiesKeys, ...userMlCapabilitiesKeys];
// TODO: include ML in base privileges for the `8.0` release: https://github.com/elastic/kibana/issues/71422
const privilege = {
app: [PLUGIN_ID, 'kibana'],
excludeFromBasePrivileges: true,
management: {
insightsAndAlerting: ['jobsListLink'],
},
catalogue: [PLUGIN_ID],
savedObject: {
all: [],
read: ['index-pattern', 'search'],
},
};
return {
user: {
ui: userMlCapabilitiesKeys,
api: userMlCapabilitiesKeys.map((k) => `ml:${k}`),
},
admin: {
ui: allMlCapabilities,
api: allMlCapabilities.map((k) => `ml:${k}`),
...privilege,
api: allMlCapabilitiesKeys.map((k) => `ml:${k}`),
ui: allMlCapabilitiesKeys,
},
user: {
...privilege,
api: userMlCapabilitiesKeys.map((k) => `ml:${k}`),
ui: userMlCapabilitiesKeys,
},
};
}
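
For context, a hedged sketch of how the returned `admin` and `user` privilege definitions could be fed into Kibana's feature registration; the `features.registerFeature` call and its surrounding fields are assumptions for illustration and are not part of this diff:

```typescript
// Assumed consumer sketch: the returned objects already carry app, catalogue,
// management, savedObject, api and ui entries, so they can serve directly as
// the feature's `all` and `read` privileges.
const { admin, user } = getPluginPrivileges();

features.registerFeature({
  id: PLUGIN_ID,
  name: 'Machine Learning',
  app: [PLUGIN_ID, 'kibana'],
  catalogue: [PLUGIN_ID],
  privileges: {
    all: admin,
    read: user,
  },
});
```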


@ -11,8 +11,6 @@ import { IndexPatternAttributes } from 'src/plugins/data/common';
export type IndexPatternTitle = string;
export type callWithRequestType = (action: string, params?: any) => Promise<any>;
export interface Route {
id: string;
k7Breadcrumbs: () => any;


@ -1,12 +0,0 @@
{
"compilerOptions": {
"target": "es6",
"module": "commonjs",
"baseUrl": "../../../.",
"paths": {
"ui/*": ["src/legacy/ui/public/*"],
"plugins/ml/*": ["x-pack/plugins/ml/public/*"]
}
},
"exclude": ["node_modules", "build"]
}


@ -16,8 +16,8 @@ let _capabilities: MlCapabilities = getDefaultCapabilities();
export function checkGetManagementMlJobsResolver() {
return new Promise<{ mlFeatureEnabledInSpace: boolean }>((resolve, reject) => {
getManageMlCapabilities().then(
({ capabilities, isPlatinumOrTrialLicense, mlFeatureEnabledInSpace }) => {
getManageMlCapabilities()
.then(({ capabilities, isPlatinumOrTrialLicense, mlFeatureEnabledInSpace }) => {
_capabilities = capabilities;
// Loop through all capabilities to ensure they are all set to true.
const isManageML = Object.values(_capabilities).every((p) => p === true);
@ -28,62 +28,80 @@ export function checkGetManagementMlJobsResolver() {
window.location.href = ACCESS_DENIED_PATH;
return reject();
}
}
);
})
.catch((e) => {
window.location.href = ACCESS_DENIED_PATH;
return reject();
});
});
}
export function checkGetJobsCapabilitiesResolver(): Promise<MlCapabilities> {
return new Promise((resolve, reject) => {
getCapabilities().then(({ capabilities, isPlatinumOrTrialLicense }) => {
_capabilities = capabilities;
// the minimum privilege for using ML with a platinum or trial license is being able to get the transforms list.
// all other functionality is controlled by the returned capabilities object.
// if the license is basic (isPlatinumOrTrialLicense === false) then do not redirect,
// allow the promise to resolve as the separate license check will redirect the user to
// a basic feature
if (_capabilities.canGetJobs || isPlatinumOrTrialLicense === false) {
return resolve(_capabilities);
} else {
getCapabilities()
.then(({ capabilities, isPlatinumOrTrialLicense }) => {
_capabilities = capabilities;
// the minimum privilege for using ML with a platinum or trial license is being able to get the transforms list.
// all other functionality is controlled by the returned capabilities object.
// if the license is basic (isPlatinumOrTrialLicense === false) then do not redirect,
// allow the promise to resolve as the separate license check will redirect the user to
// a basic feature
if (_capabilities.canGetJobs || isPlatinumOrTrialLicense === false) {
return resolve(_capabilities);
} else {
window.location.href = '#/access-denied';
return reject();
}
})
.catch((e) => {
window.location.href = '#/access-denied';
return reject();
}
});
});
});
}
export function checkCreateJobsCapabilitiesResolver(): Promise<MlCapabilities> {
return new Promise((resolve, reject) => {
getCapabilities().then(({ capabilities, isPlatinumOrTrialLicense }) => {
_capabilities = capabilities;
// if the license is basic (isPlatinumOrTrialLicense === false) then do not redirect,
// allow the promise to resolve as the separate license check will redirect the user to
// a basic feature
if (_capabilities.canCreateJob || isPlatinumOrTrialLicense === false) {
return resolve(_capabilities);
} else {
// if the user has no permission to create a job,
// redirect them back to the Transforms Management page
getCapabilities()
.then(({ capabilities, isPlatinumOrTrialLicense }) => {
_capabilities = capabilities;
// if the license is basic (isPlatinumOrTrialLicense === false) then do not redirect,
// allow the promise to resolve as the separate license check will redirect the user to
// a basic feature
if (_capabilities.canCreateJob || isPlatinumOrTrialLicense === false) {
return resolve(_capabilities);
} else {
// if the user has no permission to create a job,
// redirect them back to the Transforms Management page
window.location.href = '#/jobs';
return reject();
}
})
.catch((e) => {
window.location.href = '#/jobs';
return reject();
}
});
});
});
}
export function checkFindFileStructurePrivilegeResolver(): Promise<MlCapabilities> {
return new Promise((resolve, reject) => {
getCapabilities().then(({ capabilities }) => {
_capabilities = capabilities;
// the minimum privilege for using ML with a basic license is being able to use the datavisualizer.
// all other functionality is controlled by the returned _capabilities object
if (_capabilities.canFindFileStructure) {
return resolve(_capabilities);
} else {
getCapabilities()
.then(({ capabilities }) => {
_capabilities = capabilities;
// the minimum privilege for using ML with a basic license is being able to use the datavisualizer.
// all other functionality is controlled by the returned _capabilities object
if (_capabilities.canFindFileStructure) {
return resolve(_capabilities);
} else {
window.location.href = '#/access-denied';
return reject();
}
})
.catch((e) => {
window.location.href = '#/access-denied';
return reject();
}
});
});
});
}


@ -20,7 +20,7 @@ import {
import { ml } from './ml_api_service';
import { getIndexPatternAndSavedSearch } from '../util/index_utils';
// called in the angular routing resolve block to initialize the
// called in the routing resolve block to initialize the
// newJobCapsService with the currently selected index pattern
export function loadNewJobCapabilities(
indexPatternId: string,


@ -4,7 +4,7 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { getAdminCapabilities, getUserCapabilities } from './__mocks__/ml_capabilities';
import { capabilitiesProvider } from './check_capabilities';
import { MlLicense } from '../../../common/license';
@ -23,18 +23,23 @@ const mlLicenseBasic = {
const mlIsEnabled = async () => true;
const mlIsNotEnabled = async () => false;
const callWithRequestNonUpgrade = ((async () => ({
upgrade_mode: false,
})) as unknown) as LegacyAPICaller;
const callWithRequestUpgrade = ((async () => ({
upgrade_mode: true,
})) as unknown) as LegacyAPICaller;
const mlClusterClientNonUpgrade = ({
callAsInternalUser: async () => ({
upgrade_mode: false,
}),
} as unknown) as ILegacyScopedClusterClient;
const mlClusterClientUpgrade = ({
callAsInternalUser: async () => ({
upgrade_mode: true,
}),
} as unknown) as ILegacyScopedClusterClient;
describe('check_capabilities', () => {
describe('getCapabilities() - right number of capabilities', () => {
test('kibana capabilities count', async (done) => {
const { getCapabilities } = capabilitiesProvider(
callWithRequestNonUpgrade,
mlClusterClientNonUpgrade,
getAdminCapabilities(),
mlLicense,
mlIsEnabled
@ -49,7 +54,7 @@ describe('check_capabilities', () => {
describe('getCapabilities() with security', () => {
test('ml_user capabilities only', async (done) => {
const { getCapabilities } = capabilitiesProvider(
callWithRequestNonUpgrade,
mlClusterClientNonUpgrade,
getUserCapabilities(),
mlLicense,
mlIsEnabled
@ -98,7 +103,7 @@ describe('check_capabilities', () => {
test('full capabilities', async (done) => {
const { getCapabilities } = capabilitiesProvider(
callWithRequestNonUpgrade,
mlClusterClientNonUpgrade,
getAdminCapabilities(),
mlLicense,
mlIsEnabled
@ -147,7 +152,7 @@ describe('check_capabilities', () => {
test('upgrade in progress with full capabilities', async (done) => {
const { getCapabilities } = capabilitiesProvider(
callWithRequestUpgrade,
mlClusterClientUpgrade,
getAdminCapabilities(),
mlLicense,
mlIsEnabled
@ -196,7 +201,7 @@ describe('check_capabilities', () => {
test('upgrade in progress with partial capabilities', async (done) => {
const { getCapabilities } = capabilitiesProvider(
callWithRequestUpgrade,
mlClusterClientUpgrade,
getUserCapabilities(),
mlLicense,
mlIsEnabled
@ -245,7 +250,7 @@ describe('check_capabilities', () => {
test('full capabilities, ml disabled in space', async (done) => {
const { getCapabilities } = capabilitiesProvider(
callWithRequestNonUpgrade,
mlClusterClientNonUpgrade,
getDefaultCapabilities(),
mlLicense,
mlIsNotEnabled
@ -295,7 +300,7 @@ describe('check_capabilities', () => {
test('full capabilities, basic license, ml disabled in space', async (done) => {
const { getCapabilities } = capabilitiesProvider(
callWithRequestNonUpgrade,
mlClusterClientNonUpgrade,
getDefaultCapabilities(),
mlLicenseBasic,
mlIsNotEnabled


@ -4,7 +4,7 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller, KibanaRequest } from 'kibana/server';
import { ILegacyScopedClusterClient, KibanaRequest } from 'kibana/server';
import { mlLog } from '../../client/log';
import {
MlCapabilities,
@ -22,12 +22,12 @@ import {
} from './errors';
export function capabilitiesProvider(
callAsCurrentUser: LegacyAPICaller,
mlClusterClient: ILegacyScopedClusterClient,
capabilities: MlCapabilities,
mlLicense: MlLicense,
isMlEnabledInSpace: () => Promise<boolean>
) {
const { isUpgradeInProgress } = upgradeCheckProvider(callAsCurrentUser);
const { isUpgradeInProgress } = upgradeCheckProvider(mlClusterClient);
async function getCapabilities(): Promise<MlCapabilitiesResponse> {
const upgradeInProgress = await isUpgradeInProgress();
const isPlatinumOrTrialLicense = mlLicense.isFullLicense();


@ -4,14 +4,14 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { mlLog } from '../../client/log';
export function upgradeCheckProvider(callAsCurrentUser: LegacyAPICaller) {
export function upgradeCheckProvider({ callAsInternalUser }: ILegacyScopedClusterClient) {
async function isUpgradeInProgress(): Promise<boolean> {
let upgradeInProgress = false;
try {
const info = await callAsCurrentUser('ml.info');
const info = await callAsInternalUser('ml.info');
// if ml indices are currently being migrated, upgrade_mode will be set to true
// pass this back with the privileges to allow for the disabling of UI controls.
upgradeInProgress = info.upgrade_mode === true;


@ -4,7 +4,7 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { mlLog } from '../../client/log';
import {
@ -17,7 +17,9 @@ import {
// - ML_ANNOTATIONS_INDEX_PATTERN index is present
// - ML_ANNOTATIONS_INDEX_ALIAS_READ alias is present
// - ML_ANNOTATIONS_INDEX_ALIAS_WRITE alias is present
export async function isAnnotationsFeatureAvailable(callAsCurrentUser: LegacyAPICaller) {
export async function isAnnotationsFeatureAvailable({
callAsCurrentUser,
}: ILegacyScopedClusterClient) {
try {
const indexParams = { index: ML_ANNOTATIONS_INDEX_PATTERN };


@ -0,0 +1,13 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { KibanaRequest } from 'kibana/server';
export function getAuthorizationHeader(request: KibanaRequest) {
return {
headers: { 'es-secondary-authorization': request.headers.authorization },
};
}
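
Other changes in this commit (for example the data recognizer's `saveDatafeed` further down) spread this object into ML endpoint parameters. A condensed usage sketch, with `datafeedId`, `datafeedConfig` and `request` assumed to be in scope:

```typescript
// The call itself runs as the internal user, while the end user's credentials
// travel along as the es-secondary-authorization header.
const response = await callAsInternalUser('ml.addDatafeed', {
  datafeedId,
  body: datafeedConfig,
  ...getAuthorizationHeader(request),
});
```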


@ -6,7 +6,6 @@
import getAnnotationsRequestMock from './__mocks__/get_annotations_request.json';
import getAnnotationsResponseMock from './__mocks__/get_annotations_response.json';
import { LegacyAPICaller } from 'kibana/server';
import { ANNOTATION_TYPE } from '../../../common/constants/annotations';
import { ML_ANNOTATIONS_INDEX_ALIAS_WRITE } from '../../../common/constants/index_patterns';
@ -20,10 +19,10 @@ const acknowledgedResponseMock = { acknowledged: true };
const jobIdMock = 'jobIdMock';
describe('annotation_service', () => {
let callWithRequestSpy: any;
let mlClusterClientSpy = {} as any;
beforeEach(() => {
callWithRequestSpy = (jest.fn((action: string) => {
const callAs = jest.fn((action: string) => {
switch (action) {
case 'delete':
case 'index':
@ -31,13 +30,18 @@ describe('annotation_service', () => {
case 'search':
return Promise.resolve(getAnnotationsResponseMock);
}
}) as unknown) as LegacyAPICaller;
});
mlClusterClientSpy = {
callAsCurrentUser: callAs,
callAsInternalUser: callAs,
};
});
describe('deleteAnnotation()', () => {
it('should delete annotation', async (done) => {
const { deleteAnnotation } = annotationServiceProvider(callWithRequestSpy);
const mockFunct = callWithRequestSpy;
const { deleteAnnotation } = annotationServiceProvider(mlClusterClientSpy);
const mockFunct = mlClusterClientSpy;
const annotationMockId = 'mockId';
const deleteParamsMock: DeleteParams = {
@ -48,8 +52,8 @@ describe('annotation_service', () => {
const response = await deleteAnnotation(annotationMockId);
expect(mockFunct.mock.calls[0][0]).toBe('delete');
expect(mockFunct.mock.calls[0][1]).toEqual(deleteParamsMock);
expect(mockFunct.callAsCurrentUser.mock.calls[0][0]).toBe('delete');
expect(mockFunct.callAsCurrentUser.mock.calls[0][1]).toEqual(deleteParamsMock);
expect(response).toBe(acknowledgedResponseMock);
done();
});
@ -57,8 +61,8 @@ describe('annotation_service', () => {
describe('getAnnotation()', () => {
it('should get annotations for specific job', async (done) => {
const { getAnnotations } = annotationServiceProvider(callWithRequestSpy);
const mockFunct = callWithRequestSpy;
const { getAnnotations } = annotationServiceProvider(mlClusterClientSpy);
const mockFunct = mlClusterClientSpy;
const indexAnnotationArgsMock: IndexAnnotationArgs = {
jobIds: [jobIdMock],
@ -69,8 +73,8 @@ describe('annotation_service', () => {
const response: GetResponse = await getAnnotations(indexAnnotationArgsMock);
expect(mockFunct.mock.calls[0][0]).toBe('search');
expect(mockFunct.mock.calls[0][1]).toEqual(getAnnotationsRequestMock);
expect(mockFunct.callAsCurrentUser.mock.calls[0][0]).toBe('search');
expect(mockFunct.callAsCurrentUser.mock.calls[0][1]).toEqual(getAnnotationsRequestMock);
expect(Object.keys(response.annotations)).toHaveLength(1);
expect(response.annotations[jobIdMock]).toHaveLength(2);
expect(isAnnotations(response.annotations[jobIdMock])).toBeTruthy();
@ -84,11 +88,13 @@ describe('annotation_service', () => {
message: 'mock error message',
};
const callWithRequestSpyError = (jest.fn(() => {
return Promise.resolve(mockEsError);
}) as unknown) as LegacyAPICaller;
const mlClusterClientSpyError: any = {
callAsCurrentUser: jest.fn(() => {
return Promise.resolve(mockEsError);
}),
};
const { getAnnotations } = annotationServiceProvider(callWithRequestSpyError);
const { getAnnotations } = annotationServiceProvider(mlClusterClientSpyError);
const indexAnnotationArgsMock: IndexAnnotationArgs = {
jobIds: [jobIdMock],
@ -105,8 +111,8 @@ describe('annotation_service', () => {
describe('indexAnnotation()', () => {
it('should index annotation', async (done) => {
const { indexAnnotation } = annotationServiceProvider(callWithRequestSpy);
const mockFunct = callWithRequestSpy;
const { indexAnnotation } = annotationServiceProvider(mlClusterClientSpy);
const mockFunct = mlClusterClientSpy;
const annotationMock: Annotation = {
annotation: 'Annotation text',
@ -118,10 +124,10 @@ describe('annotation_service', () => {
const response = await indexAnnotation(annotationMock, usernameMock);
expect(mockFunct.mock.calls[0][0]).toBe('index');
expect(mockFunct.callAsCurrentUser.mock.calls[0][0]).toBe('index');
// test if the annotation has been correctly augmented
const indexParamsCheck = mockFunct.mock.calls[0][1];
const indexParamsCheck = mockFunct.callAsCurrentUser.mock.calls[0][1];
const annotation = indexParamsCheck.body;
expect(annotation.create_username).toBe(usernameMock);
expect(annotation.modified_username).toBe(usernameMock);
@ -133,8 +139,8 @@ describe('annotation_service', () => {
});
it('should remove ._id and .key before updating annotation', async (done) => {
const { indexAnnotation } = annotationServiceProvider(callWithRequestSpy);
const mockFunct = callWithRequestSpy;
const { indexAnnotation } = annotationServiceProvider(mlClusterClientSpy);
const mockFunct = mlClusterClientSpy;
const annotationMock: Annotation = {
_id: 'mockId',
@ -148,10 +154,10 @@ describe('annotation_service', () => {
const response = await indexAnnotation(annotationMock, usernameMock);
expect(mockFunct.mock.calls[0][0]).toBe('index');
expect(mockFunct.callAsCurrentUser.mock.calls[0][0]).toBe('index');
// test if the annotation has been correctly augmented
const indexParamsCheck = mockFunct.mock.calls[0][1];
const indexParamsCheck = mockFunct.callAsCurrentUser.mock.calls[0][1];
const annotation = indexParamsCheck.body;
expect(annotation.create_username).toBe(usernameMock);
expect(annotation.modified_username).toBe(usernameMock);
@ -165,8 +171,8 @@ describe('annotation_service', () => {
});
it('should update annotation text and the username for modified_username', async (done) => {
const { getAnnotations, indexAnnotation } = annotationServiceProvider(callWithRequestSpy);
const mockFunct = callWithRequestSpy;
const { getAnnotations, indexAnnotation } = annotationServiceProvider(mlClusterClientSpy);
const mockFunct = mlClusterClientSpy;
const indexAnnotationArgsMock: IndexAnnotationArgs = {
jobIds: [jobIdMock],
@ -190,9 +196,9 @@ describe('annotation_service', () => {
await indexAnnotation(annotation, modifiedUsernameMock);
expect(mockFunct.mock.calls[1][0]).toBe('index');
expect(mockFunct.callAsCurrentUser.mock.calls[1][0]).toBe('index');
// test if the annotation has been correctly updated
const indexParamsCheck = mockFunct.mock.calls[1][1];
const indexParamsCheck = mockFunct.callAsCurrentUser.mock.calls[1][1];
const modifiedAnnotation = indexParamsCheck.body;
expect(modifiedAnnotation.annotation).toBe(modifiedAnnotationText);
expect(modifiedAnnotation.create_username).toBe(originalUsernameMock);


@ -6,7 +6,7 @@
import Boom from 'boom';
import _ from 'lodash';
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { ANNOTATION_TYPE } from '../../../common/constants/annotations';
import {
@ -61,14 +61,7 @@ export interface DeleteParams {
id: string;
}
type annotationProviderParams = DeleteParams | GetParams | IndexParams;
export type callWithRequestType = (
action: string,
params: annotationProviderParams
) => Promise<any>;
export function annotationProvider(callAsCurrentUser: LegacyAPICaller) {
export function annotationProvider({ callAsCurrentUser }: ILegacyScopedClusterClient) {
async function indexAnnotation(annotation: Annotation, username: string) {
if (isAnnotation(annotation) === false) {
// No need to translate, this will not be exposed in the UI.


@ -4,11 +4,11 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { annotationProvider } from './annotation';
export function annotationServiceProvider(callAsCurrentUser: LegacyAPICaller) {
export function annotationServiceProvider(mlClusterClient: ILegacyScopedClusterClient) {
return {
...annotationProvider(callAsCurrentUser),
...annotationProvider(mlClusterClient),
};
}


@ -4,7 +4,7 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { ES_AGGREGATION } from '../../../common/constants/aggregation_types';
export interface BucketSpanEstimatorData {
@ -20,8 +20,7 @@ export interface BucketSpanEstimatorData {
timeField: string | undefined;
}
export function estimateBucketSpanFactory(
callAsCurrentUser: LegacyAPICaller,
callAsInternalUser: LegacyAPICaller,
isSecurityDisabled: boolean
): (config: BucketSpanEstimatorData) => Promise<any>;
export function estimateBucketSpanFactory({
callAsCurrentUser,
callAsInternalUser,
}: ILegacyScopedClusterClient): (config: BucketSpanEstimatorData) => Promise<any>;


@ -12,13 +12,10 @@ import { INTERVALS } from './intervals';
import { singleSeriesCheckerFactory } from './single_series_checker';
import { polledDataCheckerFactory } from './polled_data_checker';
export function estimateBucketSpanFactory(
callAsCurrentUser,
callAsInternalUser,
isSecurityDisabled
) {
const PolledDataChecker = polledDataCheckerFactory(callAsCurrentUser);
const SingleSeriesChecker = singleSeriesCheckerFactory(callAsCurrentUser);
export function estimateBucketSpanFactory(mlClusterClient) {
const { callAsCurrentUser, callAsInternalUser } = mlClusterClient;
const PolledDataChecker = polledDataCheckerFactory(mlClusterClient);
const SingleSeriesChecker = singleSeriesCheckerFactory(mlClusterClient);
class BucketSpanEstimator {
constructor(
@ -334,99 +331,65 @@ export function estimateBucketSpanFactory(
}
return new Promise((resolve, reject) => {
function getBucketSpanEstimation() {
// fetch the `search.max_buckets` cluster setting so we're able to
// adjust aggregations to not exceed that limit.
callAsInternalUser('cluster.getSettings', {
flatSettings: true,
includeDefaults: true,
filterPath: '*.*max_buckets',
// fetch the `search.max_buckets` cluster setting so we're able to
// adjust aggregations to not exceed that limit.
callAsInternalUser('cluster.getSettings', {
flatSettings: true,
includeDefaults: true,
filterPath: '*.*max_buckets',
})
.then((settings) => {
if (typeof settings !== 'object') {
reject('Unable to retrieve cluster settings');
}
// search.max_buckets could exist in default, persistent or transient cluster settings
const maxBucketsSetting = (settings.defaults ||
settings.persistent ||
settings.transient ||
{})['search.max_buckets'];
if (maxBucketsSetting === undefined) {
reject('Unable to retrieve cluster setting search.max_buckets');
}
const maxBuckets = parseInt(maxBucketsSetting);
const runEstimator = (splitFieldValues = []) => {
const bucketSpanEstimator = new BucketSpanEstimator(
formConfig,
splitFieldValues,
maxBuckets
);
bucketSpanEstimator
.run()
.then((resp) => {
resolve(resp);
})
.catch((resp) => {
reject(resp);
});
};
// a partition has been selected, so we need to load some field values to use in the
// bucket span tests.
if (formConfig.splitField !== undefined) {
getRandomFieldValues(formConfig.index, formConfig.splitField, formConfig.query)
.then((splitFieldValues) => {
runEstimator(splitFieldValues);
})
.catch((resp) => {
reject(resp);
});
} else {
// no partition field selected or we're in the single metric config
runEstimator();
}
})
.then((settings) => {
if (typeof settings !== 'object') {
reject('Unable to retrieve cluster settings');
}
// search.max_buckets could exist in default, persistent or transient cluster settings
const maxBucketsSetting = (settings.defaults ||
settings.persistent ||
settings.transient ||
{})['search.max_buckets'];
if (maxBucketsSetting === undefined) {
reject('Unable to retrieve cluster setting search.max_buckets');
}
const maxBuckets = parseInt(maxBucketsSetting);
const runEstimator = (splitFieldValues = []) => {
const bucketSpanEstimator = new BucketSpanEstimator(
formConfig,
splitFieldValues,
maxBuckets
);
bucketSpanEstimator
.run()
.then((resp) => {
resolve(resp);
})
.catch((resp) => {
reject(resp);
});
};
// a partition has been selected, so we need to load some field values to use in the
// bucket span tests.
if (formConfig.splitField !== undefined) {
getRandomFieldValues(formConfig.index, formConfig.splitField, formConfig.query)
.then((splitFieldValues) => {
runEstimator(splitFieldValues);
})
.catch((resp) => {
reject(resp);
});
} else {
// no partition field selected or we're in the single metric config
runEstimator();
}
})
.catch((resp) => {
reject(resp);
});
}
if (isSecurityDisabled) {
getBucketSpanEstimation();
} else {
// if security is enabled, check that the user has permission to
// view jobs before calling getBucketSpanEstimation.
// getBucketSpanEstimation calls the 'cluster.getSettings' endpoint as the internal user
// and so could give the user access to more information than
// they are entitled to.
const body = {
cluster: [
'cluster:monitor/xpack/ml/job/get',
'cluster:monitor/xpack/ml/job/stats/get',
'cluster:monitor/xpack/ml/datafeeds/get',
'cluster:monitor/xpack/ml/datafeeds/stats/get',
],
};
callAsCurrentUser('ml.privilegeCheck', { body })
.then((resp) => {
if (
resp.cluster['cluster:monitor/xpack/ml/job/get'] &&
resp.cluster['cluster:monitor/xpack/ml/job/stats/get'] &&
resp.cluster['cluster:monitor/xpack/ml/datafeeds/get'] &&
resp.cluster['cluster:monitor/xpack/ml/datafeeds/stats/get']
) {
getBucketSpanEstimation();
} else {
reject('Insufficient permissions to call bucket span estimation.');
}
})
.catch(reject);
}
.catch((resp) => {
reject(resp);
});
});
};
}


@ -4,40 +4,21 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { ES_AGGREGATION } from '../../../common/constants/aggregation_types';
import { estimateBucketSpanFactory, BucketSpanEstimatorData } from './bucket_span_estimator';
// Mock callWithRequest with the ability to simulate returning different
// permission settings. On each call using `ml.privilegeCheck` we retrieve
// the last value from `permissions` and pass that to one of the permission
// settings. The tests call `ml.privilegeCheck` two times, the first time
// sufficient permissions should be returned, the second time insufficient
// permissions.
const permissions = [false, true];
const callWithRequest: LegacyAPICaller = (method: string) => {
const callAs = () => {
return new Promise((resolve) => {
if (method === 'ml.privilegeCheck') {
resolve({
cluster: {
'cluster:monitor/xpack/ml/job/get': true,
'cluster:monitor/xpack/ml/job/stats/get': true,
'cluster:monitor/xpack/ml/datafeeds/get': true,
'cluster:monitor/xpack/ml/datafeeds/stats/get': permissions.pop(),
},
});
return;
}
resolve({});
}) as Promise<any>;
};
const callWithInternalUser: LegacyAPICaller = () => {
return new Promise((resolve) => {
resolve({});
}) as Promise<any>;
const mlClusterClient: ILegacyScopedClusterClient = {
callAsCurrentUser: callAs,
callAsInternalUser: callAs,
};
// mock configuration to be passed to the estimator
@ -59,17 +40,13 @@ const formConfig: BucketSpanEstimatorData = {
describe('ML - BucketSpanEstimator', () => {
it('call factory', () => {
expect(function () {
estimateBucketSpanFactory(callWithRequest, callWithInternalUser, false);
estimateBucketSpanFactory(mlClusterClient);
}).not.toThrow('Not initialized.');
});
it('call factory and estimator with security disabled', (done) => {
expect(function () {
const estimateBucketSpan = estimateBucketSpanFactory(
callWithRequest,
callWithInternalUser,
true
);
const estimateBucketSpan = estimateBucketSpanFactory(mlClusterClient);
estimateBucketSpan(formConfig).catch((catchData) => {
expect(catchData).toBe('Unable to retrieve cluster setting search.max_buckets');
@ -81,11 +58,7 @@ describe('ML - BucketSpanEstimator', () => {
it('call factory and estimator with security enabled.', (done) => {
expect(function () {
const estimateBucketSpan = estimateBucketSpanFactory(
callWithRequest,
callWithInternalUser,
false
);
const estimateBucketSpan = estimateBucketSpanFactory(mlClusterClient);
estimateBucketSpan(formConfig).catch((catchData) => {
expect(catchData).toBe('Unable to retrieve cluster setting search.max_buckets');


@ -12,7 +12,7 @@
import _ from 'lodash';
export function polledDataCheckerFactory(callAsCurrentUser) {
export function polledDataCheckerFactory({ callAsCurrentUser }) {
class PolledDataChecker {
constructor(index, timeField, duration, query) {
this.index = index;


@ -13,7 +13,7 @@
import { mlLog } from '../../client/log';
import { INTERVALS, LONG_INTERVALS } from './intervals';
export function singleSeriesCheckerFactory(callAsCurrentUser) {
export function singleSeriesCheckerFactory({ callAsCurrentUser }) {
const REF_DATA_INTERVAL = { name: '1h', ms: 3600000 };
class SingleSeriesChecker {


@ -5,7 +5,7 @@
*/
import numeral from '@elastic/numeral';
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { MLCATEGORY } from '../../../common/constants/field_types';
import { AnalysisConfig } from '../../../common/types/anomaly_detection_jobs';
import { fieldsServiceProvider } from '../fields_service';
@ -36,8 +36,8 @@ export interface ModelMemoryEstimate {
/**
* Retrieves overall and max bucket cardinalities.
*/
const cardinalityCheckProvider = (callAsCurrentUser: LegacyAPICaller) => {
const fieldsService = fieldsServiceProvider(callAsCurrentUser);
const cardinalityCheckProvider = (mlClusterClient: ILegacyScopedClusterClient) => {
const fieldsService = fieldsServiceProvider(mlClusterClient);
return async (
analysisConfig: AnalysisConfig,
@ -123,8 +123,9 @@ const cardinalityCheckProvider = (callAsCurrentUser: LegacyAPICaller) => {
};
};
export function calculateModelMemoryLimitProvider(callAsCurrentUser: LegacyAPICaller) {
const getCardinalities = cardinalityCheckProvider(callAsCurrentUser);
export function calculateModelMemoryLimitProvider(mlClusterClient: ILegacyScopedClusterClient) {
const { callAsInternalUser } = mlClusterClient;
const getCardinalities = cardinalityCheckProvider(mlClusterClient);
/**
* Retrieves an estimated size of the model memory limit used in the job config
@ -140,7 +141,7 @@ export function calculateModelMemoryLimitProvider(callAsCurrentUser: LegacyAPICa
latestMs: number,
allowMMLGreaterThanMax = false
): Promise<ModelMemoryEstimationResult> {
const info = await callAsCurrentUser<MlInfoResponse>('ml.info');
const info = (await callAsInternalUser('ml.info')) as MlInfoResponse;
const maxModelMemoryLimit = info.limits.max_model_memory_limit?.toUpperCase();
const effectiveMaxModelMemoryLimit = info.limits.effective_max_model_memory_limit?.toUpperCase();
@ -153,28 +154,26 @@ export function calculateModelMemoryLimitProvider(callAsCurrentUser: LegacyAPICa
latestMs
);
const estimatedModelMemoryLimit = (
await callAsCurrentUser<ModelMemoryEstimate>('ml.estimateModelMemory', {
body: {
analysis_config: analysisConfig,
overall_cardinality: overallCardinality,
max_bucket_cardinality: maxBucketCardinality,
},
})
).model_memory_estimate.toUpperCase();
const estimatedModelMemoryLimit = ((await callAsInternalUser('ml.estimateModelMemory', {
body: {
analysis_config: analysisConfig,
overall_cardinality: overallCardinality,
max_bucket_cardinality: maxBucketCardinality,
},
})) as ModelMemoryEstimate).model_memory_estimate.toUpperCase();
let modelMemoryLimit = estimatedModelMemoryLimit;
let mmlCappedAtMax = false;
// if max_model_memory_limit has been set,
// make sure the estimated value is not greater than it.
if (allowMMLGreaterThanMax === false) {
// @ts-ignore
// @ts-expect-error
const mmlBytes = numeral(estimatedModelMemoryLimit).value();
if (maxModelMemoryLimit !== undefined) {
// @ts-ignore
// @ts-expect-error
const maxBytes = numeral(maxModelMemoryLimit).value();
if (mmlBytes > maxBytes) {
// @ts-ignore
// @ts-expect-error
modelMemoryLimit = `${Math.floor(maxBytes / numeral('1MB').value())}MB`;
mmlCappedAtMax = true;
}
@ -183,10 +182,10 @@ export function calculateModelMemoryLimitProvider(callAsCurrentUser: LegacyAPICa
// if we've not already capped the estimated mml at the hard max server setting
// ensure that the estimated mml isn't greater than the effective max mml
if (mmlCappedAtMax === false && effectiveMaxModelMemoryLimit !== undefined) {
// @ts-ignore
// @ts-expect-error
const effectiveMaxMmlBytes = numeral(effectiveMaxModelMemoryLimit).value();
if (mmlBytes > effectiveMaxMmlBytes) {
// @ts-ignore
// @ts-expect-error
modelMemoryLimit = `${Math.floor(effectiveMaxMmlBytes / numeral('1MB').value())}MB`;
}
}


@ -5,7 +5,7 @@
*/
import { difference } from 'lodash';
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { EventManager, CalendarEvent } from './event_manager';
interface BasicCalendar {
@ -23,16 +23,16 @@ export interface FormCalendar extends BasicCalendar {
}
export class CalendarManager {
private _callAsCurrentUser: LegacyAPICaller;
private _callAsInternalUser: ILegacyScopedClusterClient['callAsInternalUser'];
private _eventManager: EventManager;
constructor(callAsCurrentUser: LegacyAPICaller) {
this._callAsCurrentUser = callAsCurrentUser;
this._eventManager = new EventManager(callAsCurrentUser);
constructor(mlClusterClient: ILegacyScopedClusterClient) {
this._callAsInternalUser = mlClusterClient.callAsInternalUser;
this._eventManager = new EventManager(mlClusterClient);
}
async getCalendar(calendarId: string) {
const resp = await this._callAsCurrentUser('ml.calendars', {
const resp = await this._callAsInternalUser('ml.calendars', {
calendarId,
});
@ -43,7 +43,7 @@ export class CalendarManager {
}
async getAllCalendars() {
const calendarsResp = await this._callAsCurrentUser('ml.calendars');
const calendarsResp = await this._callAsInternalUser('ml.calendars');
const events: CalendarEvent[] = await this._eventManager.getAllEvents();
const calendars: Calendar[] = calendarsResp.calendars;
@ -74,7 +74,7 @@ export class CalendarManager {
const events = calendar.events;
delete calendar.calendarId;
delete calendar.events;
await this._callAsCurrentUser('ml.addCalendar', {
await this._callAsInternalUser('ml.addCalendar', {
calendarId,
body: calendar,
});
@ -109,7 +109,7 @@ export class CalendarManager {
// add all new jobs
if (jobsToAdd.length) {
await this._callAsCurrentUser('ml.addJobToCalendar', {
await this._callAsInternalUser('ml.addJobToCalendar', {
calendarId,
jobId: jobsToAdd.join(','),
});
@ -117,7 +117,7 @@ export class CalendarManager {
// remove all removed jobs
if (jobsToRemove.length) {
await this._callAsCurrentUser('ml.removeJobFromCalendar', {
await this._callAsInternalUser('ml.removeJobFromCalendar', {
calendarId,
jobId: jobsToRemove.join(','),
});
@ -140,6 +140,6 @@ export class CalendarManager {
}
async deleteCalendar(calendarId: string) {
return this._callAsCurrentUser('ml.deleteCalendar', { calendarId });
return this._callAsInternalUser('ml.deleteCalendar', { calendarId });
}
}
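
A brief usage sketch, assuming a request handler context that exposes the scoped ML client as `context.ml.mlClient` (as in the infra plugin changes above): the manager is now constructed from the whole scoped cluster client rather than a bare `callAsCurrentUser` function, so calendar CRUD runs as the internal user.

```typescript
// Assumed route-handler sketch, not part of this commit.
const calendarManager = new CalendarManager(context.ml!.mlClient);
const calendars = await calendarManager.getAllCalendars();
```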


@ -4,7 +4,7 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { GLOBAL_CALENDAR } from '../../../common/constants/calendars';
export interface CalendarEvent {
@ -16,10 +16,13 @@ export interface CalendarEvent {
}
export class EventManager {
constructor(private _callAsCurrentUser: LegacyAPICaller) {}
private _callAsInternalUser: ILegacyScopedClusterClient['callAsInternalUser'];
constructor({ callAsInternalUser }: ILegacyScopedClusterClient) {
this._callAsInternalUser = callAsInternalUser;
}
async getCalendarEvents(calendarId: string) {
const resp = await this._callAsCurrentUser('ml.events', { calendarId });
const resp = await this._callAsInternalUser('ml.events', { calendarId });
return resp.events;
}
@ -27,7 +30,7 @@ export class EventManager {
// jobId is optional
async getAllEvents(jobId?: string) {
const calendarId = GLOBAL_CALENDAR;
const resp = await this._callAsCurrentUser('ml.events', {
const resp = await this._callAsInternalUser('ml.events', {
calendarId,
jobId,
});
@ -38,14 +41,14 @@ export class EventManager {
async addEvents(calendarId: string, events: CalendarEvent[]) {
const body = { events };
return await this._callAsCurrentUser('ml.addEvent', {
return await this._callAsInternalUser('ml.addEvent', {
calendarId,
body,
});
}
async deleteEvent(calendarId: string, eventId: string) {
return this._callAsCurrentUser('ml.deleteEvent', { calendarId, eventId });
return this._callAsInternalUser('ml.deleteEvent', { calendarId, eventId });
}
isEqual(ev1: CalendarEvent, ev2: CalendarEvent) {


@ -4,7 +4,7 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { callWithRequestType } from '../../../common/types/kibana';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { ML_NOTIFICATION_INDEX_PATTERN } from '../../../common/constants/index_patterns';
import { JobMessage } from '../../../common/types/audit_message';
@ -23,7 +23,7 @@ interface BoolQuery {
bool: { [key: string]: any };
}
export function analyticsAuditMessagesProvider(callWithRequest: callWithRequestType) {
export function analyticsAuditMessagesProvider({ callAsCurrentUser }: ILegacyScopedClusterClient) {
// search for audit messages,
// analyticsId is optional. without it, all analytics will be listed.
async function getAnalyticsAuditMessages(analyticsId: string) {
@ -69,7 +69,7 @@ export function analyticsAuditMessagesProvider(callWithRequest: callWithRequestT
}
try {
const resp = await callWithRequest('search', {
const resp = await callAsCurrentUser('search', {
index: ML_NOTIFICATION_INDEX_PATTERN,
ignore_unavailable: true,
rest_total_hits_as_int: true,


@ -4,17 +4,18 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller, SavedObjectsClientContract } from 'kibana/server';
import { SavedObjectsClientContract, KibanaRequest } from 'kibana/server';
import { Module } from '../../../common/types/modules';
import { DataRecognizer } from '../data_recognizer';
describe('ML - data recognizer', () => {
const dr = new DataRecognizer(
jest.fn() as LegacyAPICaller,
{ callAsCurrentUser: jest.fn(), callAsInternalUser: jest.fn() },
({
find: jest.fn(),
bulkCreate: jest.fn(),
} as never) as SavedObjectsClientContract
} as unknown) as SavedObjectsClientContract,
{ headers: { authorization: '' } } as KibanaRequest
);
describe('jobOverrides', () => {


@ -7,11 +7,16 @@
import fs from 'fs';
import Boom from 'boom';
import numeral from '@elastic/numeral';
import { LegacyAPICaller, SavedObjectsClientContract } from 'kibana/server';
import {
KibanaRequest,
ILegacyScopedClusterClient,
SavedObjectsClientContract,
} from 'kibana/server';
import moment from 'moment';
import { IndexPatternAttributes } from 'src/plugins/data/server';
import { merge } from 'lodash';
import { AnalysisLimits, CombinedJobWithStats } from '../../../common/types/anomaly_detection_jobs';
import { getAuthorizationHeader } from '../../lib/request_authorization';
import { MlInfoResponse } from '../../../common/types/ml_server_info';
import {
KibanaObjects,
@ -104,18 +109,28 @@ interface SaveResults {
}
export class DataRecognizer {
modulesDir = `${__dirname}/modules`;
indexPatternName: string = '';
indexPatternId: string | undefined = undefined;
private _callAsCurrentUser: ILegacyScopedClusterClient['callAsCurrentUser'];
private _callAsInternalUser: ILegacyScopedClusterClient['callAsInternalUser'];
private _mlClusterClient: ILegacyScopedClusterClient;
private _authorizationHeader: object;
private _modulesDir = `${__dirname}/modules`;
private _indexPatternName: string = '';
private _indexPatternId: string | undefined = undefined;
/**
* List of the module jobs that require model memory estimation
*/
jobsForModelMemoryEstimation: Array<{ job: ModuleJob; query: any }> = [];
constructor(
private callAsCurrentUser: LegacyAPICaller,
private savedObjectsClient: SavedObjectsClientContract
) {}
mlClusterClient: ILegacyScopedClusterClient,
private savedObjectsClient: SavedObjectsClientContract,
request: KibanaRequest
) {
this._mlClusterClient = mlClusterClient;
this._callAsCurrentUser = mlClusterClient.callAsCurrentUser;
this._callAsInternalUser = mlClusterClient.callAsInternalUser;
this._authorizationHeader = getAuthorizationHeader(request);
}
// list all directories under the given directory
async listDirs(dirName: string): Promise<string[]> {
@ -150,12 +165,12 @@ export class DataRecognizer {
async loadManifestFiles(): Promise<Config[]> {
const configs: Config[] = [];
const dirs = await this.listDirs(this.modulesDir);
const dirs = await this.listDirs(this._modulesDir);
await Promise.all(
dirs.map(async (dir) => {
let file: string | undefined;
try {
file = await this.readFile(`${this.modulesDir}/${dir}/manifest.json`);
file = await this.readFile(`${this._modulesDir}/${dir}/manifest.json`);
} catch (error) {
mlLog.warn(`Data recognizer skipping folder ${dir} as manifest.json cannot be read`);
}
@ -204,7 +219,7 @@ export class DataRecognizer {
if (moduleConfig.logoFile) {
try {
logo = await this.readFile(
`${this.modulesDir}/${i.dirName}/${moduleConfig.logoFile}`
`${this._modulesDir}/${i.dirName}/${moduleConfig.logoFile}`
);
logo = JSON.parse(logo);
} catch (e) {
@ -236,7 +251,7 @@ export class DataRecognizer {
query: moduleConfig.query,
};
const resp = await this.callAsCurrentUser('search', {
const resp = await this._callAsCurrentUser('search', {
index,
rest_total_hits_as_int: true,
size,
@ -281,7 +296,7 @@ export class DataRecognizer {
manifestJSON.jobs.map(async (job) => {
try {
const jobConfig = await this.readFile(
`${this.modulesDir}/${dirName}/${ML_DIR}/${job.file}`
`${this._modulesDir}/${dirName}/${ML_DIR}/${job.file}`
);
// use the file name for the id
jobs.push({
@ -301,7 +316,7 @@ export class DataRecognizer {
manifestJSON.datafeeds.map(async (datafeed) => {
try {
const datafeedConfig = await this.readFile(
`${this.modulesDir}/${dirName}/${ML_DIR}/${datafeed.file}`
`${this._modulesDir}/${dirName}/${ML_DIR}/${datafeed.file}`
);
const config = JSON.parse(datafeedConfig);
// use the job id from the manifestFile
@ -329,7 +344,7 @@ export class DataRecognizer {
manifestJSON!.kibana[key].map(async (obj) => {
try {
const kConfig = await this.readFile(
`${this.modulesDir}/${dirName}/${KIBANA_DIR}/${key}/${obj.file}`
`${this._modulesDir}/${dirName}/${KIBANA_DIR}/${key}/${obj.file}`
);
// use the file name for the id
const kId = obj.file.replace('.json', '');
@ -385,26 +400,26 @@ export class DataRecognizer {
);
}
this.indexPatternName =
this._indexPatternName =
indexPatternName === undefined ? moduleConfig.defaultIndexPattern : indexPatternName;
this.indexPatternId = await this.getIndexPatternId(this.indexPatternName);
this._indexPatternId = await this.getIndexPatternId(this._indexPatternName);
// the module's jobs contain custom URLs which require an index pattern id
// but there is no corresponding index pattern, throw an error
if (this.indexPatternId === undefined && this.doJobUrlsContainIndexPatternId(moduleConfig)) {
if (this._indexPatternId === undefined && this.doJobUrlsContainIndexPatternId(moduleConfig)) {
throw Boom.badRequest(
`Module's jobs contain custom URLs which require a kibana index pattern (${this.indexPatternName}) which cannot be found.`
`Module's jobs contain custom URLs which require a kibana index pattern (${this._indexPatternName}) which cannot be found.`
);
}
// the module's saved objects require an index pattern id
// but there is no corresponding index pattern, throw an error
if (
this.indexPatternId === undefined &&
this._indexPatternId === undefined &&
this.doSavedObjectsContainIndexPatternId(moduleConfig)
) {
throw Boom.badRequest(
`Module's saved objects contain custom URLs which require a kibana index pattern (${this.indexPatternName}) which cannot be found.`
`Module's saved objects contain custom URLs which require a kibana index pattern (${this._indexPatternName}) which cannot be found.`
);
}
@ -495,7 +510,7 @@ export class DataRecognizer {
// Add a wildcard at the front of each of the job IDs in the module,
// as a prefix may have been supplied when creating the jobs in the module.
const jobIds = module.jobs.map((job) => `*${job.id}`);
const { jobsExist } = jobServiceProvider(this.callAsCurrentUser);
const { jobsExist } = jobServiceProvider(this._mlClusterClient);
const jobInfo = await jobsExist(jobIds);
// Check if the value for any of the jobs is false.
@ -504,11 +519,13 @@ export class DataRecognizer {
if (doJobsExist === true) {
// Get the IDs of the jobs created from the module, and their earliest / latest timestamps.
const jobStats: MlJobStats = await this.callAsCurrentUser('ml.jobStats', { jobId: jobIds });
const jobStats: MlJobStats = await this._callAsInternalUser('ml.jobStats', {
jobId: jobIds,
});
const jobStatsJobs: JobStat[] = [];
if (jobStats.jobs && jobStats.jobs.length > 0) {
const foundJobIds = jobStats.jobs.map((job) => job.job_id);
const { getLatestBucketTimestampByJob } = resultsServiceProvider(this.callAsCurrentUser);
const { getLatestBucketTimestampByJob } = resultsServiceProvider(this._mlClusterClient);
const latestBucketTimestampsByJob = await getLatestBucketTimestampByJob(foundJobIds);
jobStats.jobs.forEach((job) => {
@ -669,7 +686,7 @@ export class DataRecognizer {
async saveJob(job: ModuleJob) {
const { id: jobId, config: body } = job;
return this.callAsCurrentUser('ml.addJob', { jobId, body });
return this._callAsInternalUser('ml.addJob', { jobId, body });
}
// save the datafeeds.
@ -690,7 +707,11 @@ export class DataRecognizer {
async saveDatafeed(datafeed: ModuleDataFeed) {
const { id: datafeedId, config: body } = datafeed;
return this.callAsCurrentUser('ml.addDatafeed', { datafeedId, body });
return this._callAsInternalUser('ml.addDatafeed', {
datafeedId,
body,
...this._authorizationHeader,
});
}
async startDatafeeds(
@ -713,7 +734,7 @@ export class DataRecognizer {
const result = { started: false } as DatafeedResponse;
let opened = false;
try {
const openResult = await this.callAsCurrentUser('ml.openJob', {
const openResult = await this._callAsInternalUser('ml.openJob', {
jobId: datafeed.config.job_id,
});
opened = openResult.opened;
@ -737,7 +758,10 @@ export class DataRecognizer {
duration.end = end;
}
await this.callAsCurrentUser('ml.startDatafeed', { datafeedId: datafeed.id, ...duration });
await this._callAsInternalUser('ml.startDatafeed', {
datafeedId: datafeed.id,
...duration,
});
result.started = true;
} catch (error) {
result.started = false;
@ -838,7 +862,7 @@ export class DataRecognizer {
updateDatafeedIndices(moduleConfig: Module) {
// if the supplied index pattern contains a comma, split into multiple indices and
// add each one to the datafeed
const indexPatternNames = splitIndexPatternNames(this.indexPatternName);
const indexPatternNames = splitIndexPatternNames(this._indexPatternName);
moduleConfig.datafeeds.forEach((df) => {
const newIndices: string[] = [];
@ -876,7 +900,7 @@ export class DataRecognizer {
if (url.match(INDEX_PATTERN_ID)) {
const newUrl = url.replace(
new RegExp(INDEX_PATTERN_ID, 'g'),
this.indexPatternId as string
this._indexPatternId as string
);
// update the job's url
cUrl.url_value = newUrl;
@ -915,7 +939,7 @@ export class DataRecognizer {
if (jsonString.match(INDEX_PATTERN_ID)) {
jsonString = jsonString.replace(
new RegExp(INDEX_PATTERN_ID, 'g'),
this.indexPatternId as string
this._indexPatternId as string
);
item.config.kibanaSavedObjectMeta!.searchSourceJSON = jsonString;
}
@ -927,7 +951,7 @@ export class DataRecognizer {
if (visStateString !== undefined && visStateString.match(INDEX_PATTERN_NAME)) {
visStateString = visStateString.replace(
new RegExp(INDEX_PATTERN_NAME, 'g'),
this.indexPatternName
this._indexPatternName
);
item.config.visState = visStateString;
}
@ -944,10 +968,10 @@ export class DataRecognizer {
timeField: string,
query?: any
): Promise<{ start: number; end: number }> {
const fieldsService = fieldsServiceProvider(this.callAsCurrentUser);
const fieldsService = fieldsServiceProvider(this._mlClusterClient);
const timeFieldRange = await fieldsService.getTimeFieldRange(
this.indexPatternName,
this._indexPatternName,
timeField,
query
);
@ -974,7 +998,7 @@ export class DataRecognizer {
if (estimateMML && this.jobsForModelMemoryEstimation.length > 0) {
try {
const calculateModelMemoryLimit = calculateModelMemoryLimitProvider(this.callAsCurrentUser);
const calculateModelMemoryLimit = calculateModelMemoryLimitProvider(this._mlClusterClient);
// Checks if all jobs in the module have the same time field configured
const firstJobTimeField = this.jobsForModelMemoryEstimation[0].job.config.data_description
@ -1009,7 +1033,7 @@ export class DataRecognizer {
const { modelMemoryLimit } = await calculateModelMemoryLimit(
job.config.analysis_config,
this.indexPatternName,
this._indexPatternName,
query,
job.config.data_description.time_field,
earliestMs,
@ -1027,20 +1051,20 @@ export class DataRecognizer {
}
}
const { limits } = await this.callAsCurrentUser<MlInfoResponse>('ml.info');
const { limits } = (await this._callAsInternalUser('ml.info')) as MlInfoResponse;
const maxMml = limits.max_model_memory_limit;
if (!maxMml) {
return;
}
// @ts-ignore
// @ts-expect-error
const maxBytes: number = numeral(maxMml.toUpperCase()).value();
for (const job of moduleConfig.jobs) {
const mml = job.config?.analysis_limits?.model_memory_limit;
if (mml !== undefined) {
// @ts-ignore
// @ts-expect-error
const mmlBytes: number = numeral(mml.toUpperCase()).value();
if (mmlBytes > maxBytes) {
// if the job's mml is over the max,
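A small sketch of the byte comparison this hunk performs, assuming numeral parses byte suffixes such as '10MB' and '1GB' as the code above relies on (the import path and function name are illustrative only, and the same // @ts-expect-error may be needed for the numeral typings):
import numeral from '@elastic/numeral';
// Returns true when a job's model_memory_limit exceeds the cluster-wide maximum.
function exceedsMaxMml(jobMml: string, clusterMaxMml: string): boolean {
  // Both limits arrive as strings such as '10mb' or '1gb'; convert to bytes before comparing.
  const jobBytes: number = numeral(jobMml.toUpperCase()).value();
  const maxBytes: number = numeral(clusterMaxMml.toUpperCase()).value();
  return jobBytes > maxBytes;
}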

View file

@ -4,7 +4,7 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import _ from 'lodash';
import { KBN_FIELD_TYPES } from '../../../../../../src/plugins/data/server';
import { ML_JOB_FIELD_TYPES } from '../../../common/constants/field_types';
@ -180,7 +180,7 @@ type BatchStats =
| FieldExamples;
const getAggIntervals = async (
callAsCurrentUser: LegacyAPICaller,
{ callAsCurrentUser }: ILegacyScopedClusterClient,
indexPatternTitle: string,
query: any,
fields: HistogramField[],
@ -238,14 +238,15 @@ const getAggIntervals = async (
// export for re-use by transforms plugin
export const getHistogramsForFields = async (
callAsCurrentUser: LegacyAPICaller,
mlClusterClient: ILegacyScopedClusterClient,
indexPatternTitle: string,
query: any,
fields: HistogramField[],
samplerShardSize: number
) => {
const { callAsCurrentUser } = mlClusterClient;
const aggIntervals = await getAggIntervals(
callAsCurrentUser,
mlClusterClient,
indexPatternTitle,
query,
fields,
@ -348,10 +349,12 @@ export const getHistogramsForFields = async (
};
export class DataVisualizer {
callAsCurrentUser: LegacyAPICaller;
private _mlClusterClient: ILegacyScopedClusterClient;
private _callAsCurrentUser: ILegacyScopedClusterClient['callAsCurrentUser'];
constructor(callAsCurrentUser: LegacyAPICaller) {
this.callAsCurrentUser = callAsCurrentUser;
constructor(mlClusterClient: ILegacyScopedClusterClient) {
this._callAsCurrentUser = mlClusterClient.callAsCurrentUser;
this._mlClusterClient = mlClusterClient;
}
// Obtains overall stats on the fields in the supplied index pattern, returning an object
@ -447,7 +450,7 @@ export class DataVisualizer {
samplerShardSize: number
): Promise<any> {
return await getHistogramsForFields(
this.callAsCurrentUser,
this._mlClusterClient,
indexPatternTitle,
query,
fields,
@ -626,7 +629,7 @@ export class DataVisualizer {
aggs: buildSamplerAggregation(aggs, samplerShardSize),
};
const resp = await this.callAsCurrentUser('search', {
const resp = await this._callAsCurrentUser('search', {
index,
rest_total_hits_as_int: true,
size,
@ -693,7 +696,7 @@ export class DataVisualizer {
};
filterCriteria.push({ exists: { field } });
const resp = await this.callAsCurrentUser('search', {
const resp = await this._callAsCurrentUser('search', {
index,
rest_total_hits_as_int: true,
size,
@ -735,7 +738,7 @@ export class DataVisualizer {
aggs,
};
const resp = await this.callAsCurrentUser('search', {
const resp = await this._callAsCurrentUser('search', {
index,
size,
body,
@ -838,7 +841,7 @@ export class DataVisualizer {
aggs: buildSamplerAggregation(aggs, samplerShardSize),
};
const resp = await this.callAsCurrentUser('search', {
const resp = await this._callAsCurrentUser('search', {
index,
size,
body,
@ -959,7 +962,7 @@ export class DataVisualizer {
aggs: buildSamplerAggregation(aggs, samplerShardSize),
};
const resp = await this.callAsCurrentUser('search', {
const resp = await this._callAsCurrentUser('search', {
index,
size,
body,
@ -1033,7 +1036,7 @@ export class DataVisualizer {
aggs: buildSamplerAggregation(aggs, samplerShardSize),
};
const resp = await this.callAsCurrentUser('search', {
const resp = await this._callAsCurrentUser('search', {
index,
size,
body,
@ -1100,7 +1103,7 @@ export class DataVisualizer {
aggs: buildSamplerAggregation(aggs, samplerShardSize),
};
const resp = await this.callAsCurrentUser('search', {
const resp = await this._callAsCurrentUser('search', {
index,
size,
body,
@ -1162,7 +1165,7 @@ export class DataVisualizer {
},
};
const resp = await this.callAsCurrentUser('search', {
const resp = await this._callAsCurrentUser('search', {
index,
rest_total_hits_as_int: true,
size,

View file

@ -5,7 +5,7 @@
*/
import Boom from 'boom';
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { duration } from 'moment';
import { parseInterval } from '../../../common/util/parse_interval';
import { initCardinalityFieldsCache } from './fields_aggs_cache';
@ -14,7 +14,7 @@ import { initCardinalityFieldsCache } from './fields_aggs_cache';
* Service for carrying out queries to obtain data
* specific to fields in Elasticsearch indices.
*/
export function fieldsServiceProvider(callAsCurrentUser: LegacyAPICaller) {
export function fieldsServiceProvider({ callAsCurrentUser }: ILegacyScopedClusterClient) {
const fieldsAggsCache = initCardinalityFieldsCache();
/**

View file

@ -4,7 +4,7 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import {
AnalysisResult,
FormattedOverrides,
@ -13,9 +13,9 @@ import {
export type InputData = any[];
export function fileDataVisualizerProvider(callAsCurrentUser: LegacyAPICaller) {
export function fileDataVisualizerProvider({ callAsInternalUser }: ILegacyScopedClusterClient) {
async function analyzeFile(data: any, overrides: any): Promise<AnalysisResult> {
const results = await callAsCurrentUser('ml.fileStructure', {
const results = await callAsInternalUser('ml.fileStructure', {
body: data,
...overrides,
});

View file

@ -4,7 +4,7 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { INDEX_META_DATA_CREATED_BY } from '../../../common/constants/file_datavisualizer';
import {
ImportResponse,
@ -15,7 +15,7 @@ import {
} from '../../../common/types/file_datavisualizer';
import { InputData } from './file_data_visualizer';
export function importDataProvider(callAsCurrentUser: LegacyAPICaller) {
export function importDataProvider({ callAsCurrentUser }: ILegacyScopedClusterClient) {
async function importData(
id: string,
index: string,

View file

@ -5,7 +5,7 @@
*/
import Boom from 'boom';
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { DetectorRule, DetectorRuleScope } from '../../../common/types/detector_rules';
@ -58,14 +58,17 @@ interface PartialJob {
}
export class FilterManager {
constructor(private callAsCurrentUser: LegacyAPICaller) {}
private _callAsInternalUser: ILegacyScopedClusterClient['callAsInternalUser'];
constructor({ callAsInternalUser }: ILegacyScopedClusterClient) {
this._callAsInternalUser = callAsInternalUser;
}
async getFilter(filterId: string) {
try {
const [JOBS, FILTERS] = [0, 1];
const results = await Promise.all([
this.callAsCurrentUser('ml.jobs'),
this.callAsCurrentUser('ml.filters', { filterId }),
this._callAsInternalUser('ml.jobs'),
this._callAsInternalUser('ml.filters', { filterId }),
]);
if (results[FILTERS] && results[FILTERS].filters.length) {
@ -87,7 +90,7 @@ export class FilterManager {
async getAllFilters() {
try {
const filtersResp = await this.callAsCurrentUser('ml.filters');
const filtersResp = await this._callAsInternalUser('ml.filters');
return filtersResp.filters;
} catch (error) {
throw Boom.badRequest(error);
@ -98,8 +101,8 @@ export class FilterManager {
try {
const [JOBS, FILTERS] = [0, 1];
const results = await Promise.all([
this.callAsCurrentUser('ml.jobs'),
this.callAsCurrentUser('ml.filters'),
this._callAsInternalUser('ml.jobs'),
this._callAsInternalUser('ml.filters'),
]);
// Build a map of filter_ids against jobs and detectors using that filter.
@ -137,7 +140,7 @@ export class FilterManager {
delete filter.filterId;
try {
// Returns the newly created filter.
return await this.callAsCurrentUser('ml.addFilter', { filterId, body: filter });
return await this._callAsInternalUser('ml.addFilter', { filterId, body: filter });
} catch (error) {
throw Boom.badRequest(error);
}
@ -157,7 +160,7 @@ export class FilterManager {
}
// Returns the newly updated filter.
return await this.callAsCurrentUser('ml.updateFilter', {
return await this._callAsInternalUser('ml.updateFilter', {
filterId,
body,
});
@ -167,7 +170,7 @@ export class FilterManager {
}
async deleteFilter(filterId: string) {
return this.callAsCurrentUser('ml.deleteFilter', { filterId });
return this._callAsInternalUser('ml.deleteFilter', { filterId });
}
buildFiltersInUse(jobsList: PartialJob[]) {

View file

@ -4,10 +4,10 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
export function jobAuditMessagesProvider(
callAsCurrentUser: LegacyAPICaller
mlClusterClient: ILegacyScopedClusterClient
): {
getJobAuditMessages: (jobId?: string, from?: string) => any;
getAuditMessagesSummary: (jobIds?: string[]) => any;

View file

@ -34,14 +34,14 @@ const anomalyDetectorTypeFilter = {
},
};
export function jobAuditMessagesProvider(callAsCurrentUser) {
export function jobAuditMessagesProvider({ callAsCurrentUser, callAsInternalUser }) {
// search for audit messages,
// jobId is optional. without it, all jobs will be listed.
// from is optional and should be a string formatted in ES time units. e.g. 12h, 1d, 7d
async function getJobAuditMessages(jobId, from) {
let gte = null;
if (jobId !== undefined && from === undefined) {
const jobs = await callAsCurrentUser('ml.jobs', { jobId });
const jobs = await callAsInternalUser('ml.jobs', { jobId });
if (jobs.count > 0 && jobs.jobs !== undefined) {
gte = moment(jobs.jobs[0].create_time).valueOf();
}

View file

@ -4,7 +4,7 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { i18n } from '@kbn/i18n';
import { JOB_STATE, DATAFEED_STATE } from '../../../common/constants/states';
import { fillResultsWithTimeouts, isRequestTimeout } from './error_utils';
@ -26,7 +26,7 @@ interface Results {
};
}
export function datafeedsProvider(callAsCurrentUser: LegacyAPICaller) {
export function datafeedsProvider({ callAsInternalUser }: ILegacyScopedClusterClient) {
async function forceStartDatafeeds(datafeedIds: string[], start?: number, end?: number) {
const jobIds = await getJobIdsByDatafeedId();
const doStartsCalled = datafeedIds.reduce((acc, cur) => {
@ -84,7 +84,7 @@ export function datafeedsProvider(callAsCurrentUser: LegacyAPICaller) {
async function openJob(jobId: string) {
let opened = false;
try {
const resp = await callAsCurrentUser('ml.openJob', { jobId });
const resp = await callAsInternalUser('ml.openJob', { jobId });
opened = resp.opened;
} catch (error) {
if (error.statusCode === 409) {
@ -97,7 +97,7 @@ export function datafeedsProvider(callAsCurrentUser: LegacyAPICaller) {
}
async function startDatafeed(datafeedId: string, start?: number, end?: number) {
return callAsCurrentUser('ml.startDatafeed', { datafeedId, start, end });
return callAsInternalUser('ml.startDatafeed', { datafeedId, start, end });
}
async function stopDatafeeds(datafeedIds: string[]) {
@ -105,7 +105,7 @@ export function datafeedsProvider(callAsCurrentUser: LegacyAPICaller) {
for (const datafeedId of datafeedIds) {
try {
results[datafeedId] = await callAsCurrentUser('ml.stopDatafeed', { datafeedId });
results[datafeedId] = await callAsInternalUser('ml.stopDatafeed', { datafeedId });
} catch (error) {
if (isRequestTimeout(error)) {
return fillResultsWithTimeouts(results, datafeedId, datafeedIds, DATAFEED_STATE.STOPPED);
@ -117,11 +117,11 @@ export function datafeedsProvider(callAsCurrentUser: LegacyAPICaller) {
}
async function forceDeleteDatafeed(datafeedId: string) {
return callAsCurrentUser('ml.deleteDatafeed', { datafeedId, force: true });
return callAsInternalUser('ml.deleteDatafeed', { datafeedId, force: true });
}
async function getDatafeedIdsByJobId() {
const { datafeeds } = await callAsCurrentUser<MlDatafeedsResponse>('ml.datafeeds');
const { datafeeds } = (await callAsInternalUser('ml.datafeeds')) as MlDatafeedsResponse;
return datafeeds.reduce((acc, cur) => {
acc[cur.job_id] = cur.datafeed_id;
return acc;
@ -129,7 +129,7 @@ export function datafeedsProvider(callAsCurrentUser: LegacyAPICaller) {
}
async function getJobIdsByDatafeedId() {
const { datafeeds } = await callAsCurrentUser<MlDatafeedsResponse>('ml.datafeeds');
const { datafeeds } = (await callAsInternalUser('ml.datafeeds')) as MlDatafeedsResponse;
return datafeeds.reduce((acc, cur) => {
acc[cur.datafeed_id] = cur.job_id;
return acc;

View file

@ -4,7 +4,7 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { CalendarManager } from '../calendar';
import { GLOBAL_CALENDAR } from '../../../common/constants/calendars';
import { Job } from '../../../common/types/anomaly_detection_jobs';
@ -23,14 +23,15 @@ interface Results {
};
}
export function groupsProvider(callAsCurrentUser: LegacyAPICaller) {
const calMngr = new CalendarManager(callAsCurrentUser);
export function groupsProvider(mlClusterClient: ILegacyScopedClusterClient) {
const calMngr = new CalendarManager(mlClusterClient);
const { callAsInternalUser } = mlClusterClient;
async function getAllGroups() {
const groups: { [id: string]: Group } = {};
const jobIds: { [id: string]: undefined | null } = {};
const [{ jobs }, calendars] = await Promise.all([
callAsCurrentUser<MlJobsResponse>('ml.jobs'),
callAsInternalUser('ml.jobs') as Promise<MlJobsResponse>,
calMngr.getAllCalendars(),
]);
@ -79,7 +80,7 @@ export function groupsProvider(callAsCurrentUser: LegacyAPICaller) {
for (const job of jobs) {
const { job_id: jobId, groups } = job;
try {
await callAsCurrentUser('ml.updateJob', { jobId, body: { groups } });
await callAsInternalUser('ml.updateJob', { jobId, body: { groups } });
results[jobId] = { success: true };
} catch (error) {
results[jobId] = { success: false, error };

View file

@ -4,7 +4,7 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { datafeedsProvider } from './datafeeds';
import { jobsProvider } from './jobs';
import { groupsProvider } from './groups';
@ -12,14 +12,14 @@ import { newJobCapsProvider } from './new_job_caps';
import { newJobChartsProvider, topCategoriesProvider } from './new_job';
import { modelSnapshotProvider } from './model_snapshots';
export function jobServiceProvider(callAsCurrentUser: LegacyAPICaller) {
export function jobServiceProvider(mlClusterClient: ILegacyScopedClusterClient) {
return {
...datafeedsProvider(callAsCurrentUser),
...jobsProvider(callAsCurrentUser),
...groupsProvider(callAsCurrentUser),
...newJobCapsProvider(callAsCurrentUser),
...newJobChartsProvider(callAsCurrentUser),
...topCategoriesProvider(callAsCurrentUser),
...modelSnapshotProvider(callAsCurrentUser),
...datafeedsProvider(mlClusterClient),
...jobsProvider(mlClusterClient),
...groupsProvider(mlClusterClient),
...newJobCapsProvider(mlClusterClient),
...newJobChartsProvider(mlClusterClient),
...topCategoriesProvider(mlClusterClient),
...modelSnapshotProvider(mlClusterClient),
};
}
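The providers above all follow the same shape, which a short hypothetical example makes explicit: each provider now receives the whole scoped cluster client and destructures whichever caller it needs, so ML endpoints run as the internal user while plain document searches stay scoped to the requesting user.
import { ILegacyScopedClusterClient } from 'kibana/server';
// Hypothetical provider for illustration only; not part of this commit.
export function exampleProvider({
  callAsCurrentUser,
  callAsInternalUser,
}: ILegacyScopedClusterClient) {
  // Document-level searches keep the privileges of the requesting user.
  async function countDocs(index: string) {
    return callAsCurrentUser('search', { index, size: 0 });
  }
  // Calls to ML endpoints run as the internal user, as in the providers above.
  async function listJobs() {
    return callAsInternalUser('ml.jobs');
  }
  return { countDocs, listJobs };
}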

View file

@ -7,7 +7,7 @@
import { i18n } from '@kbn/i18n';
import { uniq } from 'lodash';
import Boom from 'boom';
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { JOB_STATE, DATAFEED_STATE } from '../../../common/constants/states';
import {
MlSummaryJob,
@ -46,14 +46,16 @@ interface Results {
};
}
export function jobsProvider(callAsCurrentUser: LegacyAPICaller) {
const { forceDeleteDatafeed, getDatafeedIdsByJobId } = datafeedsProvider(callAsCurrentUser);
const { getAuditMessagesSummary } = jobAuditMessagesProvider(callAsCurrentUser);
const { getLatestBucketTimestampByJob } = resultsServiceProvider(callAsCurrentUser);
const calMngr = new CalendarManager(callAsCurrentUser);
export function jobsProvider(mlClusterClient: ILegacyScopedClusterClient) {
const { callAsCurrentUser, callAsInternalUser } = mlClusterClient;
const { forceDeleteDatafeed, getDatafeedIdsByJobId } = datafeedsProvider(mlClusterClient);
const { getAuditMessagesSummary } = jobAuditMessagesProvider(mlClusterClient);
const { getLatestBucketTimestampByJob } = resultsServiceProvider(mlClusterClient);
const calMngr = new CalendarManager(mlClusterClient);
async function forceDeleteJob(jobId: string) {
return callAsCurrentUser('ml.deleteJob', { jobId, force: true });
return callAsInternalUser('ml.deleteJob', { jobId, force: true });
}
async function deleteJobs(jobIds: string[]) {
@ -97,7 +99,7 @@ export function jobsProvider(callAsCurrentUser: LegacyAPICaller) {
const results: Results = {};
for (const jobId of jobIds) {
try {
await callAsCurrentUser('ml.closeJob', { jobId });
await callAsInternalUser('ml.closeJob', { jobId });
results[jobId] = { closed: true };
} catch (error) {
if (isRequestTimeout(error)) {
@ -113,7 +115,7 @@ export function jobsProvider(callAsCurrentUser: LegacyAPICaller) {
// if the job has failed we want to attempt a force close.
// however, if we received a 409 due to the datafeed being started we should not attempt a force close.
try {
await callAsCurrentUser('ml.closeJob', { jobId, force: true });
await callAsInternalUser('ml.closeJob', { jobId, force: true });
results[jobId] = { closed: true };
} catch (error2) {
if (isRequestTimeout(error)) {
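The close logic in this hunk follows a fallback pattern that can be sketched in isolation (a simplified, hypothetical helper; the 409 handling mirrors the comments above and the timeout handling is omitted):
async function closeJobWithFallback(
  callAsInternalUser: (endpoint: string, params: Record<string, unknown>) => Promise<unknown>,
  jobId: string
) {
  try {
    await callAsInternalUser('ml.closeJob', { jobId });
    return { closed: true };
  } catch (error) {
    // A 409 means the datafeed is still started, so a force close must not be attempted.
    if (error.statusCode === 409) {
      return { closed: false, error };
    }
    // Otherwise the job may have failed; retry the close with force: true.
    await callAsInternalUser('ml.closeJob', { jobId, force: true });
    return { closed: true };
  }
}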
@ -136,12 +138,12 @@ export function jobsProvider(callAsCurrentUser: LegacyAPICaller) {
throw Boom.notFound(`Cannot find datafeed for job ${jobId}`);
}
const dfResult = await callAsCurrentUser('ml.stopDatafeed', { datafeedId, force: true });
const dfResult = await callAsInternalUser('ml.stopDatafeed', { datafeedId, force: true });
if (!dfResult || dfResult.stopped !== true) {
return { success: false };
}
await callAsCurrentUser('ml.closeJob', { jobId, force: true });
await callAsInternalUser('ml.closeJob', { jobId, force: true });
return { success: true };
}
@ -257,13 +259,13 @@ export function jobsProvider(callAsCurrentUser: LegacyAPICaller) {
Promise<{ [id: string]: number | undefined }>
] = [
jobIds.length > 0
? callAsCurrentUser<MlJobsResponse>('ml.jobs', { jobId: jobIds }) // move length check inside call
: callAsCurrentUser<MlJobsResponse>('ml.jobs'),
? (callAsInternalUser('ml.jobs', { jobId: jobIds }) as Promise<MlJobsResponse>) // move length check inside call
: (callAsInternalUser('ml.jobs') as Promise<MlJobsResponse>),
jobIds.length > 0
? callAsCurrentUser<MlJobsStatsResponse>('ml.jobStats', { jobId: jobIds })
: callAsCurrentUser<MlJobsStatsResponse>('ml.jobStats'),
callAsCurrentUser<MlDatafeedsResponse>('ml.datafeeds'),
callAsCurrentUser<MlDatafeedsStatsResponse>('ml.datafeedStats'),
? (callAsInternalUser('ml.jobStats', { jobId: jobIds }) as Promise<MlJobsStatsResponse>)
: (callAsInternalUser('ml.jobStats') as Promise<MlJobsStatsResponse>),
callAsInternalUser('ml.datafeeds') as Promise<MlDatafeedsResponse>,
callAsInternalUser('ml.datafeedStats') as Promise<MlDatafeedsStatsResponse>,
calMngr.getAllCalendars(),
getLatestBucketTimestampByJob(),
];
@ -402,7 +404,7 @@ export function jobsProvider(callAsCurrentUser: LegacyAPICaller) {
} catch (e) {
// if the user doesn't have permission to load the task list,
// use the jobs list to get the ids of deleting jobs
const { jobs } = await callAsCurrentUser<MlJobsResponse>('ml.jobs');
const { jobs } = (await callAsInternalUser('ml.jobs')) as MlJobsResponse;
jobIds.push(...jobs.filter((j) => j.deleting === true).map((j) => j.job_id));
}
return { jobIds };
@ -413,9 +415,9 @@ export function jobsProvider(callAsCurrentUser: LegacyAPICaller) {
// e.g. *_low_request_rate_ecs
async function jobsExist(jobIds: string[] = []) {
// Get the list of job IDs.
const jobsInfo = await callAsCurrentUser<MlJobsResponse>('ml.jobs', {
const jobsInfo = (await callAsInternalUser('ml.jobs', {
jobId: jobIds,
});
})) as MlJobsResponse;
const results: { [id: string]: boolean } = {};
if (jobsInfo.count > 0) {
@ -438,8 +440,8 @@ export function jobsProvider(callAsCurrentUser: LegacyAPICaller) {
}
async function getAllJobAndGroupIds() {
const { getAllGroups } = groupsProvider(callAsCurrentUser);
const jobs = await callAsCurrentUser<MlJobsResponse>('ml.jobs');
const { getAllGroups } = groupsProvider(mlClusterClient);
const jobs = (await callAsInternalUser('ml.jobs')) as MlJobsResponse;
const jobIds = jobs.jobs.map((job) => job.job_id);
const groups = await getAllGroups();
const groupIds = groups.map((group) => group.id);
@ -453,7 +455,7 @@ export function jobsProvider(callAsCurrentUser: LegacyAPICaller) {
async function getLookBackProgress(jobId: string, start: number, end: number) {
const datafeedId = `datafeed-${jobId}`;
const [jobStats, isRunning] = await Promise.all([
callAsCurrentUser<MlJobsStatsResponse>('ml.jobStats', { jobId: [jobId] }),
callAsInternalUser('ml.jobStats', { jobId: [jobId] }) as Promise<MlJobsStatsResponse>,
isDatafeedRunning(datafeedId),
]);
@ -472,9 +474,9 @@ export function jobsProvider(callAsCurrentUser: LegacyAPICaller) {
}
async function isDatafeedRunning(datafeedId: string) {
const stats = await callAsCurrentUser<MlDatafeedsStatsResponse>('ml.datafeedStats', {
const stats = (await callAsInternalUser('ml.datafeedStats', {
datafeedId: [datafeedId],
});
})) as MlDatafeedsStatsResponse;
if (stats.datafeeds.length) {
const state = stats.datafeeds[0].state;
return (

View file

@ -6,10 +6,9 @@
import Boom from 'boom';
import { i18n } from '@kbn/i18n';
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { ModelSnapshot } from '../../../common/types/anomaly_detection_jobs';
import { datafeedsProvider, MlDatafeedsResponse } from './datafeeds';
import { MlJobsResponse } from './jobs';
import { datafeedsProvider } from './datafeeds';
import { FormCalendar, CalendarManager } from '../calendar';
export interface ModelSnapshotsResponse {
@ -20,8 +19,9 @@ export interface RevertModelSnapshotResponse {
model: ModelSnapshot;
}
export function modelSnapshotProvider(callAsCurrentUser: LegacyAPICaller) {
const { forceStartDatafeeds, getDatafeedIdsByJobId } = datafeedsProvider(callAsCurrentUser);
export function modelSnapshotProvider(mlClusterClient: ILegacyScopedClusterClient) {
const { callAsInternalUser } = mlClusterClient;
const { forceStartDatafeeds, getDatafeedIdsByJobId } = datafeedsProvider(mlClusterClient);
async function revertModelSnapshot(
jobId: string,
@ -33,12 +33,12 @@ export function modelSnapshotProvider(callAsCurrentUser: LegacyAPICaller) {
) {
let datafeedId = `datafeed-${jobId}`;
// ensure job exists
await callAsCurrentUser<MlJobsResponse>('ml.jobs', { jobId: [jobId] });
await callAsInternalUser('ml.jobs', { jobId: [jobId] });
try {
// ensure the datafeed exists
// the datafeed is probably called datafeed-<jobId>
await callAsCurrentUser<MlDatafeedsResponse>('ml.datafeeds', {
await callAsInternalUser('ml.datafeeds', {
datafeedId: [datafeedId],
});
} catch (e) {
@ -52,22 +52,19 @@ export function modelSnapshotProvider(callAsCurrentUser: LegacyAPICaller) {
}
// ensure the snapshot exists
const snapshot = await callAsCurrentUser<ModelSnapshotsResponse>('ml.modelSnapshots', {
const snapshot = (await callAsInternalUser('ml.modelSnapshots', {
jobId,
snapshotId,
});
})) as ModelSnapshotsResponse;
// apply the snapshot revert
const { model } = await callAsCurrentUser<RevertModelSnapshotResponse>(
'ml.revertModelSnapshot',
{
jobId,
snapshotId,
body: {
delete_intervening_results: deleteInterveningResults,
},
}
);
const { model } = (await callAsInternalUser('ml.revertModelSnapshot', {
jobId,
snapshotId,
body: {
delete_intervening_results: deleteInterveningResults,
},
})) as RevertModelSnapshotResponse;
// create calendar (if specified) and replay datafeed
if (replay && model.snapshot_id === snapshotId && snapshot.model_snapshots.length) {
@ -88,7 +85,7 @@ export function modelSnapshotProvider(callAsCurrentUser: LegacyAPICaller) {
end_time: s.end,
})),
};
const cm = new CalendarManager(callAsCurrentUser);
const cm = new CalendarManager(mlClusterClient);
await cm.newCalendar(calendar);
}

View file

@ -4,6 +4,7 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { ILegacyScopedClusterClient } from 'kibana/server';
import { chunk } from 'lodash';
import { SearchResponse } from 'elasticsearch';
import { CATEGORY_EXAMPLES_SAMPLE_SIZE } from '../../../../../common/constants/categorization_job';
@ -12,15 +13,14 @@ import {
CategorizationAnalyzer,
CategoryFieldExample,
} from '../../../../../common/types/categories';
import { callWithRequestType } from '../../../../../common/types/kibana';
import { ValidationResults } from './validation_results';
const CHUNK_SIZE = 100;
export function categorizationExamplesProvider(
callWithRequest: callWithRequestType,
callWithInternalUser: callWithRequestType
) {
export function categorizationExamplesProvider({
callAsCurrentUser,
callAsInternalUser,
}: ILegacyScopedClusterClient) {
const validationResults = new ValidationResults();
async function categorizationExamples(
@ -57,7 +57,7 @@ export function categorizationExamplesProvider(
}
}
const results: SearchResponse<{ [id: string]: string }> = await callWithRequest('search', {
const results: SearchResponse<{ [id: string]: string }> = await callAsCurrentUser('search', {
index: indexPatternTitle,
size,
body: {
@ -112,7 +112,7 @@ export function categorizationExamplesProvider(
}
async function loadTokens(examples: string[], analyzer: CategorizationAnalyzer) {
const { tokens }: { tokens: Token[] } = await callWithInternalUser('indices.analyze', {
const { tokens }: { tokens: Token[] } = await callAsInternalUser('indices.analyze', {
body: {
...getAnalyzer(analyzer),
text: examples,

View file

@ -5,13 +5,13 @@
*/
import { SearchResponse } from 'elasticsearch';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { ML_RESULTS_INDEX_PATTERN } from '../../../../../common/constants/index_patterns';
import { CategoryId, Category } from '../../../../../common/types/categories';
import { callWithRequestType } from '../../../../../common/types/kibana';
export function topCategoriesProvider(callWithRequest: callWithRequestType) {
export function topCategoriesProvider({ callAsCurrentUser }: ILegacyScopedClusterClient) {
async function getTotalCategories(jobId: string): Promise<{ total: number }> {
const totalResp = await callWithRequest('search', {
const totalResp = await callAsCurrentUser('search', {
index: ML_RESULTS_INDEX_PATTERN,
size: 0,
body: {
@ -37,7 +37,7 @@ export function topCategoriesProvider(callWithRequest: callWithRequestType) {
}
async function getTopCategoryCounts(jobId: string, numberOfCategories: number) {
const top: SearchResponse<any> = await callWithRequest('search', {
const top: SearchResponse<any> = await callAsCurrentUser('search', {
index: ML_RESULTS_INDEX_PATTERN,
size: 0,
body: {
@ -99,7 +99,7 @@ export function topCategoriesProvider(callWithRequest: callWithRequestType) {
field: 'category_id',
},
};
const result: SearchResponse<any> = await callWithRequest('search', {
const result: SearchResponse<any> = await callAsCurrentUser('search', {
index: ML_RESULTS_INDEX_PATTERN,
size,
body: {

View file

@ -4,13 +4,13 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { ILegacyScopedClusterClient } from 'kibana/server';
import { newJobLineChartProvider } from './line_chart';
import { newJobPopulationChartProvider } from './population_chart';
import { callWithRequestType } from '../../../../common/types/kibana';
export function newJobChartsProvider(callWithRequest: callWithRequestType) {
const { newJobLineChart } = newJobLineChartProvider(callWithRequest);
const { newJobPopulationChart } = newJobPopulationChartProvider(callWithRequest);
export function newJobChartsProvider(mlClusterClient: ILegacyScopedClusterClient) {
const { newJobLineChart } = newJobLineChartProvider(mlClusterClient);
const { newJobPopulationChart } = newJobPopulationChartProvider(mlClusterClient);
return {
newJobLineChart,

View file

@ -5,8 +5,8 @@
*/
import { get } from 'lodash';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { AggFieldNamePair, EVENT_RATE_FIELD_ID } from '../../../../common/types/fields';
import { callWithRequestType } from '../../../../common/types/kibana';
import { ML_MEDIAN_PERCENTS } from '../../../../common/util/job_utils';
type DtrIndex = number;
@ -23,7 +23,7 @@ interface ProcessedResults {
totalResults: number;
}
export function newJobLineChartProvider(callWithRequest: callWithRequestType) {
export function newJobLineChartProvider({ callAsCurrentUser }: ILegacyScopedClusterClient) {
async function newJobLineChart(
indexPatternTitle: string,
timeField: string,
@ -47,7 +47,7 @@ export function newJobLineChartProvider(callWithRequest: callWithRequestType) {
splitFieldValue
);
const results = await callWithRequest('search', json);
const results = await callAsCurrentUser('search', json);
return processSearchResults(
results,
aggFieldNamePairs.map((af) => af.field)

View file

@ -5,8 +5,8 @@
*/
import { get } from 'lodash';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { AggFieldNamePair, EVENT_RATE_FIELD_ID } from '../../../../common/types/fields';
import { callWithRequestType } from '../../../../common/types/kibana';
import { ML_MEDIAN_PERCENTS } from '../../../../common/util/job_utils';
const OVER_FIELD_EXAMPLES_COUNT = 40;
@ -29,7 +29,7 @@ interface ProcessedResults {
totalResults: number;
}
export function newJobPopulationChartProvider(callWithRequest: callWithRequestType) {
export function newJobPopulationChartProvider({ callAsCurrentUser }: ILegacyScopedClusterClient) {
async function newJobPopulationChart(
indexPatternTitle: string,
timeField: string,
@ -52,7 +52,7 @@ export function newJobPopulationChartProvider(callWithRequest: callWithRequestTy
);
try {
const results = await callWithRequest('search', json);
const results = await callAsCurrentUser('search', json);
return processSearchResults(
results,
aggFieldNamePairs.map((af) => af.field)

View file

@ -4,6 +4,7 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { ILegacyScopedClusterClient } from 'kibana/server';
import { cloneDeep } from 'lodash';
import { SavedObjectsClientContract } from 'kibana/server';
import {
@ -39,32 +40,32 @@ const supportedTypes: string[] = [
export function fieldServiceProvider(
indexPattern: string,
isRollup: boolean,
callWithRequest: any,
mlClusterClient: ILegacyScopedClusterClient,
savedObjectsClient: SavedObjectsClientContract
) {
return new FieldsService(indexPattern, isRollup, callWithRequest, savedObjectsClient);
return new FieldsService(indexPattern, isRollup, mlClusterClient, savedObjectsClient);
}
class FieldsService {
private _indexPattern: string;
private _isRollup: boolean;
private _callWithRequest: any;
private _mlClusterClient: ILegacyScopedClusterClient;
private _savedObjectsClient: SavedObjectsClientContract;
constructor(
indexPattern: string,
isRollup: boolean,
callWithRequest: any,
savedObjectsClient: any
mlClusterClient: ILegacyScopedClusterClient,
savedObjectsClient: SavedObjectsClientContract
) {
this._indexPattern = indexPattern;
this._isRollup = isRollup;
this._callWithRequest = callWithRequest;
this._mlClusterClient = mlClusterClient;
this._savedObjectsClient = savedObjectsClient;
}
private async loadFieldCaps(): Promise<any> {
return this._callWithRequest('fieldCaps', {
return this._mlClusterClient.callAsCurrentUser('fieldCaps', {
index: this._indexPattern,
fields: '*',
});
@ -108,7 +109,7 @@ class FieldsService {
if (this._isRollup) {
const rollupService = await rollupServiceProvider(
this._indexPattern,
this._callWithRequest,
this._mlClusterClient,
this._savedObjectsClient
);
const rollupConfigs: RollupJob[] | null = await rollupService.getRollupJobs();

View file

@ -16,19 +16,23 @@ import farequoteJobCapsEmpty from './__mocks__/results/farequote_job_caps_empty.
import cloudwatchJobCaps from './__mocks__/results/cloudwatch_rollup_job_caps.json';
describe('job_service - job_caps', () => {
let callWithRequestNonRollupMock: jest.Mock;
let callWithRequestRollupMock: jest.Mock;
let mlClusterClientNonRollupMock: any;
let mlClusterClientRollupMock: any;
let savedObjectsClientMock: any;
beforeEach(() => {
callWithRequestNonRollupMock = jest.fn((action: string) => {
const callAsNonRollupMock = jest.fn((action: string) => {
switch (action) {
case 'fieldCaps':
return farequoteFieldCaps;
}
});
mlClusterClientNonRollupMock = {
callAsCurrentUser: callAsNonRollupMock,
callAsInternalUser: callAsNonRollupMock,
};
callWithRequestRollupMock = jest.fn((action: string) => {
const callAsRollupMock = jest.fn((action: string) => {
switch (action) {
case 'fieldCaps':
return cloudwatchFieldCaps;
@ -36,6 +40,10 @@ describe('job_service - job_caps', () => {
return Promise.resolve(rollupCaps);
}
});
mlClusterClientRollupMock = {
callAsCurrentUser: callAsRollupMock,
callAsInternalUser: callAsRollupMock,
};
savedObjectsClientMock = {
async find() {
@ -48,7 +56,7 @@ describe('job_service - job_caps', () => {
it('can get job caps for index pattern', async (done) => {
const indexPattern = 'farequote-*';
const isRollup = false;
const { newJobCaps } = newJobCapsProvider(callWithRequestNonRollupMock);
const { newJobCaps } = newJobCapsProvider(mlClusterClientNonRollupMock);
const response = await newJobCaps(indexPattern, isRollup, savedObjectsClientMock);
expect(response).toEqual(farequoteJobCaps);
done();
@ -57,7 +65,7 @@ describe('job_service - job_caps', () => {
it('can get rollup job caps for non rollup index pattern', async (done) => {
const indexPattern = 'farequote-*';
const isRollup = true;
const { newJobCaps } = newJobCapsProvider(callWithRequestNonRollupMock);
const { newJobCaps } = newJobCapsProvider(mlClusterClientNonRollupMock);
const response = await newJobCaps(indexPattern, isRollup, savedObjectsClientMock);
expect(response).toEqual(farequoteJobCapsEmpty);
done();
@ -68,7 +76,7 @@ describe('job_service - job_caps', () => {
it('can get rollup job caps for rollup index pattern', async (done) => {
const indexPattern = 'cloud_roll_index';
const isRollup = true;
const { newJobCaps } = newJobCapsProvider(callWithRequestRollupMock);
const { newJobCaps } = newJobCapsProvider(mlClusterClientRollupMock);
const response = await newJobCaps(indexPattern, isRollup, savedObjectsClientMock);
expect(response).toEqual(cloudwatchJobCaps);
done();
@ -77,7 +85,7 @@ describe('job_service - job_caps', () => {
it('can get non rollup job caps for rollup index pattern', async (done) => {
const indexPattern = 'cloud_roll_index';
const isRollup = false;
const { newJobCaps } = newJobCapsProvider(callWithRequestRollupMock);
const { newJobCaps } = newJobCapsProvider(mlClusterClientRollupMock);
const response = await newJobCaps(indexPattern, isRollup, savedObjectsClientMock);
expect(response).not.toEqual(cloudwatchJobCaps);
done();

View file

@ -4,7 +4,7 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { SavedObjectsClientContract } from 'kibana/server';
import { ILegacyScopedClusterClient, SavedObjectsClientContract } from 'kibana/server';
import { Aggregation, Field, NewJobCaps } from '../../../../common/types/fields';
import { fieldServiceProvider } from './field_service';
@ -12,7 +12,7 @@ interface NewJobCapsResponse {
[indexPattern: string]: NewJobCaps;
}
export function newJobCapsProvider(callWithRequest: any) {
export function newJobCapsProvider(mlClusterClient: ILegacyScopedClusterClient) {
async function newJobCaps(
indexPattern: string,
isRollup: boolean = false,
@ -21,7 +21,7 @@ export function newJobCapsProvider(callWithRequest: any) {
const fieldService = fieldServiceProvider(
indexPattern,
isRollup,
callWithRequest,
mlClusterClient,
savedObjectsClient
);
const { aggs, fields } = await fieldService.getData();

View file

@ -4,6 +4,7 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { ILegacyScopedClusterClient } from 'kibana/server';
import { SavedObject } from 'kibana/server';
import { IndexPatternAttributes } from 'src/plugins/data/server';
import { SavedObjectsClientContract } from 'kibana/server';
@ -21,7 +22,7 @@ export interface RollupJob {
export async function rollupServiceProvider(
indexPattern: string,
callWithRequest: any,
{ callAsCurrentUser }: ILegacyScopedClusterClient,
savedObjectsClient: SavedObjectsClientContract
) {
const rollupIndexPatternObject = await loadRollupIndexPattern(indexPattern, savedObjectsClient);
@ -31,7 +32,7 @@ export async function rollupServiceProvider(
if (rollupIndexPatternObject !== null) {
const parsedTypeMetaData = JSON.parse(rollupIndexPatternObject.attributes.typeMeta);
const rollUpIndex: string = parsedTypeMetaData.params.rollup_index;
const rollupCaps = await callWithRequest('ml.rollupIndexCapabilities', {
const rollupCaps = await callAsCurrentUser('ml.rollupIndexCapabilities', {
indexPattern: rollUpIndex,
});

View file

@ -4,28 +4,48 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { validateJob, ValidateJobPayload } from './job_validation';
import { JobValidationMessage } from '../../../common/constants/messages';
// mock callWithRequest
const callWithRequest: LegacyAPICaller = (method: string) => {
return new Promise((resolve) => {
if (method === 'fieldCaps') {
resolve({ fields: [] });
return;
} else if (method === 'ml.info') {
resolve({
limits: {
effective_max_model_memory_limit: '100MB',
max_model_memory_limit: '1GB',
},
});
}
resolve({});
}) as Promise<any>;
};
const mlClusterClient = ({
// mock callAsCurrentUser
callAsCurrentUser: (method: string) => {
return new Promise((resolve) => {
if (method === 'fieldCaps') {
resolve({ fields: [] });
return;
} else if (method === 'ml.info') {
resolve({
limits: {
effective_max_model_memory_limit: '100MB',
max_model_memory_limit: '1GB',
},
});
}
resolve({});
}) as Promise<any>;
},
// mock callAsInternalUser
callAsInternalUser: (method: string) => {
return new Promise((resolve) => {
if (method === 'fieldCaps') {
resolve({ fields: [] });
return;
} else if (method === 'ml.info') {
resolve({
limits: {
effective_max_model_memory_limit: '100MB',
max_model_memory_limit: '1GB',
},
});
}
resolve({});
}) as Promise<any>;
},
} as unknown) as ILegacyScopedClusterClient;
// Note: The tests cast `payload` as any
// so we can simulate possible runtime payloads
@ -36,7 +56,7 @@ describe('ML - validateJob', () => {
job: { analysis_config: { detectors: [] } },
} as unknown) as ValidateJobPayload;
return validateJob(callWithRequest, payload).then((messages) => {
return validateJob(mlClusterClient, payload).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toStrictEqual([
@ -56,7 +76,7 @@ describe('ML - validateJob', () => {
job_id: id,
},
} as unknown) as ValidateJobPayload;
return validateJob(callWithRequest, payload).catch(() => {
return validateJob(mlClusterClient, payload).catch(() => {
new Error('Promise should not fail for jobIdTests.');
});
});
@ -77,7 +97,7 @@ describe('ML - validateJob', () => {
job: { analysis_config: { detectors: [] }, groups: testIds },
} as unknown) as ValidateJobPayload;
return validateJob(callWithRequest, payload).then((messages) => {
return validateJob(mlClusterClient, payload).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids.includes(messageId)).toBe(true);
});
@ -117,7 +137,7 @@ describe('ML - validateJob', () => {
const payload = ({
job: { analysis_config: { bucket_span: format, detectors: [] } },
} as unknown) as ValidateJobPayload;
return validateJob(callWithRequest, payload).catch(() => {
return validateJob(mlClusterClient, payload).catch(() => {
new Error('Promise should not fail for bucketSpanFormatTests.');
});
});
@ -152,11 +172,11 @@ describe('ML - validateJob', () => {
function: '',
});
payload.job.analysis_config.detectors.push({
// @ts-ignore
// @ts-expect-error
function: undefined,
});
return validateJob(callWithRequest, payload).then((messages) => {
return validateJob(mlClusterClient, payload).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids.includes('detectors_function_empty')).toBe(true);
});
@ -170,7 +190,7 @@ describe('ML - validateJob', () => {
function: 'count',
});
return validateJob(callWithRequest, payload).then((messages) => {
return validateJob(mlClusterClient, payload).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids.includes('detectors_function_not_empty')).toBe(true);
});
@ -182,7 +202,7 @@ describe('ML - validateJob', () => {
fields: {},
} as unknown) as ValidateJobPayload;
return validateJob(callWithRequest, payload).then((messages) => {
return validateJob(mlClusterClient, payload).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids.includes('index_fields_invalid')).toBe(true);
});
@ -194,7 +214,7 @@ describe('ML - validateJob', () => {
fields: { testField: {} },
} as unknown) as ValidateJobPayload;
return validateJob(callWithRequest, payload).then((messages) => {
return validateJob(mlClusterClient, payload).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids.includes('index_fields_valid')).toBe(true);
});
@ -222,7 +242,7 @@ describe('ML - validateJob', () => {
const payload = getBasicPayload() as any;
delete payload.job.analysis_config.influencers;
validateJob(callWithRequest, payload).then(
validateJob(mlClusterClient, payload).then(
() =>
done(
new Error('Promise should not resolve for this test when influencers is not an Array.')
@ -234,7 +254,7 @@ describe('ML - validateJob', () => {
it('detect duplicate detectors', () => {
const payload = getBasicPayload() as any;
payload.job.analysis_config.detectors.push({ function: 'count' });
return validateJob(callWithRequest, payload).then((messages) => {
return validateJob(mlClusterClient, payload).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toStrictEqual([
'job_id_valid',
@ -257,7 +277,7 @@ describe('ML - validateJob', () => {
{ function: 'count', by_field_name: 'airline' },
{ function: 'count', partition_field_name: 'airline' },
];
return validateJob(callWithRequest, payload).then((messages) => {
return validateJob(mlClusterClient, payload).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toStrictEqual([
'job_id_valid',
@ -272,7 +292,7 @@ describe('ML - validateJob', () => {
// Failing https://github.com/elastic/kibana/issues/65865
it('basic validation passes, extended checks return some messages', () => {
const payload = getBasicPayload();
return validateJob(callWithRequest, payload).then((messages) => {
return validateJob(mlClusterClient, payload).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toStrictEqual([
'job_id_valid',
@ -305,7 +325,7 @@ describe('ML - validateJob', () => {
fields: { testField: {} },
};
return validateJob(callWithRequest, payload).then((messages) => {
return validateJob(mlClusterClient, payload).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toStrictEqual([
'job_id_valid',
@ -338,7 +358,7 @@ describe('ML - validateJob', () => {
fields: { testField: {} },
};
return validateJob(callWithRequest, payload).then((messages) => {
return validateJob(mlClusterClient, payload).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toStrictEqual([
'job_id_valid',
@ -381,7 +401,7 @@ describe('ML - validateJob', () => {
fields: { testField: {} },
};
return validateJob(callWithRequest, payload).then((messages) => {
return validateJob(mlClusterClient, payload).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toStrictEqual([
'job_id_valid',
@ -400,7 +420,7 @@ describe('ML - validateJob', () => {
const docsTestPayload = getBasicPayload() as any;
docsTestPayload.job.analysis_config.detectors = [{ function: 'count', by_field_name: 'airline' }];
it('creates a docs url pointing to the current docs version', () => {
return validateJob(callWithRequest, docsTestPayload).then((messages) => {
return validateJob(mlClusterClient, docsTestPayload).then((messages) => {
const message = messages[
messages.findIndex((m) => m.id === 'field_not_aggregatable')
] as JobValidationMessage;
@ -409,7 +429,7 @@ describe('ML - validateJob', () => {
});
it('creates a docs url pointing to the master docs version', () => {
return validateJob(callWithRequest, docsTestPayload, 'master').then((messages) => {
return validateJob(mlClusterClient, docsTestPayload, 'master').then((messages) => {
const message = messages[
messages.findIndex((m) => m.id === 'field_not_aggregatable')
] as JobValidationMessage;

View file

@ -6,7 +6,7 @@
import { i18n } from '@kbn/i18n';
import Boom from 'boom';
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { TypeOf } from '@kbn/config-schema';
import { fieldsServiceProvider } from '../fields_service';
@ -19,7 +19,7 @@ import {
import { VALIDATION_STATUS } from '../../../common/constants/validation';
import { basicJobValidation, uniqWithIsEqual } from '../../../common/util/job_utils';
// @ts-ignore
// @ts-expect-error
import { validateBucketSpan } from './validate_bucket_span';
import { validateCardinality } from './validate_cardinality';
import { validateInfluencers } from './validate_influencers';
@ -35,10 +35,9 @@ export type ValidateJobPayload = TypeOf<typeof validateJobSchema>;
* @kbn/config-schema has checked the payload {@link validateJobSchema}.
*/
export async function validateJob(
callWithRequest: LegacyAPICaller,
mlClusterClient: ILegacyScopedClusterClient,
payload: ValidateJobPayload,
kbnVersion = 'current',
callAsInternalUser?: LegacyAPICaller,
isSecurityDisabled?: boolean
) {
const messages = getMessages();
@ -65,8 +64,8 @@ export async function validateJob(
// if no duration was part of the request, fall back to finding out
// the time range of the time field of the index, but also check first
// if the time field is a valid field of type 'date' using isValidTimeField()
if (typeof duration === 'undefined' && (await isValidTimeField(callWithRequest, job))) {
const fs = fieldsServiceProvider(callWithRequest);
if (typeof duration === 'undefined' && (await isValidTimeField(mlClusterClient, job))) {
const fs = fieldsServiceProvider(mlClusterClient);
const index = job.datafeed_config.indices.join(',');
const timeField = job.data_description.time_field;
const timeRange = await fs.getTimeFieldRange(index, timeField, job.datafeed_config.query);
@ -81,29 +80,23 @@ export async function validateJob(
// next run only the cardinality tests to find out if they trigger an error
// so we can decide later whether certain additional tests should be run
const cardinalityMessages = await validateCardinality(callWithRequest, job);
const cardinalityMessages = await validateCardinality(mlClusterClient, job);
validationMessages.push(...cardinalityMessages);
const cardinalityError = cardinalityMessages.some((m) => {
return messages[m.id as MessageId].status === VALIDATION_STATUS.ERROR;
});
validationMessages.push(
...(await validateBucketSpan(
callWithRequest,
job,
duration,
callAsInternalUser,
isSecurityDisabled
))
...(await validateBucketSpan(mlClusterClient, job, duration, isSecurityDisabled))
);
validationMessages.push(...(await validateTimeRange(callWithRequest, job, duration)));
validationMessages.push(...(await validateTimeRange(mlClusterClient, job, duration)));
// only run the influencer and model memory limit checks
// if cardinality checks didn't return a message with an error level
if (cardinalityError === false) {
validationMessages.push(...(await validateInfluencers(callWithRequest, job)));
validationMessages.push(...(await validateInfluencers(job)));
validationMessages.push(
...(await validateModelMemoryLimit(callWithRequest, job, duration))
...(await validateModelMemoryLimit(mlClusterClient, job, duration))
);
}
} else {
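Reading the removed and added lines together, the resulting check order can be summarised in a small self-contained sketch (the validators are passed in as parameters purely for illustration; in the file above they are the imported validate* helpers):
type Msg = { id: string };
type Check = () => Promise<Msg[]>;
async function runValidationOrder(
  checks: {
    cardinality: Check;
    bucketSpan: Check;
    timeRange: Check;
    influencers: Check;
    modelMemoryLimit: Check;
  },
  isErrorLevel: (m: Msg) => boolean
): Promise<Msg[]> {
  const messages: Msg[] = [];
  // Cardinality runs first; its outcome gates the later checks.
  const cardinalityMessages = await checks.cardinality();
  messages.push(...cardinalityMessages);
  const cardinalityError = cardinalityMessages.some(isErrorLevel);
  messages.push(...(await checks.bucketSpan()));
  messages.push(...(await checks.timeRange()));
  // The influencer and model memory limit checks only run when cardinality raised no error.
  if (!cardinalityError) {
    messages.push(...(await checks.influencers()));
    messages.push(...(await checks.modelMemoryLimit()));
  }
  return messages;
}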

View file

@ -45,13 +45,7 @@ const pickBucketSpan = (bucketSpans) => {
return bucketSpans[i];
};
export async function validateBucketSpan(
callWithRequest,
job,
duration,
callAsInternalUser,
isSecurityDisabled
) {
export async function validateBucketSpan(mlClusterClient, job, duration) {
validateJobObject(job);
// if there is no duration, do not run the estimate test
@ -123,11 +117,7 @@ export async function validateBucketSpan(
try {
const estimations = estimatorConfigs.map((data) => {
return new Promise((resolve) => {
estimateBucketSpanFactory(
callWithRequest,
callAsInternalUser,
isSecurityDisabled
)(data)
estimateBucketSpanFactory(mlClusterClient)(data)
.then(resolve)
// this catch gets triggered when the estimation code runs without error
// but isn't able to come up with a bucket span estimation.

View file

@ -20,32 +20,36 @@ import mockFareQuoteSearchResponse from './__mocks__/mock_farequote_search_respo
// sparse data with a low number of buckets
import mockItSearchResponse from './__mocks__/mock_it_search_response.json';
// mock callWithRequestFactory
const callWithRequestFactory = (mockSearchResponse: any) => {
return () => {
// mock mlClusterClientFactory
const mlClusterClientFactory = (mockSearchResponse: any) => {
const callAs = () => {
return new Promise((resolve) => {
resolve(mockSearchResponse);
});
};
return {
callAsCurrentUser: callAs,
callAsInternalUser: callAs,
};
};
describe('ML - validateBucketSpan', () => {
it('called without arguments', (done) => {
validateBucketSpan(callWithRequestFactory(mockFareQuoteSearchResponse)).then(
validateBucketSpan(mlClusterClientFactory(mockFareQuoteSearchResponse)).then(
() => done(new Error('Promise should not resolve for this test without job argument.')),
() => done()
);
});
it('called with non-valid job argument #1, missing datafeed_config', (done) => {
validateBucketSpan(callWithRequestFactory(mockFareQuoteSearchResponse), {}).then(
validateBucketSpan(mlClusterClientFactory(mockFareQuoteSearchResponse), {}).then(
() => done(new Error('Promise should not resolve for this test without valid job argument.')),
() => done()
);
});
it('called with non-valid job argument #2, missing datafeed_config.indices', (done) => {
validateBucketSpan(callWithRequestFactory(mockFareQuoteSearchResponse), {
validateBucketSpan(mlClusterClientFactory(mockFareQuoteSearchResponse), {
datafeed_config: {},
}).then(
() => done(new Error('Promise should not resolve for this test without valid job argument.')),
@ -55,7 +59,7 @@ describe('ML - validateBucketSpan', () => {
it('called with non-valid job argument #3, missing data_description', (done) => {
const job = { datafeed_config: { indices: [] } };
validateBucketSpan(callWithRequestFactory(mockFareQuoteSearchResponse), job).then(
validateBucketSpan(mlClusterClientFactory(mockFareQuoteSearchResponse), job).then(
() => done(new Error('Promise should not resolve for this test without valid job argument.')),
() => done()
);
@ -63,7 +67,7 @@ describe('ML - validateBucketSpan', () => {
it('called with non-valid job argument #4, missing data_description.time_field', (done) => {
const job = { datafeed_config: { indices: [] }, data_description: {} };
validateBucketSpan(callWithRequestFactory(mockFareQuoteSearchResponse), job).then(
validateBucketSpan(mlClusterClientFactory(mockFareQuoteSearchResponse), job).then(
() => done(new Error('Promise should not resolve for this test without valid job argument.')),
() => done()
);
@ -74,7 +78,7 @@ describe('ML - validateBucketSpan', () => {
datafeed_config: { indices: [] },
data_description: { time_field: '@timestamp' },
};
validateBucketSpan(callWithRequestFactory(mockFareQuoteSearchResponse), job).then(
validateBucketSpan(mlClusterClientFactory(mockFareQuoteSearchResponse), job).then(
() => done(new Error('Promise should not resolve for this test without valid job argument.')),
() => done()
);
@ -87,7 +91,7 @@ describe('ML - validateBucketSpan', () => {
datafeed_config: { indices: [] },
};
return validateBucketSpan(callWithRequestFactory(mockFareQuoteSearchResponse), job).then(
return validateBucketSpan(mlClusterClientFactory(mockFareQuoteSearchResponse), job).then(
(messages: JobValidationMessage[]) => {
const ids = messages.map((m) => m.id);
expect(ids).toStrictEqual([]);
@ -110,7 +114,7 @@ describe('ML - validateBucketSpan', () => {
const duration = { start: 0, end: 1 };
return validateBucketSpan(
callWithRequestFactory(mockFareQuoteSearchResponse),
mlClusterClientFactory(mockFareQuoteSearchResponse),
job,
duration
).then((messages: JobValidationMessage[]) => {
@ -124,7 +128,7 @@ describe('ML - validateBucketSpan', () => {
const duration = { start: 0, end: 1 };
return validateBucketSpan(
callWithRequestFactory(mockFareQuoteSearchResponse),
mlClusterClientFactory(mockFareQuoteSearchResponse),
job,
duration
).then((messages: JobValidationMessage[]) => {
@ -147,7 +151,7 @@ describe('ML - validateBucketSpan', () => {
function: 'count',
});
return validateBucketSpan(callWithRequestFactory(mockSearchResponse), job, {}).then(
return validateBucketSpan(mlClusterClientFactory(mockSearchResponse), job, {}).then(
(messages: JobValidationMessage[]) => {
const ids = messages.map((m) => m.id);
test(ids);

View file

@ -6,7 +6,7 @@
import _ from 'lodash';
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { CombinedJob } from '../../../common/types/anomaly_detection_jobs';
@ -20,9 +20,12 @@ const mockResponses = {
fieldCaps: mockFieldCaps,
};
// mock callWithRequestFactory
const callWithRequestFactory = (responses: Record<string, any>, fail = false): LegacyAPICaller => {
return (requestName: string) => {
// mock mlClusterClientFactory
const mlClusterClientFactory = (
responses: Record<string, any>,
fail = false
): ILegacyScopedClusterClient => {
const callAs = (requestName: string) => {
return new Promise((resolve, reject) => {
const response = responses[requestName];
if (fail) {
@ -32,25 +35,29 @@ const callWithRequestFactory = (responses: Record<string, any>, fail = false): L
}
}) as Promise<any>;
};
return {
callAsCurrentUser: callAs,
callAsInternalUser: callAs,
};
};
describe('ML - validateCardinality', () => {
it('called without arguments', (done) => {
validateCardinality(callWithRequestFactory(mockResponses)).then(
validateCardinality(mlClusterClientFactory(mockResponses)).then(
() => done(new Error('Promise should not resolve for this test without job argument.')),
() => done()
);
});
it('called with non-valid job argument #1, missing analysis_config', (done) => {
validateCardinality(callWithRequestFactory(mockResponses), {} as CombinedJob).then(
validateCardinality(mlClusterClientFactory(mockResponses), {} as CombinedJob).then(
() => done(new Error('Promise should not resolve for this test without valid job argument.')),
() => done()
);
});
it('called with non-valid job argument #2, missing datafeed_config', (done) => {
validateCardinality(callWithRequestFactory(mockResponses), {
validateCardinality(mlClusterClientFactory(mockResponses), {
analysis_config: {},
} as CombinedJob).then(
() => done(new Error('Promise should not resolve for this test without valid job argument.')),
@ -60,7 +67,7 @@ describe('ML - validateCardinality', () => {
it('called with non-valid job argument #3, missing datafeed_config.indices', (done) => {
const job = { analysis_config: {}, datafeed_config: {} } as CombinedJob;
validateCardinality(callWithRequestFactory(mockResponses), job).then(
validateCardinality(mlClusterClientFactory(mockResponses), job).then(
() => done(new Error('Promise should not resolve for this test without valid job argument.')),
() => done()
);
@ -71,7 +78,7 @@ describe('ML - validateCardinality', () => {
analysis_config: {},
datafeed_config: { indices: [] },
} as unknown) as CombinedJob;
validateCardinality(callWithRequestFactory(mockResponses), job).then(
validateCardinality(mlClusterClientFactory(mockResponses), job).then(
() => done(new Error('Promise should not resolve for this test without valid job argument.')),
() => done()
);
@ -83,7 +90,7 @@ describe('ML - validateCardinality', () => {
data_description: {},
datafeed_config: { indices: [] },
} as unknown) as CombinedJob;
validateCardinality(callWithRequestFactory(mockResponses), job).then(
validateCardinality(mlClusterClientFactory(mockResponses), job).then(
() => done(new Error('Promise should not resolve for this test without valid job argument.')),
() => done()
);
@ -95,7 +102,7 @@ describe('ML - validateCardinality', () => {
datafeed_config: { indices: [] },
data_description: { time_field: '@timestamp' },
} as unknown) as CombinedJob;
validateCardinality(callWithRequestFactory(mockResponses), job).then(
validateCardinality(mlClusterClientFactory(mockResponses), job).then(
() => done(new Error('Promise should not resolve for this test without valid job argument.')),
() => done()
);
@ -110,7 +117,7 @@ describe('ML - validateCardinality', () => {
},
} as unknown) as CombinedJob;
return validateCardinality(callWithRequestFactory(mockResponses), job).then((messages) => {
return validateCardinality(mlClusterClientFactory(mockResponses), job).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toStrictEqual([]);
});
@ -141,7 +148,7 @@ describe('ML - validateCardinality', () => {
const mockCardinality = _.cloneDeep(mockResponses);
mockCardinality.search.aggregations.airline_cardinality.value = cardinality;
return validateCardinality(
callWithRequestFactory(mockCardinality),
mlClusterClientFactory(mockCardinality),
(job as unknown) as CombinedJob
).then((messages) => {
const ids = messages.map((m) => m.id);
@ -153,7 +160,7 @@ describe('ML - validateCardinality', () => {
const job = getJobConfig('partition_field_name');
job.analysis_config.detectors[0].partition_field_name = '_source';
return validateCardinality(
callWithRequestFactory(mockResponses),
mlClusterClientFactory(mockResponses),
(job as unknown) as CombinedJob
).then((messages) => {
const ids = messages.map((m) => m.id);
@ -164,7 +171,7 @@ describe('ML - validateCardinality', () => {
it(`field 'airline' aggregatable`, () => {
const job = getJobConfig('partition_field_name');
return validateCardinality(
callWithRequestFactory(mockResponses),
mlClusterClientFactory(mockResponses),
(job as unknown) as CombinedJob
).then((messages) => {
const ids = messages.map((m) => m.id);
@ -174,7 +181,7 @@ describe('ML - validateCardinality', () => {
it('field not aggregatable', () => {
const job = getJobConfig('partition_field_name');
return validateCardinality(callWithRequestFactory({}), (job as unknown) as CombinedJob).then(
return validateCardinality(mlClusterClientFactory({}), (job as unknown) as CombinedJob).then(
(messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toStrictEqual(['field_not_aggregatable']);
@ -189,7 +196,7 @@ describe('ML - validateCardinality', () => {
partition_field_name: 'airline',
});
return validateCardinality(
callWithRequestFactory({}, true),
mlClusterClientFactory({}, true),
(job as unknown) as CombinedJob
).then((messages) => {
const ids = messages.map((m) => m.id);
@ -245,7 +252,7 @@ describe('ML - validateCardinality', () => {
job.model_plot_config = { enabled: false };
const mockCardinality = _.cloneDeep(mockResponses);
mockCardinality.search.aggregations.airline_cardinality.value = cardinality;
return validateCardinality(callWithRequestFactory(mockCardinality), job).then((messages) => {
return validateCardinality(mlClusterClientFactory(mockCardinality), job).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toStrictEqual(['success_cardinality']);
});
@ -256,7 +263,7 @@ describe('ML - validateCardinality', () => {
job.model_plot_config = { enabled: true };
const mockCardinality = _.cloneDeep(mockResponses);
mockCardinality.search.aggregations.airline_cardinality.value = cardinality;
return validateCardinality(callWithRequestFactory(mockCardinality), job).then((messages) => {
return validateCardinality(mlClusterClientFactory(mockCardinality), job).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toStrictEqual(['cardinality_model_plot_high']);
});
@ -267,7 +274,7 @@ describe('ML - validateCardinality', () => {
job.model_plot_config = { enabled: false };
const mockCardinality = _.cloneDeep(mockResponses);
mockCardinality.search.aggregations.airline_cardinality.value = cardinality;
return validateCardinality(callWithRequestFactory(mockCardinality), job).then((messages) => {
return validateCardinality(mlClusterClientFactory(mockCardinality), job).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toStrictEqual(['cardinality_by_field']);
});
@ -278,7 +285,7 @@ describe('ML - validateCardinality', () => {
job.model_plot_config = { enabled: true };
const mockCardinality = _.cloneDeep(mockResponses);
mockCardinality.search.aggregations.airline_cardinality.value = cardinality;
return validateCardinality(callWithRequestFactory(mockCardinality), job).then((messages) => {
return validateCardinality(mlClusterClientFactory(mockCardinality), job).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toStrictEqual(['cardinality_model_plot_high', 'cardinality_by_field']);
});
@ -289,7 +296,7 @@ describe('ML - validateCardinality', () => {
job.model_plot_config = { enabled: true, terms: 'AAL,AAB' };
const mockCardinality = _.cloneDeep(mockResponses);
mockCardinality.search.aggregations.airline_cardinality.value = cardinality;
return validateCardinality(callWithRequestFactory(mockCardinality), job).then((messages) => {
return validateCardinality(mlClusterClientFactory(mockCardinality), job).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toStrictEqual(['cardinality_by_field']);
});
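
The mlClusterClientFactory helper above resolves canned responses keyed by endpoint name and reuses one implementation for both identities. A hedged sketch of that test pattern as a standalone helper; the makeMockScopedClient name and the local interface are illustrative stand-ins, not part of this commit:

type LegacyAPICaller = (endpoint: string, params?: Record<string, unknown>) => Promise<unknown>;

interface ILegacyScopedClusterClient {
  callAsCurrentUser: LegacyAPICaller;
  callAsInternalUser: LegacyAPICaller;
}

// Resolve canned responses keyed by endpoint name; reject everything when `fail` is set
// so error paths can be exercised with the same factory.
function makeMockScopedClient(
  responses: Record<string, unknown>,
  fail = false
): ILegacyScopedClusterClient {
  const callAs: LegacyAPICaller = (endpoint) =>
    fail
      ? Promise.reject(new Error(`mock failure for ${endpoint}`))
      : Promise.resolve(responses[endpoint]);
  // Tests that only assert on validation messages can share one implementation
  // for both the current-user and internal-user paths.
  return { callAsCurrentUser: callAs, callAsInternalUser: callAs };
}

// Usage, mirroring the tests above:
// validateCardinality(makeMockScopedClient({ fieldCaps: {}, search: {} }), job);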


@ -4,7 +4,7 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { DataVisualizer } from '../data_visualizer';
import { validateJobObject } from './validate_job_object';
@ -43,8 +43,12 @@ type Validator = (obj: {
messages: Messages;
}>;
const validateFactory = (callWithRequest: LegacyAPICaller, job: CombinedJob): Validator => {
const dv = new DataVisualizer(callWithRequest);
const validateFactory = (
mlClusterClient: ILegacyScopedClusterClient,
job: CombinedJob
): Validator => {
const { callAsCurrentUser } = mlClusterClient;
const dv = new DataVisualizer(mlClusterClient);
const modelPlotConfigTerms = job?.model_plot_config?.terms ?? '';
const modelPlotConfigFieldCount =
@ -73,7 +77,7 @@ const validateFactory = (callWithRequest: LegacyAPICaller, job: CombinedJob): Va
] as string[];
// use fieldCaps endpoint to get data about whether fields are aggregatable
const fieldCaps = await callWithRequest('fieldCaps', {
const fieldCaps = await callAsCurrentUser('fieldCaps', {
index: job.datafeed_config.indices.join(','),
fields: uniqueFieldNames,
});
@ -150,7 +154,7 @@ const validateFactory = (callWithRequest: LegacyAPICaller, job: CombinedJob): Va
};
export async function validateCardinality(
callWithRequest: LegacyAPICaller,
mlClusterClient: ILegacyScopedClusterClient,
job?: CombinedJob
): Promise<Messages> | never {
const messages: Messages = [];
@ -170,7 +174,7 @@ export async function validateCardinality(
}
// validate({ type, isInvalid }) asynchronously returns an array of validation messages
const validate = validateFactory(callWithRequest, job);
const validate = validateFactory(mlClusterClient, job);
const modelPlotEnabled = job.model_plot_config?.enabled ?? false;
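
validateFactory now accepts the whole scoped client, destructures callAsCurrentUser for the calls it issues itself (the fieldCaps lookup), and passes the untouched client on to DataVisualizer. A minimal sketch of that hand-off pattern; CardinalityHelper and buildValidator are hypothetical names used only for illustration:

type LegacyAPICaller = (endpoint: string, params?: Record<string, unknown>) => Promise<unknown>;

interface ILegacyScopedClusterClient {
  callAsCurrentUser: LegacyAPICaller;
  callAsInternalUser: LegacyAPICaller;
}

// Hypothetical helper standing in for DataVisualizer: it receives the full client
// and decides internally which identity each of its calls needs.
class CardinalityHelper {
  constructor(private readonly client: ILegacyScopedClusterClient) {}
  cardinality(index: string, field: string) {
    return this.client.callAsCurrentUser('search', {
      index,
      size: 0,
      body: { aggs: { field_count: { cardinality: { field } } } },
    });
  }
}

function buildValidator(mlClusterClient: ILegacyScopedClusterClient) {
  // Keep a direct reference for the calls this module issues itself...
  const { callAsCurrentUser } = mlClusterClient;
  // ...and hand the whole client to helpers so they keep their own choice of identity.
  const helper = new CardinalityHelper(mlClusterClient);

  return async (index: string, field: string) => {
    const caps = await callAsCurrentUser('fieldCaps', { index, fields: [field] });
    const counts = await helper.cardinality(index, field);
    return { caps, counts };
  };
}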


@ -4,28 +4,20 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller } from 'kibana/server';
import { CombinedJob } from '../../../common/types/anomaly_detection_jobs';
import { validateInfluencers } from './validate_influencers';
describe('ML - validateInfluencers', () => {
it('called without arguments throws an error', (done) => {
validateInfluencers(
(undefined as unknown) as LegacyAPICaller,
(undefined as unknown) as CombinedJob
).then(
validateInfluencers((undefined as unknown) as CombinedJob).then(
() => done(new Error('Promise should not resolve for this test without job argument.')),
() => done()
);
});
it('called with non-valid job argument #1, missing analysis_config', (done) => {
validateInfluencers(
(undefined as unknown) as LegacyAPICaller,
({} as unknown) as CombinedJob
).then(
validateInfluencers(({} as unknown) as CombinedJob).then(
() => done(new Error('Promise should not resolve for this test without valid job argument.')),
() => done()
);
@ -37,10 +29,7 @@ describe('ML - validateInfluencers', () => {
datafeed_config: { indices: [] },
data_description: { time_field: '@timestamp' },
};
validateInfluencers(
(undefined as unknown) as LegacyAPICaller,
(job as unknown) as CombinedJob
).then(
validateInfluencers((job as unknown) as CombinedJob).then(
() => done(new Error('Promise should not resolve for this test without valid job argument.')),
() => done()
);
@ -52,10 +41,7 @@ describe('ML - validateInfluencers', () => {
datafeed_config: { indices: [] },
data_description: { time_field: '@timestamp' },
};
validateInfluencers(
(undefined as unknown) as LegacyAPICaller,
(job as unknown) as CombinedJob
).then(
validateInfluencers((job as unknown) as CombinedJob).then(
() => done(new Error('Promise should not resolve for this test without valid job argument.')),
() => done()
);
@ -75,7 +61,7 @@ describe('ML - validateInfluencers', () => {
it('success_influencer', () => {
const job = getJobConfig(['airline']);
return validateInfluencers((undefined as unknown) as LegacyAPICaller, job).then((messages) => {
return validateInfluencers(job).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toStrictEqual(['success_influencers']);
});
@ -93,7 +79,7 @@ describe('ML - validateInfluencers', () => {
]
);
return validateInfluencers((undefined as unknown) as LegacyAPICaller, job).then((messages) => {
return validateInfluencers(job).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toStrictEqual([]);
});
@ -101,7 +87,7 @@ describe('ML - validateInfluencers', () => {
it('influencer_low', () => {
const job = getJobConfig();
return validateInfluencers((undefined as unknown) as LegacyAPICaller, job).then((messages) => {
return validateInfluencers(job).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toStrictEqual(['influencer_low']);
});
@ -109,7 +95,7 @@ describe('ML - validateInfluencers', () => {
it('influencer_high', () => {
const job = getJobConfig(['i1', 'i2', 'i3', 'i4']);
return validateInfluencers((undefined as unknown) as LegacyAPICaller, job).then((messages) => {
return validateInfluencers(job).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toStrictEqual(['influencer_high']);
});
@ -127,7 +113,7 @@ describe('ML - validateInfluencers', () => {
},
]
);
return validateInfluencers((undefined as unknown) as LegacyAPICaller, job).then((messages) => {
return validateInfluencers(job).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toStrictEqual(['influencer_low_suggestion']);
});
@ -157,7 +143,7 @@ describe('ML - validateInfluencers', () => {
},
]
);
return validateInfluencers((undefined as unknown) as LegacyAPICaller, job).then((messages) => {
return validateInfluencers(job).then((messages) => {
expect(messages).toStrictEqual([
{
id: 'influencer_low_suggestions',


@ -4,8 +4,6 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller } from 'kibana/server';
import { CombinedJob } from '../../../common/types/anomaly_detection_jobs';
import { validateJobObject } from './validate_job_object';
@ -14,7 +12,7 @@ const INFLUENCER_LOW_THRESHOLD = 0;
const INFLUENCER_HIGH_THRESHOLD = 4;
const DETECTOR_FIELD_NAMES_THRESHOLD = 1;
export async function validateInfluencers(callWithRequest: LegacyAPICaller, job: CombinedJob) {
export async function validateInfluencers(job: CombinedJob) {
validateJobObject(job);
const messages = [];


@ -4,7 +4,7 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { CombinedJob, Detector } from '../../../common/types/anomaly_detection_jobs';
import { ModelMemoryEstimate } from '../calculate_model_memory_limit/calculate_model_memory_limit';
import { validateModelMemoryLimit } from './validate_model_memory_limit';
@ -73,15 +73,15 @@ describe('ML - validateModelMemoryLimit', () => {
'ml.estimateModelMemory'?: ModelMemoryEstimate;
}
// mock callWithRequest
// mock callAsCurrentUser
// used in three places:
// - to retrieve the info endpoint
// - to search for cardinality of split field
// - to retrieve field capabilities used in search for split field cardinality
const getMockCallWithRequest = ({
const getMockMlClusterClient = ({
'ml.estimateModelMemory': estimateModelMemory,
}: MockAPICallResponse = {}) =>
((call: string) => {
}: MockAPICallResponse = {}): ILegacyScopedClusterClient => {
const callAs = (call: string) => {
if (typeof call === undefined) {
return Promise.reject();
}
@ -97,7 +97,13 @@ describe('ML - validateModelMemoryLimit', () => {
response = estimateModelMemory || modelMemoryEstimateResponse;
}
return Promise.resolve(response);
}) as LegacyAPICaller;
};
return {
callAsCurrentUser: callAs,
callAsInternalUser: callAs,
};
};
function getJobConfig(influencers: string[] = [], detectors: Detector[] = []) {
return ({
@ -129,7 +135,7 @@ describe('ML - validateModelMemoryLimit', () => {
const job = getJobConfig();
const duration = undefined;
return validateModelMemoryLimit(getMockCallWithRequest(), job, duration).then((messages) => {
return validateModelMemoryLimit(getMockMlClusterClient(), job, duration).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toEqual([]);
});
@ -138,10 +144,10 @@ describe('ML - validateModelMemoryLimit', () => {
it('Called with no duration or split and mml above limit', () => {
const job = getJobConfig();
const duration = undefined;
// @ts-ignore
// @ts-expect-error
job.analysis_limits.model_memory_limit = '31mb';
return validateModelMemoryLimit(getMockCallWithRequest(), job, duration).then((messages) => {
return validateModelMemoryLimit(getMockMlClusterClient(), job, duration).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toEqual(['mml_greater_than_max_mml']);
});
@ -151,11 +157,11 @@ describe('ML - validateModelMemoryLimit', () => {
const dtrs = createDetectors(10);
const job = getJobConfig(['instance'], dtrs);
const duration = { start: 0, end: 1 };
// @ts-ignore
// @ts-expect-error
job.analysis_limits.model_memory_limit = '20mb';
return validateModelMemoryLimit(
getMockCallWithRequest({ 'ml.estimateModelMemory': { model_memory_estimate: '66mb' } }),
getMockMlClusterClient({ 'ml.estimateModelMemory': { model_memory_estimate: '66mb' } }),
job,
duration
).then((messages) => {
@ -168,11 +174,11 @@ describe('ML - validateModelMemoryLimit', () => {
const dtrs = createDetectors(2);
const job = getJobConfig(['instance'], dtrs);
const duration = { start: 0, end: 1 };
// @ts-ignore
// @ts-expect-error
job.analysis_limits.model_memory_limit = '30mb';
return validateModelMemoryLimit(
getMockCallWithRequest({ 'ml.estimateModelMemory': { model_memory_estimate: '24mb' } }),
getMockMlClusterClient({ 'ml.estimateModelMemory': { model_memory_estimate: '24mb' } }),
job,
duration
).then((messages) => {
@ -185,11 +191,11 @@ describe('ML - validateModelMemoryLimit', () => {
const dtrs = createDetectors(2);
const job = getJobConfig(['instance'], dtrs);
const duration = { start: 0, end: 1 };
// @ts-ignore
// @ts-expect-error
job.analysis_limits.model_memory_limit = '10mb';
return validateModelMemoryLimit(
getMockCallWithRequest({ 'ml.estimateModelMemory': { model_memory_estimate: '22mb' } }),
getMockMlClusterClient({ 'ml.estimateModelMemory': { model_memory_estimate: '22mb' } }),
job,
duration
).then((messages) => {
@ -203,10 +209,10 @@ describe('ML - validateModelMemoryLimit', () => {
const job = getJobConfig(['instance'], dtrs);
const duration = { start: 0, end: 1 };
delete mlInfoResponse.limits.max_model_memory_limit;
// @ts-ignore
// @ts-expect-error
job.analysis_limits.model_memory_limit = '10mb';
return validateModelMemoryLimit(getMockCallWithRequest(), job, duration).then((messages) => {
return validateModelMemoryLimit(getMockMlClusterClient(), job, duration).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toEqual(['half_estimated_mml_greater_than_mml']);
});
@ -215,10 +221,10 @@ describe('ML - validateModelMemoryLimit', () => {
it('Called with no duration or split and mml above limit, no max setting', () => {
const job = getJobConfig();
const duration = undefined;
// @ts-ignore
// @ts-expect-error
job.analysis_limits.model_memory_limit = '31mb';
return validateModelMemoryLimit(getMockCallWithRequest(), job, duration).then((messages) => {
return validateModelMemoryLimit(getMockMlClusterClient(), job, duration).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toEqual([]);
});
@ -227,10 +233,10 @@ describe('ML - validateModelMemoryLimit', () => {
it('Called with no duration or split and mml above limit, no max setting, above effective max mml', () => {
const job = getJobConfig();
const duration = undefined;
// @ts-ignore
// @ts-expect-error
job.analysis_limits.model_memory_limit = '41mb';
return validateModelMemoryLimit(getMockCallWithRequest(), job, duration).then((messages) => {
return validateModelMemoryLimit(getMockMlClusterClient(), job, duration).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toEqual(['mml_greater_than_effective_max_mml']);
});
@ -240,11 +246,11 @@ describe('ML - validateModelMemoryLimit', () => {
const dtrs = createDetectors(1);
const job = getJobConfig(['instance'], dtrs);
const duration = { start: 0, end: 1 };
// @ts-ignore
// @ts-expect-error
job.analysis_limits.model_memory_limit = '20mb';
return validateModelMemoryLimit(
getMockCallWithRequest({ 'ml.estimateModelMemory': { model_memory_estimate: '19mb' } }),
getMockMlClusterClient({ 'ml.estimateModelMemory': { model_memory_estimate: '19mb' } }),
job,
duration
).then((messages) => {
@ -257,10 +263,10 @@ describe('ML - validateModelMemoryLimit', () => {
const dtrs = createDetectors(1);
const job = getJobConfig(['instance'], dtrs);
const duration = { start: 0, end: 1 };
// @ts-ignore
// @ts-expect-error
job.analysis_limits.model_memory_limit = '0mb';
return validateModelMemoryLimit(getMockCallWithRequest(), job, duration).then((messages) => {
return validateModelMemoryLimit(getMockMlClusterClient(), job, duration).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toEqual(['mml_value_invalid']);
});
@ -270,10 +276,10 @@ describe('ML - validateModelMemoryLimit', () => {
const dtrs = createDetectors(1);
const job = getJobConfig(['instance'], dtrs);
const duration = { start: 0, end: 1 };
// @ts-ignore
// @ts-expect-error
job.analysis_limits.model_memory_limit = '10mbananas';
return validateModelMemoryLimit(getMockCallWithRequest(), job, duration).then((messages) => {
return validateModelMemoryLimit(getMockMlClusterClient(), job, duration).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toEqual(['mml_value_invalid']);
});
@ -283,10 +289,10 @@ describe('ML - validateModelMemoryLimit', () => {
const dtrs = createDetectors(1);
const job = getJobConfig(['instance'], dtrs);
const duration = { start: 0, end: 1 };
// @ts-ignore
// @ts-expect-error
job.analysis_limits.model_memory_limit = '10';
return validateModelMemoryLimit(getMockCallWithRequest(), job, duration).then((messages) => {
return validateModelMemoryLimit(getMockMlClusterClient(), job, duration).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toEqual(['mml_value_invalid']);
});
@ -296,10 +302,10 @@ describe('ML - validateModelMemoryLimit', () => {
const dtrs = createDetectors(1);
const job = getJobConfig(['instance'], dtrs);
const duration = { start: 0, end: 1 };
// @ts-ignore
// @ts-expect-error
job.analysis_limits.model_memory_limit = 'mb';
return validateModelMemoryLimit(getMockCallWithRequest(), job, duration).then((messages) => {
return validateModelMemoryLimit(getMockMlClusterClient(), job, duration).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toEqual(['mml_value_invalid']);
});
@ -309,10 +315,10 @@ describe('ML - validateModelMemoryLimit', () => {
const dtrs = createDetectors(1);
const job = getJobConfig(['instance'], dtrs);
const duration = { start: 0, end: 1 };
// @ts-ignore
// @ts-expect-error
job.analysis_limits.model_memory_limit = 'asdf';
return validateModelMemoryLimit(getMockCallWithRequest(), job, duration).then((messages) => {
return validateModelMemoryLimit(getMockMlClusterClient(), job, duration).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toEqual(['mml_value_invalid']);
});
@ -322,10 +328,10 @@ describe('ML - validateModelMemoryLimit', () => {
const dtrs = createDetectors(1);
const job = getJobConfig(['instance'], dtrs);
const duration = { start: 0, end: 1 };
// @ts-ignore
// @ts-expect-error
job.analysis_limits.model_memory_limit = '1023KB';
return validateModelMemoryLimit(getMockCallWithRequest(), job, duration).then((messages) => {
return validateModelMemoryLimit(getMockMlClusterClient(), job, duration).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toEqual(['mml_value_invalid']);
});
@ -335,10 +341,10 @@ describe('ML - validateModelMemoryLimit', () => {
const dtrs = createDetectors(1);
const job = getJobConfig(['instance'], dtrs);
const duration = { start: 0, end: 1 };
// @ts-ignore
// @ts-expect-error
job.analysis_limits.model_memory_limit = '1024KB';
return validateModelMemoryLimit(getMockCallWithRequest(), job, duration).then((messages) => {
return validateModelMemoryLimit(getMockMlClusterClient(), job, duration).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toEqual(['half_estimated_mml_greater_than_mml']);
});
@ -348,10 +354,10 @@ describe('ML - validateModelMemoryLimit', () => {
const dtrs = createDetectors(1);
const job = getJobConfig(['instance'], dtrs);
const duration = { start: 0, end: 1 };
// @ts-ignore
// @ts-expect-error
job.analysis_limits.model_memory_limit = '6MB';
return validateModelMemoryLimit(getMockCallWithRequest(), job, duration).then((messages) => {
return validateModelMemoryLimit(getMockMlClusterClient(), job, duration).then((messages) => {
const ids = messages.map((m) => m.id);
expect(ids).toEqual(['half_estimated_mml_greater_than_mml']);
});
@ -361,11 +367,11 @@ describe('ML - validateModelMemoryLimit', () => {
const dtrs = createDetectors(1);
const job = getJobConfig(['instance'], dtrs);
const duration = { start: 0, end: 1 };
// @ts-ignore
// @ts-expect-error
job.analysis_limits.model_memory_limit = '20MB';
return validateModelMemoryLimit(
getMockCallWithRequest({ 'ml.estimateModelMemory': { model_memory_estimate: '20mb' } }),
getMockMlClusterClient({ 'ml.estimateModelMemory': { model_memory_estimate: '20mb' } }),
job,
duration
).then((messages) => {


@ -5,7 +5,7 @@
*/
import numeral from '@elastic/numeral';
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { CombinedJob } from '../../../common/types/anomaly_detection_jobs';
import { validateJobObject } from './validate_job_object';
import { calculateModelMemoryLimitProvider } from '../calculate_model_memory_limit';
@ -16,10 +16,11 @@ import { MlInfoResponse } from '../../../common/types/ml_server_info';
const MODEL_MEMORY_LIMIT_MINIMUM_BYTES = 1048576;
export async function validateModelMemoryLimit(
callWithRequest: LegacyAPICaller,
mlClusterClient: ILegacyScopedClusterClient,
job: CombinedJob,
duration?: { start?: number; end?: number }
) {
const { callAsInternalUser } = mlClusterClient;
validateJobObject(job);
// retrieve the model memory limit specified by the user in the job config.
@ -51,12 +52,12 @@ export async function validateModelMemoryLimit(
// retrieve the max_model_memory_limit value from the server
// this will be unset unless the user has set this on their cluster
const info = await callWithRequest<MlInfoResponse>('ml.info');
const info = (await callAsInternalUser('ml.info')) as MlInfoResponse;
const maxModelMemoryLimit = info.limits.max_model_memory_limit?.toUpperCase();
const effectiveMaxModelMemoryLimit = info.limits.effective_max_model_memory_limit?.toUpperCase();
if (runCalcModelMemoryTest) {
const { modelMemoryLimit } = await calculateModelMemoryLimitProvider(callWithRequest)(
const { modelMemoryLimit } = await calculateModelMemoryLimitProvider(mlClusterClient)(
job.analysis_config,
job.datafeed_config.indices.join(','),
job.datafeed_config.query,
@ -65,14 +66,14 @@ export async function validateModelMemoryLimit(
duration!.end as number,
true
);
// @ts-ignore
// @ts-expect-error
const mmlEstimateBytes: number = numeral(modelMemoryLimit).value();
let runEstimateGreaterThenMml = true;
// if max_model_memory_limit has been set,
// make sure the estimated value is not greater than it.
if (typeof maxModelMemoryLimit !== 'undefined') {
// @ts-ignore
// @ts-expect-error
const maxMmlBytes: number = numeral(maxModelMemoryLimit).value();
if (mmlEstimateBytes > maxMmlBytes) {
runEstimateGreaterThenMml = false;
@ -89,7 +90,7 @@ export async function validateModelMemoryLimit(
// do not run this if we've already found that it's larger than
// the max mml
if (runEstimateGreaterThenMml && mml !== null) {
// @ts-ignore
// @ts-expect-error
const mmlBytes: number = numeral(mml).value();
if (mmlBytes < MODEL_MEMORY_LIMIT_MINIMUM_BYTES) {
messages.push({
@ -116,11 +117,11 @@ export async function validateModelMemoryLimit(
// make sure the user defined MML is not greater than it
if (mml !== null) {
let maxMmlExceeded = false;
// @ts-ignore
// @ts-expect-error
const mmlBytes = numeral(mml).value();
if (maxModelMemoryLimit !== undefined) {
// @ts-ignore
// @ts-expect-error
const maxMmlBytes = numeral(maxModelMemoryLimit).value();
if (mmlBytes > maxMmlBytes) {
maxMmlExceeded = true;
@ -133,7 +134,7 @@ export async function validateModelMemoryLimit(
}
if (effectiveMaxModelMemoryLimit !== undefined && maxMmlExceeded === false) {
// @ts-ignore
// @ts-expect-error
const effectiveMaxMmlBytes = numeral(effectiveMaxModelMemoryLimit).value();
if (mmlBytes > effectiveMaxMmlBytes) {
messages.push({


@ -6,7 +6,7 @@
import _ from 'lodash';
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { CombinedJob } from '../../../common/types/anomaly_detection_jobs';
@ -21,12 +21,16 @@ const mockSearchResponse = {
search: mockTimeRange,
};
const callWithRequestFactory = (resp: any): LegacyAPICaller => {
return (path: string) => {
const mlClusterClientFactory = (resp: any): ILegacyScopedClusterClient => {
const callAs = (path: string) => {
return new Promise((resolve) => {
resolve(resp[path]);
}) as Promise<any>;
};
return {
callAsCurrentUser: callAs,
callAsInternalUser: callAs,
};
};
function getMinimalValidJob() {
@ -46,7 +50,7 @@ function getMinimalValidJob() {
describe('ML - isValidTimeField', () => {
it('called without job config argument triggers Promise rejection', (done) => {
isValidTimeField(
callWithRequestFactory(mockSearchResponse),
mlClusterClientFactory(mockSearchResponse),
(undefined as unknown) as CombinedJob
).then(
() => done(new Error('Promise should not resolve for this test without job argument.')),
@ -55,7 +59,7 @@ describe('ML - isValidTimeField', () => {
});
it('time_field `@timestamp`', (done) => {
isValidTimeField(callWithRequestFactory(mockSearchResponse), getMinimalValidJob()).then(
isValidTimeField(mlClusterClientFactory(mockSearchResponse), getMinimalValidJob()).then(
(valid) => {
expect(valid).toBe(true);
done();
@ -74,7 +78,7 @@ describe('ML - isValidTimeField', () => {
};
isValidTimeField(
callWithRequestFactory(mockSearchResponseNestedDate),
mlClusterClientFactory(mockSearchResponseNestedDate),
mockJobConfigNestedDate
).then(
(valid) => {
@ -89,7 +93,7 @@ describe('ML - isValidTimeField', () => {
describe('ML - validateTimeRange', () => {
it('called without arguments', (done) => {
validateTimeRange(
callWithRequestFactory(mockSearchResponse),
mlClusterClientFactory(mockSearchResponse),
(undefined as unknown) as CombinedJob
).then(
() => done(new Error('Promise should not resolve for this test without job argument.')),
@ -98,7 +102,7 @@ describe('ML - validateTimeRange', () => {
});
it('called with non-valid job argument #2, missing datafeed_config', (done) => {
validateTimeRange(callWithRequestFactory(mockSearchResponse), ({
validateTimeRange(mlClusterClientFactory(mockSearchResponse), ({
analysis_config: {},
} as unknown) as CombinedJob).then(
() => done(new Error('Promise should not resolve for this test without valid job argument.')),
@ -109,7 +113,7 @@ describe('ML - validateTimeRange', () => {
it('called with non-valid job argument #3, missing datafeed_config.indices', (done) => {
const job = { analysis_config: {}, datafeed_config: {} };
validateTimeRange(
callWithRequestFactory(mockSearchResponse),
mlClusterClientFactory(mockSearchResponse),
(job as unknown) as CombinedJob
).then(
() => done(new Error('Promise should not resolve for this test without valid job argument.')),
@ -120,7 +124,7 @@ describe('ML - validateTimeRange', () => {
it('called with non-valid job argument #4, missing data_description', (done) => {
const job = { analysis_config: {}, datafeed_config: { indices: [] } };
validateTimeRange(
callWithRequestFactory(mockSearchResponse),
mlClusterClientFactory(mockSearchResponse),
(job as unknown) as CombinedJob
).then(
() => done(new Error('Promise should not resolve for this test without valid job argument.')),
@ -131,7 +135,7 @@ describe('ML - validateTimeRange', () => {
it('called with non-valid job argument #5, missing data_description.time_field', (done) => {
const job = { analysis_config: {}, data_description: {}, datafeed_config: { indices: [] } };
validateTimeRange(
callWithRequestFactory(mockSearchResponse),
mlClusterClientFactory(mockSearchResponse),
(job as unknown) as CombinedJob
).then(
() => done(new Error('Promise should not resolve for this test without valid job argument.')),
@ -144,7 +148,7 @@ describe('ML - validateTimeRange', () => {
mockSearchResponseInvalid.fieldCaps = undefined;
const duration = { start: 0, end: 1 };
return validateTimeRange(
callWithRequestFactory(mockSearchResponseInvalid),
mlClusterClientFactory(mockSearchResponseInvalid),
getMinimalValidJob(),
duration
).then((messages) => {
@ -158,7 +162,7 @@ describe('ML - validateTimeRange', () => {
jobShortTimeRange.analysis_config.bucket_span = '1s';
const duration = { start: 0, end: 1 };
return validateTimeRange(
callWithRequestFactory(mockSearchResponse),
mlClusterClientFactory(mockSearchResponse),
jobShortTimeRange,
duration
).then((messages) => {
@ -170,7 +174,7 @@ describe('ML - validateTimeRange', () => {
it('too short time range, 25x bucket span is more than 2h', () => {
const duration = { start: 0, end: 1 };
return validateTimeRange(
callWithRequestFactory(mockSearchResponse),
mlClusterClientFactory(mockSearchResponse),
getMinimalValidJob(),
duration
).then((messages) => {
@ -182,7 +186,7 @@ describe('ML - validateTimeRange', () => {
it('time range between 2h and 25x bucket span', () => {
const duration = { start: 0, end: 8000000 };
return validateTimeRange(
callWithRequestFactory(mockSearchResponse),
mlClusterClientFactory(mockSearchResponse),
getMinimalValidJob(),
duration
).then((messages) => {
@ -194,7 +198,7 @@ describe('ML - validateTimeRange', () => {
it('valid time range', () => {
const duration = { start: 0, end: 100000000 };
return validateTimeRange(
callWithRequestFactory(mockSearchResponse),
mlClusterClientFactory(mockSearchResponse),
getMinimalValidJob(),
duration
).then((messages) => {
@ -206,7 +210,7 @@ describe('ML - validateTimeRange', () => {
it('invalid time range, start time is before the UNIX epoch', () => {
const duration = { start: -1, end: 100000000 };
return validateTimeRange(
callWithRequestFactory(mockSearchResponse),
mlClusterClientFactory(mockSearchResponse),
getMinimalValidJob(),
duration
).then((messages) => {


@ -4,11 +4,10 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { ES_FIELD_TYPES } from '../../../../../../src/plugins/data/server';
import { parseInterval } from '../../../common/util/parse_interval';
import { CombinedJob } from '../../../common/types/anomaly_detection_jobs';
// @ts-ignore
import { validateJobObject } from './validate_job_object';
interface ValidateTimeRangeMessage {
@ -27,7 +26,10 @@ const BUCKET_SPAN_COMPARE_FACTOR = 25;
const MIN_TIME_SPAN_MS = 7200000;
const MIN_TIME_SPAN_READABLE = '2 hours';
export async function isValidTimeField(callAsCurrentUser: LegacyAPICaller, job: CombinedJob) {
export async function isValidTimeField(
{ callAsCurrentUser }: ILegacyScopedClusterClient,
job: CombinedJob
) {
const index = job.datafeed_config.indices.join(',');
const timeField = job.data_description.time_field;
@ -45,7 +47,7 @@ export async function isValidTimeField(callAsCurrentUser: LegacyAPICaller, job:
}
export async function validateTimeRange(
callAsCurrentUser: LegacyAPICaller,
mlClientCluster: ILegacyScopedClusterClient,
job: CombinedJob,
timeRange?: Partial<TimeRange>
) {
@ -54,7 +56,7 @@ export async function validateTimeRange(
validateJobObject(job);
// check if time_field is a date type
if (!(await isValidTimeField(callAsCurrentUser, job))) {
if (!(await isValidTimeField(mlClientCluster, job))) {
messages.push({
id: 'time_field_invalid',
timeField: job.data_description.time_field,


@ -5,8 +5,8 @@
*/
import Boom from 'boom';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { ML_RESULTS_INDEX_PATTERN } from '../../../common/constants/index_patterns';
import { callWithRequestType } from '../../../common/types/kibana';
import { CriteriaField } from './results_service';
const PARTITION_FIELDS = ['partition_field', 'over_field', 'by_field'] as const;
@ -76,7 +76,10 @@ function getFieldObject(fieldType: PartitionFieldsType, aggs: any) {
: {};
}
export const getPartitionFieldsValuesFactory = (callWithRequest: callWithRequestType) =>
export const getPartitionFieldsValuesFactory = ({
callAsCurrentUser,
callAsInternalUser,
}: ILegacyScopedClusterClient) =>
/**
* Gets the record of partition fields with possible values that fit the provided queries.
* @param jobId - Job ID
@ -92,7 +95,7 @@ export const getPartitionFieldsValuesFactory = (callWithRequest: callWithRequest
earliestMs: number,
latestMs: number
) {
const jobsResponse = await callWithRequest('ml.jobs', { jobId: [jobId] });
const jobsResponse = await callAsInternalUser('ml.jobs', { jobId: [jobId] });
if (jobsResponse.count === 0 || jobsResponse.jobs === undefined) {
throw Boom.notFound(`Job with the id "${jobId}" not found`);
}
@ -101,7 +104,7 @@ export const getPartitionFieldsValuesFactory = (callWithRequest: callWithRequest
const isModelPlotEnabled = job?.model_plot_config?.enabled;
const resp = await callWithRequest('search', {
const resp = await callAsCurrentUser('search', {
index: ML_RESULTS_INDEX_PATTERN,
size: 0,
body: {

View file

@ -7,7 +7,7 @@
import _ from 'lodash';
import moment from 'moment';
import { SearchResponse } from 'elasticsearch';
import { LegacyAPICaller } from 'kibana/server';
import { ILegacyScopedClusterClient } from 'kibana/server';
import { buildAnomalyTableItems } from './build_anomaly_table_items';
import { ML_RESULTS_INDEX_PATTERN } from '../../../common/constants/index_patterns';
import { ANOMALIES_TABLE_DEFAULT_QUERY_SIZE } from '../../../common/constants/search';
@ -30,7 +30,8 @@ interface Influencer {
fieldValue: any;
}
export function resultsServiceProvider(callAsCurrentUser: LegacyAPICaller) {
export function resultsServiceProvider(mlClusterClient: ILegacyScopedClusterClient) {
const { callAsCurrentUser } = mlClusterClient;
// Obtains data for the anomalies table, aggregating anomalies by day or hour as requested.
// Return an Object with properties 'anomalies' and 'interval' (interval used to aggregate anomalies,
// one of day, hour or second. Note 'auto' can be provided as the aggregationInterval in the request,
@ -435,6 +436,6 @@ export function resultsServiceProvider(callAsCurrentUser: LegacyAPICaller) {
getCategoryExamples,
getLatestBucketTimestampByJob,
getMaxAnomalyScore,
getPartitionFieldsValues: getPartitionFieldsValuesFactory(callAsCurrentUser),
getPartitionFieldsValues: getPartitionFieldsValuesFactory(mlClusterClient),
};
}
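
getPartitionFieldsValuesFactory above splits the two identities: ml.jobs goes through callAsInternalUser, while the search against the results index stays on callAsCurrentUser so the caller's index-level permissions still apply. A hedged sketch of that split; the index pattern value, the local types, and anything else not visible in the diff are assumptions:

type LegacyAPICaller = (endpoint: string, params?: Record<string, unknown>) => Promise<unknown>;

interface ILegacyScopedClusterClient {
  callAsCurrentUser: LegacyAPICaller;
  callAsInternalUser: LegacyAPICaller;
}

// Assumption: stand-in for ML_RESULTS_INDEX_PATTERN.
const ML_RESULTS_INDEX = '.ml-anomalies-*';

export const partitionFieldValuesFactory = ({
  callAsCurrentUser,
  callAsInternalUser,
}: ILegacyScopedClusterClient) =>
  async function getPartitionValues(jobId: string) {
    // ML endpoint calls run as the Kibana internal user...
    const jobsResponse = (await callAsInternalUser('ml.jobs', { jobId: [jobId] })) as {
      count: number;
    };
    if (jobsResponse.count === 0) {
      throw new Error(`Job with the id "${jobId}" not found`);
    }
    // ...while reads from the results index stay scoped to the requesting user,
    // so index-level security still decides what they can see.
    return callAsCurrentUser('search', { index: ML_RESULTS_INDEX, size: 0 });
  };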


@ -75,7 +75,7 @@ export class MlServerPlugin implements Plugin<MlPluginSetup, MlPluginStart, Plug
}
public setup(coreSetup: CoreSetup, plugins: PluginsSetup): MlPluginSetup {
const { user, admin } = getPluginPrivileges();
const { admin, user } = getPluginPrivileges();
plugins.features.registerFeature({
id: PLUGIN_ID,
@ -87,7 +87,13 @@ export class MlServerPlugin implements Plugin<MlPluginSetup, MlPluginStart, Plug
navLinkId: PLUGIN_ID,
app: [PLUGIN_ID, 'kibana'],
catalogue: [PLUGIN_ID],
privileges: null,
management: {
insightsAndAlerting: ['jobsListLink'],
},
privileges: {
all: admin,
read: user,
},
reserved: {
description: i18n.translate('xpack.ml.feature.reserved.description', {
defaultMessage:
@ -96,29 +102,11 @@ export class MlServerPlugin implements Plugin<MlPluginSetup, MlPluginStart, Plug
privileges: [
{
id: 'ml_user',
privilege: {
api: user.api,
app: [PLUGIN_ID, 'kibana'],
catalogue: [PLUGIN_ID],
savedObject: {
all: [],
read: [],
},
ui: user.ui,
},
privilege: user,
},
{
id: 'ml_admin',
privilege: {
api: admin.api,
app: [PLUGIN_ID, 'kibana'],
catalogue: [PLUGIN_ID],
savedObject: {
all: [],
read: [],
},
ui: admin.ui,
},
privilege: admin,
},
],
},


@ -60,9 +60,7 @@ export function annotationRoutes(
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { getAnnotations } = annotationServiceProvider(
context.ml!.mlClient.callAsCurrentUser
);
const { getAnnotations } = annotationServiceProvider(context.ml!.mlClient);
const resp = await getAnnotations(request.body);
return response.ok({
@ -96,19 +94,17 @@ export function annotationRoutes(
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const annotationsFeatureAvailable = await isAnnotationsFeatureAvailable(
context.ml!.mlClient.callAsCurrentUser
context.ml!.mlClient
);
if (annotationsFeatureAvailable === false) {
throw getAnnotationsFeatureUnavailableErrorMessage();
}
const { indexAnnotation } = annotationServiceProvider(
context.ml!.mlClient.callAsCurrentUser
);
const { indexAnnotation } = annotationServiceProvider(context.ml!.mlClient);
const currentUser =
securityPlugin !== undefined ? securityPlugin.authc.getCurrentUser(request) : {};
// @ts-ignore username doesn't exist on {}
// @ts-expect-error username doesn't exist on {}
const username = currentUser?.username ?? ANNOTATION_USER_UNKNOWN;
const resp = await indexAnnotation(request.body, username);
@ -143,16 +139,14 @@ export function annotationRoutes(
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const annotationsFeatureAvailable = await isAnnotationsFeatureAvailable(
context.ml!.mlClient.callAsCurrentUser
context.ml!.mlClient
);
if (annotationsFeatureAvailable === false) {
throw getAnnotationsFeatureUnavailableErrorMessage();
}
const annotationId = request.params.annotationId;
const { deleteAnnotation } = annotationServiceProvider(
context.ml!.mlClient.callAsCurrentUser
);
const { deleteAnnotation } = annotationServiceProvider(context.ml!.mlClient);
const resp = await deleteAnnotation(annotationId);
return response.ok({


@ -45,7 +45,7 @@ export function jobRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const results = await context.ml!.mlClient.callAsCurrentUser('ml.jobs');
const results = await context.ml!.mlClient.callAsInternalUser('ml.jobs');
return response.ok({
body: results,
});
@ -77,7 +77,7 @@ export function jobRoutes({ router, mlLicense }: RouteInitialization) {
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { jobId } = request.params;
const results = await context.ml!.mlClient.callAsCurrentUser('ml.jobs', { jobId });
const results = await context.ml!.mlClient.callAsInternalUser('ml.jobs', { jobId });
return response.ok({
body: results,
});
@ -107,7 +107,7 @@ export function jobRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const results = await context.ml!.mlClient.callAsCurrentUser('ml.jobStats');
const results = await context.ml!.mlClient.callAsInternalUser('ml.jobStats');
return response.ok({
body: results,
});
@ -139,7 +139,7 @@ export function jobRoutes({ router, mlLicense }: RouteInitialization) {
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { jobId } = request.params;
const results = await context.ml!.mlClient.callAsCurrentUser('ml.jobStats', { jobId });
const results = await context.ml!.mlClient.callAsInternalUser('ml.jobStats', { jobId });
return response.ok({
body: results,
});
@ -175,11 +175,9 @@ export function jobRoutes({ router, mlLicense }: RouteInitialization) {
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { jobId } = request.params;
const body = request.body;
const results = await context.ml!.mlClient.callAsCurrentUser('ml.addJob', {
const results = await context.ml!.mlClient.callAsInternalUser('ml.addJob', {
jobId,
body,
body: request.body,
});
return response.ok({
body: results,
@ -214,7 +212,7 @@ export function jobRoutes({ router, mlLicense }: RouteInitialization) {
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { jobId } = request.params;
const results = await context.ml!.mlClient.callAsCurrentUser('ml.updateJob', {
const results = await context.ml!.mlClient.callAsInternalUser('ml.updateJob', {
jobId,
body: request.body,
});
@ -249,7 +247,7 @@ export function jobRoutes({ router, mlLicense }: RouteInitialization) {
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { jobId } = request.params;
const results = await context.ml!.mlClient.callAsCurrentUser('ml.openJob', {
const results = await context.ml!.mlClient.callAsInternalUser('ml.openJob', {
jobId,
});
return response.ok({
@ -289,7 +287,7 @@ export function jobRoutes({ router, mlLicense }: RouteInitialization) {
if (force !== undefined) {
options.force = force;
}
const results = await context.ml!.mlClient.callAsCurrentUser('ml.closeJob', options);
const results = await context.ml!.mlClient.callAsInternalUser('ml.closeJob', options);
return response.ok({
body: results,
});
@ -327,7 +325,7 @@ export function jobRoutes({ router, mlLicense }: RouteInitialization) {
if (force !== undefined) {
options.force = force;
}
const results = await context.ml!.mlClient.callAsCurrentUser('ml.deleteJob', options);
const results = await context.ml!.mlClient.callAsInternalUser('ml.deleteJob', options);
return response.ok({
body: results,
});
@ -356,7 +354,7 @@ export function jobRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const results = await context.ml!.mlClient.callAsCurrentUser('ml.validateDetector', {
const results = await context.ml!.mlClient.callAsInternalUser('ml.validateDetector', {
body: request.body,
});
return response.ok({
@ -393,7 +391,7 @@ export function jobRoutes({ router, mlLicense }: RouteInitialization) {
try {
const jobId = request.params.jobId;
const duration = request.body.duration;
const results = await context.ml!.mlClient.callAsCurrentUser('ml.forecast', {
const results = await context.ml!.mlClient.callAsInternalUser('ml.forecast', {
jobId,
duration,
});
@ -432,7 +430,7 @@ export function jobRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const results = await context.ml!.mlClient.callAsCurrentUser('ml.records', {
const results = await context.ml!.mlClient.callAsInternalUser('ml.records', {
jobId: request.params.jobId,
body: request.body,
});
@ -471,7 +469,7 @@ export function jobRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const results = await context.ml!.mlClient.callAsCurrentUser('ml.buckets', {
const results = await context.ml!.mlClient.callAsInternalUser('ml.buckets', {
jobId: request.params.jobId,
timestamp: request.params.timestamp,
body: request.body,
@ -511,7 +509,7 @@ export function jobRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const results = await context.ml!.mlClient.callAsCurrentUser('ml.overallBuckets', {
const results = await context.ml!.mlClient.callAsInternalUser('ml.overallBuckets', {
jobId: request.params.jobId,
top_n: request.body.topN,
bucket_span: request.body.bucketSpan,
@ -548,7 +546,7 @@ export function jobRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const results = await context.ml!.mlClient.callAsCurrentUser('ml.categories', {
const results = await context.ml!.mlClient.callAsInternalUser('ml.categories', {
jobId: request.params.jobId,
categoryId: request.params.categoryId,
});
@ -582,7 +580,7 @@ export function jobRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const results = await context.ml!.mlClient.callAsCurrentUser('ml.modelSnapshots', {
const results = await context.ml!.mlClient.callAsInternalUser('ml.modelSnapshots', {
jobId: request.params.jobId,
});
return response.ok({
@ -615,7 +613,7 @@ export function jobRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const results = await context.ml!.mlClient.callAsCurrentUser('ml.modelSnapshots', {
const results = await context.ml!.mlClient.callAsInternalUser('ml.modelSnapshots', {
jobId: request.params.jobId,
snapshotId: request.params.snapshotId,
});
@ -651,7 +649,7 @@ export function jobRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const results = await context.ml!.mlClient.callAsCurrentUser('ml.updateModelSnapshot', {
const results = await context.ml!.mlClient.callAsInternalUser('ml.updateModelSnapshot', {
jobId: request.params.jobId,
snapshotId: request.params.snapshotId,
body: request.body,
@ -686,7 +684,7 @@ export function jobRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const results = await context.ml!.mlClient.callAsCurrentUser('ml.deleteModelSnapshot', {
const results = await context.ml!.mlClient.callAsInternalUser('ml.deleteModelSnapshot', {
jobId: request.params.jobId,
snapshotId: request.params.snapshotId,
});


@ -11,32 +11,32 @@ import { calendarSchema, calendarIdSchema, calendarIdsSchema } from './schemas/c
import { CalendarManager, Calendar, FormCalendar } from '../models/calendar';
function getAllCalendars(context: RequestHandlerContext) {
const cal = new CalendarManager(context.ml!.mlClient.callAsCurrentUser);
const cal = new CalendarManager(context.ml!.mlClient);
return cal.getAllCalendars();
}
function getCalendar(context: RequestHandlerContext, calendarId: string) {
const cal = new CalendarManager(context.ml!.mlClient.callAsCurrentUser);
const cal = new CalendarManager(context.ml!.mlClient);
return cal.getCalendar(calendarId);
}
function newCalendar(context: RequestHandlerContext, calendar: FormCalendar) {
const cal = new CalendarManager(context.ml!.mlClient.callAsCurrentUser);
const cal = new CalendarManager(context.ml!.mlClient);
return cal.newCalendar(calendar);
}
function updateCalendar(context: RequestHandlerContext, calendarId: string, calendar: Calendar) {
const cal = new CalendarManager(context.ml!.mlClient.callAsCurrentUser);
const cal = new CalendarManager(context.ml!.mlClient);
return cal.updateCalendar(calendarId, calendar);
}
function deleteCalendar(context: RequestHandlerContext, calendarId: string) {
const cal = new CalendarManager(context.ml!.mlClient.callAsCurrentUser);
const cal = new CalendarManager(context.ml!.mlClient);
return cal.deleteCalendar(calendarId);
}
function getCalendarsByIds(context: RequestHandlerContext, calendarIds: string) {
const cal = new CalendarManager(context.ml!.mlClient.callAsCurrentUser);
const cal = new CalendarManager(context.ml!.mlClient);
return cal.getCalendarsByIds(calendarIds);
}
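
CalendarManager is now constructed with the whole scoped client rather than a bare callAsCurrentUser, leaving the choice of identity to the class itself. A short hedged sketch of a manager-style class built that way; the ml.calendars endpoint name and the internal-user choice are assumptions made to match the rest of this commit:

type LegacyAPICaller = (endpoint: string, params?: Record<string, unknown>) => Promise<unknown>;

interface ILegacyScopedClusterClient {
  callAsCurrentUser: LegacyAPICaller;
  callAsInternalUser: LegacyAPICaller;
}

class CalendarManagerSketch {
  constructor(private readonly client: ILegacyScopedClusterClient) {}

  // Assumption: calendar CRUD goes through ML endpoints and therefore runs as the
  // internal user, like the other ml.* calls in this commit.
  getAllCalendars() {
    return this.client.callAsInternalUser('ml.calendars');
  }

  getCalendar(calendarId: string) {
    return this.client.callAsInternalUser('ml.calendars', { calendarId });
  }
}

// Usage, mirroring the route helpers above:
// const cal = new CalendarManagerSketch(context.ml!.mlClient);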


@ -19,6 +19,7 @@ import {
} from './schemas/data_analytics_schema';
import { IndexPatternHandler } from '../models/data_frame_analytics/index_patterns';
import { DeleteDataFrameAnalyticsWithIndexStatus } from '../../common/types/data_frame_analytics';
import { getAuthorizationHeader } from '../lib/request_authorization';
function getIndexPatternId(context: RequestHandlerContext, patternName: string) {
const iph = new IndexPatternHandler(context.core.savedObjects.client);
@ -77,7 +78,7 @@ export function dataFrameAnalyticsRoutes({ router, mlLicense }: RouteInitializat
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const results = await context.ml!.mlClient.callAsCurrentUser('ml.getDataFrameAnalytics');
const results = await context.ml!.mlClient.callAsInternalUser('ml.getDataFrameAnalytics');
return response.ok({
body: results,
});
@ -109,7 +110,7 @@ export function dataFrameAnalyticsRoutes({ router, mlLicense }: RouteInitializat
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { analyticsId } = request.params;
const results = await context.ml!.mlClient.callAsCurrentUser('ml.getDataFrameAnalytics', {
const results = await context.ml!.mlClient.callAsInternalUser('ml.getDataFrameAnalytics', {
analyticsId,
});
return response.ok({
@ -138,7 +139,7 @@ export function dataFrameAnalyticsRoutes({ router, mlLicense }: RouteInitializat
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const results = await context.ml!.mlClient.callAsCurrentUser(
const results = await context.ml!.mlClient.callAsInternalUser(
'ml.getDataFrameAnalyticsStats'
);
return response.ok({
@ -172,7 +173,7 @@ export function dataFrameAnalyticsRoutes({ router, mlLicense }: RouteInitializat
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { analyticsId } = request.params;
const results = await context.ml!.mlClient.callAsCurrentUser(
const results = await context.ml!.mlClient.callAsInternalUser(
'ml.getDataFrameAnalyticsStats',
{
analyticsId,
@ -212,11 +213,12 @@ export function dataFrameAnalyticsRoutes({ router, mlLicense }: RouteInitializat
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { analyticsId } = request.params;
const results = await context.ml!.mlClient.callAsCurrentUser(
const results = await context.ml!.mlClient.callAsInternalUser(
'ml.createDataFrameAnalytics',
{
body: request.body,
analyticsId,
...getAuthorizationHeader(request),
}
);
return response.ok({
@ -249,10 +251,11 @@ export function dataFrameAnalyticsRoutes({ router, mlLicense }: RouteInitializat
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const results = await context.ml!.mlClient.callAsCurrentUser(
const results = await context.ml!.mlClient.callAsInternalUser(
'ml.evaluateDataFrameAnalytics',
{
body: request.body,
...getAuthorizationHeader(request),
}
);
return response.ok({
@ -286,7 +289,7 @@ export function dataFrameAnalyticsRoutes({ router, mlLicense }: RouteInitializat
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const results = await context.ml!.mlClient.callAsCurrentUser(
const results = await context.ml!.mlClient.callAsInternalUser(
'ml.explainDataFrameAnalytics',
{
body: request.body,
@ -335,7 +338,7 @@ export function dataFrameAnalyticsRoutes({ router, mlLicense }: RouteInitializat
// Check if analyticsId is valid and get destination index
if (deleteDestIndex || deleteDestIndexPattern) {
try {
const dfa = await context.ml!.mlClient.callAsCurrentUser('ml.getDataFrameAnalytics', {
const dfa = await context.ml!.mlClient.callAsInternalUser('ml.getDataFrameAnalytics', {
analyticsId,
});
if (Array.isArray(dfa.data_frame_analytics) && dfa.data_frame_analytics.length > 0) {
@ -381,7 +384,7 @@ export function dataFrameAnalyticsRoutes({ router, mlLicense }: RouteInitializat
// Delete the data frame analytics
try {
await context.ml!.mlClient.callAsCurrentUser('ml.deleteDataFrameAnalytics', {
await context.ml!.mlClient.callAsInternalUser('ml.deleteDataFrameAnalytics', {
analyticsId,
});
analyticsJobDeleted.success = true;
@ -427,9 +430,12 @@ export function dataFrameAnalyticsRoutes({ router, mlLicense }: RouteInitializat
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { analyticsId } = request.params;
const results = await context.ml!.mlClient.callAsCurrentUser('ml.startDataFrameAnalytics', {
analyticsId,
});
const results = await context.ml!.mlClient.callAsInternalUser(
'ml.startDataFrameAnalytics',
{
analyticsId,
}
);
return response.ok({
body: results,
});
@ -465,13 +471,13 @@ export function dataFrameAnalyticsRoutes({ router, mlLicense }: RouteInitializat
const options: { analyticsId: string; force?: boolean | undefined } = {
analyticsId: request.params.analyticsId,
};
// @ts-ignore TODO: update types
// @ts-expect-error TODO: update types
if (request.url?.query?.force !== undefined) {
// @ts-ignore TODO: update types
// @ts-expect-error TODO: update types
options.force = request.url.query.force;
}
const results = await context.ml!.mlClient.callAsCurrentUser(
const results = await context.ml!.mlClient.callAsInternalUser(
'ml.stopDataFrameAnalytics',
options
);
@ -545,9 +551,7 @@ export function dataFrameAnalyticsRoutes({ router, mlLicense }: RouteInitializat
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { analyticsId } = request.params;
const { getAnalyticsAuditMessages } = analyticsAuditMessagesProvider(
context.ml!.mlClient.callAsCurrentUser
);
const { getAnalyticsAuditMessages } = analyticsAuditMessagesProvider(context.ml!.mlClient);
const results = await getAnalyticsAuditMessages(analyticsId);
return response.ok({


@ -27,7 +27,7 @@ function getOverallStats(
earliestMs: number,
latestMs: number
) {
const dv = new DataVisualizer(context.ml!.mlClient.callAsCurrentUser);
const dv = new DataVisualizer(context.ml!.mlClient);
return dv.getOverallStats(
indexPatternTitle,
query,
@ -52,7 +52,7 @@ function getStatsForFields(
interval: number,
maxExamples: number
) {
const dv = new DataVisualizer(context.ml!.mlClient.callAsCurrentUser);
const dv = new DataVisualizer(context.ml!.mlClient);
return dv.getStatsForFields(
indexPatternTitle,
query,
@ -73,7 +73,7 @@ function getHistogramsForFields(
fields: HistogramField[],
samplerShardSize: number
) {
const dv = new DataVisualizer(context.ml!.mlClient.callAsCurrentUser);
const dv = new DataVisualizer(context.ml!.mlClient);
return dv.getHistogramsForFields(indexPatternTitle, query, fields, samplerShardSize);
}


@ -12,6 +12,7 @@ import {
datafeedIdSchema,
deleteDatafeedQuerySchema,
} from './schemas/datafeeds_schema';
import { getAuthorizationHeader } from '../lib/request_authorization';
/**
* Routes for datafeed service
@ -34,7 +35,7 @@ export function dataFeedRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const resp = await context.ml!.mlClient.callAsCurrentUser('ml.datafeeds');
const resp = await context.ml!.mlClient.callAsInternalUser('ml.datafeeds');
return response.ok({
body: resp,
@ -67,7 +68,7 @@ export function dataFeedRoutes({ router, mlLicense }: RouteInitialization) {
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const datafeedId = request.params.datafeedId;
const resp = await context.ml!.mlClient.callAsCurrentUser('ml.datafeeds', { datafeedId });
const resp = await context.ml!.mlClient.callAsInternalUser('ml.datafeeds', { datafeedId });
return response.ok({
body: resp,
@ -95,7 +96,7 @@ export function dataFeedRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const resp = await context.ml!.mlClient.callAsCurrentUser('ml.datafeedStats');
const resp = await context.ml!.mlClient.callAsInternalUser('ml.datafeedStats');
return response.ok({
body: resp,
@ -128,7 +129,7 @@ export function dataFeedRoutes({ router, mlLicense }: RouteInitialization) {
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const datafeedId = request.params.datafeedId;
const resp = await context.ml!.mlClient.callAsCurrentUser('ml.datafeedStats', {
const resp = await context.ml!.mlClient.callAsInternalUser('ml.datafeedStats', {
datafeedId,
});
@ -165,9 +166,10 @@ export function dataFeedRoutes({ router, mlLicense }: RouteInitialization) {
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const datafeedId = request.params.datafeedId;
const resp = await context.ml!.mlClient.callAsCurrentUser('ml.addDatafeed', {
const resp = await context.ml!.mlClient.callAsInternalUser('ml.addDatafeed', {
datafeedId,
body: request.body,
...getAuthorizationHeader(request),
});
return response.ok({
@ -203,9 +205,10 @@ export function dataFeedRoutes({ router, mlLicense }: RouteInitialization) {
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const datafeedId = request.params.datafeedId;
const resp = await context.ml!.mlClient.callAsCurrentUser('ml.updateDatafeed', {
const resp = await context.ml!.mlClient.callAsInternalUser('ml.updateDatafeed', {
datafeedId,
body: request.body,
...getAuthorizationHeader(request),
});
return response.ok({
@ -248,7 +251,7 @@ export function dataFeedRoutes({ router, mlLicense }: RouteInitialization) {
options.force = force;
}
const resp = await context.ml!.mlClient.callAsCurrentUser('ml.deleteDatafeed', options);
const resp = await context.ml!.mlClient.callAsInternalUser('ml.deleteDatafeed', options);
return response.ok({
body: resp,
@ -285,7 +288,7 @@ export function dataFeedRoutes({ router, mlLicense }: RouteInitialization) {
const datafeedId = request.params.datafeedId;
const { start, end } = request.body;
const resp = await context.ml!.mlClient.callAsCurrentUser('ml.startDatafeed', {
const resp = await context.ml!.mlClient.callAsInternalUser('ml.startDatafeed', {
datafeedId,
start,
end,
@ -323,7 +326,7 @@ export function dataFeedRoutes({ router, mlLicense }: RouteInitialization) {
try {
const datafeedId = request.params.datafeedId;
const resp = await context.ml!.mlClient.callAsCurrentUser('ml.stopDatafeed', {
const resp = await context.ml!.mlClient.callAsInternalUser('ml.stopDatafeed', {
datafeedId,
});
@ -358,8 +361,9 @@ export function dataFeedRoutes({ router, mlLicense }: RouteInitialization) {
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const datafeedId = request.params.datafeedId;
const resp = await context.ml!.mlClient.callAsCurrentUser('ml.datafeedPreview', {
const resp = await context.ml!.mlClient.callAsInternalUser('ml.datafeedPreview', {
datafeedId,
...getAuthorizationHeader(request),
});
return response.ok({

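For reference, a minimal sketch of the getAuthorizationHeader helper imported above. Only the import and its call sites appear in this diff, so the header key and internals here are assumptions, not the actual implementation:

import { KibanaRequest } from 'kibana/server';

// Forwards the end user's credentials so that requests issued as the internal
// user are still authorized as the calling user (secondary authorization).
export function getAuthorizationHeader(request: KibanaRequest) {
  const { authorization } = request.headers;
  // When security is disabled there is no authorization header to forward.
  return authorization !== undefined
    ? { headers: { 'es-secondary-authorization': authorization } }
    : {};
}

Spread into calls such as ml.addDatafeed or ml.datafeedPreview above, this attaches the user's credentials to what is otherwise an internal-user request.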

@ -14,13 +14,13 @@ import {
import { fieldsServiceProvider } from '../models/fields_service';
function getCardinalityOfFields(context: RequestHandlerContext, payload: any) {
const fs = fieldsServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const fs = fieldsServiceProvider(context.ml!.mlClient);
const { index, fieldNames, query, timeFieldName, earliestMs, latestMs } = payload;
return fs.getCardinalityOfFields(index, fieldNames, query, timeFieldName, earliestMs, latestMs);
}
function getTimeFieldRange(context: RequestHandlerContext, payload: any) {
const fs = fieldsServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const fs = fieldsServiceProvider(context.ml!.mlClient);
const { index, timeFieldName, query } = payload;
return fs.getTimeFieldRange(index, timeFieldName, query);
}


@ -29,7 +29,7 @@ import {
} from './schemas/file_data_visualizer_schema';
function analyzeFiles(context: RequestHandlerContext, data: InputData, overrides: InputOverrides) {
const { analyzeFile } = fileDataVisualizerProvider(context.ml!.mlClient.callAsCurrentUser);
const { analyzeFile } = fileDataVisualizerProvider(context.ml!.mlClient);
return analyzeFile(data, overrides);
}
@ -42,7 +42,7 @@ function importData(
ingestPipeline: IngestPipelineWrapper,
data: InputData
) {
const { importData: importDataFunc } = importDataProvider(context.ml!.mlClient.callAsCurrentUser);
const { importData: importDataFunc } = importDataProvider(context.ml!.mlClient);
return importDataFunc(id, index, settings, mappings, ingestPipeline, data);
}


@ -13,32 +13,32 @@ import { FilterManager, FormFilter } from '../models/filter';
// TODO - add function for returning a list of just the filter IDs.
// TODO - add function for returning a list of filter IDs plus item count.
function getAllFilters(context: RequestHandlerContext) {
const mgr = new FilterManager(context.ml!.mlClient.callAsCurrentUser);
const mgr = new FilterManager(context.ml!.mlClient);
return mgr.getAllFilters();
}
function getAllFilterStats(context: RequestHandlerContext) {
const mgr = new FilterManager(context.ml!.mlClient.callAsCurrentUser);
const mgr = new FilterManager(context.ml!.mlClient);
return mgr.getAllFilterStats();
}
function getFilter(context: RequestHandlerContext, filterId: string) {
const mgr = new FilterManager(context.ml!.mlClient.callAsCurrentUser);
const mgr = new FilterManager(context.ml!.mlClient);
return mgr.getFilter(filterId);
}
function newFilter(context: RequestHandlerContext, filter: FormFilter) {
const mgr = new FilterManager(context.ml!.mlClient.callAsCurrentUser);
const mgr = new FilterManager(context.ml!.mlClient);
return mgr.newFilter(filter);
}
function updateFilter(context: RequestHandlerContext, filterId: string, filter: FormFilter) {
const mgr = new FilterManager(context.ml!.mlClient.callAsCurrentUser);
const mgr = new FilterManager(context.ml!.mlClient);
return mgr.updateFilter(filterId, filter);
}
function deleteFilter(context: RequestHandlerContext, filterId: string) {
const mgr = new FilterManager(context.ml!.mlClient.callAsCurrentUser);
const mgr = new FilterManager(context.ml!.mlClient);
return mgr.deleteFilter(filterId);
}

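The model and service providers in these route files all follow the same refactor: instead of receiving a bare callAsCurrentUser function, they now receive the whole ILegacyScopedClusterClient and choose the caller internally. A rough sketch of the receiving side, using FilterManager as the example; the class internals shown here are assumptions, as only the constructor call sites appear in this diff:

import { ILegacyScopedClusterClient } from 'kibana/server';

export class FilterManager {
  constructor(private readonly _mlClusterClient: ILegacyScopedClusterClient) {}

  async getAllFilters() {
    // ML endpoints are now always called as the internal user; the calling
    // user's access is enforced separately by route guards and capabilities.
    const { filters } = await this._mlClusterClient.callAsInternalUser('ml.filters');
    return filters;
  }
}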

@ -39,9 +39,7 @@ export function jobAuditMessagesRoutes({ router, mlLicense }: RouteInitializatio
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { getJobAuditMessages } = jobAuditMessagesProvider(
context.ml!.mlClient.callAsCurrentUser
);
const { getJobAuditMessages } = jobAuditMessagesProvider(context.ml!.mlClient);
const { jobId } = request.params;
const { from } = request.query;
const resp = await getJobAuditMessages(jobId, from);
@ -76,9 +74,7 @@ export function jobAuditMessagesRoutes({ router, mlLicense }: RouteInitializatio
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { getJobAuditMessages } = jobAuditMessagesProvider(
context.ml!.mlClient.callAsCurrentUser
);
const { getJobAuditMessages } = jobAuditMessagesProvider(context.ml!.mlClient);
const { from } = request.query;
const resp = await getJobAuditMessages(undefined, from);


@ -50,7 +50,7 @@ export function jobServiceRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { forceStartDatafeeds } = jobServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const { forceStartDatafeeds } = jobServiceProvider(context.ml!.mlClient);
const { datafeedIds, start, end } = request.body;
const resp = await forceStartDatafeeds(datafeedIds, start, end);
@ -84,7 +84,7 @@ export function jobServiceRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { stopDatafeeds } = jobServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const { stopDatafeeds } = jobServiceProvider(context.ml!.mlClient);
const { datafeedIds } = request.body;
const resp = await stopDatafeeds(datafeedIds);
@ -118,7 +118,7 @@ export function jobServiceRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { deleteJobs } = jobServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const { deleteJobs } = jobServiceProvider(context.ml!.mlClient);
const { jobIds } = request.body;
const resp = await deleteJobs(jobIds);
@ -152,7 +152,7 @@ export function jobServiceRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { closeJobs } = jobServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const { closeJobs } = jobServiceProvider(context.ml!.mlClient);
const { jobIds } = request.body;
const resp = await closeJobs(jobIds);
@ -186,7 +186,7 @@ export function jobServiceRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { forceStopAndCloseJob } = jobServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const { forceStopAndCloseJob } = jobServiceProvider(context.ml!.mlClient);
const { jobId } = request.body;
const resp = await forceStopAndCloseJob(jobId);
@ -225,7 +225,7 @@ export function jobServiceRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { jobsSummary } = jobServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const { jobsSummary } = jobServiceProvider(context.ml!.mlClient);
const { jobIds } = request.body;
const resp = await jobsSummary(jobIds);
@ -259,7 +259,7 @@ export function jobServiceRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { jobsWithTimerange } = jobServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const { jobsWithTimerange } = jobServiceProvider(context.ml!.mlClient);
const resp = await jobsWithTimerange();
return response.ok({
@ -292,7 +292,7 @@ export function jobServiceRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { createFullJobsList } = jobServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const { createFullJobsList } = jobServiceProvider(context.ml!.mlClient);
const { jobIds } = request.body;
const resp = await createFullJobsList(jobIds);
@ -322,7 +322,7 @@ export function jobServiceRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { getAllGroups } = jobServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const { getAllGroups } = jobServiceProvider(context.ml!.mlClient);
const resp = await getAllGroups();
return response.ok({
@ -355,7 +355,7 @@ export function jobServiceRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { updateGroups } = jobServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const { updateGroups } = jobServiceProvider(context.ml!.mlClient);
const { jobs } = request.body;
const resp = await updateGroups(jobs);
@ -385,7 +385,7 @@ export function jobServiceRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { deletingJobTasks } = jobServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const { deletingJobTasks } = jobServiceProvider(context.ml!.mlClient);
const resp = await deletingJobTasks();
return response.ok({
@ -418,7 +418,7 @@ export function jobServiceRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { jobsExist } = jobServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const { jobsExist } = jobServiceProvider(context.ml!.mlClient);
const { jobIds } = request.body;
const resp = await jobsExist(jobIds);
@ -454,7 +454,7 @@ export function jobServiceRoutes({ router, mlLicense }: RouteInitialization) {
const { indexPattern } = request.params;
const isRollup = request.query.rollup === 'true';
const savedObjectsClient = context.core.savedObjects.client;
const { newJobCaps } = jobServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const { newJobCaps } = jobServiceProvider(context.ml!.mlClient);
const resp = await newJobCaps(indexPattern, isRollup, savedObjectsClient);
return response.ok({
@ -499,7 +499,7 @@ export function jobServiceRoutes({ router, mlLicense }: RouteInitialization) {
splitFieldValue,
} = request.body;
const { newJobLineChart } = jobServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const { newJobLineChart } = jobServiceProvider(context.ml!.mlClient);
const resp = await newJobLineChart(
indexPatternTitle,
timeField,
@ -553,9 +553,7 @@ export function jobServiceRoutes({ router, mlLicense }: RouteInitialization) {
splitFieldName,
} = request.body;
const { newJobPopulationChart } = jobServiceProvider(
context.ml!.mlClient.callAsCurrentUser
);
const { newJobPopulationChart } = jobServiceProvider(context.ml!.mlClient);
const resp = await newJobPopulationChart(
indexPatternTitle,
timeField,
@ -593,7 +591,7 @@ export function jobServiceRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { getAllJobAndGroupIds } = jobServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const { getAllJobAndGroupIds } = jobServiceProvider(context.ml!.mlClient);
const resp = await getAllJobAndGroupIds();
return response.ok({
@ -626,7 +624,7 @@ export function jobServiceRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { getLookBackProgress } = jobServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const { getLookBackProgress } = jobServiceProvider(context.ml!.mlClient);
const { jobId, start, end } = request.body;
const resp = await getLookBackProgress(jobId, start, end);
@ -660,10 +658,7 @@ export function jobServiceRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { validateCategoryExamples } = categorizationExamplesProvider(
context.ml!.mlClient.callAsCurrentUser,
context.ml!.mlClient.callAsInternalUser
);
const { validateCategoryExamples } = categorizationExamplesProvider(context.ml!.mlClient);
const {
indexPatternTitle,
timeField,
@ -716,7 +711,7 @@ export function jobServiceRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { topCategories } = jobServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const { topCategories } = jobServiceProvider(context.ml!.mlClient);
const { jobId, count } = request.body;
const resp = await topCategories(jobId, count);
@ -750,7 +745,7 @@ export function jobServiceRoutes({ router, mlLicense }: RouteInitialization) {
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { revertModelSnapshot } = jobServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const { revertModelSnapshot } = jobServiceProvider(context.ml!.mlClient);
const {
jobId,
snapshotId,


@ -32,7 +32,7 @@ export function jobValidationRoutes({ router, mlLicense }: RouteInitialization,
) {
const { analysisConfig, indexPattern, query, timeFieldName, earliestMs, latestMs } = payload;
return calculateModelMemoryLimitProvider(context.ml!.mlClient.callAsCurrentUser)(
return calculateModelMemoryLimitProvider(context.ml!.mlClient)(
analysisConfig as AnalysisConfig,
indexPattern,
query,
@ -64,11 +64,7 @@ export function jobValidationRoutes({ router, mlLicense }: RouteInitialization,
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
let errorResp;
const resp = await estimateBucketSpanFactory(
context.ml!.mlClient.callAsCurrentUser,
context.ml!.mlClient.callAsInternalUser,
mlLicense.isSecurityEnabled() === false
)(request.body)
const resp = await estimateBucketSpanFactory(context.ml!.mlClient)(request.body)
// this catch gets triggered when the estimation code runs without error
// but isn't able to come up with a bucket span estimation.
// this doesn't return a HTTP error but an object with an error message
@ -147,10 +143,7 @@ export function jobValidationRoutes({ router, mlLicense }: RouteInitialization,
},
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const resp = await validateCardinality(
context.ml!.mlClient.callAsCurrentUser,
request.body
);
const resp = await validateCardinality(context.ml!.mlClient, request.body);
return response.ok({
body: resp,
@ -184,10 +177,9 @@ export function jobValidationRoutes({ router, mlLicense }: RouteInitialization,
try {
// version corresponds to the version used in documentation links.
const resp = await validateJob(
context.ml!.mlClient.callAsCurrentUser,
context.ml!.mlClient,
request.body,
version,
context.ml!.mlClient.callAsInternalUser,
mlLicense.isSecurityEnabled() === false
);


@ -6,7 +6,7 @@
import { TypeOf } from '@kbn/config-schema';
import { RequestHandlerContext } from 'kibana/server';
import { RequestHandlerContext, KibanaRequest } from 'kibana/server';
import { DatafeedOverride, JobOverride } from '../../common/types/modules';
import { wrapError } from '../client/error_wrapper';
import { DataRecognizer } from '../models/data_recognizer';
@ -18,19 +18,17 @@ import {
} from './schemas/modules';
import { RouteInitialization } from '../types';
function recognize(context: RequestHandlerContext, indexPatternTitle: string) {
const dr = new DataRecognizer(
context.ml!.mlClient.callAsCurrentUser,
context.core.savedObjects.client
);
function recognize(
context: RequestHandlerContext,
request: KibanaRequest,
indexPatternTitle: string
) {
const dr = new DataRecognizer(context.ml!.mlClient, context.core.savedObjects.client, request);
return dr.findMatches(indexPatternTitle);
}
function getModule(context: RequestHandlerContext, moduleId: string) {
const dr = new DataRecognizer(
context.ml!.mlClient.callAsCurrentUser,
context.core.savedObjects.client
);
function getModule(context: RequestHandlerContext, request: KibanaRequest, moduleId: string) {
const dr = new DataRecognizer(context.ml!.mlClient, context.core.savedObjects.client, request);
if (moduleId === undefined) {
return dr.listModules();
} else {
@ -40,6 +38,7 @@ function getModule(context: RequestHandlerContext, moduleId: string) {
function setup(
context: RequestHandlerContext,
request: KibanaRequest,
moduleId: string,
prefix?: string,
groups?: string[],
@ -53,10 +52,7 @@ function setup(
datafeedOverrides?: DatafeedOverride | DatafeedOverride[],
estimateModelMemory?: boolean
) {
const dr = new DataRecognizer(
context.ml!.mlClient.callAsCurrentUser,
context.core.savedObjects.client
);
const dr = new DataRecognizer(context.ml!.mlClient, context.core.savedObjects.client, request);
return dr.setup(
moduleId,
prefix,
@ -73,11 +69,12 @@ function setup(
);
}
function dataRecognizerJobsExist(context: RequestHandlerContext, moduleId: string) {
const dr = new DataRecognizer(
context.ml!.mlClient.callAsCurrentUser,
context.core.savedObjects.client
);
function dataRecognizerJobsExist(
context: RequestHandlerContext,
request: KibanaRequest,
moduleId: string
) {
const dr = new DataRecognizer(context.ml!.mlClient, context.core.savedObjects.client, request);
return dr.dataRecognizerJobsExist(moduleId);
}
@ -125,7 +122,7 @@ export function dataRecognizer({ router, mlLicense }: RouteInitialization) {
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { indexPatternTitle } = request.params;
const results = await recognize(context, indexPatternTitle);
const results = await recognize(context, request, indexPatternTitle);
return response.ok({ body: results });
} catch (e) {
@ -260,7 +257,7 @@ export function dataRecognizer({ router, mlLicense }: RouteInitialization) {
// the moduleId will be an empty string.
moduleId = undefined;
}
const results = await getModule(context, moduleId);
const results = await getModule(context, request, moduleId);
return response.ok({ body: results });
} catch (e) {
@ -440,6 +437,7 @@ export function dataRecognizer({ router, mlLicense }: RouteInitialization) {
const result = await setup(
context,
request,
moduleId,
prefix,
groups,
@ -526,7 +524,7 @@ export function dataRecognizer({ router, mlLicense }: RouteInitialization) {
mlLicense.fullLicenseAPIGuard(async (context, request, response) => {
try {
const { moduleId } = request.params;
const result = await dataRecognizerJobsExist(context, moduleId);
const result = await dataRecognizerJobsExist(context, request, moduleId);
return response.ok({ body: result });
} catch (e) {

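DataRecognizer now also receives the KibanaRequest, which is what lets module setup attach the end user's credentials when it creates jobs and datafeeds as the internal user. A hedged sketch of the constructor shape implied by the call sites above; the field names and the datafeed helper are assumptions, and the import path simply mirrors the routes file above (the real relative path in the model differs):

import {
  ILegacyScopedClusterClient,
  KibanaRequest,
  SavedObjectsClientContract,
} from 'kibana/server';
import { getAuthorizationHeader } from '../lib/request_authorization';

export class DataRecognizer {
  constructor(
    private readonly _mlClusterClient: ILegacyScopedClusterClient,
    private readonly _savedObjectsClient: SavedObjectsClientContract, // unused in this sketch
    private readonly _request: KibanaRequest
  ) {}

  // Datafeeds created during module setup run as the internal user but carry
  // the end user's credentials via the secondary authorization header.
  private async saveDatafeed(datafeedId: string, datafeedConfig: object) {
    return this._mlClusterClient.callAsInternalUser('ml.addDatafeed', {
      datafeedId,
      body: datafeedConfig,
      ...getAuthorizationHeader(this._request),
    });
  }
}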

@ -17,7 +17,7 @@ import {
import { resultsServiceProvider } from '../models/results_service';
function getAnomaliesTableData(context: RequestHandlerContext, payload: any) {
const rs = resultsServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const rs = resultsServiceProvider(context.ml!.mlClient);
const {
jobIds,
criteriaFields,
@ -47,24 +47,24 @@ function getAnomaliesTableData(context: RequestHandlerContext, payload: any) {
}
function getCategoryDefinition(context: RequestHandlerContext, payload: any) {
const rs = resultsServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const rs = resultsServiceProvider(context.ml!.mlClient);
return rs.getCategoryDefinition(payload.jobId, payload.categoryId);
}
function getCategoryExamples(context: RequestHandlerContext, payload: any) {
const rs = resultsServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const rs = resultsServiceProvider(context.ml!.mlClient);
const { jobId, categoryIds, maxExamples } = payload;
return rs.getCategoryExamples(jobId, categoryIds, maxExamples);
}
function getMaxAnomalyScore(context: RequestHandlerContext, payload: any) {
const rs = resultsServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const rs = resultsServiceProvider(context.ml!.mlClient);
const { jobIds, earliestMs, latestMs } = payload;
return rs.getMaxAnomalyScore(jobIds, earliestMs, latestMs);
}
function getPartitionFieldsValues(context: RequestHandlerContext, payload: any) {
const rs = resultsServiceProvider(context.ml!.mlClient.callAsCurrentUser);
const rs = resultsServiceProvider(context.ml!.mlClient);
const { jobId, searchTerm, criteriaFields, earliestMs, latestMs } = payload;
return rs.getPartitionFieldsValues(jobId, searchTerm, criteriaFields, earliestMs, latestMs);
}


@ -60,9 +60,10 @@ export function systemRoutes(
},
mlLicense.basicLicenseAPIGuard(async (context, request, response) => {
try {
const { callAsCurrentUser, callAsInternalUser } = context.ml!.mlClient;
let upgradeInProgress = false;
try {
const info = await context.ml!.mlClient.callAsCurrentUser('ml.info');
const info = await callAsInternalUser('ml.info');
// if ml indices are currently being migrated, upgrade_mode will be set to true
// pass this back with the privileges to allow for the disabling of UI controls.
upgradeInProgress = info.upgrade_mode === true;
@ -90,7 +91,7 @@ export function systemRoutes(
});
} else {
const body = request.body;
const resp = await context.ml!.mlClient.callAsCurrentUser('ml.privilegeCheck', { body });
const resp = await callAsCurrentUser('ml.privilegeCheck', { body });
resp.upgradeInProgress = upgradeInProgress;
return response.ok({
body: resp,
@ -128,7 +129,7 @@ export function systemRoutes(
}
const { getCapabilities } = capabilitiesProvider(
context.ml!.mlClient.callAsCurrentUser,
context.ml!.mlClient,
mlCapabilities,
mlLicense,
isMlEnabledInSpace
@ -154,43 +155,15 @@ export function systemRoutes(
path: '/api/ml/ml_node_count',
validate: false,
options: {
tags: ['access:ml:canGetJobs'],
tags: ['access:ml:canGetJobs', 'access:ml:canGetDatafeeds'],
},
},
mlLicense.basicLicenseAPIGuard(async (context, request, response) => {
try {
// check for basic license first for consistency with other
// security disabled checks
if (mlLicense.isSecurityEnabled() === false) {
return response.ok({
body: await getNodeCount(context),
});
} else {
// if security is enabled, check that the user has permission to
// view jobs before calling getNodeCount.
// getNodeCount calls the _nodes endpoint as the internal user
// and so could give the user access to more information than
// they are entitled to.
const requiredPrivileges = [
'cluster:monitor/xpack/ml/job/get',
'cluster:monitor/xpack/ml/job/stats/get',
'cluster:monitor/xpack/ml/datafeeds/get',
'cluster:monitor/xpack/ml/datafeeds/stats/get',
];
const body = { cluster: requiredPrivileges };
const resp = await context.ml!.mlClient.callAsCurrentUser('ml.privilegeCheck', { body });
if (resp.has_all_requested) {
return response.ok({
body: await getNodeCount(context),
});
} else {
// if the user doesn't have permission to create jobs
// return a 403
return response.forbidden();
}
}
return response.ok({
body: await getNodeCount(context),
});
} catch (e) {
return response.customError(wrapError(e));
}
@ -214,7 +187,7 @@ export function systemRoutes(
},
mlLicense.basicLicenseAPIGuard(async (context, request, response) => {
try {
const info = await context.ml!.mlClient.callAsCurrentUser('ml.info');
const info = await context.ml!.mlClient.callAsInternalUser('ml.info');
const cloudId = cloud && cloud.cloudId;
return response.ok({
body: { ...info, cloudId },


@ -4,13 +4,13 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller, KibanaRequest } from 'kibana/server';
import { ILegacyScopedClusterClient, KibanaRequest } from 'kibana/server';
import { Job } from '../../../common/types/anomaly_detection_jobs';
import { SharedServicesChecks } from '../shared_services';
export interface AnomalyDetectorsProvider {
anomalyDetectorsProvider(
callAsCurrentUser: LegacyAPICaller,
mlClusterClient: ILegacyScopedClusterClient,
request: KibanaRequest
): {
jobs(jobId?: string): Promise<{ count: number; jobs: Job[] }>;
@ -22,13 +22,16 @@ export function getAnomalyDetectorsProvider({
getHasMlCapabilities,
}: SharedServicesChecks): AnomalyDetectorsProvider {
return {
anomalyDetectorsProvider(callAsCurrentUser: LegacyAPICaller, request: KibanaRequest) {
anomalyDetectorsProvider(mlClusterClient: ILegacyScopedClusterClient, request: KibanaRequest) {
const hasMlCapabilities = getHasMlCapabilities(request);
return {
async jobs(jobId?: string) {
isFullLicense();
await hasMlCapabilities(['canGetJobs']);
return callAsCurrentUser('ml.jobs', jobId !== undefined ? { jobId } : {});
return mlClusterClient.callAsInternalUser(
'ml.jobs',
jobId !== undefined ? { jobId } : {}
);
},
};
},

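For plugins consuming the ML shared services (as the SIEM changes later in this diff do), the calling convention is now: pass the scoped cluster client plus the request, then await the returned methods. A short usage sketch based on the provider interface above; the import path and function name are illustrative and depend on the consuming plugin:

import { ILegacyScopedClusterClient, KibanaRequest } from 'kibana/server';
import { MlPluginSetup } from '../../../ml/server';

export async function countAnomalyDetectionJobs(
  ml: MlPluginSetup,
  mlClusterClient: ILegacyScopedClusterClient,
  request: KibanaRequest
): Promise<number> {
  // The provider performs the license and ML capability checks before the
  // call is made as the internal user.
  const { jobs } = ml.anomalyDetectorsProvider(mlClusterClient, request);
  const { count } = await jobs();
  return count;
}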

@ -4,7 +4,7 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller, KibanaRequest } from 'kibana/server';
import { ILegacyScopedClusterClient, KibanaRequest } from 'kibana/server';
import { jobServiceProvider } from '../../models/job_service';
import { SharedServicesChecks } from '../shared_services';
@ -12,7 +12,7 @@ type OrigJobServiceProvider = ReturnType<typeof jobServiceProvider>;
export interface JobServiceProvider {
jobServiceProvider(
callAsCurrentUser: LegacyAPICaller,
mlClusterClient: ILegacyScopedClusterClient,
request: KibanaRequest
): {
jobsSummary: OrigJobServiceProvider['jobsSummary'];
@ -24,9 +24,9 @@ export function getJobServiceProvider({
getHasMlCapabilities,
}: SharedServicesChecks): JobServiceProvider {
return {
jobServiceProvider(callAsCurrentUser: LegacyAPICaller, request: KibanaRequest) {
jobServiceProvider(mlClusterClient: ILegacyScopedClusterClient, request: KibanaRequest) {
// const hasMlCapabilities = getHasMlCapabilities(request);
const { jobsSummary } = jobServiceProvider(callAsCurrentUser);
const { jobsSummary } = jobServiceProvider(mlClusterClient);
return {
async jobsSummary(...args) {
isFullLicense();


@ -4,7 +4,11 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller, KibanaRequest, SavedObjectsClientContract } from 'kibana/server';
import {
ILegacyScopedClusterClient,
KibanaRequest,
SavedObjectsClientContract,
} from 'kibana/server';
import { TypeOf } from '@kbn/config-schema';
import { DataRecognizer } from '../../models/data_recognizer';
import { SharedServicesChecks } from '../shared_services';
@ -15,7 +19,7 @@ export type ModuleSetupPayload = TypeOf<typeof moduleIdParamSchema> &
export interface ModulesProvider {
modulesProvider(
callAsCurrentUser: LegacyAPICaller,
mlClusterClient: ILegacyScopedClusterClient,
request: KibanaRequest,
savedObjectsClient: SavedObjectsClientContract
): {
@ -32,12 +36,12 @@ export function getModulesProvider({
}: SharedServicesChecks): ModulesProvider {
return {
modulesProvider(
callAsCurrentUser: LegacyAPICaller,
mlClusterClient: ILegacyScopedClusterClient,
request: KibanaRequest,
savedObjectsClient: SavedObjectsClientContract
) {
const hasMlCapabilities = getHasMlCapabilities(request);
const dr = dataRecognizerFactory(callAsCurrentUser, savedObjectsClient);
const dr = dataRecognizerFactory(mlClusterClient, savedObjectsClient, request);
return {
async recognize(...args) {
isFullLicense();
@ -82,8 +86,9 @@ export function getModulesProvider({
}
function dataRecognizerFactory(
callAsCurrentUser: LegacyAPICaller,
savedObjectsClient: SavedObjectsClientContract
mlClusterClient: ILegacyScopedClusterClient,
savedObjectsClient: SavedObjectsClientContract,
request: KibanaRequest
) {
return new DataRecognizer(callAsCurrentUser, savedObjectsClient);
return new DataRecognizer(mlClusterClient, savedObjectsClient, request);
}


@ -4,7 +4,7 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller, KibanaRequest } from 'kibana/server';
import { ILegacyScopedClusterClient, KibanaRequest } from 'kibana/server';
import { resultsServiceProvider } from '../../models/results_service';
import { SharedServicesChecks } from '../shared_services';
@ -12,7 +12,7 @@ type OrigResultsServiceProvider = ReturnType<typeof resultsServiceProvider>;
export interface ResultsServiceProvider {
resultsServiceProvider(
callAsCurrentUser: LegacyAPICaller,
mlClusterClient: ILegacyScopedClusterClient,
request: KibanaRequest
): {
getAnomaliesTableData: OrigResultsServiceProvider['getAnomaliesTableData'];
@ -24,9 +24,9 @@ export function getResultsServiceProvider({
getHasMlCapabilities,
}: SharedServicesChecks): ResultsServiceProvider {
return {
resultsServiceProvider(callAsCurrentUser: LegacyAPICaller, request: KibanaRequest) {
resultsServiceProvider(mlClusterClient: ILegacyScopedClusterClient, request: KibanaRequest) {
const hasMlCapabilities = getHasMlCapabilities(request);
const { getAnomaliesTableData } = resultsServiceProvider(callAsCurrentUser);
const { getAnomaliesTableData } = resultsServiceProvider(mlClusterClient);
return {
async getAnomaliesTableData(...args) {
isFullLicense();


@ -4,7 +4,7 @@
* you may not use this file except in compliance with the Elastic License.
*/
import { LegacyAPICaller, KibanaRequest } from 'kibana/server';
import { ILegacyScopedClusterClient, KibanaRequest } from 'kibana/server';
import { SearchResponse, SearchParams } from 'elasticsearch';
import { MlServerLicense } from '../../lib/license';
import { CloudSetup } from '../../../../cloud/server';
@ -18,7 +18,7 @@ import { SharedServicesChecks } from '../shared_services';
export interface MlSystemProvider {
mlSystemProvider(
callAsCurrentUser: LegacyAPICaller,
mlClusterClient: ILegacyScopedClusterClient,
request: KibanaRequest
): {
mlCapabilities(): Promise<MlCapabilitiesResponse>;
@ -35,8 +35,9 @@ export function getMlSystemProvider(
resolveMlCapabilities: ResolveMlCapabilities
): MlSystemProvider {
return {
mlSystemProvider(callAsCurrentUser: LegacyAPICaller, request: KibanaRequest) {
mlSystemProvider(mlClusterClient: ILegacyScopedClusterClient, request: KibanaRequest) {
// const hasMlCapabilities = getHasMlCapabilities(request);
const { callAsCurrentUser, callAsInternalUser } = mlClusterClient;
return {
async mlCapabilities() {
isMinimumLicense();
@ -52,7 +53,7 @@ export function getMlSystemProvider(
}
const { getCapabilities } = capabilitiesProvider(
callAsCurrentUser,
mlClusterClient,
mlCapabilities,
mlLicense,
isMlEnabledInSpace
@ -62,7 +63,7 @@ export function getMlSystemProvider(
async mlInfo(): Promise<MlInfoResponse> {
isMinimumLicense();
const info = await callAsCurrentUser('ml.info');
const info = await callAsInternalUser('ml.info');
const cloudId = cloud && cloud.cloudId;
return {
...info,


@ -6,13 +6,13 @@
import dateMath from '@elastic/datemath';
import { LegacyAPICaller, KibanaRequest } from '../../../../../../../src/core/server';
import { ILegacyScopedClusterClient, KibanaRequest } from '../../../../../../../src/core/server';
import { MlPluginSetup } from '../../../../../ml/server';
import { getAnomalies } from '../../machine_learning';
export const findMlSignals = async ({
ml,
callCluster,
clusterClient,
request,
jobId,
anomalyThreshold,
@ -20,14 +20,14 @@ export const findMlSignals = async ({
to,
}: {
ml: MlPluginSetup;
callCluster: LegacyAPICaller;
clusterClient: ILegacyScopedClusterClient;
request: KibanaRequest;
jobId: string;
anomalyThreshold: number;
from: string;
to: string;
}) => {
const { mlAnomalySearch } = ml.mlSystemProvider(callCluster, request);
const { mlAnomalySearch } = ml.mlSystemProvider(clusterClient, request);
const params = {
jobIds: [jobId],
threshold: anomalyThreshold,


@ -163,11 +163,11 @@ export const signalRulesAlertType = ({
);
}
const scopedMlCallCluster = services.getScopedCallCluster(ml.mlClient);
const scopedClusterClient = services.getLegacyScopedClusterClient(ml.mlClient);
// Using fake KibanaRequest as it is needed to satisfy the ML Services API, but can be empty as it is
// currently unused by the jobsSummary function.
const summaryJobs = await (
await ml.jobServiceProvider(scopedMlCallCluster, ({} as unknown) as KibanaRequest)
await ml.jobServiceProvider(scopedClusterClient, ({} as unknown) as KibanaRequest)
).jobsSummary([machineLearningJobId]);
const jobSummary = summaryJobs.find((job) => job.id === machineLearningJobId);
@ -185,7 +185,7 @@ export const signalRulesAlertType = ({
const anomalyResults = await findMlSignals({
ml,
callCluster: scopedMlCallCluster,
clusterClient: scopedClusterClient,
// Using fake KibanaRequest as it is needed to satisfy the ML Services API, but can be empty as it is
// currently unused by the mlAnomalySearch function.
request: ({} as unknown) as KibanaRequest,


@ -114,7 +114,7 @@ export const isMlAdmin = async ({
request: KibanaRequest;
ml: MlPluginSetup;
}): Promise<boolean> => {
const scopedMlClient = ml.mlClient.asScoped(request).callAsCurrentUser;
const scopedMlClient = ml.mlClient.asScoped(request);
const mlCapabilities = await ml.mlSystemProvider(scopedMlClient, request).mlCapabilities();
return hasMlAdminPermissions(mlCapabilities);
};


@ -5,6 +5,7 @@
*/
import { SearchParams } from 'elasticsearch';
import { ILegacyScopedClusterClient, KibanaRequest } from 'kibana/server';
import { LegacyAPICaller, SavedObjectsClient } from '../../../../../../src/core/server';
// eslint-disable-next-line @kbn/eslint/no-restricted-paths
@ -163,15 +164,25 @@ export const getRulesUsage = async (
export const getMlJobsUsage = async (ml: MlPluginSetup | undefined): Promise<MlJobsUsage> => {
let jobsUsage: MlJobsUsage = initialMlJobsUsage;
// Fake objects to be passed to ML functions.
// TODO - These ML functions should come from ML's setup contract
// and not be imported directly.
const fakeScopedClusterClient = {
callAsCurrentUser: ml?.mlClient.callAsInternalUser,
callAsInternalUser: ml?.mlClient.callAsInternalUser,
} as ILegacyScopedClusterClient;
const fakeSavedObjectsClient = {} as SavedObjectsClient;
const fakeRequest = {} as KibanaRequest;
if (ml) {
try {
const mlCaller = ml.mlClient.callAsInternalUser;
const modules = await new DataRecognizer(
mlCaller,
({} as unknown) as SavedObjectsClient
fakeScopedClusterClient,
fakeSavedObjectsClient,
fakeRequest
).listModules();
const moduleJobs = modules.flatMap((module) => module.jobs);
const jobs = await jobServiceProvider(mlCaller).jobsSummary(['siem']);
const jobs = await jobServiceProvider(fakeScopedClusterClient).jobsSummary(['siem']);
jobsUsage = jobs.reduce((usage, job) => {
const isElastic = moduleJobs.some((moduleJob) => moduleJob.id === job.id);


@ -34,7 +34,7 @@ export function registerFieldHistogramsRoutes({ router, license }: RouteDependen
try {
const resp = await getHistogramsForFields(
ctx.transform!.dataClient.callAsCurrentUser,
ctx.transform!.dataClient,
indexPatternTitle,
query,
fields,


@ -141,11 +141,11 @@ export function defineActionTypes(
callClusterError = e;
}
// Call scoped cluster
const callScopedCluster = services.getScopedCallCluster(clusterClient);
const scopedClusterClient = services.getLegacyScopedClusterClient(clusterClient);
let callScopedClusterSuccess = false;
let callScopedClusterError;
try {
await callScopedCluster('index', {
await scopedClusterClient.callAsCurrentUser('index', {
index: params.callClusterAuthorizationIndex,
refresh: 'wait_for',
body: {


@ -191,11 +191,11 @@ export function defineAlertTypes(
callClusterError = e;
}
// Call scoped cluster
const callScopedCluster = services.getScopedCallCluster(clusterClient);
const scopedClusterClient = services.getLegacyScopedClusterClient(clusterClient);
let callScopedClusterSuccess = false;
let callScopedClusterError;
try {
await callScopedCluster('index', {
await scopedClusterClient.callAsCurrentUser('index', {
index: params.callClusterAuthorizationIndex,
refresh: 'wait_for',
body: {


@ -35,6 +35,7 @@ export default function ({ getService }: FtrProviderContext) {
logs: ['all', 'read'],
uptime: ['all', 'read'],
apm: ['all', 'read'],
ml: ['all', 'read'],
siem: ['all', 'read'],
ingestManager: ['all', 'read'],
},


@ -33,6 +33,7 @@ export default function ({ getService }: FtrProviderContext) {
logs: ['all', 'read'],
uptime: ['all', 'read'],
apm: ['all', 'read'],
ml: ['all', 'read'],
siem: ['all', 'read'],
ingestManager: ['all', 'read'],
},