kibana/x-pack/plugins/ml/server/routes/inference.ts
Dima Arnautov 8f7d213944
[ML] Inference models management (#74978)
* [ML] init tabs

* [ML] init inference API service in UI

* [ML] server-side routes

* [ML] basic table

* [ML] support deletion

* [ML] delete multiple models

* [ML] WIP expanded row

* [ML] fix types

* [ML] expanded row

* [ML] fix types

* [ML] fix i18n id

* [ML] change server-side permission check

* [ML] refactor types

* [ML] show success toast on model deletion, fix models counter

* [ML] update expanded row

* [ML] pipelines stats

* [ML] use refresh observable

* [ML] endpoint to fetch associated pipelines

* [ML] update the endpoint to fetch associated pipelines

* [ML] show pipelines definition in expanded row

* [ML] change stats layout

* [ML] fix headers

* [ML] change breadcrumb title

* [ML] fetch models config with pipelines

* [ML] change default size to 1000

* [ML] fix collections keys, fix double fetch on initial page load

* [ML] adjust models deletion text

* [ML] fix DFA jobs on the management page

* [ML] small tabs in expanded row

* [ML] fix headers text

* [ML] fix models fetching without pipelines get permissions

* [ML] stats rendering as a description list

* [ML] fix i18n id

* [ML] remove an extra copyright comment, add selectable messages

* [ML] update stats on refresh
2020-08-19 16:22:26 +02:00

163 lines
4.9 KiB
TypeScript

/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License;
* you may not use this file except in compliance with the Elastic License.
*/
import { RouteInitialization } from '../types';
import { wrapError } from '../client/error_wrapper';
import {
getInferenceQuerySchema,
modelIdSchema,
optionalModelIdSchema,
} from './schemas/inference_schema';
import { modelsProvider } from '../models/data_frame_analytics';
import { InferenceConfigResponse } from '../../common/types/inference';
/**
 * Registers the ML inference-model HTTP routes (get configs, get stats,
 * get associated pipelines, delete) on the Kibana router. All routes are
 * gated behind a full ML license and the data-frame-analytics capabilities.
 */
export function inferenceRoutes({ router, mlLicense }: RouteInitialization) {
  /**
   * @apiGroup Inference
   *
   * @api {get} /api/ml/inference/:modelId Get info of a trained inference model
   * @apiName GetInferenceModel
   * @apiDescription Retrieves configuration information for a trained inference model.
   */
  router.get(
    {
      // modelId is optional: omitting it returns all models
      path: '/api/ml/inference/{modelId?}',
      validate: {
        params: optionalModelIdSchema,
        query: getInferenceQuerySchema,
      },
      options: {
        tags: ['access:ml:canGetDataFrameAnalytics'],
      },
    },
    mlLicense.fullLicenseAPIGuard(async ({ client, request, response }) => {
      try {
        const { modelId } = request.params;
        // `with_pipelines` is consumed here; every other query param is
        // forwarded to the Elasticsearch getTrainedModels API unchanged.
        const { with_pipelines: withPipelines, ...query } = request.query;
        const { body } = await client.asInternalUser.ml.getTrainedModels<InferenceConfigResponse>({
          // large default page size so all models come back in one request
          size: 1000,
          ...query,
          ...(modelId ? { model_id: modelId } : {}),
        });
        const result = body.trained_model_configs;
        try {
          if (withPipelines) {
            // enrich each model config with the ingest pipelines that reference it
            const pipelinesResponse = await modelsProvider(client).getModelsPipelines(
              result.map(({ model_id: id }: { model_id: string }) => id)
            );
            for (const model of result) {
              // Map.get returns undefined for models with no associated
              // pipelines — guard instead of using a non-null assertion.
              const pipelines = pipelinesResponse.get(model.model_id);
              if (pipelines) {
                model.pipelines = pipelines;
              }
            }
          }
        } catch (e) {
          // the user might not have required permissions to fetch pipelines;
          // log the failure and still return the bare model configs
          // eslint-disable-next-line no-console
          console.error(e);
        }
        return response.ok({
          body: result,
        });
      } catch (e) {
        return response.customError(wrapError(e));
      }
    })
  );
  /**
   * @apiGroup Inference
   *
   * @api {get} /api/ml/inference/:modelId/_stats Get stats of a trained inference model
   * @apiName GetInferenceModelStats
   * @apiDescription Retrieves usage information for trained inference models.
   */
  router.get(
    {
      path: '/api/ml/inference/{modelId}/_stats',
      validate: {
        params: modelIdSchema,
      },
      options: {
        tags: ['access:ml:canGetDataFrameAnalytics'],
      },
    },
    mlLicense.fullLicenseAPIGuard(async ({ client, request, response }) => {
      try {
        const { modelId } = request.params;
        const { body } = await client.asInternalUser.ml.getTrainedModelsStats({
          // modelId is required by the schema; the spread stays defensive
          ...(modelId ? { model_id: modelId } : {}),
        });
        return response.ok({
          body,
        });
      } catch (e) {
        return response.customError(wrapError(e));
      }
    })
  );
  /**
   * @apiGroup Inference
   *
   * @api {get} /api/ml/inference/:modelId/pipelines Get model pipelines
   * @apiName GetModelPipelines
   * @apiDescription Retrieves pipelines associated with a model
   */
  router.get(
    {
      path: '/api/ml/inference/{modelId}/pipelines',
      validate: {
        params: modelIdSchema,
      },
      options: {
        tags: ['access:ml:canGetDataFrameAnalytics'],
      },
    },
    mlLicense.fullLicenseAPIGuard(async ({ client, request, response }) => {
      try {
        // modelId may be a comma-separated list of model ids
        const { modelId } = request.params;
        const result = await modelsProvider(client).getModelsPipelines(modelId.split(','));
        return response.ok({
          // flatten the Map into an array of { model_id, pipelines } objects
          body: [...result].map(([id, pipelines]) => ({ model_id: id, pipelines })),
        });
      } catch (e) {
        return response.customError(wrapError(e));
      }
    })
  );
  /**
   * @apiGroup Inference
   *
   * @api {delete} /api/ml/inference/:modelId Delete a trained inference model
   * @apiName DeleteInferenceModel
   * @apiDescription Deletes an existing trained inference model that is currently not referenced by an ingest pipeline.
   */
  router.delete(
    {
      path: '/api/ml/inference/{modelId}',
      validate: {
        params: modelIdSchema,
      },
      options: {
        tags: ['access:ml:canDeleteDataFrameAnalytics'],
      },
    },
    mlLicense.fullLicenseAPIGuard(async ({ client, request, response }) => {
      try {
        const { modelId } = request.params;
        const { body } = await client.asInternalUser.ml.deleteTrainedModel({
          model_id: modelId,
        });
        return response.ok({
          body,
        });
      } catch (e) {
        return response.customError(wrapError(e));
      }
    })
  );
}