Remove / Consolidate legacy usage adoption metrics (#100480)

* Remove legacy detection rule stat summaries.

* Remove ML usage summary and consolidate with ML metric telemetry (see the shape sketch below).

* Move legacy helper constructs into index.

* Separate rule logic from ML logic. Add ML unit tests.

* Abstract types away into their own file.

* Update telemetry schema.

Co-authored-by: Kibana Machine <42973632+kibanamachine@users.noreply.github.com>
Pete Hampton authored on 2021-05-27 20:10:01 +01:00, committed by GitHub
parent 71379b755a
commit 3149d46301
12 changed files with 870 additions and 1068 deletions
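For orientation, here is a minimal sketch of the consolidated ml_jobs telemetry shape that this change introduces, trimmed down from the MlJobUsage, MlJobsUsage, MlJobMetric, and FeatureUsage interfaces in the new types.ts diff below. The *Sketch names are hypothetical, and the sample values are lifted from the updated unit test expectations; this is an illustration, not code from the PR.

// Illustrative TypeScript sketch only: field lists are trimmed relative to the
// real interfaces added in this commit, and the *Sketch names are hypothetical.
interface FeatureUsage {
  enabled: number;
  disabled: number;
}

interface MlJobsUsageSketch {
  custom: FeatureUsage;
  elastic: FeatureUsage;
}

interface MlJobMetricSketch {
  job_id: string;
  state: string;
  // data_counts, model_size_stats, timing_stats, and datafeed are omitted here
}

// Previously, the enabled/disabled counts lived under a separate top-level
// `detections` usage payload; after this change they ride along with the
// per-job metrics under `detectionMetrics.ml_jobs`.
interface MlJobUsageSketch {
  ml_job_usage: MlJobsUsageSketch;
  ml_job_metrics: MlJobMetricSketch[];
}

// Example payload, with values matching the updated test expectations.
const exampleMlJobs: MlJobUsageSketch = {
  ml_job_usage: {
    custom: { enabled: 1, disabled: 1 },
    elastic: { enabled: 1, disabled: 1 },
  },
  ml_job_metrics: [{ job_id: 'high_distinct_count_error_message', state: 'closed' }],
};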


@@ -8,17 +8,11 @@
import { CoreSetup, SavedObjectsClientContract } from '../../../../../src/core/server';
import { CollectorFetchContext } from '../../../../../src/plugins/usage_collection/server';
import { CollectorDependencies } from './types';
import {
DetectionsUsage,
fetchDetectionsUsage,
defaultDetectionsUsage,
fetchDetectionsMetrics,
} from './detections';
import { fetchDetectionsMetrics } from './detections';
import { EndpointUsage, getEndpointTelemetryFromFleet } from './endpoints';
export type RegisterCollector = (deps: CollectorDependencies) => void;
export interface UsageData {
detections: DetectionsUsage;
endpoints: EndpointUsage | {};
detectionMetrics: {};
}
@@ -40,55 +34,10 @@ export const registerCollector: RegisterCollector = ({
if (!usageCollection) {
return;
}
const collector = usageCollection.makeUsageCollector<UsageData>({
type: 'security_solution',
schema: {
detections: {
detection_rules: {
custom: {
enabled: {
type: 'long',
_meta: { description: 'The number of custom detection rules enabled' },
},
disabled: {
type: 'long',
_meta: { description: 'The number of custom detection rules disabled' },
},
},
elastic: {
enabled: {
type: 'long',
_meta: { description: 'The number of elastic prebuilt detection rules enabled' },
},
disabled: {
type: 'long',
_meta: { description: 'The number of elastic prebuilt detection rules disabled' },
},
},
},
ml_jobs: {
custom: {
enabled: {
type: 'long',
_meta: { description: 'The number of custom ML jobs rules enabled' },
},
disabled: {
type: 'long',
_meta: { description: 'The number of custom ML jobs rules disabled' },
},
},
elastic: {
enabled: {
type: 'long',
_meta: { description: 'The number of elastic provided ML jobs rules enabled' },
},
disabled: {
type: 'long',
_meta: { description: 'The number of elastic provided ML jobs rules disabled' },
},
},
},
},
detectionMetrics: {
detection_rules: {
detection_rule_usage: {
@@ -248,172 +197,199 @@ export const registerCollector: RegisterCollector = ({
},
},
ml_jobs: {
type: 'array',
items: {
job_id: {
type: 'keyword',
_meta: { description: 'Identifier for the anomaly detection job' },
},
open_time: {
type: 'keyword',
_meta: {
description: 'For open jobs only, the elapsed time for which the job has been open',
ml_job_usage: {
custom: {
enabled: {
type: 'long',
_meta: { description: 'The number of custom ML jobs rules enabled' },
},
disabled: {
type: 'long',
_meta: { description: 'The number of custom ML jobs rules disabled' },
},
},
create_time: {
type: 'keyword',
_meta: { description: 'The time the job was created' },
},
finished_time: {
type: 'keyword',
_meta: {
description: 'If the job closed or failed, this is the time the job finished',
elastic: {
enabled: {
type: 'long',
_meta: { description: 'The number of elastic provided ML jobs rules enabled' },
},
disabled: {
type: 'long',
_meta: { description: 'The number of elastic provided ML jobs rules disabled' },
},
},
state: {
type: 'keyword',
_meta: { description: 'The status of the anomaly detection job' },
},
data_counts: {
bucket_count: {
type: 'long',
_meta: { description: 'The number of buckets processed' },
},
ml_job_metrics: {
type: 'array',
items: {
job_id: {
type: 'keyword',
_meta: { description: 'Identifier for the anomaly detection job' },
},
empty_bucket_count: {
type: 'long',
_meta: { description: 'The number of buckets which did not contain any data' },
},
input_bytes: {
type: 'long',
_meta: {
description:
'The number of bytes of input data posted to the anomaly detection job',
},
},
input_record_count: {
type: 'long',
_meta: {
description: 'The number of input documents posted to the anomaly detection job',
},
},
last_data_time: {
type: 'long',
_meta: {
description:
'The timestamp at which data was last analyzed, according to server time',
},
},
processed_record_count: {
type: 'long',
_meta: {
description:
'The number of input documents that have been processed by the anomaly detection job',
},
},
},
model_size_stats: {
bucket_allocation_failures_count: {
type: 'long',
_meta: {
description:
'The number of buckets for which new entities in incoming data were not processed due to insufficient model memory',
},
},
model_bytes: {
type: 'long',
_meta: { description: 'The number of bytes of memory used by the models' },
},
model_bytes_exceeded: {
type: 'long',
_meta: {
description:
'The number of bytes over the high limit for memory usage at the last allocation failure',
},
},
model_bytes_memory_limit: {
type: 'long',
_meta: {
description:
'The upper limit for model memory usage, checked on increasing values',
},
},
peak_model_bytes: {
type: 'long',
_meta: {
description: 'The peak number of bytes of memory ever used by the models',
},
},
},
timing_stats: {
bucket_count: {
type: 'long',
_meta: { description: 'The number of buckets processed' },
},
exponential_average_bucket_processing_time_ms: {
type: 'long',
_meta: {
description:
'Exponential moving average of all bucket processing times, in milliseconds',
},
},
exponential_average_bucket_processing_time_per_hour_ms: {
type: 'long',
_meta: {
description:
'Exponentially-weighted moving average of bucket processing times calculated in a 1 hour time window, in milliseconds',
},
},
maximum_bucket_processing_time_ms: {
type: 'long',
_meta: {
description: 'Maximum among all bucket processing times, in milliseconds',
},
},
minimum_bucket_processing_time_ms: {
type: 'long',
_meta: {
description: 'Minimum among all bucket processing times, in milliseconds',
},
},
total_bucket_processing_time_ms: {
type: 'long',
_meta: { description: 'Sum of all bucket processing times, in milliseconds' },
},
},
datafeed: {
datafeed_id: {
open_time: {
type: 'keyword',
_meta: {
description: 'A numerical character string that uniquely identifies the datafeed',
description:
'For open jobs only, the elapsed time for which the job has been open',
},
},
create_time: {
type: 'keyword',
_meta: { description: 'The time the job was created' },
},
finished_time: {
type: 'keyword',
_meta: {
description: 'If the job closed or failed, this is the time the job finished',
},
},
state: {
type: 'keyword',
_meta: { description: 'The status of the datafeed' },
_meta: { description: 'The status of the anomaly detection job' },
},
timing_stats: {
average_search_time_per_bucket_ms: {
type: 'long',
_meta: { description: 'The average search time per bucket, in milliseconds' },
},
data_counts: {
bucket_count: {
type: 'long',
_meta: { description: 'The number of buckets processed' },
},
exponential_average_search_time_per_hour_ms: {
empty_bucket_count: {
type: 'long',
_meta: { description: 'The number of buckets which did not contain any data' },
},
input_bytes: {
type: 'long',
_meta: {
description: 'The exponential average search time per hour, in milliseconds',
description:
'The number of bytes of input data posted to the anomaly detection job',
},
},
search_count: {
type: 'long',
_meta: { description: 'The number of searches run by the datafeed' },
},
total_search_time_ms: {
input_record_count: {
type: 'long',
_meta: {
description: 'The total time the datafeed spent searching, in milliseconds',
description:
'The number of input documents posted to the anomaly detection job',
},
},
last_data_time: {
type: 'long',
_meta: {
description:
'The timestamp at which data was last analyzed, according to server time',
},
},
processed_record_count: {
type: 'long',
_meta: {
description:
'The number of input documents that have been processed by the anomaly detection job',
},
},
},
model_size_stats: {
bucket_allocation_failures_count: {
type: 'long',
_meta: {
description:
'The number of buckets for which new entities in incoming data were not processed due to insufficient model memory',
},
},
model_bytes: {
type: 'long',
_meta: { description: 'The number of bytes of memory used by the models' },
},
model_bytes_exceeded: {
type: 'long',
_meta: {
description:
'The number of bytes over the high limit for memory usage at the last allocation failure',
},
},
model_bytes_memory_limit: {
type: 'long',
_meta: {
description:
'The upper limit for model memory usage, checked on increasing values',
},
},
peak_model_bytes: {
type: 'long',
_meta: {
description: 'The peak number of bytes of memory ever used by the models',
},
},
},
timing_stats: {
bucket_count: {
type: 'long',
_meta: { description: 'The number of buckets processed' },
},
exponential_average_bucket_processing_time_ms: {
type: 'long',
_meta: {
description:
'Exponential moving average of all bucket processing times, in milliseconds',
},
},
exponential_average_bucket_processing_time_per_hour_ms: {
type: 'long',
_meta: {
description:
'Exponentially-weighted moving average of bucket processing times calculated in a 1 hour time window, in milliseconds',
},
},
maximum_bucket_processing_time_ms: {
type: 'long',
_meta: {
description: 'Maximum among all bucket processing times, in milliseconds',
},
},
minimum_bucket_processing_time_ms: {
type: 'long',
_meta: {
description: 'Minimum among all bucket processing times, in milliseconds',
},
},
total_bucket_processing_time_ms: {
type: 'long',
_meta: { description: 'Sum of all bucket processing times, in milliseconds' },
},
},
datafeed: {
datafeed_id: {
type: 'keyword',
_meta: {
description:
'A numerical character string that uniquely identifies the datafeed',
},
},
state: {
type: 'keyword',
_meta: { description: 'The status of the datafeed' },
},
timing_stats: {
average_search_time_per_bucket_ms: {
type: 'long',
_meta: { description: 'The average search time per bucket, in milliseconds' },
},
bucket_count: {
type: 'long',
_meta: { description: 'The number of buckets processed' },
},
exponential_average_search_time_per_hour_ms: {
type: 'long',
_meta: {
description: 'The exponential average search time per hour, in milliseconds',
},
},
search_count: {
type: 'long',
_meta: { description: 'The number of searches run by the datafeed' },
},
total_search_time_ms: {
type: 'long',
_meta: {
description: 'The total time the datafeed spent searching, in milliseconds',
},
},
},
},
@@ -476,14 +452,12 @@ export const registerCollector: RegisterCollector = ({
fetch: async ({ esClient }: CollectorFetchContext): Promise<UsageData> => {
const internalSavedObjectsClient = await getInternalSavedObjectsClient(core);
const savedObjectsClient = (internalSavedObjectsClient as unknown) as SavedObjectsClientContract;
const [detections, detectionMetrics, endpoints] = await Promise.allSettled([
fetchDetectionsUsage(kibanaIndex, esClient, ml, savedObjectsClient),
const [detectionMetrics, endpoints] = await Promise.allSettled([
fetchDetectionsMetrics(kibanaIndex, signalsIndex, esClient, ml, savedObjectsClient),
getEndpointTelemetryFromFleet(savedObjectsClient, endpointAppContext, esClient),
]);
return {
detections: detections.status === 'fulfilled' ? detections.value : defaultDetectionsUsage,
detectionMetrics: detectionMetrics.status === 'fulfilled' ? detectionMetrics.value : {},
endpoints: endpoints.status === 'fulfilled' ? endpoints.value : {},
};


@@ -0,0 +1,74 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { initialMlJobsUsage, updateMlJobsUsage } from './detection_ml_helpers';
describe('Security Machine Learning usage metrics', () => {
describe('Updates metrics with job information', () => {
it('Should update ML total for elastic rules', async () => {
const initialUsage = initialMlJobsUsage;
const isElastic = true;
const isEnabled = true;
const updatedUsage = updateMlJobsUsage({ isElastic, isEnabled }, initialUsage);
expect(updatedUsage).toEqual(
expect.objectContaining({
custom: {
disabled: 0,
enabled: 0,
},
elastic: {
disabled: 0,
enabled: 1,
},
})
);
});
it('Should update ML total for custom rules', async () => {
const initialUsage = initialMlJobsUsage;
const isElastic = false;
const isEnabled = true;
const updatedUsage = updateMlJobsUsage({ isElastic, isEnabled }, initialUsage);
expect(updatedUsage).toEqual(
expect.objectContaining({
custom: {
disabled: 0,
enabled: 1,
},
elastic: {
disabled: 0,
enabled: 0,
},
})
);
});
it('Should update ML total for both elastic and custom rules', async () => {
const initialUsage = initialMlJobsUsage;
let updatedUsage = updateMlJobsUsage({ isElastic: true, isEnabled: true }, initialUsage);
updatedUsage = updateMlJobsUsage({ isElastic: false, isEnabled: true }, updatedUsage);
expect(updatedUsage).toEqual(
expect.objectContaining({
custom: {
disabled: 0,
enabled: 1,
},
elastic: {
disabled: 0,
enabled: 1,
},
})
);
});
});
});


@@ -0,0 +1,175 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { KibanaRequest, SavedObjectsClientContract } from '../../../../../../src/core/server';
import { DatafeedStats, Job, MlPluginSetup } from '../../../../ml/server';
import { isJobStarted } from '../../../common/machine_learning/helpers';
import { isSecurityJob } from '../../../common/machine_learning/is_security_job';
import { DetectionsMetric, MlJobMetric, MlJobsUsage, MlJobUsage } from './types';
/**
* Default ml job usage count
*/
export const initialMlJobsUsage: MlJobsUsage = {
custom: {
enabled: 0,
disabled: 0,
},
elastic: {
enabled: 0,
disabled: 0,
},
};
export const updateMlJobsUsage = (jobMetric: DetectionsMetric, usage: MlJobsUsage): MlJobsUsage => {
const { isEnabled, isElastic } = jobMetric;
if (isEnabled && isElastic) {
return {
...usage,
elastic: {
...usage.elastic,
enabled: usage.elastic.enabled + 1,
},
};
} else if (!isEnabled && isElastic) {
return {
...usage,
elastic: {
...usage.elastic,
disabled: usage.elastic.disabled + 1,
},
};
} else if (isEnabled && !isElastic) {
return {
...usage,
custom: {
...usage.custom,
enabled: usage.custom.enabled + 1,
},
};
} else if (!isEnabled && !isElastic) {
return {
...usage,
custom: {
...usage.custom,
disabled: usage.custom.disabled + 1,
},
};
} else {
return usage;
}
};
export const getMlJobMetrics = async (
ml: MlPluginSetup | undefined,
savedObjectClient: SavedObjectsClientContract
): Promise<MlJobUsage> => {
let jobsUsage: MlJobsUsage = initialMlJobsUsage;
if (ml) {
try {
const fakeRequest = { headers: {} } as KibanaRequest;
const modules = await ml.modulesProvider(fakeRequest, savedObjectClient).listModules();
const moduleJobs = modules.flatMap((module) => module.jobs);
const jobs = await ml.jobServiceProvider(fakeRequest, savedObjectClient).jobsSummary();
jobsUsage = jobs.filter(isSecurityJob).reduce((usage, job) => {
const isElastic = moduleJobs.some((moduleJob) => moduleJob.id === job.id);
const isEnabled = isJobStarted(job.jobState, job.datafeedState);
return updateMlJobsUsage({ isElastic, isEnabled }, usage);
}, initialMlJobsUsage);
const jobsType = 'security';
const securityJobStats = await ml
.anomalyDetectorsProvider(fakeRequest, savedObjectClient)
.jobStats(jobsType);
const jobDetails = await ml
.anomalyDetectorsProvider(fakeRequest, savedObjectClient)
.jobs(jobsType);
const jobDetailsCache = new Map<string, Job>();
jobDetails.jobs.forEach((detail) => jobDetailsCache.set(detail.job_id, detail));
const datafeedStats = await ml
.anomalyDetectorsProvider(fakeRequest, savedObjectClient)
.datafeedStats();
const datafeedStatsCache = new Map<string, DatafeedStats>();
datafeedStats.datafeeds.forEach((datafeedStat) =>
datafeedStatsCache.set(`${datafeedStat.datafeed_id}`, datafeedStat)
);
const jobMetrics: MlJobMetric[] = securityJobStats.jobs.map((stat) => {
const jobId = stat.job_id;
const jobDetail = jobDetailsCache.get(stat.job_id);
const datafeed = datafeedStatsCache.get(`datafeed-${jobId}`);
return {
job_id: jobId,
open_time: stat.open_time,
create_time: jobDetail?.create_time,
finished_time: jobDetail?.finished_time,
state: stat.state,
data_counts: {
bucket_count: stat.data_counts.bucket_count,
empty_bucket_count: stat.data_counts.empty_bucket_count,
input_bytes: stat.data_counts.input_bytes,
input_record_count: stat.data_counts.input_record_count,
last_data_time: stat.data_counts.last_data_time,
processed_record_count: stat.data_counts.processed_record_count,
},
model_size_stats: {
bucket_allocation_failures_count:
stat.model_size_stats.bucket_allocation_failures_count,
memory_status: stat.model_size_stats.memory_status,
model_bytes: stat.model_size_stats.model_bytes,
model_bytes_exceeded: stat.model_size_stats.model_bytes_exceeded,
model_bytes_memory_limit: stat.model_size_stats.model_bytes_memory_limit,
peak_model_bytes: stat.model_size_stats.peak_model_bytes,
},
timing_stats: {
average_bucket_processing_time_ms: stat.timing_stats.average_bucket_processing_time_ms,
bucket_count: stat.timing_stats.bucket_count,
exponential_average_bucket_processing_time_ms:
stat.timing_stats.exponential_average_bucket_processing_time_ms,
exponential_average_bucket_processing_time_per_hour_ms:
stat.timing_stats.exponential_average_bucket_processing_time_per_hour_ms,
maximum_bucket_processing_time_ms: stat.timing_stats.maximum_bucket_processing_time_ms,
minimum_bucket_processing_time_ms: stat.timing_stats.minimum_bucket_processing_time_ms,
total_bucket_processing_time_ms: stat.timing_stats.total_bucket_processing_time_ms,
},
datafeed: {
datafeed_id: datafeed?.datafeed_id,
state: datafeed?.state,
timing_stats: {
bucket_count: datafeed?.timing_stats.bucket_count,
exponential_average_search_time_per_hour_ms:
datafeed?.timing_stats.exponential_average_search_time_per_hour_ms,
search_count: datafeed?.timing_stats.search_count,
total_search_time_ms: datafeed?.timing_stats.total_search_time_ms,
},
},
} as MlJobMetric;
});
return {
ml_job_usage: jobsUsage,
ml_job_metrics: jobMetrics,
};
} catch (e) {
// ignore failure, usage will be zeroed
}
}
return {
ml_job_usage: initialMlJobsUsage,
ml_job_metrics: [],
};
};


@@ -5,8 +5,8 @@
* 2.0.
*/
import { initialDetectionRulesUsage, updateDetectionRuleUsage } from './detections_metrics_helpers';
import { DetectionRuleMetric, DetectionRulesTypeUsage } from './index';
import { initialDetectionRulesUsage, updateDetectionRuleUsage } from './detection_rule_helpers';
import { DetectionRuleMetric, DetectionRulesTypeUsage } from './types';
import { v4 as uuid } from 'uuid';
const createStubRule = (


@@ -5,22 +5,18 @@
* 2.0.
*/
import {
ElasticsearchClient,
KibanaRequest,
SavedObjectsClientContract,
} from '../../../../../../src/core/server';
import { ElasticsearchClient, SavedObjectsClientContract } from '../../../../../../src/core/server';
import { SIGNALS_ID } from '../../../common/constants';
import { isElasticRule } from './index';
import {
AlertsAggregationResponse,
CasesSavedObject,
DetectionRulesTypeUsage,
DetectionRuleMetric,
DetectionRuleAdoption,
MlJobMetric,
} from './index';
import { SIGNALS_ID } from '../../../common/constants';
import { DatafeedStats, Job, MlPluginSetup } from '../../../../ml/server';
import { isElasticRule, RuleSearchParams, RuleSearchResult } from './detection_telemetry_helpers';
RuleSearchParams,
RuleSearchResult,
} from './types';
/**
* Default detection rule usage count, split by type + elastic/custom
@@ -288,91 +284,3 @@ export const getDetectionRuleMetrics = async (
detection_rule_usage: rulesUsage,
};
};
export const getMlJobMetrics = async (
ml: MlPluginSetup | undefined,
savedObjectClient: SavedObjectsClientContract
): Promise<MlJobMetric[]> => {
if (ml) {
try {
const fakeRequest = { headers: {} } as KibanaRequest;
const jobsType = 'security';
const securityJobStats = await ml
.anomalyDetectorsProvider(fakeRequest, savedObjectClient)
.jobStats(jobsType);
const jobDetails = await ml
.anomalyDetectorsProvider(fakeRequest, savedObjectClient)
.jobs(jobsType);
const jobDetailsCache = new Map<string, Job>();
jobDetails.jobs.forEach((detail) => jobDetailsCache.set(detail.job_id, detail));
const datafeedStats = await ml
.anomalyDetectorsProvider(fakeRequest, savedObjectClient)
.datafeedStats();
const datafeedStatsCache = new Map<string, DatafeedStats>();
datafeedStats.datafeeds.forEach((datafeedStat) =>
datafeedStatsCache.set(`${datafeedStat.datafeed_id}`, datafeedStat)
);
return securityJobStats.jobs.map((stat) => {
const jobId = stat.job_id;
const jobDetail = jobDetailsCache.get(stat.job_id);
const datafeed = datafeedStatsCache.get(`datafeed-${jobId}`);
return {
job_id: jobId,
open_time: stat.open_time,
create_time: jobDetail?.create_time,
finished_time: jobDetail?.finished_time,
state: stat.state,
data_counts: {
bucket_count: stat.data_counts.bucket_count,
empty_bucket_count: stat.data_counts.empty_bucket_count,
input_bytes: stat.data_counts.input_bytes,
input_record_count: stat.data_counts.input_record_count,
last_data_time: stat.data_counts.last_data_time,
processed_record_count: stat.data_counts.processed_record_count,
},
model_size_stats: {
bucket_allocation_failures_count:
stat.model_size_stats.bucket_allocation_failures_count,
memory_status: stat.model_size_stats.memory_status,
model_bytes: stat.model_size_stats.model_bytes,
model_bytes_exceeded: stat.model_size_stats.model_bytes_exceeded,
model_bytes_memory_limit: stat.model_size_stats.model_bytes_memory_limit,
peak_model_bytes: stat.model_size_stats.peak_model_bytes,
},
timing_stats: {
average_bucket_processing_time_ms: stat.timing_stats.average_bucket_processing_time_ms,
bucket_count: stat.timing_stats.bucket_count,
exponential_average_bucket_processing_time_ms:
stat.timing_stats.exponential_average_bucket_processing_time_ms,
exponential_average_bucket_processing_time_per_hour_ms:
stat.timing_stats.exponential_average_bucket_processing_time_per_hour_ms,
maximum_bucket_processing_time_ms: stat.timing_stats.maximum_bucket_processing_time_ms,
minimum_bucket_processing_time_ms: stat.timing_stats.minimum_bucket_processing_time_ms,
total_bucket_processing_time_ms: stat.timing_stats.total_bucket_processing_time_ms,
},
datafeed: {
datafeed_id: datafeed?.datafeed_id,
state: datafeed?.state,
timing_stats: {
bucket_count: datafeed?.timing_stats.bucket_count,
exponential_average_search_time_per_hour_ms:
datafeed?.timing_stats.exponential_average_search_time_per_hour_ms,
search_count: datafeed?.timing_stats.search_count,
total_search_time_ms: datafeed?.timing_stats.total_search_time_ms,
},
},
} as MlJobMetric;
});
} catch (e) {
// ignore failure, usage will be zeroed
}
}
return [];
};


@@ -1,46 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import { INTERNAL_IMMUTABLE_KEY } from '../../../common/constants';
export const isElasticRule = (tags: string[] = []) =>
tags.includes(`${INTERNAL_IMMUTABLE_KEY}:true`);
interface RuleSearchBody {
query: {
bool: {
filter: {
term: { [key: string]: string };
};
};
};
}
export interface RuleSearchParams {
body: RuleSearchBody;
filterPath: string[];
ignoreUnavailable: boolean;
index: string;
size: number;
}
export interface RuleSearchResult {
alert: {
name: string;
enabled: boolean;
tags: string[];
createdAt: string;
updatedAt: string;
params: DetectionRuleParms;
};
}
interface DetectionRuleParms {
ruleId: string;
version: string;
type: string;
}


@@ -5,8 +5,6 @@
* 2.0.
*/
import { INTERNAL_IMMUTABLE_KEY } from '../../../common/constants';
export const getMockJobSummaryResponse = () => [
{
id: 'linux_anomalous_network_activity_ecs',
@@ -162,20 +160,6 @@ export const getMockListModulesResponse = () => [
},
];
export const getMockRulesResponse = () => ({
hits: {
hits: [
{ _source: { alert: { enabled: true, tags: [`${INTERNAL_IMMUTABLE_KEY}:true`] } } },
{ _source: { alert: { enabled: true, tags: [`${INTERNAL_IMMUTABLE_KEY}:false`] } } },
{ _source: { alert: { enabled: false, tags: [`${INTERNAL_IMMUTABLE_KEY}:true`] } } },
{ _source: { alert: { enabled: true, tags: [`${INTERNAL_IMMUTABLE_KEY}:true`] } } },
{ _source: { alert: { enabled: false, tags: [`${INTERNAL_IMMUTABLE_KEY}:false`] } } },
{ _source: { alert: { enabled: false, tags: [`${INTERNAL_IMMUTABLE_KEY}:true`] } } },
{ _source: { alert: { enabled: false, tags: [`${INTERNAL_IMMUTABLE_KEY}:true`] } } },
],
},
});
export const getMockMlJobDetailsResponse = () => ({
count: 20,
jobs: [


@@ -11,10 +11,11 @@ import {
savedObjectsClientMock,
} from '../../../../../../src/core/server/mocks';
import { mlServicesMock } from '../../lib/machine_learning/mocks';
import { fetchDetectionsMetrics } from './index';
import { initialMlJobsUsage } from './detection_ml_helpers';
import {
getMockJobSummaryResponse,
getMockListModulesResponse,
getMockRulesResponse,
getMockMlJobDetailsResponse,
getMockMlJobStatsResponse,
getMockMlDatafeedStatsResponse,
@@ -22,7 +23,6 @@ import {
getMockRuleAlertsResponse,
getMockAlertCasesResponse,
} from './detections.mocks';
import { fetchDetectionsUsage, fetchDetectionsMetrics } from './index';
const savedObjectsClient = savedObjectsClientMock.create();
@@ -30,89 +30,6 @@ describe('Detections Usage and Metrics', () => {
let esClientMock: jest.Mocked<ElasticsearchClient>;
let mlMock: ReturnType<typeof mlServicesMock.createSetupContract>;
describe('fetchDetectionsUsage()', () => {
beforeEach(() => {
esClientMock = elasticsearchServiceMock.createClusterClient().asInternalUser;
mlMock = mlServicesMock.createSetupContract();
});
it('returns zeroed counts if both calls are empty', async () => {
const result = await fetchDetectionsUsage('', esClientMock, mlMock, savedObjectsClient);
expect(result).toEqual({
detection_rules: {
custom: {
enabled: 0,
disabled: 0,
},
elastic: {
enabled: 0,
disabled: 0,
},
},
ml_jobs: {
custom: {
enabled: 0,
disabled: 0,
},
elastic: {
enabled: 0,
disabled: 0,
},
},
});
});
it('tallies rules data given rules results', async () => {
(esClientMock.search as jest.Mock).mockResolvedValue({ body: getMockRulesResponse() });
const result = await fetchDetectionsUsage('', esClientMock, mlMock, savedObjectsClient);
expect(result).toEqual(
expect.objectContaining({
detection_rules: {
custom: {
enabled: 1,
disabled: 1,
},
elastic: {
enabled: 2,
disabled: 3,
},
},
})
);
});
it('tallies jobs data given jobs results', async () => {
const mockJobSummary = jest.fn().mockResolvedValue(getMockJobSummaryResponse());
const mockListModules = jest.fn().mockResolvedValue(getMockListModulesResponse());
mlMock.modulesProvider.mockReturnValue(({
listModules: mockListModules,
} as unknown) as ReturnType<typeof mlMock.modulesProvider>);
mlMock.jobServiceProvider.mockReturnValue({
jobsSummary: mockJobSummary,
});
const result = await fetchDetectionsUsage('', esClientMock, mlMock, savedObjectsClient);
expect(result).toEqual(
expect.objectContaining({
ml_jobs: {
custom: {
enabled: 1,
disabled: 1,
},
elastic: {
enabled: 1,
disabled: 1,
},
},
})
);
});
});
describe('getDetectionRuleMetrics()', () => {
beforeEach(() => {
esClientMock = elasticsearchServiceMock.createClusterClient().asInternalUser;
@@ -171,7 +88,7 @@ describe('Detections Usage and Metrics', () => {
},
},
},
ml_jobs: [],
ml_jobs: { ml_job_metrics: [], ml_job_usage: initialMlJobsUsage },
})
);
});
@@ -246,7 +163,7 @@ describe('Detections Usage and Metrics', () => {
},
},
},
ml_jobs: [],
ml_jobs: { ml_job_metrics: [], ml_job_usage: initialMlJobsUsage },
})
);
});
@@ -308,7 +225,7 @@ describe('Detections Usage and Metrics', () => {
},
},
},
ml_jobs: [],
ml_jobs: { ml_job_metrics: [], ml_job_usage: initialMlJobsUsage },
})
);
});
@@ -383,7 +300,7 @@ describe('Detections Usage and Metrics', () => {
},
},
},
ml_jobs: [],
ml_jobs: { ml_job_metrics: [], ml_job_usage: initialMlJobsUsage },
})
);
});
@@ -404,12 +321,20 @@ describe('Detections Usage and Metrics', () => {
expect(result).toEqual(
expect.objectContaining({
ml_jobs: [],
ml_jobs: { ml_job_metrics: [], ml_job_usage: initialMlJobsUsage },
})
);
});
it('returns an ml job telemetry object from anomaly detectors provider', async () => {
const mockJobSummary = jest.fn().mockResolvedValue(getMockJobSummaryResponse());
const mockListModules = jest.fn().mockResolvedValue(getMockListModulesResponse());
mlMock.modulesProvider.mockReturnValue(({
listModules: mockListModules,
} as unknown) as ReturnType<typeof mlMock.modulesProvider>);
mlMock.jobServiceProvider.mockReturnValue({
jobsSummary: mockJobSummary,
});
const mockJobsResponse = jest.fn().mockResolvedValue(getMockMlJobDetailsResponse());
const mockJobStatsResponse = jest.fn().mockResolvedValue(getMockMlJobStatsResponse());
const mockDatafeedStatsResponse = jest
@@ -426,49 +351,61 @@ describe('Detections Usage and Metrics', () => {
expect(result).toEqual(
expect.objectContaining({
ml_jobs: [
{
job_id: 'high_distinct_count_error_message',
create_time: 1603838214983,
finished_time: 1611739871669,
state: 'closed',
data_counts: {
bucket_count: 8612,
empty_bucket_count: 8590,
input_bytes: 45957,
input_record_count: 162,
last_data_time: 1610470367123,
processed_record_count: 162,
ml_jobs: {
ml_job_usage: {
custom: {
disabled: 1,
enabled: 1,
},
model_size_stats: {
bucket_allocation_failures_count: 0,
memory_status: 'ok',
model_bytes: 72574,
model_bytes_exceeded: 0,
model_bytes_memory_limit: 16777216,
peak_model_bytes: 78682,
},
timing_stats: {
average_bucket_processing_time_ms: 0.4900837644740133,
bucket_count: 16236,
exponential_average_bucket_processing_time_ms: 0.23614068552903306,
exponential_average_bucket_processing_time_per_hour_ms: 1.5551298175461634,
maximum_bucket_processing_time_ms: 392,
minimum_bucket_processing_time_ms: 0,
total_bucket_processing_time_ms: 7957.00000000008,
},
datafeed: {
datafeed_id: 'datafeed-high_distinct_count_error_message',
state: 'stopped',
timing_stats: {
bucket_count: 8612,
exponential_average_search_time_per_hour_ms: 86145.39799630083,
search_count: 7202,
total_search_time_ms: 3107147,
},
elastic: {
disabled: 1,
enabled: 1,
},
},
],
ml_job_metrics: [
{
job_id: 'high_distinct_count_error_message',
create_time: 1603838214983,
finished_time: 1611739871669,
state: 'closed',
data_counts: {
bucket_count: 8612,
empty_bucket_count: 8590,
input_bytes: 45957,
input_record_count: 162,
last_data_time: 1610470367123,
processed_record_count: 162,
},
model_size_stats: {
bucket_allocation_failures_count: 0,
memory_status: 'ok',
model_bytes: 72574,
model_bytes_exceeded: 0,
model_bytes_memory_limit: 16777216,
peak_model_bytes: 78682,
},
timing_stats: {
average_bucket_processing_time_ms: 0.4900837644740133,
bucket_count: 16236,
exponential_average_bucket_processing_time_ms: 0.23614068552903306,
exponential_average_bucket_processing_time_per_hour_ms: 1.5551298175461634,
maximum_bucket_processing_time_ms: 392,
minimum_bucket_processing_time_ms: 0,
total_bucket_processing_time_ms: 7957.00000000008,
},
datafeed: {
datafeed_id: 'datafeed-high_distinct_count_error_message',
state: 'stopped',
timing_stats: {
bucket_count: 8612,
exponential_average_search_time_per_hour_ms: 86145.39799630083,
search_count: 7202,
total_search_time_ms: 3107147,
},
},
},
],
},
})
);
});


@@ -1,191 +0,0 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
import {
ElasticsearchClient,
KibanaRequest,
SavedObjectsClientContract,
} from '../../../../../../src/core/server';
import { SIGNALS_ID } from '../../../common/constants';
import { isJobStarted } from '../../../common/machine_learning/helpers';
import { isSecurityJob } from '../../../common/machine_learning/is_security_job';
import { MlPluginSetup } from '../../../../ml/server';
import { DetectionRulesUsage, MlJobsUsage } from './index';
import { isElasticRule, RuleSearchParams, RuleSearchResult } from './detection_telemetry_helpers';
interface DetectionsMetric {
isElastic: boolean;
isEnabled: boolean;
}
/**
* Default detection rule usage count
*/
export const initialRulesUsage: DetectionRulesUsage = {
custom: {
enabled: 0,
disabled: 0,
},
elastic: {
enabled: 0,
disabled: 0,
},
};
/**
* Default ml job usage count
*/
export const initialMlJobsUsage: MlJobsUsage = {
custom: {
enabled: 0,
disabled: 0,
},
elastic: {
enabled: 0,
disabled: 0,
},
};
const updateRulesUsage = (
ruleMetric: DetectionsMetric,
usage: DetectionRulesUsage
): DetectionRulesUsage => {
const { isEnabled, isElastic } = ruleMetric;
if (isEnabled && isElastic) {
return {
...usage,
elastic: {
...usage.elastic,
enabled: usage.elastic.enabled + 1,
},
};
} else if (!isEnabled && isElastic) {
return {
...usage,
elastic: {
...usage.elastic,
disabled: usage.elastic.disabled + 1,
},
};
} else if (isEnabled && !isElastic) {
return {
...usage,
custom: {
...usage.custom,
enabled: usage.custom.enabled + 1,
},
};
} else if (!isEnabled && !isElastic) {
return {
...usage,
custom: {
...usage.custom,
disabled: usage.custom.disabled + 1,
},
};
} else {
return usage;
}
};
const updateMlJobsUsage = (jobMetric: DetectionsMetric, usage: MlJobsUsage): MlJobsUsage => {
const { isEnabled, isElastic } = jobMetric;
if (isEnabled && isElastic) {
return {
...usage,
elastic: {
...usage.elastic,
enabled: usage.elastic.enabled + 1,
},
};
} else if (!isEnabled && isElastic) {
return {
...usage,
elastic: {
...usage.elastic,
disabled: usage.elastic.disabled + 1,
},
};
} else if (isEnabled && !isElastic) {
return {
...usage,
custom: {
...usage.custom,
enabled: usage.custom.enabled + 1,
},
};
} else if (!isEnabled && !isElastic) {
return {
...usage,
custom: {
...usage.custom,
disabled: usage.custom.disabled + 1,
},
};
} else {
return usage;
}
};
export const getRulesUsage = async (
index: string,
esClient: ElasticsearchClient
): Promise<DetectionRulesUsage> => {
let rulesUsage: DetectionRulesUsage = initialRulesUsage;
const ruleSearchOptions: RuleSearchParams = {
body: { query: { bool: { filter: { term: { 'alert.alertTypeId': SIGNALS_ID } } } } },
filterPath: ['hits.hits._source.alert.enabled', 'hits.hits._source.alert.tags'],
ignoreUnavailable: true,
index,
size: 10000, // elasticsearch index.max_result_window default value
};
try {
const { body: ruleResults } = await esClient.search<RuleSearchResult>(ruleSearchOptions);
if (ruleResults.hits?.hits?.length > 0) {
rulesUsage = ruleResults.hits.hits.reduce((usage, hit) => {
const isElastic = isElasticRule(hit._source?.alert.tags);
const isEnabled = Boolean(hit._source?.alert.enabled);
return updateRulesUsage({ isElastic, isEnabled }, usage);
}, initialRulesUsage);
}
} catch (e) {
// ignore failure, usage will be zeroed
}
return rulesUsage;
};
export const getMlJobsUsage = async (
ml: MlPluginSetup | undefined,
savedObjectClient: SavedObjectsClientContract
): Promise<MlJobsUsage> => {
let jobsUsage: MlJobsUsage = initialMlJobsUsage;
if (ml) {
try {
const fakeRequest = { headers: {} } as KibanaRequest;
const modules = await ml.modulesProvider(fakeRequest, savedObjectClient).listModules();
const moduleJobs = modules.flatMap((module) => module.jobs);
const jobs = await ml.jobServiceProvider(fakeRequest, savedObjectClient).jobsSummary();
jobsUsage = jobs.filter(isSecurityJob).reduce((usage, job) => {
const isElastic = moduleJobs.some((moduleJob) => moduleJob.id === job.id);
const isEnabled = isJobStarted(job.jobState, job.datafeedState);
return updateMlJobsUsage({ isElastic, isEnabled }, usage);
}, initialMlJobsUsage);
} catch (e) {
// ignore failure, usage will be zeroed
}
}
return jobsUsage;
};


@@ -6,157 +6,15 @@
*/
import { ElasticsearchClient, SavedObjectsClientContract } from '../../../../../../src/core/server';
import {
getMlJobsUsage,
getRulesUsage,
initialRulesUsage,
initialMlJobsUsage,
} from './detections_usage_helpers';
import {
getMlJobMetrics,
getDetectionRuleMetrics,
initialDetectionRulesUsage,
} from './detections_metrics_helpers';
import { MlPluginSetup } from '../../../../ml/server';
import { getDetectionRuleMetrics, initialDetectionRulesUsage } from './detection_rule_helpers';
import { getMlJobMetrics, initialMlJobsUsage } from './detection_ml_helpers';
import { DetectionMetrics } from './types';
interface FeatureUsage {
enabled: number;
disabled: number;
}
import { INTERNAL_IMMUTABLE_KEY } from '../../../common/constants';
interface FeatureTypeUsage {
enabled: number;
disabled: number;
alerts: number;
cases: number;
}
export interface DetectionRulesTypeUsage {
query: FeatureTypeUsage;
threshold: FeatureTypeUsage;
eql: FeatureTypeUsage;
machine_learning: FeatureTypeUsage;
threat_match: FeatureTypeUsage;
elastic_total: FeatureTypeUsage;
custom_total: FeatureTypeUsage;
}
export interface DetectionRulesUsage {
custom: FeatureUsage;
elastic: FeatureUsage;
}
export interface MlJobsUsage {
custom: FeatureUsage;
elastic: FeatureUsage;
}
export interface DetectionsUsage {
detection_rules: DetectionRulesUsage;
ml_jobs: MlJobsUsage;
}
export interface DetectionMetrics {
ml_jobs: MlJobMetric[];
detection_rules: DetectionRuleAdoption;
}
export interface MlJobDataCount {
bucket_count: number;
empty_bucket_count: number;
input_bytes: number;
input_record_count: number;
last_data_time: number;
processed_record_count: number;
}
export interface MlJobModelSize {
bucket_allocation_failures_count: number;
memory_status: string;
model_bytes: number;
model_bytes_exceeded: number;
model_bytes_memory_limit: number;
peak_model_bytes: number;
}
export interface MlTimingStats {
bucket_count: number;
exponential_average_bucket_processing_time_ms: number;
exponential_average_bucket_processing_time_per_hour_ms: number;
maximum_bucket_processing_time_ms: number;
minimum_bucket_processing_time_ms: number;
total_bucket_processing_time_ms: number;
}
export interface MlJobMetric {
job_id: string;
open_time: string;
state: string;
data_counts: MlJobDataCount;
model_size_stats: MlJobModelSize;
timing_stats: MlTimingStats;
}
export interface DetectionRuleMetric {
rule_name: string;
rule_id: string;
rule_type: string;
enabled: boolean;
elastic_rule: boolean;
created_on: string;
updated_on: string;
alert_count_daily: number;
cases_count_total: number;
}
export interface DetectionRuleAdoption {
detection_rule_detail: DetectionRuleMetric[];
detection_rule_usage: DetectionRulesTypeUsage;
}
export interface AlertsAggregationResponse {
hits: {
total: { value: number };
};
aggregations: {
[aggName: string]: {
buckets: Array<{ key: string; doc_count: number }>;
};
};
}
export interface CasesSavedObject {
associationType: string;
type: string;
alertId: string;
index: string;
rule: {
id: string;
name: string;
};
}
export const defaultDetectionsUsage = {
detection_rules: initialRulesUsage,
ml_jobs: initialMlJobsUsage,
};
export const fetchDetectionsUsage = async (
kibanaIndex: string,
esClient: ElasticsearchClient,
ml: MlPluginSetup | undefined,
savedObjectClient: SavedObjectsClientContract
): Promise<DetectionsUsage> => {
const [rulesUsage, mlJobsUsage] = await Promise.allSettled([
getRulesUsage(kibanaIndex, esClient),
getMlJobsUsage(ml, savedObjectClient),
]);
return {
detection_rules: rulesUsage.status === 'fulfilled' ? rulesUsage.value : initialRulesUsage,
ml_jobs: mlJobsUsage.status === 'fulfilled' ? mlJobsUsage.value : initialMlJobsUsage,
};
};
export const isElasticRule = (tags: string[] = []) =>
tags.includes(`${INTERNAL_IMMUTABLE_KEY}:true`);
export const fetchDetectionsMetrics = async (
kibanaIndex: string,
@@ -171,7 +29,10 @@ export const fetchDetectionsMetrics = async (
]);
return {
ml_jobs: mlJobMetrics.status === 'fulfilled' ? mlJobMetrics.value : [],
ml_jobs:
mlJobMetrics.status === 'fulfilled'
? mlJobMetrics.value
: { ml_job_metrics: [], ml_job_usage: initialMlJobsUsage },
detection_rules:
detectionRuleMetrics.status === 'fulfilled'
? detectionRuleMetrics.value


@@ -0,0 +1,162 @@
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0; you may not use this file except in compliance with the Elastic License
* 2.0.
*/
interface RuleSearchBody {
query: {
bool: {
filter: {
term: { [key: string]: string };
};
};
};
}
export interface RuleSearchParams {
body: RuleSearchBody;
filterPath: string[];
ignoreUnavailable: boolean;
index: string;
size: number;
}
export interface RuleSearchResult {
alert: {
name: string;
enabled: boolean;
tags: string[];
createdAt: string;
updatedAt: string;
params: DetectionRuleParms;
};
}
export interface DetectionsMetric {
isElastic: boolean;
isEnabled: boolean;
}
interface DetectionRuleParms {
ruleId: string;
version: string;
type: string;
}
interface FeatureUsage {
enabled: number;
disabled: number;
}
interface FeatureTypeUsage {
enabled: number;
disabled: number;
alerts: number;
cases: number;
}
export interface DetectionRulesTypeUsage {
query: FeatureTypeUsage;
threshold: FeatureTypeUsage;
eql: FeatureTypeUsage;
machine_learning: FeatureTypeUsage;
threat_match: FeatureTypeUsage;
elastic_total: FeatureTypeUsage;
custom_total: FeatureTypeUsage;
}
export interface MlJobsUsage {
custom: FeatureUsage;
elastic: FeatureUsage;
}
export interface DetectionsUsage {
ml_jobs: MlJobsUsage;
}
export interface DetectionMetrics {
ml_jobs: MlJobUsage;
detection_rules: DetectionRuleAdoption;
}
export interface MlJobDataCount {
bucket_count: number;
empty_bucket_count: number;
input_bytes: number;
input_record_count: number;
last_data_time: number;
processed_record_count: number;
}
export interface MlJobModelSize {
bucket_allocation_failures_count: number;
memory_status: string;
model_bytes: number;
model_bytes_exceeded: number;
model_bytes_memory_limit: number;
peak_model_bytes: number;
}
export interface MlTimingStats {
bucket_count: number;
exponential_average_bucket_processing_time_ms: number;
exponential_average_bucket_processing_time_per_hour_ms: number;
maximum_bucket_processing_time_ms: number;
minimum_bucket_processing_time_ms: number;
total_bucket_processing_time_ms: number;
}
export interface MlJobMetric {
job_id: string;
open_time: string;
state: string;
data_counts: MlJobDataCount;
model_size_stats: MlJobModelSize;
timing_stats: MlTimingStats;
}
export interface DetectionRuleMetric {
rule_name: string;
rule_id: string;
rule_type: string;
enabled: boolean;
elastic_rule: boolean;
created_on: string;
updated_on: string;
alert_count_daily: number;
cases_count_total: number;
}
export interface AlertsAggregationResponse {
hits: {
total: { value: number };
};
aggregations: {
[aggName: string]: {
buckets: Array<{ key: string; doc_count: number }>;
};
};
}
export interface CasesSavedObject {
associationType: string;
type: string;
alertId: string;
index: string;
rule: {
id: string;
name: string;
};
}
export interface MlJobUsage {
ml_job_usage: MlJobsUsage;
ml_job_metrics: MlJobMetric[];
}
export interface DetectionRuleAdoption {
detection_rule_detail: DetectionRuleMetric[];
detection_rule_usage: DetectionRulesTypeUsage;
}


@@ -4664,82 +4664,6 @@
},
"security_solution": {
"properties": {
"detections": {
"properties": {
"detection_rules": {
"properties": {
"custom": {
"properties": {
"enabled": {
"type": "long",
"_meta": {
"description": "The number of custom detection rules enabled"
}
},
"disabled": {
"type": "long",
"_meta": {
"description": "The number of custom detection rules disabled"
}
}
}
},
"elastic": {
"properties": {
"enabled": {
"type": "long",
"_meta": {
"description": "The number of elastic prebuilt detection rules enabled"
}
},
"disabled": {
"type": "long",
"_meta": {
"description": "The number of elastic prebuilt detection rules disabled"
}
}
}
}
}
},
"ml_jobs": {
"properties": {
"custom": {
"properties": {
"enabled": {
"type": "long",
"_meta": {
"description": "The number of custom ML jobs rules enabled"
}
},
"disabled": {
"type": "long",
"_meta": {
"description": "The number of custom ML jobs rules disabled"
}
}
}
},
"elastic": {
"properties": {
"enabled": {
"type": "long",
"_meta": {
"description": "The number of elastic provided ML jobs rules enabled"
}
},
"disabled": {
"type": "long",
"_meta": {
"description": "The number of elastic provided ML jobs rules disabled"
}
}
}
}
}
}
}
},
"detectionMetrics": {
"properties": {
"detection_rules": {
@@ -5014,197 +4938,237 @@
}
},
"ml_jobs": {
"type": "array",
"items": {
"properties": {
"job_id": {
"type": "keyword",
"_meta": {
"description": "Identifier for the anomaly detection job"
}
},
"open_time": {
"type": "keyword",
"_meta": {
"description": "For open jobs only, the elapsed time for which the job has been open"
}
},
"create_time": {
"type": "keyword",
"_meta": {
"description": "The time the job was created"
}
},
"finished_time": {
"type": "keyword",
"_meta": {
"description": "If the job closed or failed, this is the time the job finished"
}
},
"state": {
"type": "keyword",
"_meta": {
"description": "The status of the anomaly detection job"
}
},
"data_counts": {
"properties": {
"bucket_count": {
"type": "long",
"_meta": {
"description": "The number of buckets processed"
"properties": {
"ml_job_usage": {
"properties": {
"custom": {
"properties": {
"enabled": {
"type": "long",
"_meta": {
"description": "The number of custom ML jobs rules enabled"
}
},
"disabled": {
"type": "long",
"_meta": {
"description": "The number of custom ML jobs rules disabled"
}
}
},
"empty_bucket_count": {
"type": "long",
"_meta": {
"description": "The number of buckets which did not contain any data"
}
},
"input_bytes": {
"type": "long",
"_meta": {
"description": "The number of bytes of input data posted to the anomaly detection job"
}
},
"input_record_count": {
"type": "long",
"_meta": {
"description": "The number of input documents posted to the anomaly detection job"
}
},
"last_data_time": {
"type": "long",
"_meta": {
"description": "The timestamp at which data was last analyzed, according to server time"
}
},
"processed_record_count": {
"type": "long",
"_meta": {
"description": "The number of input documents that have been processed by the anomaly detection job"
}
},
"elastic": {
"properties": {
"enabled": {
"type": "long",
"_meta": {
"description": "The number of elastic provided ML jobs rules enabled"
}
},
"disabled": {
"type": "long",
"_meta": {
"description": "The number of elastic provided ML jobs rules disabled"
}
}
}
}
},
"model_size_stats": {
}
},
"ml_job_metrics": {
"type": "array",
"items": {
"properties": {
"bucket_allocation_failures_count": {
"type": "long",
"_meta": {
"description": "The number of buckets for which new entities in incoming data were not processed due to insufficient model memory"
}
},
"model_bytes": {
"type": "long",
"_meta": {
"description": "The number of bytes of memory used by the models"
}
},
"model_bytes_exceeded": {
"type": "long",
"_meta": {
"description": "The number of bytes over the high limit for memory usage at the last allocation failure"
}
},
"model_bytes_memory_limit": {
"type": "long",
"_meta": {
"description": "The upper limit for model memory usage, checked on increasing values"
}
},
"peak_model_bytes": {
"type": "long",
"_meta": {
"description": "The peak number of bytes of memory ever used by the models"
}
}
}
},
"timing_stats": {
"properties": {
"bucket_count": {
"type": "long",
"_meta": {
"description": "The number of buckets processed"
}
},
"exponential_average_bucket_processing_time_ms": {
"type": "long",
"_meta": {
"description": "Exponential moving average of all bucket processing times, in milliseconds"
}
},
"exponential_average_bucket_processing_time_per_hour_ms": {
"type": "long",
"_meta": {
"description": "Exponentially-weighted moving average of bucket processing times calculated in a 1 hour time window, in milliseconds"
}
},
"maximum_bucket_processing_time_ms": {
"type": "long",
"_meta": {
"description": "Maximum among all bucket processing times, in milliseconds"
}
},
"minimum_bucket_processing_time_ms": {
"type": "long",
"_meta": {
"description": "Minimum among all bucket processing times, in milliseconds"
}
},
"total_bucket_processing_time_ms": {
"type": "long",
"_meta": {
"description": "Sum of all bucket processing times, in milliseconds"
}
}
}
},
"datafeed": {
"properties": {
"datafeed_id": {
"job_id": {
"type": "keyword",
"_meta": {
"description": "A numerical character string that uniquely identifies the datafeed"
"description": "Identifier for the anomaly detection job"
}
},
"open_time": {
"type": "keyword",
"_meta": {
"description": "For open jobs only, the elapsed time for which the job has been open"
}
},
"create_time": {
"type": "keyword",
"_meta": {
"description": "The time the job was created"
}
},
"finished_time": {
"type": "keyword",
"_meta": {
"description": "If the job closed or failed, this is the time the job finished"
}
},
"state": {
"type": "keyword",
"_meta": {
"description": "The status of the datafeed"
"description": "The status of the anomaly detection job"
}
},
"timing_stats": {
"data_counts": {
"properties": {
"average_search_time_per_bucket_ms": {
"type": "long",
"_meta": {
"description": "The average search time per bucket, in milliseconds"
}
},
"bucket_count": {
"type": "long",
"_meta": {
"description": "The number of buckets processed"
}
},
"exponential_average_search_time_per_hour_ms": {
"empty_bucket_count": {
"type": "long",
"_meta": {
"description": "The exponential average search time per hour, in milliseconds"
"description": "The number of buckets which did not contain any data"
}
},
"search_count": {
"input_bytes": {
"type": "long",
"_meta": {
"description": "The number of searches run by the datafeed"
"description": "The number of bytes of input data posted to the anomaly detection job"
}
},
"total_search_time_ms": {
"input_record_count": {
"type": "long",
"_meta": {
"description": "The total time the datafeed spent searching, in milliseconds"
"description": "The number of input documents posted to the anomaly detection job"
}
},
"last_data_time": {
"type": "long",
"_meta": {
"description": "The timestamp at which data was last analyzed, according to server time"
}
},
"processed_record_count": {
"type": "long",
"_meta": {
"description": "The number of input documents that have been processed by the anomaly detection job"
}
}
}
},
"model_size_stats": {
"properties": {
"bucket_allocation_failures_count": {
"type": "long",
"_meta": {
"description": "The number of buckets for which new entities in incoming data were not processed due to insufficient model memory"
}
},
"model_bytes": {
"type": "long",
"_meta": {
"description": "The number of bytes of memory used by the models"
}
},
"model_bytes_exceeded": {
"type": "long",
"_meta": {
"description": "The number of bytes over the high limit for memory usage at the last allocation failure"
}
},
"model_bytes_memory_limit": {
"type": "long",
"_meta": {
"description": "The upper limit for model memory usage, checked on increasing values"
}
},
"peak_model_bytes": {
"type": "long",
"_meta": {
"description": "The peak number of bytes of memory ever used by the models"
}
}
}
},
"timing_stats": {
"properties": {
"bucket_count": {
"type": "long",
"_meta": {
"description": "The number of buckets processed"
}
},
"exponential_average_bucket_processing_time_ms": {
"type": "long",
"_meta": {
"description": "Exponential moving average of all bucket processing times, in milliseconds"
}
},
"exponential_average_bucket_processing_time_per_hour_ms": {
"type": "long",
"_meta": {
"description": "Exponentially-weighted moving average of bucket processing times calculated in a 1 hour time window, in milliseconds"
}
},
"maximum_bucket_processing_time_ms": {
"type": "long",
"_meta": {
"description": "Maximum among all bucket processing times, in milliseconds"
}
},
"minimum_bucket_processing_time_ms": {
"type": "long",
"_meta": {
"description": "Minimum among all bucket processing times, in milliseconds"
}
},
"total_bucket_processing_time_ms": {
"type": "long",
"_meta": {
"description": "Sum of all bucket processing times, in milliseconds"
}
}
}
},
"datafeed": {
"properties": {
"datafeed_id": {
"type": "keyword",
"_meta": {
"description": "A numerical character string that uniquely identifies the datafeed"
}
},
"state": {
"type": "keyword",
"_meta": {
"description": "The status of the datafeed"
}
},
"timing_stats": {
"properties": {
"average_search_time_per_bucket_ms": {
"type": "long",
"_meta": {
"description": "The average search time per bucket, in milliseconds"
}
},
"bucket_count": {
"type": "long",
"_meta": {
"description": "The number of buckets processed"
}
},
"exponential_average_search_time_per_hour_ms": {
"type": "long",
"_meta": {
"description": "The exponential average search time per hour, in milliseconds"
}
},
"search_count": {
"type": "long",
"_meta": {
"description": "The number of searches run by the datafeed"
}
},
"total_search_time_ms": {
"type": "long",
"_meta": {
"description": "The total time the datafeed spent searching, in milliseconds"
}
}
}
}
}