Telemetry plugins esclient migration (#87356)

Christiane (Tina) Heiligers 2021-01-06 12:57:18 -07:00 committed by GitHub
parent f09f19ac28
commit 70c70875c6
17 changed files with 34 additions and 74 deletions

View file

@@ -38,7 +38,6 @@ import { IUiSettingsClient } from 'src/core/server';
import { IUiSettingsClient as IUiSettingsClient_3 } from 'kibana/server';
import { KibanaRequest } from 'kibana/server';
import { KibanaRequest as KibanaRequest_2 } from 'src/core/server';
import { LegacyAPICaller } from 'src/core/server';
import { Logger } from 'src/core/server';
import { Logger as Logger_2 } from 'kibana/server';
import { LoggerFactory } from '@kbn/logging';

View file

@@ -31,7 +31,7 @@ import {
SavedObjectsClientContract,
SavedObjectsClient,
CoreStart,
ILegacyCustomClusterClient,
ICustomClusterClient,
} from '../../../core/server';
import {
getTelemetryOptIn,
@@ -65,7 +65,7 @@ export class FetcherTask {
private isSending = false;
private internalRepository?: SavedObjectsClientContract;
private telemetryCollectionManager?: TelemetryCollectionManagerPluginStart;
private elasticsearchClient?: ILegacyCustomClusterClient;
private elasticsearchClient?: ICustomClusterClient;
constructor(initializerContext: PluginInitializerContext<TelemetryConfigType>) {
this.config$ = initializerContext.config.create();
@@ -79,7 +79,7 @@
) {
this.internalRepository = new SavedObjectsClient(savedObjects.createInternalRepository());
this.telemetryCollectionManager = telemetryCollectionManager;
this.elasticsearchClient = elasticsearch.legacy.createClient('telemetry-fetcher');
this.elasticsearchClient = elasticsearch.createClient('telemetry-fetcher');
this.intervalId = timer(this.initialCheckDelayMs, this.checkIntervalMs).subscribe(() =>
this.sendIfDue()
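For context (not part of this diff), a minimal sketch of how a custom client created with `elasticsearch.createClient(...)` is typically used and torn down; the helper names below are hypothetical:

```typescript
import type { ICustomClusterClient } from 'src/core/server';

// Hypothetical helpers: requests go through `asInternalUser` (a regular
// Elasticsearch client) and a custom client must be closed on plugin stop.
async function pingTelemetryCluster(client: ICustomClusterClient): Promise<boolean> {
  const { body } = await client.asInternalUser.ping();
  return body;
}

async function stopFetcher(client?: ICustomClusterClient): Promise<void> {
  await client?.close(); // releases the connections held by the custom client
}
```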

View file

@@ -99,7 +99,7 @@ export class TelemetryPlugin implements Plugin<TelemetryPluginSetup, TelemetryPl
}
public setup(
{ elasticsearch, http, savedObjects }: CoreSetup,
{ http, savedObjects }: CoreSetup,
{ usageCollection, telemetryCollectionManager }: TelemetryPluginsDepsSetup
): TelemetryPluginSetup {
const currentKibanaVersion = this.currentKibanaVersion;

View file

@@ -19,12 +19,7 @@
import { omit } from 'lodash';
import { UsageCollectionSetup } from 'src/plugins/usage_collection/server';
import {
ISavedObjectsRepository,
KibanaRequest,
LegacyAPICaller,
SavedObjectsClientContract,
} from 'kibana/server';
import { ISavedObjectsRepository, KibanaRequest, SavedObjectsClientContract } from 'kibana/server';
import { StatsCollectionContext } from 'src/plugins/telemetry_collection_manager/server';
import { ElasticsearchClient } from 'src/core/server';
@@ -88,16 +83,10 @@ export function handleKibanaStats(
export async function getKibana(
usageCollection: UsageCollectionSetup,
callWithInternalUser: LegacyAPICaller,
asInternalUser: ElasticsearchClient,
soClient: SavedObjectsClientContract | ISavedObjectsRepository,
kibanaRequest: KibanaRequest | undefined // intentionally `| undefined` to enforce providing the parameter
): Promise<KibanaUsageStats> {
const usage = await usageCollection.bulkFetch(
callWithInternalUser,
asInternalUser,
soClient,
kibanaRequest
);
const usage = await usageCollection.bulkFetch(asInternalUser, soClient, kibanaRequest);
return usageCollection.toObject(usage);
}
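The signature change above follows the general pattern of this migration: a `LegacyAPICaller` took an endpoint name plus a params object, while the new `ElasticsearchClient` exposes typed methods and wraps the result in `{ body }`. A rough before/after sketch (the index and query are placeholders, not taken from this file):

```typescript
import type { LegacyAPICaller, ElasticsearchClient } from 'src/core/server';

// Before: endpoint name + params object, loosely typed result.
async function countDocsLegacy(callWithInternalUser: LegacyAPICaller) {
  const result = await callWithInternalUser('search', {
    index: '.kibana', // placeholder index
    size: 0,
    body: { query: { match_all: {} } },
  });
  return result.hits.total;
}

// After: typed method on the client, response wrapped in `{ body }`.
async function countDocs(asInternalUser: ElasticsearchClient) {
  const { body } = await asInternalUser.search({
    index: '.kibana', // placeholder index
    size: 0,
    body: { query: { match_all: {} } },
  });
  return body.hits.total;
}
```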

View file

@@ -71,7 +71,7 @@ export const getLocalStats: StatsGetter<TelemetryLocalStats> = async (
config,
context
) => {
const { callCluster, usageCollection, esClient, soClient, kibanaRequest } = config;
const { usageCollection, esClient, soClient, kibanaRequest } = config;
return await Promise.all(
clustersDetails.map(async (clustersDetail) => {
@@ -79,7 +79,7 @@ export const getLocalStats: StatsGetter<TelemetryLocalStats> = async (
getClusterInfo(esClient), // cluster info
getClusterStats(esClient), // cluster stats (not to be confused with cluster _state_)
getNodesUsage(esClient), // nodes_usage info
getKibana(usageCollection, callCluster, esClient, soClient, kibanaRequest),
getKibana(usageCollection, esClient, soClient, kibanaRequest),
getDataTelemetry(esClient),
]);
return handleLocalStats(

View file

@@ -26,7 +26,6 @@ import {
Logger,
IClusterClient,
SavedObjectsServiceStart,
ILegacyClusterClient,
} from 'src/core/server';
import {
@@ -53,7 +52,6 @@ export class TelemetryCollectionManagerPlugin
private collectionStrategy: CollectionStrategy<any> | undefined;
private usageGetterMethodPriority = -1;
private usageCollection?: UsageCollectionSetup;
private legacyElasticsearchClient?: ILegacyClusterClient;
private elasticsearchClient?: IClusterClient;
private savedObjectsService?: SavedObjectsServiceStart;
private readonly isDistributable: boolean;
@@ -77,7 +75,6 @@ export class TelemetryCollectionManagerPlugin
}
public start(core: CoreStart) {
this.legacyElasticsearchClient = core.elasticsearch.legacy.client; // TODO: Remove when all the collectors have migrated
this.elasticsearchClient = core.elasticsearch.client;
this.savedObjectsService = core.savedObjects;
@@ -129,9 +126,6 @@ export class TelemetryCollectionManagerPlugin
config: StatsGetterConfig,
usageCollection: UsageCollectionSetup
): StatsCollectionConfig | undefined {
const callCluster = config.unencrypted
? this.legacyElasticsearchClient?.asScoped(config.request).callAsCurrentUser
: this.legacyElasticsearchClient?.callAsInternalUser;
// Scope the new elasticsearch Client appropriately and pass to the stats collection config
const esClient = config.unencrypted
? this.elasticsearchClient?.asScoped(config.request).asCurrentUser
@@ -143,8 +137,8 @@ export class TelemetryCollectionManagerPlugin
// Provide the kibanaRequest so opted-in plugins can scope their custom clients only if the request is not encrypted
const kibanaRequest = config.unencrypted ? config.request : void 0;
if (callCluster && esClient && soClient) {
return { callCluster, usageCollection, esClient, soClient, kibanaRequest };
if (esClient && soClient) {
return { usageCollection, esClient, soClient, kibanaRequest };
}
}
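The scoping decision keeps the same shape it had with the legacy client; only the property names change. A side-by-side sketch of the two patterns (the helper names are illustrative):

```typescript
import type { IClusterClient, ILegacyClusterClient, KibanaRequest } from 'src/core/server';

// Legacy pattern (removed above): pick a caller function.
function legacyCallerFor(client: ILegacyClusterClient, request?: KibanaRequest) {
  return request ? client.asScoped(request).callAsCurrentUser : client.callAsInternalUser;
}

// New pattern: pick a scoped or internal ElasticsearchClient.
function esClientFor(client: IClusterClient, request?: KibanaRequest) {
  return request ? client.asScoped(request).asCurrentUser : client.asInternalUser;
}
```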

View file

@@ -18,7 +18,6 @@
*/
import {
LegacyAPICaller,
ElasticsearchClient,
Logger,
KibanaRequest,
@@ -68,7 +67,6 @@ export interface ClusterDetails {
export interface StatsCollectionConfig {
usageCollection: UsageCollectionSetup;
callCluster: LegacyAPICaller;
esClient: ElasticsearchClient;
soClient: SavedObjectsClientContract | ISavedObjectsRepository;
kibanaRequest: KibanaRequest | undefined; // intentionally `| undefined` to enforce providing the parameter
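With `callCluster` gone, a stats getter now receives only the new client alongside the saved-objects client and the optional request. A hypothetical getter consuming the slimmed-down config (the payload shape is illustrative):

```typescript
import type { StatsCollectionConfig } from 'src/plugins/telemetry_collection_manager/server';

// Hypothetical stats getter: only the config shape comes from this interface.
async function getMyPluginStats(
  clustersDetails: Array<{ clusterUuid: string }>,
  config: StatsCollectionConfig
) {
  const { usageCollection, esClient, soClient, kibanaRequest } = config; // no callCluster anymore
  const usage = await usageCollection.bulkFetch(esClient, soClient, kibanaRequest);
  return clustersDetails.map(({ clusterUuid }) => ({
    cluster_uuid: clusterUuid,
    stack_stats: { kibana: usageCollection.toObject(usage) },
  }));
}
```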

View file

@@ -95,10 +95,10 @@ Some background:
- `isReady` (added in v7.2.0 and v6.8.4) is a way for a usage collector to announce that some async process must finish first before it can return data in the `fetch` method (e.g. a client needs to be initialized, or the task manager needs to run a task first). If any collector reports that it is not ready when we call its `fetch` method, we reset a flag to try again and, after a set amount of time, collect data from those collectors that are ready and skip any that are not. This means that if a collector returns `true` for `isReady` but it actually isn't ready to return data, there won't be telemetry data from that collector in that telemetry report (usually once per day). You should consider what it means if your collector doesn't return data in the first few documents when Kibana starts, or whether we should wait for any other reason (e.g. the task manager needs to run your task first). If you need to tell telemetry collection to wait, you should implement this function with custom logic. If your `fetch` method can run without any previous dependencies, then you can return `true` for `isReady` as shown in the example below.
- The `fetch` method needs to support multiple contexts in which it is called. For example, when a user requests an example of what we collect in the **Kibana>Advanced Settings>Usage data** section, the clients provided in the context of the function (`CollectorFetchContext`) are scoped to that user's privileges. The reason is to avoid exposing via telemetry any data the user should not have access to (e.g., if the user does not have access to certain indices, they shouldn't be allowed to see the number of documents that exist in them). In this case, the `fetch` method receives the clients `callCluster`, `esClient` and `soClient` scoped to the user who performed the HTTP API request. Alternatively, when requesting the usage data to be reported to the Remote Telemetry Service, the clients are scoped to the internal Kibana user (`kibana_system`). Please mind that it might have lower-level access than the default super-admin `elastic` test user.
- The `fetch` method needs to support multiple contexts in which it is called. For example, when a user requests an example of what we collect in the **Kibana>Advanced Settings>Usage data** section, the clients provided in the context of the function (`CollectorFetchContext`) are scoped to that user's privileges. The reason is to avoid exposing via telemetry any data the user should not have access to (e.g., if the user does not have access to certain indices, they shouldn't be allowed to see the number of documents that exist in them). In this case, the `fetch` method receives the clients `esClient` and `soClient` scoped to the user who performed the HTTP API request. Alternatively, when requesting the usage data to be reported to the Remote Telemetry Service, the clients are scoped to the internal Kibana user (`kibana_system`). Please mind that it might have lower-level access than the default super-admin `elastic` test user (see the collector sketch below).
In some scenarios, your collector might need to maintain its own client. An example of that is the `monitoring` plugin, which maintains a connection to the Remote Monitoring Cluster to push its monitoring data. If that's the case, your plugin can opt in to receive the additional `kibanaRequest` parameter by adding `extendFetchContext.kibanaRequest: true` to the collector's config: it will be appended to the context of the `fetch` method only if the request needs to be scoped to a user other than the Kibana internal user, so beware that your collector will need to work for both scenarios (especially for the scenario when `kibanaRequest` is missing).
Note: there will be many cases where you won't need to use the `callCluster`, `esClient` or `soClient` parameters passed in to your `fetch` method at all. Your feature might have an accumulating value in server memory, or read something from the OS.
Note: there will be many cases where you won't need to use the `esClient` or `soClient` clients passed in to your `fetch` method at all. Your feature might have an accumulating value in server memory, or read something from the OS.
In the case of using a custom SavedObjects client, it is up to the plugin to initialize the client to save the data and it is strongly recommended to scope that client to the `kibana_system` user.
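A minimal collector sketch under the assumptions above (the collector type, schema, and index name are hypothetical; only the shape of the fetch context with `esClient` and `soClient` comes from this plugin):

```typescript
import type { UsageCollectionSetup } from 'src/plugins/usage_collection/server';

interface MyUsage {
  my_objects_count: number;
}

export function registerMyUsageCollector(usageCollection: UsageCollectionSetup) {
  const collector = usageCollection.makeUsageCollector<MyUsage>({
    type: 'my_hypothetical_collector',
    isReady: () => true, // nothing asynchronous to wait for in this sketch
    schema: {
      my_objects_count: { type: 'long' },
    },
    // `esClient` and `soClient` arrive already scoped (end user or kibana_system),
    // as described above; `callCluster` is no longer part of the context.
    fetch: async ({ esClient }) => {
      const { body } = await esClient.count({ index: '.my-hypothetical-index' });
      return { my_objects_count: body.count };
    },
  });

  usageCollection.registerCollector(collector);
}
```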
@@ -302,7 +302,7 @@ New fields added to the telemetry payload currently mean that telemetry cluster
There are a few ways you can test that your usage collector is working properly.
1. The `/api/stats?extended=true&legacy=true` HTTP API in Kibana (added in 6.4.0) will call the `fetch` methods of all the registered collectors and add the results to a stats object that you can inspect in a browser or with curl. To test that your usage collector has been registered correctly and that it has the model of data you expected it to have, call that HTTP API manually and you should see a key in the `usage` object of the response named after your usage collector's `type` field. This method tests the Metricbeat scenario described above where `callCluster` wraps `callWithRequest`.
1. The `/api/stats?extended=true&legacy=true` HTTP API in Kibana (added in 6.4.0) will call the `fetch` methods of all the registered collectors and add the results to a stats object that you can inspect in a browser or with curl. To test that your usage collector has been registered correctly and that it has the model of data you expected it to have, call that HTTP API manually and you should see a key in the `usage` object of the response named after your usage collector's `type` field. This method tests the Metricbeat scenario described above, where the Elasticsearch client is scoped to the request.
2. There is a dev script in x-pack that will produce a sample of the payload that gets sent to the telemetry cluster during the sending phase of telemetry. Collected data comes from:
- The `.monitoring-*` indices, when Monitoring is enabled. Monitoring enhances the sent telemetry payload by potentially producing usage data for multiple clusters present in the monitoring data. Monitoring data is time-based, and the time frame of collection is the last 15 minutes.
- Live-pulled from ES API endpoints. This gets only real-time stats, without the context of historical data.

View file

@@ -19,7 +19,6 @@
import {
Logger,
LegacyAPICaller,
ElasticsearchClient,
ISavedObjectsRepository,
SavedObjectsClientContract,
@@ -54,10 +53,6 @@ export type MakeSchemaFrom<Base> = {
* @remark Bear in mind when testing your collector that your user has the same privileges as the Kibana Internal user to ensure the expected data is sent to the remote cluster.
*/
export type CollectorFetchContext<WithKibanaRequest extends boolean | undefined = false> = {
/**
* @deprecated Scoped Legacy Elasticsearch client: use esClient instead
*/
callCluster: LegacyAPICaller;
/**
* Request-scoped Elasticsearch client
* @remark Bear in mind when testing your collector that your user has the same privileges as the Kibana Internal user to ensure the expected data is sent to the remote cluster (more info: {@link CollectorFetchContext})

View file

@@ -44,7 +44,6 @@ describe('CollectorSet', () => {
loggerSpies.debug.mockRestore();
loggerSpies.warn.mockRestore();
});
const mockCallCluster = jest.fn().mockResolvedValue({ passTest: 1000 });
const mockEsClient = elasticsearchServiceMock.createClusterClient().asInternalUser;
const mockSoClient = savedObjectsRepositoryMock.create();
const req = void 0; // No need to instantiate any KibanaRequest in these tests
@@ -83,18 +82,19 @@ });
});
it('should log debug status of fetching from the collector', async () => {
mockEsClient.get.mockResolvedValue({ passTest: 1000 } as any);
const collectors = new CollectorSet({ logger });
collectors.registerCollector(
new Collector(logger, {
type: 'MY_TEST_COLLECTOR',
fetch: (collectorFetchContext: any) => {
return collectorFetchContext.callCluster();
return collectorFetchContext.esClient.get();
},
isReady: () => true,
})
);
const result = await collectors.bulkFetch(mockCallCluster, mockEsClient, mockSoClient, req);
const result = await collectors.bulkFetch(mockEsClient, mockSoClient, req);
expect(loggerSpies.debug).toHaveBeenCalledTimes(1);
expect(loggerSpies.debug).toHaveBeenCalledWith(
'Fetching data from MY_TEST_COLLECTOR collector'
@@ -119,7 +119,7 @@ describe('CollectorSet', () => {
let result;
try {
result = await collectors.bulkFetch(mockCallCluster, mockEsClient, mockSoClient, req);
result = await collectors.bulkFetch(mockEsClient, mockSoClient, req);
} catch (err) {
// Do nothing
}
@@ -137,7 +137,7 @@ })
})
);
const result = await collectors.bulkFetch(mockCallCluster, mockEsClient, mockSoClient, req);
const result = await collectors.bulkFetch(mockEsClient, mockSoClient, req);
expect(result).toStrictEqual([
{
type: 'MY_TEST_COLLECTOR',
@@ -155,7 +155,7 @@ } as any)
} as any)
);
const result = await collectors.bulkFetch(mockCallCluster, mockEsClient, mockSoClient, req);
const result = await collectors.bulkFetch(mockEsClient, mockSoClient, req);
expect(result).toStrictEqual([
{
type: 'MY_TEST_COLLECTOR',

View file

@@ -20,7 +20,6 @@
import { snakeCase } from 'lodash';
import {
Logger,
LegacyAPICaller,
ElasticsearchClient,
ISavedObjectsRepository,
SavedObjectsClientContract,
@@ -171,7 +170,6 @@ export class CollectorSet {
};
public bulkFetch = async (
callCluster: LegacyAPICaller,
esClient: ElasticsearchClient,
soClient: SavedObjectsClientContract | ISavedObjectsRepository,
kibanaRequest: KibanaRequest | undefined, // intentionally `| undefined` to enforce providing the parameter
@@ -182,7 +180,6 @@
this.logger.debug(`Fetching data from ${collector.type} collector`);
try {
const context = {
callCluster,
esClient,
soClient,
...(collector.extendFetchContext.kibanaRequest && { kibanaRequest }),
@@ -212,14 +209,12 @@ };
};
public bulkFetchUsage = async (
callCluster: LegacyAPICaller,
esClient: ElasticsearchClient,
savedObjectsClient: SavedObjectsClientContract | ISavedObjectsRepository,
kibanaRequest: KibanaRequest | undefined // intentionally `| undefined` to enforce providing the parameter
) => {
const usageCollectors = this.getFilteredCollectorSet((c) => c instanceof UsageCollector);
return await this.bulkFetch(
callCluster,
esClient,
savedObjectsClient,
kibanaRequest,

View file

@@ -28,7 +28,6 @@ import {
IRouter,
ISavedObjectsRepository,
KibanaRequest,
LegacyAPICaller,
MetricsServiceSetup,
SavedObjectsClientContract,
ServiceStatus,
@@ -66,17 +65,11 @@ export function registerStatsRoute({
overallStatus$: Observable<ServiceStatus>;
}) {
const getUsage = async (
callCluster: LegacyAPICaller,
esClient: ElasticsearchClient,
savedObjectsClient: SavedObjectsClientContract | ISavedObjectsRepository,
kibanaRequest: KibanaRequest
): Promise<any> => {
const usage = await collectorSet.bulkFetchUsage(
callCluster,
esClient,
savedObjectsClient,
kibanaRequest
);
const usage = await collectorSet.bulkFetchUsage(esClient, savedObjectsClient, kibanaRequest);
return collectorSet.toObject(usage);
};
@@ -110,7 +103,6 @@ export function registerStatsRoute({
let extended;
if (isExtended) {
const callCluster = context.core.elasticsearch.legacy.client.callAsCurrentUser;
const { asCurrentUser } = context.core.elasticsearch.client;
const savedObjectsClient = context.core.savedObjects.client;
@@ -122,7 +114,7 @@ }
}
const usagePromise = shouldGetUsage
? getUsage(callCluster, asCurrentUser, savedObjectsClient, req)
? getUsage(asCurrentUser, savedObjectsClient, req)
: Promise.resolve({});
const [usage, clusterUuid] = await Promise.all([
usagePromise,

View file

@@ -50,7 +50,6 @@ export const createUsageCollectionSetupMock = () => {
export function createCollectorFetchContextMock(): jest.Mocked<CollectorFetchContext<false>> {
const collectorFetchClientsMock: jest.Mocked<CollectorFetchContext<false>> = {
callCluster: elasticsearchServiceMock.createLegacyClusterClient().callAsInternalUser,
esClient: elasticsearchServiceMock.createClusterClient().asInternalUser,
soClient: savedObjectsRepositoryMock.create(),
};
@@ -61,7 +60,6 @@ export function createCollectorFetchContextWithKibanaMock(): jest.Mocked<
CollectorFetchContext<true>
> {
const collectorFetchClientsMock: jest.Mocked<CollectorFetchContext<true>> = {
callCluster: elasticsearchServiceMock.createLegacyClusterClient().callAsInternalUser,
esClient: elasticsearchServiceMock.createClusterClient().asInternalUser,
soClient: savedObjectsRepositoryMock.create(),
kibanaRequest: httpServerMock.createKibanaRequest(),

View file

@@ -6,7 +6,7 @@
import { get } from 'lodash';
import { SearchResponse } from 'elasticsearch';
import { StatsCollectionConfig } from 'src/plugins/telemetry_collection_manager/server';
import { LegacyAPICaller } from 'kibana/server';
import { createQuery } from './create_query';
import { INDEX_PATTERN_BEATS } from '../../common/constants';
@@ -318,7 +318,7 @@ export function processResults(
* @return {Promise}
*/
async function fetchBeatsByType(
callCluster: StatsCollectionConfig['callCluster'],
callCluster: LegacyAPICaller,
clusterUuids: string[],
start: string,
end: string,
@@ -382,7 +382,7 @@
}
export async function fetchBeatsStats(
callCluster: StatsCollectionConfig['callCluster'],
callCluster: LegacyAPICaller,
clusterUuids: string[],
start: string,
end: string,
@@ -392,7 +392,7 @@
}
export async function fetchBeatsStates(
callCluster: StatsCollectionConfig['callCluster'],
callCluster: LegacyAPICaller,
clusterUuids: string[],
start: string,
end: string,
@@ -410,7 +410,7 @@ export interface BeatsStatsByClusterUuid {
* @return {Object} - Beats stats in an object keyed by the cluster UUIDs
*/
export async function getBeatsStats(
callCluster: StatsCollectionConfig['callCluster'],
callCluster: LegacyAPICaller,
clusterUuids: string[],
start: string,
end: string

View file

@@ -5,7 +5,7 @@
*/
import { SearchResponse } from 'elasticsearch';
import { StatsCollectionConfig } from 'src/plugins/telemetry_collection_manager/server';
import { LegacyAPICaller } from 'kibana/server';
import { INDEX_PATTERN_ELASTICSEARCH } from '../../common/constants';
/**
@@ -16,7 +16,7 @@ import { INDEX_PATTERN_ELASTICSEARCH } from '../../common/constants';
* @param {Array} clusterUuids The string Cluster UUIDs to fetch details for
*/
export async function getElasticsearchStats(
callCluster: StatsCollectionConfig['callCluster'],
callCluster: LegacyAPICaller,
clusterUuids: string[],
maxBucketSize: number
) {
@@ -34,7 +34,7 @@
* Returns the response for the aggregations to fetch details for the product.
*/
export function fetchElasticsearchStats(
callCluster: StatsCollectionConfig['callCluster'],
callCluster: LegacyAPICaller,
clusterUuids: string[],
maxBucketSize: number
) {
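These x-pack monitoring collectors keep the legacy caller for now; only the type annotation changes, since `StatsCollectionConfig` no longer carries `callCluster`. For reference, the call style these fetchers rely on looks roughly like this (the index pattern and query are placeholders):

```typescript
import type { LegacyAPICaller } from 'kibana/server';

// Illustrative only: the real fetchers build richer filtered queries.
async function fetchMonitoringDocs(
  callCluster: LegacyAPICaller,
  clusterUuids: string[],
  maxBucketSize: number
) {
  return await callCluster('search', {
    index: '.monitoring-es-*', // placeholder; the module uses INDEX_PATTERN_ELASTICSEARCH
    size: maxBucketSize,
    body: { query: { terms: { cluster_uuid: clusterUuids } } },
  });
}
```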

View file

@@ -6,7 +6,7 @@
import { get } from 'lodash';
import { SearchResponse } from 'elasticsearch';
import { StatsCollectionConfig } from 'src/plugins/telemetry_collection_manager/server';
import { LegacyAPICaller } from 'kibana/server';
import { createQuery } from './create_query';
import {
INDEX_PATTERN_KIBANA,
@@ -247,7 +247,7 @@ function getIndexPatternForStackProduct(product: string) {
* Returns an object keyed by the cluster UUIDs to make grouping easier.
*/
export async function getHighLevelStats(
callCluster: StatsCollectionConfig['callCluster'],
callCluster: LegacyAPICaller,
clusterUuids: string[],
start: string,
end: string,
@@ -268,7 +268,7 @@
export async function fetchHighLevelStats<
T extends { cluster_uuid?: string } = { cluster_uuid?: string }
>(
callCluster: StatsCollectionConfig['callCluster'],
callCluster: LegacyAPICaller,
clusterUuids: string[],
start: string,
end: string,

View file

@@ -7,7 +7,7 @@
import moment from 'moment';
import { isEmpty } from 'lodash';
import { SearchResponse } from 'elasticsearch';
import { StatsCollectionConfig } from 'src/plugins/telemetry_collection_manager/server';
import { LegacyAPICaller } from 'kibana/server';
import { KIBANA_SYSTEM_ID, TELEMETRY_COLLECTION_INTERVAL } from '../../common/constants';
import {
fetchHighLevelStats,
@@ -182,7 +182,7 @@ export function ensureTimeSpan(
* specialized usage data that comes with kibana stats (kibana_stats.usage).
*/
export async function getKibanaStats(
callCluster: StatsCollectionConfig['callCluster'],
callCluster: LegacyAPICaller,
clusterUuids: string[],
start: string,
end: string,