Compare commits: issue-1040...main (105 commits)

Commit SHAs:
b87852071b
f555c737d0
e774ece489
ff86a51a01
f4b61d01be
a03e12bec0
40939b2c67
264bb7acdd
6d951fee69
9f02de6ed2
f815eaa1d2
fe3272f835
dc13f4ee9b
f3a6e6bf52
2cb46839aa
baa658f8a7
3d7209ce7d
145ce01ea7
bde802fed6
6f5faf93f0
05246c67a0
75bafde760
b389cfe0ab
252b949b61
42168954b4
6b81454795
8ba9ebf592
68f46a5c8e
5cccf0cdd6
60a9221527
30de97bc2d
9888b7fec3
d0e30f5475
0b5a434d19
7b6ac0e658
36bab568f4
49fe04196a
94333460b5
cecfc358ab
f7163878c0
7ab3593fb7
b7974a331e
9a8499cd4a
932d797b9c
4ef23ae668
89f9c8f9fb
a861170c47
24477e9af9
92ffb9e09a
ad1e2ad00b
90ea8f8e4c
9e36356f9d
eb9c8692aa
ed19a9c1a9
5ffec0bd2c
21ba17d801
14f6d23922
7d27afeabd
d6de4b570b
994a4e44f1
139a3c9866
195e5bebcb
d1d5e79398
b122a23d04
4e443cca4f
29148d3ed7
92e462c5e1
27f5ff326d
13d651bf62
54fe3c580e
2f171fbd00
3789b4ba54
d1ebdf17de
687b97b7e4
f343870495
c44dd6e701
56bfd0e53d
32f75a47ab
69c0e6c601
90395c5589
c32191007d
09523079c1
4bedc1cd93
903df6d80e
55b4944109
c7c759f2f8
df71a4e872
502db21a3d
df0d9a6959
a815bd271a
c48ef8a0ab
8819bd8fae
7f50f34358
b7173dd1a3
06ab7848ff
1837e4a854
e6dc051b86
433ffa5876
a43fd6507c
8978fd27f6
90df011d9b
f9c982ddc2
a34d3d10dd
c3d81c6427
a14782a15e
@@ -14,9 +14,18 @@ query --experimental_guard_against_concurrent_changes

+## Cache action outputs on disk so they persist across output_base and bazel shutdown (eg. changing branches)
+build --disk_cache=~/.bazel-cache/disk-cache
+fetch --disk_cache=~/.bazel-cache/disk-cache
+query --disk_cache=~/.bazel-cache/disk-cache
+sync --disk_cache=~/.bazel-cache/disk-cache
+test --disk_cache=~/.bazel-cache/disk-cache
+
+## Bazel repo cache settings
+build --repository_cache=~/.bazel-cache/repository-cache
 fetch --repository_cache=~/.bazel-cache/repository-cache
 query --repository_cache=~/.bazel-cache/repository-cache
 run --repository_cache=~/.bazel-cache/repository-cache
 sync --repository_cache=~/.bazel-cache/repository-cache
 test --repository_cache=~/.bazel-cache/repository-cache

 # Bazel will create symlinks from the workspace directory to output artifacts.
 # Build results will be placed in a directory called "bazel-bin"
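The cache flags above are purely additive; a quick way to sanity-check that the disk cache is actually being used is to build once, wipe local outputs, and rebuild. A sketch — the target label is a placeholder, any buildable target works:

```
# First build populates ~/.bazel-cache/disk-cache (placeholder target shown).
bazel build //some/package:target
# Drop local outputs; the shared disk cache survives a clean.
bazel clean
# Rebuild: cached action outputs are reused instead of re-executed.
bazel build //some/package:target
# Both cache trees are created on first use.
ls ~/.bazel-cache/disk-cache ~/.bazel-cache/repository-cache
```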
@@ -27,9 +27,9 @@ steps:

   - command: .buildkite/scripts/steps/functional/xpack_cigroup.sh
     label: 'Default CI Group'
-    parallelism: 13
+    parallelism: 27
     agents:
-      queue: ci-group-6
+      queue: n2-4
     depends_on: build
     timeout_in_minutes: 150
     key: default-cigroup

@@ -41,7 +41,7 @@ steps:
   - command: CI_GROUP=Docker .buildkite/scripts/steps/functional/xpack_cigroup.sh
     label: 'Docker CI Group'
     agents:
-      queue: ci-group-6
+      queue: n2-4
     depends_on: build
     timeout_in_minutes: 120
     key: default-cigroup-docker

@@ -77,7 +77,7 @@ steps:
   - command: .buildkite/scripts/steps/test/api_integration.sh
     label: 'API Integration Tests'
     agents:
-      queue: jest
+      queue: n2-2
     timeout_in_minutes: 120
     key: api-integration
@@ -8,7 +8,7 @@ const stepInput = (key, nameOfSuite) => {
 };

 const OSS_CI_GROUPS = 12;
-const XPACK_CI_GROUPS = 13;
+const XPACK_CI_GROUPS = 27;

 const inputs = [
   {

@@ -23,11 +23,16 @@ for (let i = 1; i <= OSS_CI_GROUPS; i++) {
   inputs.push(stepInput(`oss/cigroup/${i}`, `OSS CI Group ${i}`));
 }

 inputs.push(stepInput(`oss/firefox`, 'OSS Firefox'));
+inputs.push(stepInput(`oss/accessibility`, 'OSS Accessibility'));

 for (let i = 1; i <= XPACK_CI_GROUPS; i++) {
   inputs.push(stepInput(`xpack/cigroup/${i}`, `Default CI Group ${i}`));
 }

+inputs.push(stepInput(`xpack/cigroup/Docker`, 'Default CI Group Docker'));
+inputs.push(stepInput(`xpack/firefox`, 'Default Firefox'));
+inputs.push(stepInput(`xpack/accessibility`, 'Default Accessibility'));
+
 const pipeline = {
   steps: [
@@ -65,34 +65,67 @@ for (const testSuite of testSuites) {

   const JOB_PARTS = TEST_SUITE.split('/');
   const IS_XPACK = JOB_PARTS[0] === 'xpack';
+  const TASK = JOB_PARTS[1];
   const CI_GROUP = JOB_PARTS.length > 2 ? JOB_PARTS[2] : '';

   if (RUN_COUNT < 1) {
     continue;
   }

-  if (IS_XPACK) {
-    steps.push({
-      command: `CI_GROUP=${CI_GROUP} .buildkite/scripts/steps/functional/xpack_cigroup.sh`,
-      label: `Default CI Group ${CI_GROUP}`,
-      agents: { queue: 'ci-group-6' },
-      depends_on: 'build',
-      parallelism: RUN_COUNT,
-      concurrency: concurrency,
-      concurrency_group: UUID,
-      concurrency_method: 'eager',
-    });
-  } else {
-    steps.push({
-      command: `CI_GROUP=${CI_GROUP} .buildkite/scripts/steps/functional/oss_cigroup.sh`,
-      label: `OSS CI Group ${CI_GROUP}`,
-      agents: { queue: 'ci-group-4d' },
-      depends_on: 'build',
-      parallelism: RUN_COUNT,
-      concurrency: concurrency,
-      concurrency_group: UUID,
-      concurrency_method: 'eager',
-    });
+  switch (TASK) {
+    case 'cigroup':
+      if (IS_XPACK) {
+        steps.push({
+          command: `CI_GROUP=${CI_GROUP} .buildkite/scripts/steps/functional/xpack_cigroup.sh`,
+          label: `Default CI Group ${CI_GROUP}`,
+          agents: { queue: 'n2-4' },
+          depends_on: 'build',
+          parallelism: RUN_COUNT,
+          concurrency: concurrency,
+          concurrency_group: UUID,
+          concurrency_method: 'eager',
+        });
+      } else {
+        steps.push({
+          command: `CI_GROUP=${CI_GROUP} .buildkite/scripts/steps/functional/oss_cigroup.sh`,
+          label: `OSS CI Group ${CI_GROUP}`,
+          agents: { queue: 'ci-group-4d' },
+          depends_on: 'build',
+          parallelism: RUN_COUNT,
+          concurrency: concurrency,
+          concurrency_group: UUID,
+          concurrency_method: 'eager',
+        });
+      }
+      break;
+
+    case 'firefox':
+      steps.push({
+        command: `.buildkite/scripts/steps/functional/${IS_XPACK ? 'xpack' : 'oss'}_firefox.sh`,
+        label: `${IS_XPACK ? 'Default' : 'OSS'} Firefox`,
+        agents: { queue: IS_XPACK ? 'n2-4' : 'ci-group-4d' },
+        depends_on: 'build',
+        parallelism: RUN_COUNT,
+        concurrency: concurrency,
+        concurrency_group: UUID,
+        concurrency_method: 'eager',
+      });
+      break;
+
+    case 'accessibility':
+      steps.push({
+        command: `.buildkite/scripts/steps/functional/${
+          IS_XPACK ? 'xpack' : 'oss'
+        }_accessibility.sh`,
+        label: `${IS_XPACK ? 'Default' : 'OSS'} Accessibility`,
+        agents: { queue: IS_XPACK ? 'n2-4' : 'ci-group-4d' },
+        depends_on: 'build',
+        parallelism: RUN_COUNT,
+        concurrency: concurrency,
+        concurrency_group: UUID,
+        concurrency_method: 'eager',
+      });
+      break;
+  }
 }
@@ -17,9 +17,9 @@ steps:

   - command: .buildkite/scripts/steps/functional/xpack_cigroup.sh
     label: 'Default CI Group'
-    parallelism: 13
+    parallelism: 27
     agents:
-      queue: ci-group-6
+      queue: n2-4
     depends_on: build
     timeout_in_minutes: 250
     key: default-cigroup

@@ -31,7 +31,7 @@ steps:
   - command: CI_GROUP=Docker .buildkite/scripts/steps/functional/xpack_cigroup.sh
     label: 'Docker CI Group'
     agents:
-      queue: ci-group-6
+      queue: n2-4
     depends_on: build
     timeout_in_minutes: 120
     key: default-cigroup-docker

@@ -67,7 +67,7 @@ steps:
   - command: .buildkite/scripts/steps/functional/xpack_accessibility.sh
     label: 'Default Accessibility Tests'
     agents:
-      queue: ci-group-6
+      queue: n2-4
     depends_on: build
     timeout_in_minutes: 120
     retry:

@@ -89,7 +89,7 @@ steps:
   - command: .buildkite/scripts/steps/functional/xpack_firefox.sh
     label: 'Default Firefox Tests'
    agents:
-      queue: ci-group-6
+      queue: n2-4
     depends_on: build
     timeout_in_minutes: 120
     retry:

@@ -100,7 +100,7 @@ steps:
   - command: .buildkite/scripts/steps/functional/oss_misc.sh
     label: 'OSS Misc Functional Tests'
     agents:
-      queue: ci-group-6
+      queue: n2-4
     depends_on: build
     timeout_in_minutes: 120
     retry:

@@ -111,7 +111,7 @@ steps:
   - command: .buildkite/scripts/steps/functional/xpack_saved_object_field_metrics.sh
     label: 'Saved Object Field Metrics'
     agents:
-      queue: ci-group-6
+      queue: n2-4
     depends_on: build
     timeout_in_minutes: 120
     retry:

@@ -119,6 +119,14 @@ steps:
       - exit_status: '*'
         limit: 1

+  - command: .buildkite/scripts/steps/test/jest.sh
+    label: 'Jest Tests'
+    parallelism: 8
+    agents:
+      queue: n2-4
+    timeout_in_minutes: 90
+    key: jest
+
   - command: .buildkite/scripts/steps/test/jest_integration.sh
     label: 'Jest Integration Tests'
     agents:

@@ -133,13 +141,6 @@ steps:
     timeout_in_minutes: 120
     key: api-integration

-  - command: .buildkite/scripts/steps/test/jest.sh
-    label: 'Jest Tests'
-    agents:
-      queue: c2-16
-    timeout_in_minutes: 120
-    key: jest
-
   - command: .buildkite/scripts/steps/lint.sh
     label: 'Linting'
     agents:
@@ -15,9 +15,9 @@ steps:

   - command: .buildkite/scripts/steps/functional/xpack_cigroup.sh
     label: 'Default CI Group'
-    parallelism: 13
+    parallelism: 27
     agents:
-      queue: ci-group-6
+      queue: n2-4
     depends_on: build
     timeout_in_minutes: 150
     key: default-cigroup

@@ -29,7 +29,7 @@ steps:
   - command: CI_GROUP=Docker .buildkite/scripts/steps/functional/xpack_cigroup.sh
     label: 'Docker CI Group'
     agents:
-      queue: ci-group-6
+      queue: n2-4
     depends_on: build
     timeout_in_minutes: 120
     key: default-cigroup-docker

@@ -65,7 +65,7 @@ steps:
   - command: .buildkite/scripts/steps/functional/xpack_accessibility.sh
     label: 'Default Accessibility Tests'
     agents:
-      queue: ci-group-6
+      queue: n2-4
     depends_on: build
     timeout_in_minutes: 120
     retry:

@@ -87,7 +87,7 @@ steps:
   - command: .buildkite/scripts/steps/functional/xpack_firefox.sh
     label: 'Default Firefox Tests'
     agents:
-      queue: ci-group-6
+      queue: n2-4
     depends_on: build
     timeout_in_minutes: 120
     retry:

@@ -98,7 +98,7 @@ steps:
   - command: .buildkite/scripts/steps/functional/oss_misc.sh
     label: 'OSS Misc Functional Tests'
     agents:
-      queue: ci-group-6
+      queue: n2-4
     depends_on: build
     timeout_in_minutes: 120
     retry:

@@ -109,7 +109,7 @@ steps:
   - command: .buildkite/scripts/steps/functional/xpack_saved_object_field_metrics.sh
     label: 'Saved Object Field Metrics'
     agents:
-      queue: ci-group-6
+      queue: n2-4
     depends_on: build
     timeout_in_minutes: 120
     retry:

@@ -117,6 +117,14 @@ steps:
       - exit_status: '*'
         limit: 1

+  - command: .buildkite/scripts/steps/test/jest.sh
+    label: 'Jest Tests'
+    parallelism: 8
+    agents:
+      queue: n2-4
+    timeout_in_minutes: 90
+    key: jest
+
   - command: .buildkite/scripts/steps/test/jest_integration.sh
     label: 'Jest Integration Tests'
     agents:

@@ -131,13 +139,6 @@ steps:
     timeout_in_minutes: 120
     key: api-integration

-  - command: .buildkite/scripts/steps/test/jest.sh
-    label: 'Jest Tests'
-    agents:
-      queue: c2-16
-    timeout_in_minutes: 120
-    key: jest
-
   - command: .buildkite/scripts/steps/lint.sh
     label: 'Linting'
     agents:

@@ -155,7 +156,7 @@ steps:
   - command: .buildkite/scripts/steps/checks.sh
     label: 'Checks'
     agents:
-      queue: c2-4
+      queue: c2-8
     key: checks
     timeout_in_minutes: 120
@@ -11,6 +11,19 @@ else
   node scripts/build
 fi

+if [[ "${GITHUB_PR_LABELS:-}" == *"ci:deploy-cloud"* ]]; then
+  echo "--- Build Kibana Cloud Distribution"
+  node scripts/build \
+    --skip-initialize \
+    --skip-generic-folders \
+    --skip-platform-folders \
+    --skip-archives \
+    --docker-images \
+    --skip-docker-ubi \
+    --skip-docker-centos \
+    --skip-docker-contexts
+fi
+
 echo "--- Archive Kibana Distribution"
 linuxBuild="$(find "$KIBANA_DIR/target" -name 'kibana-*-linux-x86_64.tar.gz')"
 installDir="$KIBANA_DIR/install/kibana"
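The new block gates the cloud image build on a PR label using a plain glob substring match; a minimal sketch of that gate in isolation (the label list shown is an illustrative value, not output from CI):

```
# GITHUB_PR_LABELS arrives as a comma-separated list of label names.
GITHUB_PR_LABELS="backport,ci:deploy-cloud"
# The *"..."* pattern matches the label anywhere in the list.
if [[ "${GITHUB_PR_LABELS:-}" == *"ci:deploy-cloud"* ]]; then
  echo "label present: cloud distribution build runs"
else
  echo "label absent: block is skipped"
fi
```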
@@ -11,9 +11,27 @@ checks-reporter-with-killswitch "Build TS Refs" \
   --no-cache \
   --force

-echo --- Check Types
 checks-reporter-with-killswitch "Check Types" \
-  node scripts/type_check
+  node scripts/type_check &> target/check_types.log &
+check_types_pid=$!
+
+node --max-old-space-size=12000 scripts/build_api_docs &> target/build_api_docs.log &
+api_docs_pid=$!
+
+wait $check_types_pid
+check_types_exit=$?
+
+wait $api_docs_pid
+api_docs_exit=$?
+
+echo --- Check Types
+cat target/check_types.log
+if [[ "$check_types_exit" != "0" ]]; then echo "^^^ +++"; fi

 echo --- Building api docs
-node --max-old-space-size=12000 scripts/build_api_docs
+cat target/build_api_docs.log
+if [[ "$api_docs_exit" != "0" ]]; then echo "^^^ +++"; fi
+
+if [[ "${api_docs_exit}${check_types_exit}" != "00" ]]; then
+  exit 1
+fi
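The change above turns two serial steps into a fan-out/fan-in: both commands run as background jobs, and `wait <pid>` later retrieves each job's exit status. The bare pattern, with hypothetical job names:

```
# Launch both jobs in the background, logging output and recording PIDs.
job_a &> a.log & pid_a=$!
job_b &> b.log & pid_b=$!

# `wait <pid>` blocks until that job finishes and returns its exit status.
wait "$pid_a"; exit_a=$?
wait "$pid_b"; exit_b=$?

# Replay logs sequentially so interleaved output stays readable,
# then fail the step if either job failed.
cat a.log b.log
[[ "${exit_a}${exit_b}" == "00" ]] || exit 1
```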
@@ -9,5 +9,5 @@ is_test_execution_step
 .buildkite/scripts/bootstrap.sh

 echo '--- Jest'
-checks-reporter-with-killswitch "Jest Unit Tests" \
-  node scripts/jest --ci --verbose --maxWorkers=10
+checks-reporter-with-killswitch "Jest Unit Tests $((BUILDKITE_PARALLEL_JOB+1))" \
+  .buildkite/scripts/steps/test/jest_parallel.sh
New executable file (30 lines): .buildkite/scripts/steps/test/jest_parallel.sh
@@ -0,0 +1,30 @@
+#!/bin/bash
+
+set -uo pipefail
+
+JOB=$BUILDKITE_PARALLEL_JOB
+JOB_COUNT=$BUILDKITE_PARALLEL_JOB_COUNT
+
+# a jest failure will result in the script returning an exit code of 10
+
+i=0
+exitCode=0
+
+while read -r config; do
+  if [ "$((i % JOB_COUNT))" -eq "$JOB" ]; then
+    echo "--- $ node scripts/jest --config $config"
+    node --max-old-space-size=14336 ./node_modules/.bin/jest --config="$config" --runInBand --coverage=false
+    lastCode=$?
+
+    if [ $lastCode -ne 0 ]; then
+      exitCode=10
+      echo "Jest exited with code $lastCode"
+      echo "^^^ +++"
+    fi
+  fi
+
+  ((i=i+1))
+# uses heredoc to avoid the while loop being in a sub-shell thus unable to overwrite exitCode
+done <<< "$(find src x-pack packages -name jest.config.js -not -path "*/__fixtures__/*" | sort)"
+
+exit $exitCode
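Two details of this script are worth noting: work is sharded across Buildkite's parallel jobs by taking `i % JOB_COUNT`, so each job picks every N-th jest config, and the config list is fed in with a here-string precisely because a pipe would run the loop in a subshell. A minimal demonstration of that pitfall:

```
# Piping into `while` runs the loop body in a subshell; the assignment is lost.
exitCode=0
printf 'one\ntwo\n' | while read -r line; do exitCode=10; done
echo "$exitCode"   # prints 0

# A here-string keeps the loop in the current shell, so the assignment sticks.
exitCode=0
while read -r line; do exitCode=10; done <<< "$(printf 'one\ntwo\n')"
echo "$exitCode"   # prints 10
```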
@@ -25,4 +25,18 @@ xpack:
   - ciGroup11
   - ciGroup12
   - ciGroup13
+  - ciGroup14
+  - ciGroup15
+  - ciGroup16
+  - ciGroup17
+  - ciGroup18
+  - ciGroup19
+  - ciGroup20
+  - ciGroup21
+  - ciGroup22
+  - ciGroup23
+  - ciGroup24
+  - ciGroup25
+  - ciGroup26
+  - ciGroup27
   - ciGroupDocker
@@ -226,6 +226,10 @@ const RESTRICTED_IMPORTS = [
     name: 'react-use',
     message: 'Please use react-use/lib/{method} instead.',
   },
+  {
+    name: '@kbn/io-ts-utils',
+    message: `Import directly from @kbn/io-ts-utils/{method} submodules`,
+  },
 ];

 module.exports = {

@@ -700,6 +704,7 @@ module.exports = {
         'packages/kbn-eslint-plugin-eslint/**/*',
         'x-pack/gulpfile.js',
         'x-pack/scripts/*.js',
+        '**/jest.config.js',
       ],
       excludedFiles: ['**/integration_tests/**/*'],
       rules: {
.github/CODEOWNERS (vendored; 6 lines changed)
@@ -405,6 +405,12 @@
 /x-pack/plugins/security_solution/scripts/endpoint/trusted_apps/ @elastic/security-onboarding-and-lifecycle-mgt
 /x-pack/test/security_solution_endpoint/apps/endpoint/ @elastic/security-onboarding-and-lifecycle-mgt

+## Security Solution sub teams - security-engineering-productivity
+x-pack/plugins/security_solution/cypress/ccs_integration
+x-pack/plugins/security_solution/cypress/upgrade_integration
+x-pack/plugins/security_solution/cypress/README.md
+x-pack/test/security_solution_cypress
+
 # Security Intelligence And Analytics
 /x-pack/plugins/security_solution/server/lib/detection_engine/rules/prepackaged_rules @elastic/security-intelligence-analytics
New file (45 lines): dev_docs/getting_started/development_windows.mdx
@@ -0,0 +1,45 @@
+---
+id: kibDevTutorialSetupDevWindows
+slug: /kibana-dev-docs/tutorial/setup-dev-windows
+title: Development on Windows
+summary: Learn how to set up a development environment on Windows
+date: 2021-08-11
+tags: ['kibana', 'onboarding', 'dev', 'windows', 'setup']
+---
+
+
+# Overview
+
+Development on Windows is recommended through WSL2. WSL lets users run a Linux environment on Windows, providing a supported development environment for Kibana.
+
+## Install WSL
+
+The latest setup instructions can be found at https://docs.microsoft.com/en-us/windows/wsl/install-win10
+
+1) Open Powershell as an administrator
+1) Enable WSL
+```
+dism.exe /online /enable-feature /featurename:Microsoft-Windows-Subsystem-Linux /all /norestart
+```
+1) Enable Virtual Machine Platform
+```
+dism.exe /online /enable-feature /featurename:VirtualMachinePlatform /all /norestart
+```
+1) Download and install the [Linux kernel update package](https://wslstorestorage.blob.core.windows.net/wslblob/wsl_update_x64.msi)
+1) Set WSL 2 as the default version
+```
+wsl --set-default-version 2
+```
+1) Open the Microsoft Store application and install a Linux distribution
+
+## Setup Kibana
+
+1. <DocLink id="kibDevTutorialSetupDevEnv" text="Set up your development environment" />
+
+## Install VS Code
+
+Remote development is supported with an extension. [Reference](https://code.visualstudio.com/docs/remote/wsl).
+
+1) Install VS Code on Windows
+1) Check the "Add to PATH" option during setup
+1) Install the [Remote Development](https://aka.ms/vscode-remote/download/extension) package
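Once a distribution is installed, it is worth confirming it actually runs under WSL 2 rather than WSL 1; a quick check from PowerShell, using standard `wsl.exe` flags (the distribution name below is a placeholder):

```
# List installed distributions and the WSL version each one uses.
wsl --list --verbose

# Convert a distribution that still reports VERSION 1.
wsl --set-version Ubuntu-20.04 2
```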
@@ -18,8 +18,6 @@ Review important information about the {kib} 8.0.0 releases.
 [[release-notes-8.0.0-beta1]]
 == {kib} 8.0.0-beta1

-coming::[8.0.0-beta1]
-
 Review the {kib} 8.0.0-beta1 changes, then use the <<upgrade-assistant,Upgrade Assistant>> to complete the upgrade.

 [float]
@@ -6,7 +6,7 @@

 experimental["The underlying Upgrade Assistant concepts are stable, but the APIs for managing Upgrade Assistant are experimental."]

-Start or resume multiple reindexing tasks in one request. Additionally, reindexing tasks started or resumed
+Start or resume multiple <<start-resume-reindex, reindexing>> tasks in one request. Additionally, reindexing tasks started or resumed
 via the batch endpoint will be placed on a queue and executed one-by-one, which ensures that minimal cluster resources
 are consumed over time.

@@ -76,7 +76,7 @@ Similar to the <<start-resume-reindex, start or resume endpoint>>, the API retur
 }
 --------------------------------------------------

-<1> A list of reindex operations created, the order in the array indicates the order in which tasks will be executed.
+<1> A list of reindex tasks created, the order in the array indicates the order in which tasks will be executed.
 <2> Presence of this key indicates that the reindex job will occur in the batch.
 <3> A Unix timestamp of when the reindex task was placed in the queue.
 <4> A list of errors that may have occurred preventing the reindex task from being created.
@@ -4,7 +4,7 @@
 <titleabbrev>Cancel reindex</titleabbrev>
 ++++

-experimental[] Cancel reindexes that are waiting for the {es} reindex task to complete. For example, `lastCompletedStep` set to `40`.
+experimental["The underlying Upgrade Assistant concepts are stable, but the APIs for managing Upgrade Assistant are experimental."]
+
+Cancel reindexes that are waiting for the Elasticsearch reindex task to complete. For example, `lastCompletedStep` set to `40`.
@@ -4,7 +4,9 @@
 <titleabbrev>Check reindex status</titleabbrev>
 ++++

-experimental[] Check the status of the reindex operation.
+experimental["The underlying Upgrade Assistant concepts are stable, but the APIs for managing Upgrade Assistant are experimental."]
+
+Check the status of the reindex task.

 [[check-reindex-status-request]]
 ==== Request

@@ -43,7 +45,7 @@ The API returns the following:
 <2> Current status of the reindex. For details, see <<status-code,Status codes>>.
 <3> Last successfully completed step of the reindex. For details, see <<step-code,Step codes>> table.
 <4> Task ID of the reindex task in Elasticsearch. Only present if reindexing has started.
-<5> Percentage of how far the reindexing task in Elasticsearch has progressed, in decimal from from 0 to 1.
+<5> Percentage of how far the reindexing task in Elasticsearch has progressed, in decimal form from 0 to 1.
 <6> Error that caused the reindex to fail, if it failed.
 <7> An array of any warning codes explaining what changes are required for this reindex. For details, see <<warning-code,Warning codes>>.
 <8> Specifies if the user has sufficient privileges to reindex this index. When security is unavailable or disabled, returns `true`.

@@ -73,7 +75,7 @@ To resume the reindex, you must submit a new POST request to the `/api/upgrade_a
 ==== Step codes

 `0`::
-The reindex operation has been created in Kibana.
+The reindex task has been created in Kibana.

 `10`::
 The index group services stopped. Only applies to some system indices.
@@ -4,9 +4,18 @@
 <titleabbrev>Start or resume reindex</titleabbrev>
 ++++

-experimental[] Start a new reindex or resume a paused reindex.
+experimental["The underlying Upgrade Assistant concepts are stable, but the APIs for managing Upgrade Assistant are experimental."]
+
+Start a new reindex or resume a paused reindex. The following steps are performed during
+a reindex task:
+
+. Setting the index to read-only
+. Creating a new index
+. {ref}/docs-reindex.html[Reindexing] documents into the new index
+. Creating an index alias for the new index
+. Deleting the old index

-Start a new reindex or resume a paused reindex.
-
 [[start-resume-reindex-request]]
 ==== Request

@@ -40,6 +49,6 @@ The API returns the following:
 <1> The name of the new index.
 <2> The reindex status. For more information, refer to <<status-code,Status codes>>.
 <3> The last successfully completed step of the reindex. For more information, refer to <<step-code,Step codes>>.
-<4> The task ID of the reindex task in {es}. Appears when the reindexing starts.
-<5> The progress of the reindexing task in {es}. Appears in decimal form, from 0 to 1.
+<4> The task ID of the {ref}/docs-reindex.html[reindex] task in {es}. Appears when the reindexing starts.
+<5> The progress of the {ref}/docs-reindex.html[reindexing] task in {es}. Appears in decimal form, from 0 to 1.
 <6> The error that caused the reindex to fail, if it failed.
@@ -4,7 +4,7 @@
 <titleabbrev>Upgrade readiness status</titleabbrev>
 ++++

-experimental[] Check the status of your cluster.
+experimental["The underlying Upgrade Assistant concepts are stable, but the APIs for managing Upgrade Assistant are experimental."]
+
+Check the status of your cluster.
@@ -490,7 +490,7 @@ From the command line run:

 ["source","shell"]
 -----------
-node --debug-brk --inspect scripts/functional_test_runner
+node --inspect-brk scripts/functional_test_runner
 -----------

 This prints out a URL that you can visit in Chrome and debug your functional tests in the browser.
@@ -75,7 +75,7 @@ In order to ease the pain specialized tasks provide alternate methods
 for running the tests.

 You could also add the `--debug` option so that `node` is run using
-the `--debug-brk` flag. You’ll need to connect a remote debugger such
+the `--inspect-brk` flag. You’ll need to connect a remote debugger such
 as https://github.com/node-inspector/node-inspector[`node-inspector`]
 to proceed in this mode.
@@ -600,8 +600,7 @@ As a developer you can reuse and extend built-in alerts and actions UI functiona


 |{kib-repo}blob/{branch}/x-pack/plugins/upgrade_assistant/README.md[upgradeAssistant]
-|Upgrade Assistant helps users prepare their Stack for being upgraded to the next major. Its primary
-purposes are to:
+|Upgrade Assistant helps users prepare their Stack for being upgraded to the next major. It will only be enabled on the last minor before the next major release. This is controlled via the config: xpack.upgrade_assistant.readonly (#101296).


 |{kib-repo}blob/{branch}/x-pack/plugins/uptime/README.md[uptime]
@@ -10,6 +10,9 @@
 readonly links: {
   readonly settings: string;
   readonly elasticStackGetStarted: string;
+  readonly upgrade: {
+    readonly upgradingElasticStack: string;
+  };
   readonly apm: {
     readonly kibanaSettings: string;
     readonly supportedServiceMaps: string;

@@ -133,7 +136,11 @@ readonly links: {
   };
   readonly addData: string;
   readonly kibana: string;
-  readonly upgradeAssistant: string;
+  readonly upgradeAssistant: {
+    readonly overview: string;
+    readonly batchReindex: string;
+    readonly remoteReindex: string;
+  };
   readonly rollupJobs: string;
   readonly elasticsearch: Record<string, string>;
   readonly siem: {

@@ -236,6 +243,7 @@ readonly links: {
   fleetServerAddFleetServer: string;
   settings: string;
   settingsFleetServerHostSettings: string;
+  settingsFleetServerProxySettings: string;
   troubleshooting: string;
   elasticAgent: string;
   datastreams: string;

@@ -245,6 +253,7 @@ readonly links: {
   upgradeElasticAgent712lower: string;
   learnMoreBlog: string;
+  apiKeysLearnMore: string;
   onPremRegistry: string;
 }>;
 readonly ecs: {
   readonly guide: string;
@@ -64,7 +64,7 @@
   </li>
   <li class="listitem">
     <a href="https://www.elastic.co/guide/en/kibana/current/index-patterns.html"
-      >Create an index pattern</a
+      >Create a data view</a
     >
   </li>
   <li class="listitem">
@@ -2,7 +2,7 @@
 == Advanced Settings

 *Advanced Settings* control the behavior of {kib}. For example, you can change the format used to display dates,
-specify the default index pattern, and set the precision for displayed decimal values.
+specify the default data view, and set the precision for displayed decimal values.

 . Open the main menu, then click *Stack Management > Advanced Settings*.
 . Scroll or search for the setting.

@@ -134,10 +134,6 @@ value by the maximum number of aggregations in each visualization.
 [[history-limit]]`history:limit`::
 In fields that have history, such as query inputs, show this many recent values.

-[[indexpattern-placeholder]]`indexPattern:placeholder`::
-The default placeholder value to use in
-*Management > Index Patterns > Create Index Pattern*.
-
 [[metafields]]`metaFields`::
 Fields that exist outside of `_source`. Kibana merges these fields into the
 document when displaying it.

@@ -283,7 +279,7 @@ value is 5.
 [[context-tiebreakerfields]]`context:tieBreakerFields`::
 A comma-separated list of fields to use for breaking a tie between documents
 that have the same timestamp value. The first field that is present and sortable
-in the current index pattern is used.
+in the current data view is used.

 [[defaultcolumns]]`defaultColumns`::
 The columns that appear by default on the *Discover* page. The default is

@@ -296,7 +292,7 @@ The number of rows to show in the *Discover* table.
 Specifies the maximum number of fields to show in the document column of the *Discover* table.

 [[discover-modify-columns-on-switch]]`discover:modifyColumnsOnSwitch`::
-When enabled, removes the columns that are not in the new index pattern.
+When enabled, removes the columns that are not in the new data view.

 [[discover-sample-size]]`discover:sampleSize`::
 Specifies the number of rows to display in the *Discover* table.

@@ -314,7 +310,7 @@ does not have an effect when loading a saved search.
 When enabled, displays multi-fields in the expanded document view.

 [[discover-sort-defaultorder]]`discover:sort:defaultOrder`::
-The default sort direction for time-based index patterns.
+The default sort direction for time-based data views.

 [[doctable-hidetimecolumn]]`doc_table:hideTimeColumn`::
 Hides the "Time" column in *Discover* and in all saved searches on dashboards.

@@ -391,8 +387,8 @@ A custom image to use in the footer of the PDF.
 ==== Rollup

 [horizontal]
-[[rollups-enableindexpatterns]]`rollups:enableIndexPatterns`::
-Enables the creation of index patterns that capture rollup indices, which in
+[[rollups-enabledataviews]]`rollups:enableDataViews`::
+Enables the creation of data views that capture rollup indices, which in
 turn enables visualizations based on rollup data. Refresh the page to apply the
 changes.

@@ -408,7 +404,7 @@ to use when `courier:setRequestPreference` is set to "custom".
 [[courier-ignorefilteriffieldnotinindex]]`courier:ignoreFilterIfFieldNotInIndex`::
 Skips filters that apply to fields that don't exist in the index for a
 visualization. Useful when dashboards consist of visualizations from multiple
-index patterns.
+data views.

 [[courier-maxconcurrentshardrequests]]`courier:maxConcurrentShardRequests`::
 Controls the {ref}/search-multi-search.html[max_concurrent_shard_requests]

(Binary image file removed: 58 KiB; contents not shown.)
@@ -1,26 +1,29 @@
-[[managing-index-patterns]]
-== Manage index pattern data fields
+[[managing-data-views]]
+== Manage data views

-To customize the data fields in your index pattern, you can add runtime fields to the existing documents, add scrited fields to compute data on the fly, and change how {kib} displays the data fields.
+To customize the data fields in your data view,
+you can add runtime fields to the existing documents,
+add scripted fields to compute data on the fly, and change how {kib} displays the data fields.

 [float]
 [[runtime-fields]]
-=== Explore your data with runtime fields 
+=== Explore your data with runtime fields

-Runtime fields are fields that you add to documents after you've ingested your data, and are evaluated at query time. With runtime fields, you allow for a smaller index and faster ingest time so that you can use less resources and reduce your operating costs. You can use runtime fields anywhere index patterns are used, for example, you can explore runtime fields in *Discover* and create visualizations with runtime fields for your dashboard.
+Runtime fields are fields that you add to documents after you've ingested your data, and are evaluated at query time. With runtime fields, you allow for a smaller index and faster ingest time so that you can use less resources and reduce your operating costs.
+You can use runtime fields anywhere data views are used, for example, you can explore runtime fields in *Discover* and create visualizations with runtime fields for your dashboard.

 With runtime fields, you can:

-* Define fields for a specific use case without modifying the underlying schema. 
+* Define fields for a specific use case without modifying the underlying schema.

 * Override the returned values from index fields.

-* Start working on your data without understanding the structure. 
+* Start working on your data without understanding the structure.

-* Add fields to existing documents without reindexing your data. 
+* Add fields to existing documents without reindexing your data.

-WARNING: Runtime fields can impact {kib} performance. When you run a query, {es} uses the fields you index first to shorten the response time. 
-Index the fields that you commonly search for and filter on, such as `timestamp`, then use runtime fields to limit the number of fields {es} uses to calculate values. 
+WARNING: Runtime fields can impact {kib} performance. When you run a query, {es} uses the fields you index first to shorten the response time.
+Index the fields that you commonly search for and filter on, such as `timestamp`, then use runtime fields to limit the number of fields {es} uses to calculate values.

 For detailed information on how to use runtime fields with {es}, refer to {ref}/runtime.html[Runtime fields].

@@ -28,17 +31,21 @@ For detailed information on how to use runtime fields with {es}, refer to {ref}/
 [[create-runtime-fields]]
 ==== Add runtime fields

-To add runtime fields to your index patterns, open the index pattern you want to change, then define the field values by emitting a single value using the {ref}/modules-scripting-painless.html[Painless scripting language]. You can also add runtime fields in <<add-field-in-discover,*Discover*>> and <<change-the-fields,*Lens*>>.
+To add runtime fields to your data views, open the data view you want to change,
+then define the field values by emitting a single value using
+the {ref}/modules-scripting-painless.html[Painless scripting language].
+You can also add runtime fields in <<add-field-in-discover,*Discover*>> and <<change-the-fields,*Lens*>>.

-. Open the main menu, then click *Stack Management > Index Patterns*.
+. Open the main menu, then click *Stack Management > Data Views*.

-. Select the index pattern you want to add the runtime field to, then click *Add field*.
+. Select the data view that you want to add the runtime field to, then click *Add field*.

 . Enter the field *Name*, then select the *Type*.

-. Select *Set custom label*, then enter the label you want to display where the index pattern is used, such as *Discover*.
+. Select *Set custom label*, then enter the label you want to display where the data view is used,
+such as *Discover*.

-. Select *Set value*, then define the script. The script must match the *Type*, or the index pattern fails anywhere it is used.
+. Select *Set value*, then define the script. The script must match the *Type*, or the data view fails anywhere it is used.

 . To help you define the script, use the *Preview*:

@@ -46,7 +53,8 @@ To add runtime fields to your index patterns, open the index pattern you want to

 * To filter the fields list, enter the keyword in *Filter fields*.

-* To pin frequently used fields to the top of the list, hover over the field, then click image:images/stackManagement-indexPatterns-pinRuntimeField-7.15.png[Icon to pin field to the top of the list].
+* To pin frequently used fields to the top of the list, hover over the field,
+then click image:images/stackManagement-indexPatterns-pinRuntimeField-7.15.png[Icon to pin field to the top of the list].

 . Click *Create field*.

@@ -54,7 +62,7 @@ To add runtime fields to your index patterns, open the index pattern you want to
 [[runtime-field-examples]]
 ==== Runtime field examples

-Try the runtime field examples on your own using the <<gs-get-data-into-kibana,*Sample web logs*>> data index pattern.
+Try the runtime field examples on your own using the <<gs-get-data-into-kibana,*Sample web logs*>> data.

 [float]
 [[simple-hello-world-example]]

@@ -110,7 +118,7 @@ if (source != null) {
   emit(source);
   return;
 }
-else { 
+else {
   emit("None");
 }
 ----

@@ -123,7 +131,7 @@ def source = doc['machine.os.keyword'].value;
 if (source != "") {
   emit(source);
 }
-else { 
+else {
   emit("None");
 }
 ----

@@ -132,15 +140,15 @@ else {
 [[manage-runtime-fields]]
 ==== Manage runtime fields

-Edit the settings for runtime fields, or remove runtime fields from index patterns.
+Edit the settings for runtime fields, or remove runtime fields from data views.

-. Open the main menu, then click *Stack Management > Index Patterns*.
+. Open the main menu, then click *Stack Management > Data Views*.

-. Select the index pattern that contains the runtime field you want to manage, then open the runtime field edit options or delete the runtime field.
+. Select the data view that contains the runtime field you want to manage, then open the runtime field edit options or delete the runtime field.

 [float]
 [[scripted-fields]]
-=== Add scripted fields to index patterns
+=== Add scripted fields to data views

 deprecated::[7.13,Use {ref}/runtime.html[runtime fields] instead of scripted fields. Runtime fields support Painless scripts and provide greater flexibility.]

@@ -168,11 +176,11 @@ https://www.elastic.co/blog/using-painless-kibana-scripted-fields[Using Painless
 [[create-scripted-field]]
 ==== Create scripted fields

-Create and add scripted fields to your index patterns.
+Create and add scripted fields to your data views.

-. Open the main menu, then click *Stack Management > Index Patterns*.
+. Open the main menu, then click *Stack Management > Data Views*.

-. Select the index pattern you want to add a scripted field to.
+. Select the data view you want to add a scripted field to.

 . Select the *Scripted fields* tab, then click *Add scripted field*.

@@ -186,9 +194,9 @@ For more information about scripted fields in {es}, refer to {ref}/modules-scrip
 [[update-scripted-field]]
 ==== Manage scripted fields

-. Open the main menu, then click *Stack Management > Index Patterns*.
+. Open the main menu, then click *Stack Management > Data Views*.

-. Select the index pattern that contains the scripted field you want to manage.
+. Select the data view that contains the scripted field you want to manage.

 . Select the *Scripted fields* tab, then open the scripted field edit options or delete the scripted field.

@@ -202,9 +210,9 @@ exceptions when you view the dynamically generated data.
 {kib} uses the same field types as {es}, however, some {es} field types are unsupported in {kib}.
 To customize how {kib} displays data fields, use the formatting options.

-. Open the main menu, then click *Stack Management > Index Patterns*.
+. Open the main menu, then click *Stack Management > Data Views*.

-. Click the index pattern that contains the field you want to change.
+. Click the data view that contains the field you want to change.

 . Find the field, then open the edit options (image:management/index-patterns/images/edit_icon.png[Data field edit icon]).

@@ -261,4 +269,4 @@ include::field-formatters/string-formatter.asciidoc[]

 include::field-formatters/duration-formatter.asciidoc[]

-include::field-formatters/color-formatter.asciidoc[] 
+include::field-formatters/color-formatter.asciidoc[]
@@ -2,10 +2,10 @@
 == Saved Objects

 The *Saved Objects* UI helps you keep track of and manage your saved objects. These objects
-store data for later use, including dashboards, visualizations, maps, index patterns,
+store data for later use, including dashboards, visualizations, maps, data views,
 Canvas workpads, and more.

-To get started, open the main menu, then click *Stack Management > Saved Objects*. 
+To get started, open the main menu, then click *Stack Management > Saved Objects*.

 [role="screenshot"]
 image::images/management-saved-objects.png[Saved Objects]

@@ -85,7 +85,7 @@ You have two options for exporting saved objects.
 * Click *Export x objects*, and export objects by type.

 This action creates an NDJSON with all your saved objects. By default, the NDJSON includes child objects that are related to the saved
-objects. Exported dashboards include their associated index patterns.
+objects. Exported dashboards include their associated data views.

 NOTE: The <<savedObjects-maxImportExportSize, `savedObjects.maxImportExportSize`>> configuration setting
 limits the number of saved objects which may be exported.

@@ -120,7 +120,7 @@ If you access an object whose index has been deleted, you can:
 * Recreate the index so you can continue using the object.
 * Delete the object and recreate it using a different index.
 * Change the index name in the object's `reference` array to point to an existing
-index pattern. This is useful if the index you were working with has been renamed.
+data view. This is useful if the index you were working with has been renamed.

 WARNING: Validation is not performed for object properties. Submitting an invalid
 change will render the object unusable. A more failsafe approach is to use
@@ -9,7 +9,7 @@ they are now maintained by {kib}.
 Numeral formatting patterns are used in multiple places in {kib}, including:

 * <<advanced-options, Advanced settings>>
-* <<field-formatters-numeric, Index pattern formatters>>
+* <<field-formatters-numeric, Data view formatters>>
 * <<tsvb, *TSVB*>>
 * <<canvas, *Canvas*>>
@@ -5,7 +5,7 @@
 experimental::[]

 A rollup job is a periodic task that aggregates data from indices specified
-by an index pattern, and then rolls it into a new index. Rollup indices are a good way to
+by a data view, and then rolls it into a new index. Rollup indices are a good way to
 compactly store months or years of historical
 data for use in visualizations and reports.

@@ -33,9 +33,9 @@ the process. You fill in the name, data flow, and how often you want to roll
 up the data. Then you define a date histogram aggregation for the rollup job
 and optionally define terms, histogram, and metrics aggregations.

-When defining the index pattern, you must enter a name that is different than
+When defining the data view, you must enter a name that is different than
 the output rollup index. Otherwise, the job
-will attempt to capture the data in the rollup index. For example, if your index pattern is `metricbeat-*`,
+will attempt to capture the data in the rollup index. For example, if your data view is `metricbeat-*`,
 you can name your rollup index `rollup-metricbeat`, but not `metricbeat-rollup`.

 [role="screenshot"]

@@ -66,7 +66,7 @@ You can read more at {ref}/rollup-job-config.html[rollup job configuration].
 This example creates a rollup job to capture log data from sample web logs.
 Before you start, <<add-sample-data, add the web logs sample data set>>.

-In this example, you want data that is older than 7 days in the target index pattern `kibana_sample_data_logs`
+In this example, you want data that is older than 7 days in the target data view `kibana_sample_data_logs`
 to roll up into the `rollup_logstash` index. You’ll bucket the
 rolled up data on an hourly basis, using 60m for the time bucket configuration.
 This allows for more granular queries, such as 2h and 12h.

@@ -85,7 +85,7 @@ As you walk through the *Create rollup job* UI, enter the data:
 |Name
 |`logs_job`

-|Index pattern
+|Data view
 |`kibana_sample_data_logs`

 |Rollup index name

@@ -139,27 +139,23 @@ rollup index, or you can remove or archive it using <<creating-index-lifecycle-p
 Your next step is to visualize your rolled up data in a vertical bar chart.
 Most visualizations support rolled up data, with the exception of Timelion and Vega visualizations.

-. Open the main menu, then click *Stack Management > Index Patterns*.
+. Open the main menu, then click *Stack Management > Data Views*.

-. Click *Create index pattern*, and select *Rollup index pattern* from the dropdown.
-+
-[role="screenshot"]
-image::images/management-rollup-index-pattern.png[][Create rollup index pattern]
+. Click *Create data view*, and select *Rollup data view* from the dropdown.

-. Enter *rollup_logstash,kibana_sample_logs* as your *Index Pattern* and `@timestamp`
+. Enter *rollup_logstash,kibana_sample_logs* as your *Data View* and `@timestamp`
 as the *Time Filter field name*.
 +
-The notation for a combination index pattern with both raw and rolled up data
-is `rollup_logstash,kibana_sample_data_logs`. In this index pattern, `rollup_logstash`
-matches the rolled up index pattern and `kibana_sample_data_logs` matches the index
-pattern for raw data.
+The notation for a combination data view with both raw and rolled up data
+is `rollup_logstash,kibana_sample_data_logs`. In this data view, `rollup_logstash`
+matches the rolled up data view and `kibana_sample_data_logs` matches the data view for raw data.

 . Open the main menu, click *Dashboard*, then *Create dashboard*.

 . Set the <<set-time-filter,time filter>> to *Last 90 days*.

 . On the dashboard, click *Create visualization*.

 . Choose `rollup_logstash,kibana_sample_data_logs`
 as your source to see both the raw and rolled up data.
 +
@@ -363,3 +363,8 @@ This content has moved. Refer to <<xpack-profiler>>.
 == Index patterns have been renamed to data views.

 This content has moved. Refer to <<data-views>>.
+
+[role="exclude",id="managing-index-patterns"]
+== Index patterns have been renamed to data views.
+
+This content has moved. Refer to <<managing-data-views>>.
@@ -332,7 +332,7 @@ For more details and a reference of audit events, refer to <<xpack-security-audi
 xpack.security.audit.enabled: true
 xpack.security.audit.appender: <1>
   type: rolling-file
-  fileName: ./data/audit.log
+  fileName: ./logs/audit.log
   policy:
     type: time-interval
     interval: 24h <2>
@@ -17,7 +17,7 @@ Define properties to detect the condition.
 [role="screenshot"]
 image::user/alerting/images/rule-types-es-query-conditions.png[Five clauses define the condition to detect]

-Index:: This clause requires an *index or index pattern* and a *time field* that will be used for the *time window*.
+Index:: This clause requires an *index or data view* and a *time field* that will be used for the *time window*.
 Size:: This clause specifies the number of documents to pass to the configured actions when the threshold condition is met.
 {es} query:: This clause specifies the ES DSL query to execute. The number of documents that match this query will be evaluated against the threshold
 condition. Aggregations are not supported at this time.
@@ -10,17 +10,17 @@ In the event that an entity is contained within a boundary, an alert may be gene
 ==== Requirements
 To create a Tracking containment rule, the following requirements must be present:

-- *Tracks index or index pattern*: An index containing a `geo_point` field, `date` field,
+- *Tracks index or data view*: An index containing a `geo_point` field, `date` field,
 and some form of entity identifier. An entity identifier is a `keyword` or `number`
 field that consistently identifies the entity to be tracked. The data in this index should be dynamically
 updating so that there are entity movements to alert upon.
-- *Boundaries index or index pattern*: An index containing `geo_shape` data, such as boundary data and bounding box data.
+- *Boundaries index or data view*: An index containing `geo_shape` data, such as boundary data and bounding box data.
 This data is presumed to be static (not updating). Shape data matching the query is
 harvested once when the rule is created and anytime after when the rule is re-enabled
 after disablement.

 By design, current interval entity locations (_current_ is determined by `date` in
-the *Tracked index or index pattern*) are queried to determine if they are contained
+the *Tracked index or data view*) are queried to determine if they are contained
 within any monitored boundaries. Entity
 data should be somewhat "real time", meaning the dates of new documents aren’t older
 than the current time minus the amount of the interval. If data older than

@@ -39,13 +39,13 @@ as well as 2 Kuery bars used to provide additional filtering context for each of
 [role="screenshot"]
 image::user/alerting/images/alert-types-tracking-containment-conditions.png[Five clauses define the condition to detect]

-Index (entity):: This clause requires an *index or index pattern*, a *time field* that will be used for the *time window*, and a *`geo_point` field* for tracking.
+Index (entity):: This clause requires an *index or data view*, a *time field* that will be used for the *time window*, and a *`geo_point` field* for tracking.
 When entity:: This clause specifies which crossing option to track. The values
 *Entered*, *Exited*, and *Crossed* can be selected to indicate which crossing conditions
 should trigger a rule. *Entered* alerts on entry into a boundary, *Exited* alerts on exit
 from a boundary, and *Crossed* alerts on all boundary crossings whether they be entrances
 or exits.
-Index (Boundary):: This clause requires an *index or index pattern*, a *`geo_shape` field*
+Index (Boundary):: This clause requires an *index or data view*, a *`geo_shape` field*
 identifying boundaries, and an optional *Human-readable boundary name* for better alerting
 messages.
@@ -17,7 +17,7 @@ Define properties to detect the condition.
 [role="screenshot"]
 image::user/alerting/images/rule-types-index-threshold-conditions.png[Five clauses define the condition to detect]

-Index:: This clause requires an *index or index pattern* and a *time field* that will be used for the *time window*.
+Index:: This clause requires an *index or data view* and a *time field* that will be used for the *time window*.
 When:: This clause specifies how the value to be compared to the threshold is calculated. The value is calculated by aggregating a numeric field over the *time window*. The aggregation options are: `count`, `average`, `sum`, `min`, and `max`. When using `count` the document count is used, and an aggregation field is not necessary.
 Over/Grouped Over:: This clause lets you configure whether the aggregation is applied over all documents, or should be split into groups using a grouping field. If grouping is used, an <<alerting-concepts-alerts, alert>> will be created for each group when it exceeds the threshold. To limit the number of alerts on high cardinality fields, you must specify the number of groups to check against the threshold. Only the *top* groups are checked.
 Threshold:: This clause defines a threshold value and a comparison operator (one of `is above`, `is above or equals`, `is below`, `is below or equals`, or `is between`). The result of the aggregation is compared to this threshold.
@@ -8,7 +8,7 @@ By default, both the configuration and data are saved for the workspace:

 [horizontal]
 *configuration*::
-The selected index pattern, fields, colors, icons,
+The selected data view, fields, colors, icons,
 and settings.
 *data*::
 The visualized content (the vertices and connections displayed in
@@ -4,7 +4,7 @@
 [partintro]
 --
 *Stack Management* is home to UIs for managing all things Elastic Stack—
-indices, clusters, licenses, UI settings, index patterns, spaces, and more.
+indices, clusters, licenses, UI settings, data views, spaces, and more.


 Access to individual features is governed by {es} and {kib} privileges.

@@ -128,12 +128,12 @@ Kerberos, PKI, OIDC, and SAML.
 [cols="50, 50"]
 |===

-a| <<managing-index-patterns, Manage index pattern data fields>>
-|Manage the data fields in the index patterns that retrieve your data from {es}.
+a| <<data-views, Data Views>>
+|Manage the fields in the data views that retrieve your data from {es}.

 | <<managing-saved-objects, Saved Objects>>
 | Copy, edit, delete, import, and export your saved objects.
-These include dashboards, visualizations, maps, index patterns, Canvas workpads, and more.
+These include dashboards, visualizations, maps, data views, Canvas workpads, and more.

 | <<managing-tags, Tags>>
 |Create, manage, and assign tags to your saved objects.

@@ -183,7 +183,7 @@ include::{kib-repo-dir}/management/action-types.asciidoc[]

 include::{kib-repo-dir}/management/managing-licenses.asciidoc[]

-include::{kib-repo-dir}/management/manage-index-patterns.asciidoc[]
+include::{kib-repo-dir}/management/manage-data-views.asciidoc[]

 include::{kib-repo-dir}/management/numeral.asciidoc[]
@@ -5,21 +5,21 @@

The {stack} {monitor-features} provide
<<alerting-getting-started,{kib} alerting rules>> out of the box to notify you
of potential issues in the {stack}. These rules are preconfigured based on the
best practices recommended by Elastic. However, you can tailor them to meet your
specific needs.

[role="screenshot"]
image::user/monitoring/images/monitoring-kibana-alerting-notification.png["{kib} alerting notifications in {stack-monitor-app}"]

When you open *{stack-monitor-app}* for the first time, you will be asked to acknowledge the creation of these default rules. They are initially configured to detect and notify on various
conditions across your monitored clusters. You can view notifications for: *Cluster health*, *Resource utilization*, and *Errors and exceptions* for {es}
in real time.

NOTE: The default {watcher}-based "cluster alerts" for {stack-monitor-app} have
been recreated as rules in {kib} {alert-features}. For this reason, the existing
{watcher} email action
`monitoring.cluster_alerts.email_notifications.email_address` no longer works.
The default action for all {stack-monitor-app} rules is to write to {kib} logs
and display a notification in the UI.

To review and modify existing *{stack-monitor-app}* rules, click *Enter setup mode* on the *Cluster overview* page.
@@ -47,21 +47,21 @@ checks on a schedule time of 1 minute with a re-notify interval of 1 day.

This rule checks for {es} nodes that use a high amount of JVM memory. By
default, the condition is set at 85% or more averaged over the last 5 minutes.
The default rule checks on a schedule time of 1 minute with a re-notify interval of 1 day.

[discrete]
[[kibana-alerts-missing-monitoring-data]]
== Missing monitoring data

This rule checks for {es} nodes that stop sending monitoring data. By default,
the condition is set to missing for 15 minutes looking back 1 day. The default rule checks on a schedule
time of 1 minute with a re-notify interval of 6 hours.

[discrete]
[[kibana-alerts-thread-pool-rejections]]
== Thread pool rejections (search/write)

This rule checks for {es} nodes that experience thread pool rejections. By
default, the condition is set at 300 or more over the last 5 minutes. The default rule
checks on a schedule time of 1 minute with a re-notify interval of 1 day. Thresholds can be set
independently for `search` and `write` type rejections.
@@ -72,14 +72,14 @@ independently for `search` and `write` type rejections.

This rule checks for read exceptions on any of the replicated {es} clusters. The
condition is met if 1 or more read exceptions are detected in the last hour. The
default rule checks on a schedule time of 1 minute with a re-notify interval of 6 hours.

[discrete]
[[kibana-alerts-large-shard-size]]
== Large shard size

This rule checks for a large average shard size (across associated primaries) on
-any of the specified index patterns in an {es} cluster. The condition is met if
+any of the specified data views in an {es} cluster. The condition is met if
an index's average shard size is 55gb or higher in the last 5 minutes. The default rule
matches the pattern of `-.*` by running checks on a schedule time of 1 minute with a re-notify interval of 12 hours.
@@ -124,8 +124,8 @@ valid for 30 days.
== Alerts and rules
[discrete]
=== Create default rules
-This option can be used to create default rules in this kibana space. This is
+This option can be used to create default rules in this Kibana space. This is
useful for scenarios when you didn't choose to create these default rules initially
or anytime later if the rules were accidentally deleted.

NOTE: Some action types are subscription features, while others are free.
@@ -33,7 +33,8 @@ export class HelloWorldEmbeddable extends Embeddable {
   * @param node
   */
  public render(node: HTMLElement) {
-    node.innerHTML = '<div data-test-subj="helloWorldEmbeddable">HELLO WORLD!</div>';
+    node.innerHTML =
+      '<div data-test-subj="helloWorldEmbeddable" data-render-complete="true">HELLO WORLD!</div>';
  }

  /**
@@ -41,7 +41,7 @@ function wrapSearchTerms(task: string, search?: string) {

export function TodoEmbeddableComponentInner({ input: { icon, title, task, search } }: Props) {
  return (
-    <EuiFlexGroup gutterSize="none">
+    <EuiFlexGroup gutterSize="none" data-render-complete="true">
      <EuiFlexItem grow={false}>
        {icon ? <EuiIcon type={icon} size="l" /> : <EuiAvatar name={title || task} size="l" />}
      </EuiFlexItem>
@@ -45,7 +45,7 @@ export function TodoRefEmbeddableComponentInner({
  const title = savedAttributes?.title;
  const task = savedAttributes?.task;
  return (
-    <EuiFlexGroup>
+    <EuiFlexGroup data-render-complete="true">
      <EuiFlexItem grow={false}>
        {icon ? (
          <EuiIcon type={icon} size="l" />
@@ -17,5 +17,8 @@ module.exports = {
    '<rootDir>/src/plugins/vis_types/*/jest.config.js',
    '<rootDir>/test/*/jest.config.js',
    '<rootDir>/x-pack/plugins/*/jest.config.js',
+    '<rootDir>/x-pack/plugins/security_solution/*/jest.config.js',
+    '<rootDir>/x-pack/plugins/security_solution/public/*/jest.config.js',
+    '<rootDir>/x-pack/plugins/security_solution/server/*/jest.config.js',
  ],
};
logs/.empty (new file, empty)
@@ -99,7 +99,7 @@
    "@elastic/apm-rum": "^5.9.1",
    "@elastic/apm-rum-react": "^1.3.1",
    "@elastic/apm-synthtrace": "link:bazel-bin/packages/elastic-apm-synthtrace",
-    "@elastic/charts": "38.1.3",
+    "@elastic/charts": "39.0.0",
    "@elastic/datemath": "link:bazel-bin/packages/elastic-datemath",
    "@elastic/elasticsearch": "npm:@elastic/elasticsearch-canary@^8.0.0-canary.35",
    "@elastic/ems-client": "8.0.0",
@@ -761,7 +761,7 @@
    "mocha-junit-reporter": "^2.0.0",
    "mochawesome": "^6.2.1",
    "mochawesome-merge": "^4.2.0",
-    "mock-fs": "^5.1.1",
+    "mock-fs": "^5.1.2",
    "mock-http-server": "1.3.0",
    "ms-chromium-edge-driver": "^0.4.2",
    "multimatch": "^4.0.0",
@@ -88,6 +88,16 @@ module.exports = {
        exclude: USES_STYLED_COMPONENTS,
        disallowedMessage: `Prefer using @emotion/react instead. To use styled-components, ensure your plugin is enabled in @kbn/dev-utils/src/babel.ts.`
      },
+      ...[
+        '@elastic/eui/dist/eui_theme_light.json',
+        '@elastic/eui/dist/eui_theme_dark.json',
+        '@elastic/eui/dist/eui_theme_amsterdam_light.json',
+        '@elastic/eui/dist/eui_theme_amsterdam_dark.json',
+      ].map(from => ({
+        from,
+        to: false,
+        disallowedMessage: `Use "@kbn/ui-shared-deps-src/theme" to access theme vars.`
+      })),
    ],
  ],
@@ -79,6 +79,8 @@ expect.addSnapshotSerializer(extendedEnvSerializer);

beforeEach(() => {
  jest.clearAllMocks();
  log.messages.length = 0;
  process.execArgv = ['--inheritted', '--exec', '--argv'];
  process.env.FORCE_COLOR = process.env.FORCE_COLOR || '1';
  currentProc = undefined;
});
@@ -120,9 +122,6 @@ describe('#run$', () => {
  it('starts the dev server with the right options', () => {
    run(new DevServer(defaultOptions)).unsubscribe();

-    // ensure that FORCE_COLOR is in the env for consistency in snapshot
-    process.env.FORCE_COLOR = process.env.FORCE_COLOR || 'true';
-
    expect(execa.node.mock.calls).toMatchInlineSnapshot(`
      Array [
        Array [

@@ -135,11 +134,13 @@ describe('#run$', () => {
          "env": Object {
            "<inheritted process.env>": true,
            "ELASTIC_APM_SERVICE_NAME": "kibana",
+            "FORCE_COLOR": "true",
+            "isDevCliChild": "true",
          },
          "nodeOptions": Array [
            "--preserve-symlinks-main",
            "--preserve-symlinks",
            "--inheritted",
            "--exec",
            "--argv",
          ],
          "stdio": "pipe",
        },
@@ -40,6 +40,7 @@ export async function loadAction({
  inputDir,
  skipExisting,
  useCreate,
+  docsOnly,
  client,
  log,
  kbnClient,

@@ -47,6 +48,7 @@ export async function loadAction({
  inputDir: string;
  skipExisting: boolean;
  useCreate: boolean;
+  docsOnly?: boolean;
  client: Client;
  log: ToolingLog;
  kbnClient: KbnClient;

@@ -76,7 +78,7 @@ export async function loadAction({

  await createPromiseFromStreams([
    recordStream,
-    createCreateIndexStream({ client, stats, skipExisting, log }),
+    createCreateIndexStream({ client, stats, skipExisting, docsOnly, log }),
    createIndexDocRecordsStream(client, stats, progress, useCreate),
  ]);
@@ -27,6 +27,7 @@ export async function saveAction({
  client,
  log,
  raw,
+  keepIndexNames,
  query,
}: {
  outputDir: string;

@@ -34,6 +35,7 @@ export async function saveAction({
  client: Client;
  log: ToolingLog;
  raw: boolean;
+  keepIndexNames?: boolean;
  query?: Record<string, any>;
}) {
  const name = relative(REPO_ROOT, outputDir);

@@ -50,7 +52,7 @@ export async function saveAction({
    // export and save the matching indices to mappings.json
    createPromiseFromStreams([
      createListStream(indices),
-      createGenerateIndexRecordsStream(client, stats),
+      createGenerateIndexRecordsStream({ client, stats, keepIndexNames }),
      ...createFormatArchiveStreams(),
      createWriteStream(resolve(outputDir, 'mappings.json')),
    ] as [Readable, ...Writable[]]),

@@ -58,7 +60,7 @@ export async function saveAction({
    // export all documents from matching indexes into data.json.gz
    createPromiseFromStreams([
      createListStream(indices),
-      createGenerateDocRecordsStream({ client, stats, progress, query }),
+      createGenerateDocRecordsStream({ client, stats, progress, keepIndexNames, query }),
      ...createFormatArchiveStreams({ gzip: !raw }),
      createWriteStream(resolve(outputDir, `data.json${raw ? '' : '.gz'}`)),
    ] as [Readable, ...Writable[]]),
@@ -143,11 +143,12 @@ export function runCli() {
        $ node scripts/es_archiver save test/functional/es_archives/my_test_data logstash-*
      `,
      flags: {
-        boolean: ['raw'],
+        boolean: ['raw', 'keep-index-names'],
        string: ['query'],
        help: `
          --raw                don't gzip the archives
+          --keep-index-names   don't change the names of Kibana indices to .kibana_1
          --query              query object to limit the documents being archived, needs to be properly escaped JSON
        `,
      },
      async run({ flags, esArchiver, statsMeta }) {

@@ -168,6 +169,11 @@ export function runCli() {
          throw createFlagError('--raw does not take a value');
        }

+        const keepIndexNames = flags['keep-index-names'];
+        if (typeof keepIndexNames !== 'boolean') {
+          throw createFlagError('--keep-index-names does not take a value');
+        }
+
        const query = flags.query;
        let parsedQuery;
        if (typeof query === 'string' && query.length > 0) {

@@ -178,7 +184,7 @@ export function runCli() {
          }
        }

-        await esArchiver.save(path, indices, { raw, query: parsedQuery });
+        await esArchiver.save(path, indices, { raw, keepIndexNames, query: parsedQuery });
      },
    })
    .command({

@@ -196,9 +202,10 @@ export function runCli() {
        $ node scripts/es_archiver load my_test_data --config ../config.js
      `,
      flags: {
-        boolean: ['use-create'],
+        boolean: ['use-create', 'docs-only'],
        help: `
          --use-create         use create instead of index for loading documents
+          --docs-only          load only documents, not indices
        `,
      },
      async run({ flags, esArchiver, statsMeta }) {

@@ -217,7 +224,12 @@ export function runCli() {
          throw createFlagError('--use-create does not take a value');
        }

-        await esArchiver.load(path, { useCreate });
+        const docsOnly = flags['docs-only'];
+        if (typeof docsOnly !== 'boolean') {
+          throw createFlagError('--docs-only does not take a value');
+        }
+
+        await esArchiver.load(path, { useCreate, docsOnly });
      },
    })
    .command({
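Illustrative invocations of the two new flags, following the command examples shown in the help text above (the archive names and argument order are assumptions):

  $ node scripts/es_archiver save test/functional/es_archives/my_test_data logstash-* --keep-index-names
  $ node scripts/es_archiver load my_test_data --docs-only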
@@ -50,16 +50,22 @@ export class EsArchiver {
   * @param {String|Array<String>} indices - the indices to archive
   * @param {Object} options
   * @property {Boolean} options.raw - should the archive be raw (unzipped) or not
+   * @property {Boolean} options.keepIndexNames - should the Kibana index name be kept as-is or renamed
   */
  async save(
    path: string,
    indices: string | string[],
-    { raw = false, query }: { raw?: boolean; query?: Record<string, any> } = {}
+    {
+      raw = false,
+      keepIndexNames = false,
+      query,
+    }: { raw?: boolean; keepIndexNames?: boolean; query?: Record<string, any> } = {}
  ) {
    return await saveAction({
      outputDir: Path.resolve(this.baseDir, path),
      indices,
      raw,
+      keepIndexNames,
      client: this.client,
      log: this.log,
      query,

@@ -74,18 +80,21 @@ export class EsArchiver {
   * @property {Boolean} options.skipExisting - should existing indices
   *           be ignored or overwritten
   * @property {Boolean} options.useCreate - use a create operation instead of index for documents
+   * @property {Boolean} options.docsOnly - load only documents, not indices
   */
  async load(
    path: string,
    {
      skipExisting = false,
      useCreate = false,
-    }: { skipExisting?: boolean; useCreate?: boolean } = {}
+      docsOnly = false,
+    }: { skipExisting?: boolean; useCreate?: boolean; docsOnly?: boolean } = {}
  ) {
    return await loadAction({
      inputDir: this.findArchive(path),
      skipExisting: !!skipExisting,
      useCreate: !!useCreate,
      docsOnly,
      client: this.client,
      log: this.log,
      kbnClient: this.kbnClient,
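Taken together, a minimal usage sketch of the two new options against an existing EsArchiver instance (the `esArchiver` instance and the archive name are assumed):

  // Save an archive without renaming .kibana_* indices to .kibana_1:
  await esArchiver.save('my_test_data', ['.kibana_7.16.0_001'], {
    keepIndexNames: true,
  });

  // Load only the documents from an archive, skipping index creation:
  await esArchiver.load('my_test_data', { docsOnly: true });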
@@ -20,48 +20,24 @@ import { createStats } from '../stats';

const log = new ToolingLog();

-it('transforms each input index to a stream of docs using scrollSearch helper', async () => {
-  const responses: any = {
-    foo: [
-      {
-        body: {
-          hits: {
-            total: 5,
-            hits: [
-              { _index: 'foo', _type: '_doc', _id: '0', _source: {} },
-              { _index: 'foo', _type: '_doc', _id: '1', _source: {} },
-              { _index: 'foo', _type: '_doc', _id: '2', _source: {} },
-            ],
-          },
-        },
-      },
-      {
-        body: {
-          hits: {
-            total: 5,
-            hits: [
-              { _index: 'foo', _type: '_doc', _id: '3', _source: {} },
-              { _index: 'foo', _type: '_doc', _id: '4', _source: {} },
-            ],
-          },
-        },
-      },
-    ],
-    bar: [
-      {
-        body: {
-          hits: {
-            total: 2,
-            hits: [
-              { _index: 'bar', _type: '_doc', _id: '0', _source: {} },
-              { _index: 'bar', _type: '_doc', _id: '1', _source: {} },
-            ],
-          },
-        },
-      },
-    ],
-  };
+interface SearchResponses {
+  [key: string]: Array<{
+    body: {
+      hits: {
+        total: number;
+        hits: Array<{
+          _index: string;
+          _type: string;
+          _id: string;
+          _source: Record<string, unknown>;
+        }>;
+      };
+    };
+  }>;
+}
+
+function createMockClient(responses: SearchResponses) {
+  // TODO: replace with proper mocked client
  const client: any = {
    helpers: {
      scrollSearch: jest.fn(function* ({ index }) {

@@ -71,29 +47,76 @@ it('transforms each input index to a stream of docs using scrollSearch helper',
      }),
    },
  };
+  return client;
+}

-  const stats = createStats('test', log);
-  const progress = new Progress();
-
-  const results = await createPromiseFromStreams([
-    createListStream(['bar', 'foo']),
-    createGenerateDocRecordsStream({
-      client,
-      stats,
-      progress,
-    }),
-    createMapStream((record: any) => {
-      expect(record).toHaveProperty('type', 'doc');
-      expect(record.value.source).toEqual({});
-      expect(record.value.type).toBe('_doc');
-      expect(record.value.index).toMatch(/^(foo|bar)$/);
-      expect(record.value.id).toMatch(/^\d+$/);
-      return `${record.value.index}:${record.value.id}`;
-    }),
-    createConcatStream([]),
-  ]);
-
-  expect(client.helpers.scrollSearch).toMatchInlineSnapshot(`
+describe('esArchiver: createGenerateDocRecordsStream()', () => {
+  it('transforms each input index to a stream of docs using scrollSearch helper', async () => {
+    const responses = {
+      foo: [
+        {
+          body: {
+            hits: {
+              total: 5,
+              hits: [
+                { _index: 'foo', _type: '_doc', _id: '0', _source: {} },
+                { _index: 'foo', _type: '_doc', _id: '1', _source: {} },
+                { _index: 'foo', _type: '_doc', _id: '2', _source: {} },
+              ],
+            },
+          },
+        },
+        {
+          body: {
+            hits: {
+              total: 5,
+              hits: [
+                { _index: 'foo', _type: '_doc', _id: '3', _source: {} },
+                { _index: 'foo', _type: '_doc', _id: '4', _source: {} },
+              ],
+            },
+          },
+        },
+      ],
+      bar: [
+        {
+          body: {
+            hits: {
+              total: 2,
+              hits: [
+                { _index: 'bar', _type: '_doc', _id: '0', _source: {} },
+                { _index: 'bar', _type: '_doc', _id: '1', _source: {} },
+              ],
+            },
+          },
+        },
+      ],
+    };
+    const client = createMockClient(responses);
+
+    const stats = createStats('test', log);
+    const progress = new Progress();
+
+    const results = await createPromiseFromStreams([
+      createListStream(['bar', 'foo']),
+      createGenerateDocRecordsStream({
+        client,
+        stats,
+        progress,
+      }),
+      createMapStream((record: any) => {
+        expect(record).toHaveProperty('type', 'doc');
+        expect(record.value.source).toEqual({});
+        expect(record.value.type).toBe('_doc');
+        expect(record.value.index).toMatch(/^(foo|bar)$/);
+        expect(record.value.id).toMatch(/^\d+$/);
+        return `${record.value.index}:${record.value.id}`;
+      }),
+      createConcatStream([]),
+    ]);
+
+    expect(client.helpers.scrollSearch).toMatchInlineSnapshot(`
      [MockFunction] {
        "calls": Array [
          Array [

@@ -139,7 +162,7 @@ it('transforms each input index to a stream of docs using scrollSearch helper',
        ],
      }
    `);
-  expect(results).toMatchInlineSnapshot(`
+    expect(results).toMatchInlineSnapshot(`
      Array [
        "bar:0",
        "bar:1",

@@ -150,14 +173,14 @@ it('transforms each input index to a stream of docs using scrollSearch helper',
        "foo:4",
      ]
    `);
-  expect(progress).toMatchInlineSnapshot(`
+    expect(progress).toMatchInlineSnapshot(`
      Progress {
        "complete": 7,
        "loggingInterval": undefined,
        "total": 7,
      }
    `);
-  expect(stats).toMatchInlineSnapshot(`
+    expect(stats).toMatchInlineSnapshot(`
      Object {
        "bar": Object {
          "archived": false,

@@ -193,4 +216,80 @@ it('transforms each input index to a stream of docs using scrollSearch helper',
      },
    }
  `);
  });
+
+  describe('keepIndexNames', () => {
+    it('changes .kibana* index names if keepIndexNames is not enabled', async () => {
+      const hits = [{ _index: '.kibana_7.16.0_001', _type: '_doc', _id: '0', _source: {} }];
+      const responses = {
+        ['.kibana_7.16.0_001']: [{ body: { hits: { hits, total: hits.length } } }],
+      };
+      const client = createMockClient(responses);
+      const stats = createStats('test', log);
+      const progress = new Progress();
+
+      const results = await createPromiseFromStreams([
+        createListStream(['.kibana_7.16.0_001']),
+        createGenerateDocRecordsStream({
+          client,
+          stats,
+          progress,
+        }),
+        createMapStream((record: { value: { index: string; id: string } }) => {
+          return `${record.value.index}:${record.value.id}`;
+        }),
+        createConcatStream([]),
+      ]);
+      expect(results).toEqual(['.kibana_1:0']);
+    });
+
+    it('does not change non-.kibana* index names if keepIndexNames is not enabled', async () => {
+      const hits = [{ _index: '.foo', _type: '_doc', _id: '0', _source: {} }];
+      const responses = {
+        ['.foo']: [{ body: { hits: { hits, total: hits.length } } }],
+      };
+      const client = createMockClient(responses);
+      const stats = createStats('test', log);
+      const progress = new Progress();
+
+      const results = await createPromiseFromStreams([
+        createListStream(['.foo']),
+        createGenerateDocRecordsStream({
+          client,
+          stats,
+          progress,
+        }),
+        createMapStream((record: { value: { index: string; id: string } }) => {
+          return `${record.value.index}:${record.value.id}`;
+        }),
+        createConcatStream([]),
+      ]);
+      expect(results).toEqual(['.foo:0']);
+    });
+
+    it('does not change .kibana* index names if keepIndexNames is enabled', async () => {
+      const hits = [{ _index: '.kibana_7.16.0_001', _type: '_doc', _id: '0', _source: {} }];
+      const responses = {
+        ['.kibana_7.16.0_001']: [{ body: { hits: { hits, total: hits.length } } }],
+      };
+      const client = createMockClient(responses);
+      const stats = createStats('test', log);
+      const progress = new Progress();
+
+      const results = await createPromiseFromStreams([
+        createListStream(['.kibana_7.16.0_001']),
+        createGenerateDocRecordsStream({
+          client,
+          stats,
+          progress,
+          keepIndexNames: true,
+        }),
+        createMapStream((record: { value: { index: string; id: string } }) => {
+          return `${record.value.index}:${record.value.id}`;
+        }),
+        createConcatStream([]),
+      ]);
+      expect(results).toEqual(['.kibana_7.16.0_001:0']);
+    });
+  });
+});
@@ -19,11 +19,13 @@ export function createGenerateDocRecordsStream({
  client,
  stats,
  progress,
+  keepIndexNames,
  query,
}: {
  client: Client;
  stats: Stats;
  progress: Progress;
+  keepIndexNames?: boolean;
  query?: Record<string, any>;
}) {
  return new Transform({

@@ -59,9 +61,10 @@ export function createGenerateDocRecordsStream({
          this.push({
            type: 'doc',
            value: {
-              // always rewrite the .kibana_* index to .kibana_1 so that
+              // if keepIndexNames is false, rewrite the .kibana_* index to .kibana_1 so that
              // when it is loaded it can skip migration, if possible
-              index: hit._index.startsWith('.kibana') ? '.kibana_1' : hit._index,
+              index:
+                hit._index.startsWith('.kibana') && !keepIndexNames ? '.kibana_1' : hit._index,
              type: hit._type,
              id: hit._id,
              source: hit._source,
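The rewrite rule above reduces to a small pure function; a sketch with an illustrative helper name:

  // Mirrors the rewrite logic in the stream: any .kibana_* index is renamed
  // to .kibana_1 (so the archive can skip migration on load) unless the
  // caller passed keepIndexNames.
  function archiveIndexName(index: string, keepIndexNames = false): string {
    return index.startsWith('.kibana') && !keepIndexNames ? '.kibana_1' : index;
  }

  archiveIndexName('.kibana_7.16.0_001');       // '.kibana_1'
  archiveIndexName('.kibana_7.16.0_001', true); // '.kibana_7.16.0_001'
  archiveIndexName('logstash-0');               // 'logstash-0'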
@@ -6,7 +6,10 @@
 * Side Public License, v 1.
 */

-export const migrationRetryCallClusterMock = jest.fn((fn) => fn());
-jest.doMock('../../../elasticsearch/client/retry_call_cluster', () => ({
-  migrationRetryCallCluster: migrationRetryCallClusterMock,
+import type { deleteKibanaIndices } from './kibana_index';
+
+export const mockDeleteKibanaIndices = jest.fn() as jest.MockedFunction<typeof deleteKibanaIndices>;
+
+jest.mock('./kibana_index', () => ({
+  deleteKibanaIndices: mockDeleteKibanaIndices,
}));
@@ -6,6 +6,8 @@
 * Side Public License, v 1.
 */

+import { mockDeleteKibanaIndices } from './create_index_stream.test.mock';
+
import sinon from 'sinon';
import Chance from 'chance';
import { createPromiseFromStreams, createConcatStream, createListStream } from '@kbn/utils';

@@ -24,6 +26,10 @@ const chance = new Chance();

const log = createStubLogger();

+beforeEach(() => {
+  mockDeleteKibanaIndices.mockClear();
+});
+
describe('esArchiver: createCreateIndexStream()', () => {
  describe('defaults', () => {
    it('deletes existing indices, creates all', async () => {

@@ -167,6 +173,73 @@ describe('esArchiver: createCreateIndexStream()', () => {
    });
  });

+  describe('deleteKibanaIndices', () => {
+    function doTest(...indices: string[]) {
+      return createPromiseFromStreams([
+        createListStream(indices.map((index) => createStubIndexRecord(index))),
+        createCreateIndexStream({ client: createStubClient(), stats: createStubStats(), log }),
+        createConcatStream([]),
+      ]);
+    }
+
+    it('does not delete Kibana indices for indexes that do not start with .kibana', async () => {
+      await doTest('.foo');
+
+      expect(mockDeleteKibanaIndices).not.toHaveBeenCalled();
+    });
+
+    it('deletes Kibana indices at most once for indices that start with .kibana', async () => {
+      // If we are loading the main Kibana index, we should delete all Kibana indices for backwards compatibility reasons.
+      await doTest('.kibana_7.16.0_001', '.kibana_task_manager_7.16.0_001');
+
+      expect(mockDeleteKibanaIndices).toHaveBeenCalledTimes(1);
+      expect(mockDeleteKibanaIndices).toHaveBeenCalledWith(
+        expect.not.objectContaining({ onlyTaskManager: true })
+      );
+    });
+
+    it('deletes Kibana task manager index at most once, using onlyTaskManager: true', async () => {
+      // If we are loading the Kibana task manager index, we should only delete that index, not any other Kibana indices.
+      await doTest('.kibana_task_manager_7.16.0_001', '.kibana_task_manager_7.16.0_002');
+
+      expect(mockDeleteKibanaIndices).toHaveBeenCalledTimes(1);
+      expect(mockDeleteKibanaIndices).toHaveBeenCalledWith(
+        expect.objectContaining({ onlyTaskManager: true })
+      );
+    });
+
+    it('deletes Kibana task manager index AND deletes all Kibana indices', async () => {
+      // Because we are reading from a stream, we can't look ahead to see if we'll eventually wind up deleting all Kibana indices.
+      // So, we first delete only the Kibana task manager indices, then we wind up deleting all Kibana indices.
+      await doTest('.kibana_task_manager_7.16.0_001', '.kibana_7.16.0_001');
+
+      expect(mockDeleteKibanaIndices).toHaveBeenCalledTimes(2);
+      expect(mockDeleteKibanaIndices).toHaveBeenNthCalledWith(
+        1,
+        expect.objectContaining({ onlyTaskManager: true })
+      );
+      expect(mockDeleteKibanaIndices).toHaveBeenNthCalledWith(
+        2,
+        expect.not.objectContaining({ onlyTaskManager: true })
+      );
+    });
+  });
+
+  describe('docsOnly = true', () => {
+    it('passes through "hit" records without attempting to create indices', async () => {
+      const client = createStubClient();
+      const stats = createStubStats();
+      const output = await createPromiseFromStreams([
+        createListStream([createStubIndexRecord('index'), createStubDocRecord('index', 1)]),
+        createCreateIndexStream({ client, stats, log, docsOnly: true }),
+        createConcatStream([]),
+      ]);
+
+      sinon.assert.notCalled(client.indices.create as sinon.SinonSpy);
+      expect(output).toEqual([createStubDocRecord('index', 1)]);
+    });
+  });
+
  describe('skipExisting = true', () => {
    it('ignores preexisting indexes', async () => {
      const client = createStubClient(['existing-index']);
@@ -29,11 +29,13 @@ export function createCreateIndexStream({
  client,
  stats,
  skipExisting = false,
+  docsOnly = false,
  log,
}: {
  client: Client;
  stats: Stats;
  skipExisting?: boolean;
+  docsOnly?: boolean;
  log: ToolingLog;
}) {
  const skipDocsFromIndices = new Set();

@@ -42,6 +44,7 @@ export function createCreateIndexStream({
  // previous indices are removed so we're starting w/ a clean slate for
  // migrations. This only needs to be done once per archive load operation.
  let kibanaIndexAlreadyDeleted = false;
+  let kibanaTaskManagerIndexAlreadyDeleted = false;

  async function handleDoc(stream: Readable, record: DocRecord) {
    if (skipDocsFromIndices.has(record.value.index)) {

@@ -53,13 +56,21 @@

  async function handleIndex(record: DocRecord) {
    const { index, settings, mappings, aliases } = record.value;
-    const isKibana = index.startsWith('.kibana');
+    const isKibanaTaskManager = index.startsWith('.kibana_task_manager');
+    const isKibana = index.startsWith('.kibana') && !isKibanaTaskManager;
+
+    if (docsOnly) {
+      return;
+    }

    async function attemptToCreate(attemptNumber = 1) {
      try {
        if (isKibana && !kibanaIndexAlreadyDeleted) {
-          await deleteKibanaIndices({ client, stats, log });
-          kibanaIndexAlreadyDeleted = true;
+          await deleteKibanaIndices({ client, stats, log }); // delete all .kibana* indices
+          kibanaIndexAlreadyDeleted = kibanaTaskManagerIndexAlreadyDeleted = true;
+        } else if (isKibanaTaskManager && !kibanaTaskManagerIndexAlreadyDeleted) {
+          await deleteKibanaIndices({ client, stats, onlyTaskManager: true, log }); // delete only .kibana_task_manager* indices
+          kibanaTaskManagerIndexAlreadyDeleted = true;
        }

        await client.indices.create(
@@ -21,7 +21,7 @@ describe('esArchiver: createGenerateIndexRecordsStream()', () => {

    await createPromiseFromStreams([
      createListStream(indices),
-      createGenerateIndexRecordsStream(client, stats),
+      createGenerateIndexRecordsStream({ client, stats }),
    ]);

    expect(stats.getTestSummary()).toEqual({

@@ -40,7 +40,7 @@ describe('esArchiver: createGenerateIndexRecordsStream()', () => {

    await createPromiseFromStreams([
      createListStream(['index1']),
-      createGenerateIndexRecordsStream(client, stats),
+      createGenerateIndexRecordsStream({ client, stats }),
    ]);

    const params = (client.indices.get as sinon.SinonSpy).args[0][0];

@@ -58,7 +58,7 @@ describe('esArchiver: createGenerateIndexRecordsStream()', () => {

    const indexRecords = await createPromiseFromStreams<any[]>([
      createListStream(['index1', 'index2', 'index3']),
-      createGenerateIndexRecordsStream(client, stats),
+      createGenerateIndexRecordsStream({ client, stats }),
      createConcatStream([]),
    ]);

@@ -83,7 +83,7 @@ describe('esArchiver: createGenerateIndexRecordsStream()', () => {

    const indexRecords = await createPromiseFromStreams([
      createListStream(['index1']),
-      createGenerateIndexRecordsStream(client, stats),
+      createGenerateIndexRecordsStream({ client, stats }),
      createConcatStream([]),
    ]);

@@ -99,4 +99,51 @@ describe('esArchiver: createGenerateIndexRecordsStream()', () => {
      },
    ]);
  });
+
+  describe('change index names', () => {
+    it('changes .kibana* index names if keepIndexNames is not enabled', async () => {
+      const stats = createStubStats();
+      const client = createStubClient(['.kibana_7.16.0_001']);
+
+      const indexRecords = await createPromiseFromStreams([
+        createListStream(['.kibana_7.16.0_001']),
+        createGenerateIndexRecordsStream({ client, stats }),
+        createConcatStream([]),
+      ]);
+
+      expect(indexRecords).toEqual([
+        { type: 'index', value: expect.objectContaining({ index: '.kibana_1' }) },
+      ]);
+    });
+
+    it('does not change non-.kibana* index names if keepIndexNames is not enabled', async () => {
+      const stats = createStubStats();
+      const client = createStubClient(['.foo']);
+
+      const indexRecords = await createPromiseFromStreams([
+        createListStream(['.foo']),
+        createGenerateIndexRecordsStream({ client, stats }),
+        createConcatStream([]),
+      ]);
+
+      expect(indexRecords).toEqual([
+        { type: 'index', value: expect.objectContaining({ index: '.foo' }) },
+      ]);
+    });
+
+    it('does not change .kibana* index names if keepIndexNames is enabled', async () => {
+      const stats = createStubStats();
+      const client = createStubClient(['.kibana_7.16.0_001']);
+
+      const indexRecords = await createPromiseFromStreams([
+        createListStream(['.kibana_7.16.0_001']),
+        createGenerateIndexRecordsStream({ client, stats, keepIndexNames: true }),
+        createConcatStream([]),
+      ]);
+
+      expect(indexRecords).toEqual([
+        { type: 'index', value: expect.objectContaining({ index: '.kibana_7.16.0_001' }) },
+      ]);
+    });
+  });
});
@@ -11,7 +11,15 @@ import { Transform } from 'stream';
import { Stats } from '../stats';
import { ES_CLIENT_HEADERS } from '../../client_headers';

-export function createGenerateIndexRecordsStream(client: Client, stats: Stats) {
+export function createGenerateIndexRecordsStream({
+  client,
+  stats,
+  keepIndexNames,
+}: {
+  client: Client;
+  stats: Stats;
+  keepIndexNames?: boolean;
+}) {
  return new Transform({
    writableObjectMode: true,
    readableObjectMode: true,

@@ -59,9 +67,9 @@ export function createGenerateIndexRecordsStream(client: Client, stats: Stats) {
        this.push({
          type: 'index',
          value: {
-            // always rewrite the .kibana_* index to .kibana_1 so that
+            // if keepIndexNames is false, rewrite the .kibana_* index to .kibana_1 so that
            // when it is loaded it can skip migration, if possible
-            index: index.startsWith('.kibana') ? '.kibana_1' : index,
+            index: index.startsWith('.kibana') && !keepIndexNames ? '.kibana_1' : index,
            settings,
            mappings,
            aliases,
@@ -16,18 +16,21 @@ import { deleteIndex } from './delete_index';
import { ES_CLIENT_HEADERS } from '../../client_headers';

/**
- * Deletes all indices that start with `.kibana`
+ * Deletes all indices that start with `.kibana`, or if onlyTaskManager==true, all indices that start with `.kibana_task_manager`
 */
export async function deleteKibanaIndices({
  client,
  stats,
+  onlyTaskManager = false,
  log,
}: {
  client: Client;
  stats: Stats;
+  onlyTaskManager?: boolean;
  log: ToolingLog;
}) {
-  const indexNames = await fetchKibanaIndices(client);
+  const indexPattern = onlyTaskManager ? '.kibana_task_manager*' : '.kibana*';
+  const indexNames = await fetchKibanaIndices(client, indexPattern);
  if (!indexNames.length) {
    return;
  }

@@ -75,9 +78,9 @@ function isKibanaIndex(index?: string): index is string {
  );
}

-async function fetchKibanaIndices(client: Client) {
+async function fetchKibanaIndices(client: Client, indexPattern: string) {
  const resp = await client.cat.indices(
-    { index: '.kibana*', format: 'json' },
+    { index: indexPattern, format: 'json' },
    {
      headers: ES_CLIENT_HEADERS,
    }
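At the call site the two deletion modes look like this (a sketch assuming `client`, `stats`, and `log` are already constructed):

  // Delete every index matching .kibana* (which includes .kibana_task_manager*):
  await deleteKibanaIndices({ client, stats, log });

  // Delete only the task manager indices, leaving other .kibana* indices alone:
  await deleteKibanaIndices({ client, stats, onlyTaskManager: true, log });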
@@ -0,0 +1,49 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

const path = require('path');
const fs = require('fs');

function isKibanaRoot(maybeKibanaRoot) {
  try {
    const packageJsonPath = path.join(maybeKibanaRoot, 'package.json');
    fs.accessSync(packageJsonPath, fs.constants.R_OK);
    const packageJsonContent = fs.readFileSync(packageJsonPath);
    return JSON.parse(packageJsonContent).name === 'kibana';
  } catch (e) {
    return false;
  }
}

module.exports = function findKibanaRoot() {
  let maybeKibanaRoot = path.resolve(__dirname, '../../..');

  // when using symlinks, __dirname reports outside of the repo
  // if that's the case, the path will contain .cache/bazel
  if (!maybeKibanaRoot.includes('.cache/bazel')) {
    return maybeKibanaRoot;
  }

  // process.argv[1] would be the eslint binary, a correctly-set editor
  // will use a local eslint inside the repo node_modules and its value
  // should be `ACTUAL_KIBANA_ROOT/node_modules/.bin/eslint`
  maybeKibanaRoot = path.resolve(process.argv[1], '../../../');
  if (isKibanaRoot(maybeKibanaRoot)) {
    return maybeKibanaRoot;
  }

  // eslint should run on the repo root level
  // try to use process.cwd as the kibana root
  maybeKibanaRoot = process.cwd();
  if (isKibanaRoot(maybeKibanaRoot)) {
    return maybeKibanaRoot;
  }

  // fallback to the first predicted path (original script)
  return maybeKibanaRoot;
};
@@ -7,7 +7,8 @@
 */

const path = require('path');
-const KIBANA_ROOT = path.resolve(__dirname, '../../..');
+const findKibanaRoot = require('../helpers/find_kibana_root');
+const KIBANA_ROOT = findKibanaRoot();

function checkModuleNameNode(context, mappings, node, desc = 'Imported') {
  const mapping = mappings.find(
@@ -77,5 +77,76 @@ ruleTester.run('@kbn/eslint/module-migration', rule, {
      export const foo2 = 'bar'
    `,
    },
+    /**
+     * Given this tree:
+     * x-pack/
+     *  - common/
+     *    - foo.ts <-- the target import
+     *    - other/
+     *      - folder/
+     *        - bar.ts <-- the linted file
+     * import "x-pack/common/foo" should be
+     * import ../../foo
+     */
+    {
+      code: dedent`
+        import "x-pack/common/foo"
+      `,
+      filename: 'x-pack/common/other/folder/bar.ts',
+      options: [
+        [
+          {
+            from: 'x-pack',
+            to: 'foo',
+            toRelative: 'x-pack',
+          },
+        ],
+      ],
+      errors: [
+        {
+          line: 1,
+          message: 'Imported module "x-pack/common/foo" should be "../../foo"',
+        },
+      ],
+      output: dedent`
+        import '../../foo'
+      `,
+    },
+    /**
+     * Given this tree:
+     * x-pack/
+     *  - common/
+     *    - foo.ts <-- the target import
+     *  - another/
+     *    - possible/
+     *      - example.ts <-- the linted file
+     *
+     * import "x-pack/common/foo" should be
+     * import ../../common/foo
+     */
+    {
+      code: dedent`
+        import "x-pack/common/foo"
+      `,
+      filename: 'x-pack/another/possible/example.ts',
+      options: [
+        [
+          {
+            from: 'x-pack',
+            to: 'foo',
+            toRelative: 'x-pack',
+          },
+        ],
+      ],
+      errors: [
+        {
+          line: 1,
+          message: 'Imported module "x-pack/common/foo" should be "../../common/foo"',
+        },
+      ],
+      output: dedent`
+        import '../../common/foo'
+      `,
+    },
  ],
});
@@ -23,6 +23,17 @@ filegroup(

NPM_MODULE_EXTRA_FILES = [
    "package.json",
+    "deep_exact_rt/package.json",
+    "iso_to_epoch_rt/package.json",
+    "json_rt/package.json",
+    "merge_rt/package.json",
+    "non_empty_string_rt/package.json",
+    "parseable_types/package.json",
+    "props_to_schema/package.json",
+    "strict_keys_rt/package.json",
+    "to_boolean_rt/package.json",
+    "to_json_schema/package.json",
+    "to_number_rt/package.json",
]

RUNTIME_DEPS = [
packages/kbn-io-ts-utils/deep_exact_rt/package.json (new file)

@@ -0,0 +1,4 @@
{
  "main": "../target_node/deep_exact_rt",
  "types": "../target_types/deep_exact_rt"
}

packages/kbn-io-ts-utils/iso_to_epoch_rt/package.json (new file)

@@ -0,0 +1,4 @@
{
  "main": "../target_node/iso_to_epoch_rt",
  "types": "../target_types/iso_to_epoch_rt"
}

packages/kbn-io-ts-utils/json_rt/package.json (new file)

@@ -0,0 +1,4 @@
{
  "main": "../target_node/json_rt",
  "types": "../target_types/json_rt"
}

packages/kbn-io-ts-utils/merge_rt/package.json (new file)

@@ -0,0 +1,4 @@
{
  "main": "../target_node/merge_rt",
  "types": "../target_types/merge_rt"
}
packages/kbn-io-ts-utils/non_empty_string_rt/package.json (new file)

@@ -0,0 +1,4 @@
{
  "main": "../target_node/non_empty_string_rt",
  "types": "../target_types/non_empty_string_rt"
}

packages/kbn-io-ts-utils/parseable_types/package.json (new file)

@@ -0,0 +1,4 @@
{
  "main": "../target_node/parseable_types",
  "types": "../target_types/parseable_types"
}

packages/kbn-io-ts-utils/props_to_schema/package.json (new file)

@@ -0,0 +1,4 @@
{
  "main": "../target_node/props_to_schema",
  "types": "../target_types/props_to_schema"
}
@@ -113,7 +113,7 @@ export function strictKeysRt<T extends t.Any>(type: T) {
  const excessKeys = difference([...keys.all], [...keys.handled]);

  if (excessKeys.length) {
-    return t.failure(i, context, `Excess keys are not allowed: \n${excessKeys.join('\n')}`);
+    return t.failure(i, context, `Excess keys are not allowed:\n${excessKeys.join('\n')}`);
  }

  return t.success(i);
packages/kbn-io-ts-utils/strict_keys_rt/package.json (new file)

@@ -0,0 +1,4 @@
{
  "main": "../target_node/strict_keys_rt",
  "types": "../target_types/strict_keys_rt"
}

packages/kbn-io-ts-utils/to_boolean_rt/package.json (new file)

@@ -0,0 +1,4 @@
{
  "main": "../target_node/to_boolean_rt",
  "types": "../target_types/to_boolean_rt"
}

packages/kbn-io-ts-utils/to_json_schema/package.json (new file)

@@ -0,0 +1,4 @@
{
  "main": "../target_node/to_json_schema",
  "types": "../target_types/to_json_schema"
}

packages/kbn-io-ts-utils/to_number_rt/package.json (new file)

@@ -0,0 +1,4 @@
{
  "main": "../target_node/to_number_rt",
  "types": "../target_types/to_number_rt"
}
@@ -11,6 +11,7 @@ SOURCE_FILES = glob(
    "src/**/*",
  ],
  exclude = [
+    "**/__jest__",
    "**/*.test.*",
    "**/README.md",
  ],

@@ -36,12 +37,14 @@ RUNTIME_DEPS = [
    "@npm//monaco-editor",
    "@npm//raw-loader",
    "@npm//regenerator-runtime",
+    "@npm//rxjs",
]

TYPES_DEPS = [
    "//packages/kbn-i18n",
    "@npm//antlr4ts",
    "@npm//monaco-editor",
+    "@npm//rxjs",
    "@npm//@types/jest",
    "@npm//@types/node",
]
packages/kbn-monaco/src/__jest__/jest.mocks.ts (new file)

@@ -0,0 +1,68 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { MockIModel } from './types';

const createMockModel = (ID: string) => {
  const model: MockIModel = {
    uri: '',
    id: 'mockModel',
    value: '',
    getModeId: () => ID,
    changeContentListeners: [],
    setValue(newValue) {
      this.value = newValue;
      this.changeContentListeners.forEach((listener) => listener());
    },
    getValue() {
      return this.value;
    },
    onDidChangeContent(handler) {
      this.changeContentListeners.push(handler);
    },
    onDidChangeLanguage: (handler) => {
      handler({ newLanguage: ID });
    },
  };

  return model;
};

jest.mock('../monaco_imports', () => {
  const original = jest.requireActual('../monaco_imports');
  const originalMonaco = original.monaco;
  const originalEditor = original.monaco.editor;

  return {
    ...original,
    monaco: {
      ...originalMonaco,
      editor: {
        ...originalEditor,
        model: null,
        createModel(ID: string) {
          this.model = createMockModel(ID);
          return this.model;
        },
        onDidCreateModel(handler: (model: MockIModel) => void) {
          if (!this.model) {
            throw new Error(
              `Model needs to be created by calling monaco.editor.createModel(ID) first.`
            );
          }
          handler(this.model);
        },
        getModel() {
          return this.model;
        },
        getModels: () => [],
        setModelMarkers: () => undefined,
      },
    },
  };
});
@@ -6,8 +6,14 @@
 * Side Public License, v 1.
 */

-module.exports = {
-  preset: '@kbn/test',
-  rootDir: '../..',
-  roots: ['<rootDir>/packages/kbn-securitysolution-t-grid'],
-};
+export interface MockIModel {
+  uri: string;
+  id: string;
+  value: string;
+  changeContentListeners: Array<() => void>;
+  getModeId: () => string;
+  setValue: (value: string) => void;
+  getValue: () => string;
+  onDidChangeContent: (handler: () => void) => void;
+  onDidChangeLanguage: (handler: (options: { newLanguage: string }) => void) => void;
+}
packages/kbn-monaco/src/painless/diagnostics_adapter.test.ts (new file)

@@ -0,0 +1,147 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import '../__jest__/jest.mocks'; // Make sure this is the first import

import { Subscription } from 'rxjs';

import { MockIModel } from '../__jest__/types';
import { LangValidation } from '../types';
import { monaco } from '../monaco_imports';
import { ID } from './constants';

import { DiagnosticsAdapter } from './diagnostics_adapter';

const getSyntaxErrors = jest.fn(async (): Promise<string[] | undefined> => undefined);

const getMockWorker = async () => {
  return {
    getSyntaxErrors,
  } as any;
};

function flushPromises() {
  return new Promise((resolve) => setImmediate(resolve));
}

describe('Painless DiagnosticAdapter', () => {
  let diagnosticAdapter: DiagnosticsAdapter;
  let subscription: Subscription;
  let model: MockIModel;
  let validation: LangValidation;

  beforeAll(() => {
    jest.useFakeTimers();
  });

  afterAll(() => {
    jest.useRealTimers();
  });

  beforeEach(async () => {
    model = monaco.editor.createModel(ID) as unknown as MockIModel;
    diagnosticAdapter = new DiagnosticsAdapter(getMockWorker);

    // validate() has a promise we need to wait for
    // --> await worker.getSyntaxErrors()
    await flushPromises();

    subscription = diagnosticAdapter.validation$.subscribe((newValidation) => {
      validation = newValidation;
    });
  });

  afterEach(() => {
    if (subscription) {
      subscription.unsubscribe();
    }
  });

  test('should validate when the content changes', async () => {
    expect(validation!.isValidating).toBe(false);

    model.setValue('new content');
    await flushPromises();
    expect(validation!.isValidating).toBe(true);

    jest.advanceTimersByTime(500); // there is a 500ms debounce for the validate() to trigger
    await flushPromises();

    expect(validation!.isValidating).toBe(false);

    model.setValue('changed');
    // Flushing promises here is not actually required but adding it to make sure the test
    // works as expected even when doing so.
    await flushPromises();
    expect(validation!.isValidating).toBe(true);

    // when we clear the content we immediately set the
    // "isValidating" to false and mark the content as valid.
    // No need to wait for the setTimeout
    model.setValue('');
    await flushPromises();
    expect(validation!.isValidating).toBe(false);
    expect(validation!.isValid).toBe(true);
  });

  test('should prevent race condition of multiple content change and validation triggered', async () => {
    const errors = ['Syntax error returned'];

    getSyntaxErrors.mockResolvedValueOnce(errors);

    expect(validation!.isValidating).toBe(false);

    model.setValue('foo');
    jest.advanceTimersByTime(300); // only 300ms out of the 500ms

    model.setValue('bar'); // This will cancel the first setTimeout

    jest.advanceTimersByTime(300); // Again, only 300ms out of the 500ms.
    await flushPromises();

    expect(validation!.isValidating).toBe(true); // we are still validating

    jest.advanceTimersByTime(200); // rest of the 500ms
    await flushPromises();

    expect(validation!.isValidating).toBe(false);
    expect(validation!.isValid).toBe(false);
    expect(validation!.errors).toBe(errors);
  });

  test('should prevent race condition (2) of multiple content change and validation triggered', async () => {
    const errors1 = ['First error returned'];
    const errors2 = ['Second error returned'];

    getSyntaxErrors
      .mockResolvedValueOnce(errors1) // first call
      .mockResolvedValueOnce(errors2); // second call

    model.setValue('foo');
    // By now we are waiting on the worker to await getSyntaxErrors()
    // we won't flush the promise to not pass this point in time just yet
    jest.advanceTimersByTime(700);

    // We change the value at the same moment
    model.setValue('bar');
    // now we pass the await getSyntaxErrors() point but its result (errors1) should be stale and discarded
    await flushPromises();

    jest.advanceTimersByTime(300);
    await flushPromises();

    expect(validation!.isValidating).toBe(true); // we are still validating value "bar"

    jest.advanceTimersByTime(200); // rest of the 500ms
    await flushPromises();

    expect(validation!.isValidating).toBe(false);
    expect(validation!.isValid).toBe(false);
    // We have the second error response, the first one has been discarded
    expect(validation!.errors).toBe(errors2);
  });
});
@@ -6,7 +6,10 @@
 * Side Public License, v 1.
 */

import { BehaviorSubject } from 'rxjs';

import { monaco } from '../monaco_imports';
import { SyntaxErrors, LangValidation } from '../types';
import { ID } from './constants';
import { WorkerAccessor } from './language';
import { PainlessError } from './worker';

@@ -18,11 +21,17 @@ const toDiagnostics = (error: PainlessError): monaco.editor.IMarkerData => {
  };
};

export interface SyntaxErrors {
  [modelId: string]: PainlessError[];
}
export class DiagnosticsAdapter {
  private errors: SyntaxErrors = {};
  private validation = new BehaviorSubject<LangValidation>({
    isValid: true,
    isValidating: false,
    errors: [],
  });
  // To avoid stale validation data we keep track of the latest call to validate().
  private validateIdx = 0;

  public validation$ = this.validation.asObservable();

  constructor(private worker: WorkerAccessor) {
    const onModelAdd = (model: monaco.editor.IModel): void => {

@@ -35,14 +44,27 @@ export class DiagnosticsAdapter {
          return;
        }

        const idx = ++this.validateIdx; // Disable any possible inflight validation
        clearTimeout(handle);

        // Reset the model markers if an empty string is provided on change
        if (model.getValue().trim() === '') {
          this.validation.next({
            isValid: true,
            isValidating: false,
            errors: [],
          });
          return monaco.editor.setModelMarkers(model, ID, []);
        }

        this.validation.next({
          ...this.validation.value,
          isValidating: true,
        });
        // Every time a new change is made, wait 500ms before validating
        clearTimeout(handle);
        handle = setTimeout(() => this.validate(model.uri), 500);
        handle = setTimeout(() => {
          this.validate(model.uri, idx);
        }, 500);
      });

      model.onDidChangeLanguage(({ newLanguage }) => {

@@ -51,21 +73,33 @@
        if (newLanguage !== ID) {
          return monaco.editor.setModelMarkers(model, ID, []);
        } else {
          this.validate(model.uri);
          this.validate(model.uri, ++this.validateIdx);
        }
      });

      this.validate(model.uri);
      this.validation.next({
        ...this.validation.value,
        isValidating: true,
      });
      this.validate(model.uri, ++this.validateIdx);
    }
  };
  monaco.editor.onDidCreateModel(onModelAdd);
  monaco.editor.getModels().forEach(onModelAdd);
}

private async validate(resource: monaco.Uri): Promise<void> {
private async validate(resource: monaco.Uri, idx: number): Promise<void> {
  if (idx !== this.validateIdx) {
    return;
  }

  const worker = await this.worker(resource);
  const errorMarkers = await worker.getSyntaxErrors(resource.toString());

  if (idx !== this.validateIdx) {
    return;
  }

  if (errorMarkers) {
    const model = monaco.editor.getModel(resource);
    this.errors = {

@@ -75,6 +109,9 @@ export class DiagnosticsAdapter {
    // Set the error markers and underline them with "Error" severity
    monaco.editor.setModelMarkers(model!, ID, errorMarkers.map(toDiagnostics));
  }

  const isValid = errorMarkers === undefined || errorMarkers.length === 0;
  this.validation.next({ isValidating: false, isValid, errors: errorMarkers ?? [] });
}

public getSyntaxErrors() {
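The `validateIdx` counter above implements a simple "latest call wins" guard: every validation takes a ticket, and results whose ticket is no longer current are discarded. A minimal standalone sketch of the same pattern (illustrative names, not the adapter's actual API):

```ts
// Sketch of the "latest call wins" guard used by DiagnosticsAdapter. Each
// invocation takes a ticket from a monotonic counter; a result is dropped
// whenever a newer invocation has started in the meantime.
class LatestOnly<T> {
  private ticket = 0;

  async run(task: () => Promise<T>, onResult: (value: T) => void): Promise<void> {
    const myTicket = ++this.ticket;
    const value = await task();
    if (myTicket !== this.ticket) {
      return; // a newer call superseded us; discard the stale result
    }
    onResult(value);
  }
}
```

Checking the ticket both before and after the awaited worker call, as the diff does, means a validation that became stale while suspended never overwrites fresher state.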
@@ -8,7 +8,7 @@

import { ID } from './constants';
import { lexerRules, languageConfiguration } from './lexer_rules';
import { getSuggestionProvider, getSyntaxErrors } from './language';
import { getSuggestionProvider, getSyntaxErrors, validation$ } from './language';
import { CompleteLangModuleType } from '../types';

export const PainlessLang: CompleteLangModuleType = {

@@ -17,6 +17,7 @@ export const PainlessLang: CompleteLangModuleType = {
  lexerRules,
  languageConfiguration,
  getSyntaxErrors,
  validation$,
};

export * from './types';
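With `validation$` now exposed on the language module, a consumer can subscribe to validation state changes. A hedged sketch of such a call site (`@kbn/monaco` is assumed to be this package's entry point):

```ts
import { PainlessLang } from '@kbn/monaco'; // assumption: the package entry point

// Sketch: react to validation state, e.g. to disable a "Save" button while the
// editor content is invalid or still being validated.
const subscription = PainlessLang.validation$().subscribe(
  ({ isValid, isValidating, errors }) => {
    const canSave = isValid && !isValidating;
    if (!canSave && errors.length > 0) {
      console.log(`Painless syntax errors: ${errors.map((e) => e.message).join(', ')}`);
    }
  }
);

// Clean up when done, e.g. on component unmount:
subscription.unsubscribe();
```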
@@ -5,15 +5,16 @@
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

import { Observable, of } from 'rxjs';
import { monaco } from '../monaco_imports';

import { WorkerProxyService, EditorStateService } from './lib';
import { LangValidation, SyntaxErrors } from '../types';
import { ID } from './constants';
import { PainlessContext, PainlessAutocompleteField } from './types';
import { PainlessWorker } from './worker';
import { PainlessCompletionAdapter } from './completion_adapter';
import { DiagnosticsAdapter, SyntaxErrors } from './diagnostics_adapter';
import { DiagnosticsAdapter } from './diagnostics_adapter';

const workerProxyService = new WorkerProxyService();
const editorStateService = new EditorStateService();

@@ -37,9 +38,13 @@ let diagnosticsAdapter: DiagnosticsAdapter;

// Returns syntax errors for all models by model id
export const getSyntaxErrors = (): SyntaxErrors => {
  return diagnosticsAdapter.getSyntaxErrors();
  return diagnosticsAdapter?.getSyntaxErrors() ?? {};
};

export const validation$: () => Observable<LangValidation> = () =>
  diagnosticsAdapter?.validation$ ||
  of<LangValidation>({ isValid: true, isValidating: false, errors: [] });

monaco.languages.onLanguage(ID, async () => {
  workerProxyService.setup();
@@ -5,6 +5,8 @@
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import type { Observable } from 'rxjs';

import { monaco } from './monaco_imports';

export interface LangModuleType {

@@ -19,4 +21,23 @@ export interface CompleteLangModuleType extends LangModuleType {
  languageConfiguration: monaco.languages.LanguageConfiguration;
  getSuggestionProvider: Function;
  getSyntaxErrors: Function;
  validation$: () => Observable<LangValidation>;
}

export interface EditorError {
  startLineNumber: number;
  startColumn: number;
  endLineNumber: number;
  endColumn: number;
  message: string;
}

export interface LangValidation {
  isValidating: boolean;
  isValid: boolean;
  errors: EditorError[];
}

export interface SyntaxErrors {
  [modelId: string]: EditorError[];
}
@@ -1,13 +0,0 @@
/*
 * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
 * or more contributor license agreements. Licensed under the Elastic License
 * 2.0 and the Server Side Public License, v 1; you may not use this file except
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */

module.exports = {
  preset: '@kbn/test',
  rootDir: '../..',
  roots: ['<rootDir>/packages/kbn-securitysolution-list-constants'],
};
@@ -5,7 +5,7 @@
 * in compliance with, at your election, the Elastic License 2.0 or the Server
 * Side Public License, v 1.
 */
import { jsonRt } from '@kbn/io-ts-utils';
import { jsonRt } from '@kbn/io-ts-utils/json_rt';
import * as t from 'io-ts';
import { decodeRequestParams } from './decode_request_params';

@@ -69,7 +69,7 @@ describe('decodeRequestParams', () => {
    };

    expect(decode).toThrowErrorMatchingInlineSnapshot(`
      "Excess keys are not allowed:
"Excess keys are not allowed:
      path.extraKey"
    `);
  });
@@ -10,7 +10,7 @@ import { omitBy, isPlainObject, isEmpty } from 'lodash';
import { isLeft } from 'fp-ts/lib/Either';
import { PathReporter } from 'io-ts/lib/PathReporter';
import Boom from '@hapi/boom';
import { strictKeysRt } from '@kbn/io-ts-utils';
import { strictKeysRt } from '@kbn/io-ts-utils/strict_keys_rt';
import { RouteParamsRT } from './typings';

interface KibanaRequestParams {
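The imports above outline the decode flow this file implements: run an io-ts codec, check the resulting `Either`, and report path-level errors. A minimal, generic example of that pattern (plain io-ts, not this file's exact helper):

```ts
import * as t from 'io-ts';
import { isLeft } from 'fp-ts/lib/Either';
import { PathReporter } from 'io-ts/lib/PathReporter';

// Sketch: decode untrusted request params with an io-ts codec and surface
// human-readable, per-path validation errors.
const paramsRt = t.type({ id: t.string });

function decodeOrThrow(value: unknown): t.TypeOf<typeof paramsRt> {
  const result = paramsRt.decode(value);
  if (isLeft(result)) {
    // PathReporter lists each failing path, e.g.
    // "Invalid value undefined supplied to : { id: string }/id: string"
    throw new Error(PathReporter.report(result).join('\n'));
  }
  return result.right;
}
```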
@@ -46,7 +46,15 @@ module.exports = {
  modulePathIgnorePatterns: ['__fixtures__/', 'target/'],

  // Use this configuration option to add custom reporters to Jest
  reporters: ['default', '@kbn/test/target_node/jest/junit_reporter'],
  reporters: [
    'default',
    [
      '@kbn/test/target_node/jest/junit_reporter',
      {
        rootDirectory: '.',
      },
    ],
  ],

  // The paths to modules that run some code to configure or set up the testing environment before each test
  setupFiles: [
@@ -44,6 +44,7 @@ declare global {

export function runJest(configName = 'jest.config.js') {
  const argv = buildArgv(process.argv);
  const devConfigName = 'jest.config.dev.js';

  const log = new ToolingLog({
    level: argv.verbose ? 'verbose' : 'info',

@@ -52,11 +53,12 @@ export function runJest(configName = 'jest.config.js') {

  const runStartTime = Date.now();
  const reportTime = getTimeReporter(log, 'scripts/jest');
  let cwd: string;

  let testFiles: string[];

  const cwd: string = process.env.INIT_CWD || process.cwd();

  if (!argv.config) {
    cwd = process.env.INIT_CWD || process.cwd();
    testFiles = argv._.splice(2).map((p) => resolve(cwd, p));
    const commonTestFiles = commonBasePath(testFiles);
    const testFilesProvided = testFiles.length > 0;

@@ -66,18 +68,25 @@ export function runJest(configName = 'jest.config.js') {
    log.verbose('commonTestFiles:', commonTestFiles);

    let configPath;
    let devConfigPath;

    // sets the working directory to the cwd or the common
    // base directory of the provided test files
    let wd = testFilesProvided ? commonTestFiles : cwd;

    devConfigPath = resolve(wd, devConfigName);
    configPath = resolve(wd, configName);

    while (!existsSync(configPath)) {
    while (!existsSync(configPath) && !existsSync(devConfigPath)) {
      wd = resolve(wd, '..');
      devConfigPath = resolve(wd, devConfigName);
      configPath = resolve(wd, configName);
    }

    if (existsSync(devConfigPath)) {
      configPath = devConfigPath;
    }

    log.verbose(`no config provided, found ${configPath}`);
    process.argv.push('--config', configPath);
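The loop above walks up from the working directory until it finds either `jest.config.js` or a `jest.config.dev.js` override, preferring the dev config when both exist in the same directory. A condensed sketch of that resolution logic (illustrative helper name, plus a termination guard the snippet above leaves implicit):

```ts
import { existsSync } from 'fs';
import { resolve, dirname } from 'path';

// Sketch: walk up from `startDir` until a jest.config.js or jest.config.dev.js
// is found; the dev config wins when both exist in the same directory.
function findJestConfig(startDir: string): string {
  let wd = startDir;
  for (;;) {
    const devConfig = resolve(wd, 'jest.config.dev.js');
    const config = resolve(wd, 'jest.config.js');
    if (existsSync(devConfig)) return devConfig;
    if (existsSync(config)) return config;
    const parent = dirname(wd);
    if (parent === wd) throw new Error('no jest config found'); // reached fs root
    wd = parent;
  }
}
```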
@@ -26,7 +26,16 @@ const template: string = `module.exports = {
};
`;

const roots: string[] = ['x-pack/plugins', 'packages', 'src/plugins', 'test', 'src'];
const roots: string[] = [
  'x-pack/plugins/security_solution/public',
  'x-pack/plugins/security_solution/server',
  'x-pack/plugins/security_solution',
  'x-pack/plugins',
  'packages',
  'src/plugins',
  'test',
  'src',
];

export async function runCheckJestConfigsCli() {
  run(

@@ -76,7 +85,9 @@ export async function runCheckJestConfigsCli() {
        modulePath,
      });

      writeFileSync(resolve(root, name, 'jest.config.js'), content);
      const configPath = resolve(root, name, 'jest.config.js');
      log.info('created %s', configPath);
      writeFileSync(configPath, content);
    } else {
      log.warning(`Unable to determine where to place jest.config.js for ${file}`);
    }
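Ordering `roots` from most to least specific matters here: the first root that prefixes a test file's path wins, so deeper paths must come first. A small sketch of that matching rule (hypothetical helper, not the CLI's exact code):

```ts
// Sketch: pick the owning root for a test file. Because `roots` is ordered
// most-specific-first, 'x-pack/plugins/security_solution/public/foo.test.ts'
// matches the security_solution/public root rather than plain 'x-pack/plugins'.
const roots = [
  'x-pack/plugins/security_solution/public',
  'x-pack/plugins/security_solution/server',
  'x-pack/plugins/security_solution',
  'x-pack/plugins',
  'packages',
];

function ownerRoot(testFile: string): string | undefined {
  return roots.find((root) => testFile.startsWith(`${root}/`));
}
```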
@@ -7,7 +7,7 @@
 */
import React from 'react';
import * as t from 'io-ts';
import { toNumberRt } from '@kbn/io-ts-utils';
import { toNumberRt } from '@kbn/io-ts-utils/to_number_rt';
import { createRouter } from './create_router';
import { createMemoryHistory } from 'history';
import { route } from './route';
@@ -15,16 +15,10 @@ import {
} from 'react-router-config';
import qs from 'query-string';
import { findLastIndex, merge, compact } from 'lodash';
import type { deepExactRt as deepExactRtTyped, mergeRt as mergeRtTyped } from '@kbn/io-ts-utils';
// @ts-expect-error
import { deepExactRt as deepExactRtNonTyped } from '@kbn/io-ts-utils/target_node/deep_exact_rt';
// @ts-expect-error
import { mergeRt as mergeRtNonTyped } from '@kbn/io-ts-utils/target_node/merge_rt';
import { mergeRt } from '@kbn/io-ts-utils/merge_rt';
import { deepExactRt } from '@kbn/io-ts-utils/deep_exact_rt';
import { FlattenRoutesOf, Route, Router } from './types';

const deepExactRt: typeof deepExactRtTyped = deepExactRtNonTyped;
const mergeRt: typeof mergeRtTyped = mergeRtNonTyped;

function toReactRouterPath(path: string) {
  return path.replace(/(?:{([^\/]+)})/g, ':$1');
}
@@ -23,7 +23,6 @@ filegroup(
)

NPM_MODULE_EXTRA_FILES = [
  "eui_theme_vars/package.json",
  "package.json",
  "README.md"
]
@@ -1,4 +0,0 @@
{
  "main": "../target_node/eui_theme_vars.js",
  "types": "../target_types/eui_theme_vars.d.ts"
}
@@ -59,8 +59,7 @@ exports.externals = {
  '@elastic/eui/lib/services': '__kbnSharedDeps__.ElasticEuiLibServices',
  '@elastic/eui/lib/services/format': '__kbnSharedDeps__.ElasticEuiLibServicesFormat',
  '@elastic/eui/dist/eui_charts_theme': '__kbnSharedDeps__.ElasticEuiChartsTheme',
  '@elastic/eui/dist/eui_theme_light.json': '__kbnSharedDeps__.Theme.euiLightVars',
  '@elastic/eui/dist/eui_theme_dark.json': '__kbnSharedDeps__.Theme.euiDarkVars',

  // transient dep of eui
  'react-beautiful-dnd': '__kbnSharedDeps__.ReactBeautifulDnD',
  lodash: '__kbnSharedDeps__.Lodash',
@@ -6,7 +6,9 @@
 * Side Public License, v 1.
 */

/* eslint-disable-next-line @kbn/eslint/module_migration */
import { default as v8Light } from '@elastic/eui/dist/eui_theme_amsterdam_light.json';
/* eslint-disable-next-line @kbn/eslint/module_migration */
import { default as v8Dark } from '@elastic/eui/dist/eui_theme_amsterdam_dark.json';

const globals: any = typeof window === 'undefined' ? {} : window;
@@ -7,21 +7,35 @@
 */

import { accessSync, constants } from 'fs';
import { getConfigPath, getDataPath, getConfigDirectory } from './';
import { createAbsolutePathSerializer } from '@kbn/dev-utils';
import { getConfigPath, getDataPath, getLogsPath, getConfigDirectory } from './';

expect.addSnapshotSerializer(createAbsolutePathSerializer());

describe('Default path finder', () => {
  it('should find a kibana.yml', () => {
    const configPath = getConfigPath();
    expect(() => accessSync(configPath, constants.R_OK)).not.toThrow();
  it('should expose a path to the config directory', () => {
    expect(getConfigDirectory()).toMatchInlineSnapshot('<absolute path>/config');
  });

  it('should find a data directory', () => {
    const dataPath = getDataPath();
    expect(() => accessSync(dataPath, constants.R_OK)).not.toThrow();
  it('should expose a path to the kibana.yml', () => {
    expect(getConfigPath()).toMatchInlineSnapshot('<absolute path>/config/kibana.yml');
  });

  it('should expose a path to the data directory', () => {
    expect(getDataPath()).toMatchInlineSnapshot('<absolute path>/data');
  });

  it('should expose a path to the logs directory', () => {
    expect(getLogsPath()).toMatchInlineSnapshot('<absolute path>/logs');
  });

  it('should find a config directory', () => {
    const configDirectory = getConfigDirectory();
    expect(() => accessSync(configDirectory, constants.R_OK)).not.toThrow();
  });

  it('should find a kibana.yml', () => {
    const configPath = getConfigPath();
    expect(() => accessSync(configPath, constants.R_OK)).not.toThrow();
  });
});
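`createAbsolutePathSerializer` is what makes these inline snapshots machine-independent: it replaces the repo root with a stable `<absolute path>` token. A rough sketch of what such a serializer looks like (the shape is an assumption; the real implementation lives in `@kbn/dev-utils`, and `REPO_ROOT` here stands in for whatever root constant it uses):

```ts
import { REPO_ROOT } from '@kbn/utils'; // assumption: the repo-root constant

// Sketch of an absolute-path snapshot serializer: any string under the repo
// root is printed with the root replaced by a stable token, so snapshots do
// not differ between machines or checkout locations.
expect.addSnapshotSerializer({
  test: (value) => typeof value === 'string' && value.startsWith(REPO_ROOT),
  serialize: (value: string) => `"${value.replace(REPO_ROOT, '<absolute path>')}"`,
});
```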
@@ -27,6 +27,8 @@ const CONFIG_DIRECTORIES = [

const DATA_PATHS = [join(REPO_ROOT, 'data'), '/var/lib/kibana'].filter(isString);

const LOGS_PATHS = [join(REPO_ROOT, 'logs'), '/var/log/kibana'].filter(isString);

function findFile(paths: string[]) {
  const availablePath = paths.find((configPath) => {
    try {

@@ -57,6 +59,12 @@ export const getConfigDirectory = () => findFile(CONFIG_DIRECTORIES);
 */
export const getDataPath = () => findFile(DATA_PATHS);

/**
 * Get the directory containing logs
 * @internal
 */
export const getLogsPath = () => findFile(LOGS_PATHS);

export type PathConfigType = TypeOf<typeof config.schema>;

export const config = {
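`findFile` (shown truncated above) probes each candidate path and returns the first one that is actually accessible. A self-contained sketch of that pattern, assuming it falls back to the first candidate when none are readable:

```ts
import { accessSync, constants } from 'fs';

// Sketch: return the first candidate path that is readable; if none are,
// fall back to the first candidate (assumed behaviour of the real helper).
function findFile(paths: string[]): string {
  const availablePath = paths.find((candidate) => {
    try {
      accessSync(candidate, constants.R_OK);
      return true;
    } catch {
      return false;
    }
  });
  return availablePath || paths[0];
}

// e.g. findFile(['/repo/logs', '/var/log/kibana'])
```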
@@ -22,6 +22,7 @@ export class DocLinksService {
    // Documentation for `main` branches is still published at a `master` URL.
    const DOC_LINK_VERSION = kibanaBranch === 'main' ? 'master' : kibanaBranch;
    const ELASTIC_WEBSITE_URL = 'https://www.elastic.co/';
    const STACK_DOCS = `${ELASTIC_WEBSITE_URL}guide/en/elastic-stack/${DOC_LINK_VERSION}/`;
    const ELASTICSEARCH_DOCS = `${ELASTIC_WEBSITE_URL}guide/en/elasticsearch/reference/${DOC_LINK_VERSION}/`;
    const KIBANA_DOCS = `${ELASTIC_WEBSITE_URL}guide/en/kibana/${DOC_LINK_VERSION}/`;
    const FLEET_DOCS = `${ELASTIC_WEBSITE_URL}guide/en/fleet/${DOC_LINK_VERSION}/`;

@@ -36,6 +37,9 @@ export class DocLinksService {
      links: {
        settings: `${ELASTIC_WEBSITE_URL}guide/en/kibana/${DOC_LINK_VERSION}/settings.html`,
        elasticStackGetStarted: `${STACK_GETTING_STARTED}get-started-elastic-stack.html`,
        upgrade: {
          upgradingElasticStack: `${STACK_DOCS}upgrading-elastic-stack.html`,
        },
        apm: {
          kibanaSettings: `${KIBANA_DOCS}apm-settings-in-kibana.html`,
          supportedServiceMaps: `${KIBANA_DOCS}service-maps.html#service-maps-supported`,

@@ -154,11 +158,15 @@ export class DocLinksService {
          introduction: `${KIBANA_DOCS}index-patterns.html`,
          fieldFormattersNumber: `${KIBANA_DOCS}numeral.html`,
          fieldFormattersString: `${KIBANA_DOCS}field-formatters-string.html`,
          runtimeFields: `${KIBANA_DOCS}managing-index-patterns.html#runtime-fields`,
          runtimeFields: `${KIBANA_DOCS}managing-data-views.html#runtime-fields`,
        },
        addData: `${KIBANA_DOCS}connect-to-elasticsearch.html`,
        kibana: `${KIBANA_DOCS}index.html`,
        upgradeAssistant: `${KIBANA_DOCS}upgrade-assistant.html`,
        upgradeAssistant: {
          overview: `${KIBANA_DOCS}upgrade-assistant.html`,
          batchReindex: `${KIBANA_DOCS}batch-start-resume-reindex.html`,
          remoteReindex: `${ELASTICSEARCH_DOCS}docs-reindex.html#reindex-from-remote`,
        },
        rollupJobs: `${KIBANA_DOCS}data-rollups.html`,
        elasticsearch: {
          docsBase: `${ELASTICSEARCH_DOCS}`,

@@ -222,10 +230,11 @@ export class DocLinksService {
          remoteClustersProxy: `${ELASTICSEARCH_DOCS}remote-clusters.html#proxy-mode`,
          remoteClusersProxySettings: `${ELASTICSEARCH_DOCS}remote-clusters-settings.html#remote-cluster-proxy-settings`,
          scriptParameters: `${ELASTICSEARCH_DOCS}modules-scripting-using.html#prefer-params`,
          setupUpgrade: `${ELASTICSEARCH_DOCS}setup-upgrade.html`,
          shardAllocationSettings: `${ELASTICSEARCH_DOCS}modules-cluster.html#cluster-shard-allocation-settings`,
          transportSettings: `${ELASTICSEARCH_DOCS}modules-network.html#common-network-settings`,
          typesRemoval: `${ELASTICSEARCH_DOCS}removal-of-types.html`,
          setupUpgrade: `${ELASTICSEARCH_DOCS}setup-upgrade.html`,
          apiCompatibilityHeader: `${ELASTICSEARCH_DOCS}api-conventions.html#api-compatibility`,
        },
        siem: {
          guide: `${SECURITY_SOLUTION_DOCS}index.html`,

@@ -289,6 +298,7 @@ export class DocLinksService {
          outlierDetectionRoc: `${ELASTIC_WEBSITE_URL}guide/en/machine-learning/${DOC_LINK_VERSION}/ml-dfa-finding-outliers.html#ml-dfanalytics-roc`,
          regressionEvaluation: `${ELASTIC_WEBSITE_URL}guide/en/machine-learning/${DOC_LINK_VERSION}/ml-dfa-regression.html#ml-dfanalytics-regression-evaluation`,
          classificationAucRoc: `${ELASTIC_WEBSITE_URL}guide/en/machine-learning/${DOC_LINK_VERSION}/ml-dfa-classification.html#ml-dfanalytics-class-aucroc`,
          setUpgradeMode: `${ELASTICSEARCH_DOCS}ml-set-upgrade-mode.html`,
        },
        transforms: {
          guide: `${ELASTICSEARCH_DOCS}transforms.html`,

@@ -479,6 +489,7 @@ export class DocLinksService {
          fleetServerAddFleetServer: `${FLEET_DOCS}fleet-server.html#add-fleet-server`,
          settings: `${FLEET_DOCS}fleet-settings.html#fleet-server-hosts-setting`,
          settingsFleetServerHostSettings: `${FLEET_DOCS}fleet-settings.html#fleet-server-hosts-setting`,
          settingsFleetServerProxySettings: `${KIBANA_DOCS}fleet-settings-kb.html#fleet-data-visualizer-settings`,
          troubleshooting: `${FLEET_DOCS}fleet-troubleshooting.html`,
          elasticAgent: `${FLEET_DOCS}elastic-agent-installation.html`,
          beatsAgentComparison: `${FLEET_DOCS}beats-agent-comparison.html`,

@@ -490,6 +501,7 @@ export class DocLinksService {
          upgradeElasticAgent712lower: `${FLEET_DOCS}upgrade-elastic-agent.html#upgrade-7.12-lower`,
          learnMoreBlog: `${ELASTIC_WEBSITE_URL}blog/elastic-agent-and-fleet-make-it-easier-to-integrate-your-systems-with-elastic`,
          apiKeysLearnMore: `${KIBANA_DOCS}api-keys.html`,
          onPremRegistry: `${ELASTIC_WEBSITE_URL}guide/en/integrations-developer/${DOC_LINK_VERSION}/air-gapped.html`,
        },
        ecs: {
          guide: `${ELASTIC_WEBSITE_URL}guide/en/ecs/current/index.html`,

@@ -522,6 +534,9 @@ export interface DocLinksStart {
  readonly links: {
    readonly settings: string;
    readonly elasticStackGetStarted: string;
    readonly upgrade: {
      readonly upgradingElasticStack: string;
    };
    readonly apm: {
      readonly kibanaSettings: string;
      readonly supportedServiceMaps: string;

@@ -645,7 +660,11 @@ export interface DocLinksStart {
    };
    readonly addData: string;
    readonly kibana: string;
    readonly upgradeAssistant: string;
    readonly upgradeAssistant: {
      readonly overview: string;
      readonly batchReindex: string;
      readonly remoteReindex: string;
    };
    readonly rollupJobs: string;
    readonly elasticsearch: Record<string, string>;
    readonly siem: {

@@ -748,6 +767,7 @@ export interface DocLinksStart {
      fleetServerAddFleetServer: string;
      settings: string;
      settingsFleetServerHostSettings: string;
      settingsFleetServerProxySettings: string;
      troubleshooting: string;
      elasticAgent: string;
      datastreams: string;

@@ -757,6 +777,7 @@ export interface DocLinksStart {
      upgradeElasticAgent712lower: string;
      learnMoreBlog: string;
      apiKeysLearnMore: string;
      onPremRegistry: string;
    }>;
    readonly ecs: {
      readonly guide: string;
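Because `upgradeAssistant` changes from a string to an object of related links, existing consumers need a small update. A hedged before/after sketch at a call site (the `docLinks` shape below mirrors the `DocLinksStart` fragment above):

```ts
// Assumed shape, matching the DocLinksStart fragment in this diff.
declare const docLinks: {
  links: {
    upgradeAssistant: { overview: string; batchReindex: string; remoteReindex: string };
  };
};

// Before this change, `links.upgradeAssistant` was a single URL string:
//   const href = docLinks.links.upgradeAssistant;
// After, call sites pick a concrete page from the new object:
const overviewHref = docLinks.links.upgradeAssistant.overview;
const batchReindexHref = docLinks.links.upgradeAssistant.batchReindex;
```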
@@ -478,6 +478,9 @@ export interface DocLinksStart {
  readonly links: {
    readonly settings: string;
    readonly elasticStackGetStarted: string;
    readonly upgrade: {
      readonly upgradingElasticStack: string;
    };
    readonly apm: {
      readonly kibanaSettings: string;
      readonly supportedServiceMaps: string;

@@ -601,7 +604,11 @@ export interface DocLinksStart {
    };
    readonly addData: string;
    readonly kibana: string;
    readonly upgradeAssistant: string;
    readonly upgradeAssistant: {
      readonly overview: string;
      readonly batchReindex: string;
      readonly remoteReindex: string;
    };
    readonly rollupJobs: string;
    readonly elasticsearch: Record<string, string>;
    readonly siem: {

@@ -704,6 +711,7 @@ export interface DocLinksStart {
      fleetServerAddFleetServer: string;
      settings: string;
      settingsFleetServerHostSettings: string;
      settingsFleetServerProxySettings: string;
      troubleshooting: string;
      elasticAgent: string;
      datastreams: string;

@@ -713,6 +721,7 @@ export interface DocLinksStart {
      upgradeElasticAgent712lower: string;
      learnMoreBlog: string;
      apiKeysLearnMore: string;
      onPremRegistry: string;
    }>;
    readonly ecs: {
      readonly guide: string;
@@ -1,222 +1,504 @@
# Saved Object Migrations

- [Introduction](#introduction)
- [Algorithm steps](#algorithm-steps)
  - [INIT](#init)
    - [Next action](#next-action)
    - [New control state](#new-control-state)
  - [CREATE_NEW_TARGET](#create_new_target)
    - [Next action](#next-action-1)
    - [New control state](#new-control-state-1)
  - [LEGACY_SET_WRITE_BLOCK](#legacy_set_write_block)
    - [Next action](#next-action-2)
    - [New control state](#new-control-state-2)
  - [LEGACY_CREATE_REINDEX_TARGET](#legacy_create_reindex_target)
    - [Next action](#next-action-3)
    - [New control state](#new-control-state-3)
  - [LEGACY_REINDEX](#legacy_reindex)
    - [Next action](#next-action-4)
    - [New control state](#new-control-state-4)
  - [LEGACY_REINDEX_WAIT_FOR_TASK](#legacy_reindex_wait_for_task)
    - [Next action](#next-action-5)
    - [New control state](#new-control-state-5)
  - [LEGACY_DELETE](#legacy_delete)
    - [Next action](#next-action-6)
    - [New control state](#new-control-state-6)
  - [WAIT_FOR_YELLOW_SOURCE](#wait_for_yellow_source)
    - [Next action](#next-action-7)
    - [New control state](#new-control-state-7)
  - [SET_SOURCE_WRITE_BLOCK](#set_source_write_block)
    - [Next action](#next-action-8)
    - [New control state](#new-control-state-8)
  - [CREATE_REINDEX_TEMP](#create_reindex_temp)
    - [Next action](#next-action-9)
    - [New control state](#new-control-state-9)
  - [REINDEX_SOURCE_TO_TEMP_OPEN_PIT](#reindex_source_to_temp_open_pit)
    - [Next action](#next-action-10)
    - [New control state](#new-control-state-10)
  - [REINDEX_SOURCE_TO_TEMP_READ](#reindex_source_to_temp_read)
    - [Next action](#next-action-11)
    - [New control state](#new-control-state-11)
  - [REINDEX_SOURCE_TO_TEMP_TRANSFORM](#REINDEX_SOURCE_TO_TEMP_TRANSFORM)
    - [Next action](#next-action-12)
    - [New control state](#new-control-state-12)
  - [REINDEX_SOURCE_TO_TEMP_INDEX_BULK](#reindex_source_to_temp_index_bulk)
    - [Next action](#next-action-13)
    - [New control state](#new-control-state-13)
  - [REINDEX_SOURCE_TO_TEMP_CLOSE_PIT](#reindex_source_to_temp_close_pit)
    - [Next action](#next-action-14)
    - [New control state](#new-control-state-14)
  - [SET_TEMP_WRITE_BLOCK](#set_temp_write_block)
    - [Next action](#next-action-15)
    - [New control state](#new-control-state-15)
  - [CLONE_TEMP_TO_TARGET](#clone_temp_to_target)
    - [Next action](#next-action-16)
    - [New control state](#new-control-state-16)
  - [OUTDATED_DOCUMENTS_SEARCH](#outdated_documents_search)
    - [Next action](#next-action-17)
    - [New control state](#new-control-state-17)
  - [OUTDATED_DOCUMENTS_TRANSFORM](#outdated_documents_transform)
    - [Next action](#next-action-18)
    - [New control state](#new-control-state-18)
  - [UPDATE_TARGET_MAPPINGS](#update_target_mappings)
    - [Next action](#next-action-19)
    - [New control state](#new-control-state-19)
  - [UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK](#update_target_mappings_wait_for_task)
    - [Next action](#next-action-20)
    - [New control state](#new-control-state-20)
  - [MARK_VERSION_INDEX_READY_CONFLICT](#mark_version_index_ready_conflict)
    - [Next action](#next-action-21)
    - [New control state](#new-control-state-21)
- [Manual QA Test Plan](#manual-qa-test-plan)
  - [1. Legacy pre-migration](#1-legacy-pre-migration)
  - [2. Plugins enabled/disabled](#2-plugins-enableddisabled)
    - [Test scenario 1 (enable a plugin after migration):](#test-scenario-1-enable-a-plugin-after-migration)
    - [Test scenario 2 (disable a plugin after migration):](#test-scenario-2-disable-a-plugin-after-migration)
    - [Test scenario 3 (multiple instances, enable a plugin after migration):](#test-scenario-3-multiple-instances-enable-a-plugin-after-migration)
    - [Test scenario 4 (multiple instances, mixed plugin enabled configs):](#test-scenario-4-multiple-instances-mixed-plugin-enabled-configs)
Migrations are the mechanism by which saved object indices are kept up to date with the Kibana system. Plugin authors write their plugins to work with a certain set of mappings, and documents of a certain shape. Migrations ensure that the index actually conforms to those expectations.

# Introduction
In the past, the risk of downtime caused by Kibana's saved object upgrade
migrations has discouraged users from adopting the latest features. v2
migrations aim to solve this problem by minimizing the operational impact on
our users.

## Migrating the index
To achieve this it uses a new migration algorithm where every step of the
algorithm is idempotent. No matter at which step a Kibana instance gets
interrupted, it can always restart the migration from the beginning and repeat
all the steps without requiring any user intervention. This doesn't mean
migrations will never fail, but when they fail for intermittent reasons like
an Elasticsearch cluster running out of heap, Kibana will automatically be
able to successfully complete the migration once the cluster has enough heap.

When Kibana boots, prior to serving any requests, it performs a check to see if the kibana index needs to be migrated.
For more background information on the problem see the [saved object
migrations
RFC](https://github.com/elastic/kibana/blob/main/rfcs/text/0013_saved_object_migrations.md).

- If there are out of date docs, or mapping changes, or the current index is not aliased, the index is migrated.
- If the Kibana index does not exist, it is created.
# Algorithm steps
The design goals for the algorithm were to keep downtime below 10 minutes for
100k saved objects while guaranteeing no data loss and keeping steps as simple
and explicit as possible.

All of this happens prior to Kibana serving any http requests.

Here is the gist of what happens if an index migration is necessary:

* If `.kibana` (or whatever the Kibana index is named) is not an alias, it will be converted to one:
  * Reindex `.kibana` into `.kibana_1`
  * Delete `.kibana`
  * Create an alias `.kibana` that points to `.kibana_1`
* Create a `.kibana_2` index
* Copy all documents from `.kibana_1` into `.kibana_2`, running them through any applicable migrations
* Point the `.kibana` alias to `.kibana_2`

## Migrating Kibana clusters

If Kibana is being run in a cluster, migrations will be coordinated so that they only run on one Kibana instance at a time. This is done in a fairly rudimentary way. Let's say we have two Kibana instances, kibana1 and kibana2.

* kibana1 and kibana2 both start simultaneously and detect that the index requires migration
* kibana1 begins the migration and creates index `.kibana_4`
* kibana2 tries to begin the migration, but fails with the error `.kibana_4 already exists`
* kibana2 logs that it failed to create the migration index, and instead begins polling
  * Every few seconds, kibana2 instance checks the `.kibana` index to see if it is done migrating
  * Once `.kibana` is determined to be up to date, the kibana2 instance continues booting

In this example, if the `.kibana_4` index existed prior to Kibana booting, the entire migration process will fail, as all Kibana instances will assume another instance is migrating to the `.kibana_4` index. This problem is only fixable by deleting the `.kibana_4` index.

## Import / export

If a user attempts to import FanciPlugin 1.0 documents into a Kibana system that is running FanciPlugin 2.0, those documents will be migrated prior to being persisted in the Kibana index. If a user attempts to import documents having a migration version that is _greater_ than the current Kibana version, the documents will fail to import.

## Validation

It might happen that a user modifies their FanciPlugin 1.0 export file to have documents with a migrationVersion of 2.0.0. In this scenario, Kibana will store those documents as if they are up to date, even though they are not, and the result will be unknown, but probably undesirable behavior.

Similarly, Kibana server APIs assume that they are sent up to date documents unless a document specifies a migrationVersion. This means that out-of-date callers of our APIs will send us out-of-date documents, and those documents will be accepted and stored as if they are up-to-date.

To prevent this from happening, migration authors should _always_ write a [validation](../validation) function that throws an error if a document is not up to date, and this validation function should always be updated any time a new migration is added for the relevant document types.
## Document ownership

In the eyes of the migration system, only one plugin can own a saved object type, or a root-level property on a saved object.

So, let's say we have a document that looks like this:

```js
{
  type: 'dashboard',
  attributes: { title: 'whatever' },
  securityKey: '324234234kjlke2',
}
```

In this document, one plugin might own the `dashboard` type, and another plugin might own the `securityKey` type. If two or more plugins define securityKey migrations `{ migrations: { securityKey: { ... } } }`, Kibana will fail to start.

To write a migration for this document, the dashboard plugin might look something like this:

```js
uiExports: {
  migrations: {
    // This is whatever value your document's "type" field is
    dashboard: {
      // Takes a pre 1.9.0 dashboard doc, and converts it to 1.9.0
      '1.9.0': (doc) => {
        doc.attributes.title = doc.attributes.title.toUpperCase();
        return doc;
      },

      // Takes a 1.9.0 dashboard doc, and converts it to a 2.0.0
      '2.0.0': (doc) => {
        doc.attributes.title = doc.attributes.title + '!!!';
        return doc;
      },
    },
  },
  // ... normal uiExport stuff
}
```

After Kibana migrates the index, our example document would have `{ attributes: { title: 'WHATEVER!!!' } }`.

Each migration function only needs to be able to handle documents belonging to the previous version. The initial migration function (in this example, `1.9.0`) needs to be more flexible, as it may be passed documents of any pre `1.9.0` shape.

## Disabled plugins

If a plugin is disabled, all of its documents are retained in the Kibana index. They can be imported and exported. When the plugin is re-enabled, Kibana will migrate any out of date documents that were imported or retained while it was disabled.

## Configuration

Kibana index migrations expose a few config settings which might be tweaked:

* `migrations.scrollDuration` - The
  [scroll](https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-scroll.html#scroll-search-context)
  value used to read batches of documents from the source index. Defaults to
  `15m`.
* `migrations.batchSize` - The number of documents to read / transform / write
  at a time during index migrations.
* `migrations.pollInterval` - How often, in milliseconds, secondary Kibana
  instances will poll to see if the primary Kibana instance has finished
  migrating the index.
* `migrations.skip` - Skip running migrations on startup (defaults to false).
  This should only be used for running integration tests without a running
  elasticsearch cluster. Note: even though migrations won't run on startup,
  individual docs will still be migrated when read from ES.

## Example

To illustrate how migrations work, let's walk through an example, using a fictional plugin: `FanciPlugin`.

FanciPlugin 1.0 had a mapping that looked like this:

```js
{
  fanci: {
    properties: {
      fanciName: { type: 'keyword' },
    },
  },
}
```

But in 2.0, it was decided that `fanciName` should be renamed to `title`.

So, FanciPlugin 2.0 has a mapping that looks like this:

```js
{
  fanci: {
    properties: {
      title: { type: 'keyword' },
    },
  },
}
```

Note, the `fanciName` property is gone altogether. The problem is that lots of people have used FanciPlugin 1.0, and there are lots of documents out in the wild that have the `fanciName` property. FanciPlugin 2.0 won't know how to handle these documents, as it now expects that property to be called `title`.

To solve this problem, the FanciPlugin authors write a migration which will take all 1.0 documents and transform them into 2.0 documents.

FanciPlugin's uiExports is modified to have a migrations section that looks like this:

```js
uiExports: {
  migrations: {
    // This is whatever value your document's "type" field is
    fanci: {
      // This is the version of the plugin for which this migration was written, and
      // should follow semver conventions. Here, doc is a pre 2.0.0 document which this
      // function will modify to have the shape we expect in 2.0.0
      '2.0.0': (doc) => {
        const { fanciName } = doc.attributes;

        delete doc.attributes.fanciName;
        doc.attributes.title = fanciName;

        return doc;
      },
    },
  },
  // ... normal uiExport stuff
}
```

Now, whenever Kibana boots, if FanciPlugin is enabled, Kibana scans its index for any documents that have type 'fanci' and have a `migrationVersion.fanci` property that is anything other than `2.0.0`. If any such documents are found, the index is determined to be out of date (or at least of the wrong version), and Kibana attempts to migrate the index.

At the end of the migration, Kibana's fanci documents will look something like this:

```js
{
  id: 'someid',
  type: 'fanci',
  attributes: {
    title: 'Shazm!',
  },
  migrationVersion: { fanci: '2.0.0' },
}
```

Note, the migrationVersion property has been added, and it contains information about what migrations were applied to the document.

## Source code

The migrations source code is grouped into two folders:

* `core` - Contains index-agnostic, general migration logic, which could be reused for indices other than `.kibana`
* `kibana` - Contains a relatively light-weight wrapper around core, which provides `.kibana` index-specific logic

Generally, the code eschews classes in favor of functions and basic data structures. The publicly exported code is all class-based, however, in an attempt to conform to Kibana norms.

### Core

There are three core entry points.

* index_migrator - Logic for migrating an index
* document_migrator - Logic for migrating an individual document, used by index_migrator, but also by the saved object client to migrate docs during document creation
* build_active_mappings - Logic to convert mapping properties into a full index mapping object, including the core properties required by any saved object index
## Testing

Run Jest tests:

Documentation: https://www.elastic.co/guide/en/kibana/current/development-tests.html#_unit_testing

```
yarn test:jest src/core/server/saved_objects/migrations --watch
```

Run integration tests:

```
node scripts/functional_tests_server
node scripts/functional_test_runner --config test/api_integration/config.js --grep migration
```

The algorithm is implemented as a state-action machine based on https://www.microsoft.com/en-us/research/uploads/prod/2016/12/Computation-and-State-Machines.pdf

The state-action machine defines its behaviour in steps. Each step is a
transition from a control state s_i to the control state s_i+1 caused by an
action a_i.

```
s_i -> a_i -> s_i+1
s_i+1 -> a_i+1 -> s_i+2
```

Given a control state s1, `next(s1)` returns the next action to execute.
Actions are asynchronous; once the action resolves, we can use the action
response to determine the next state to transition to as defined by the
function `model(state, response)`.

We can then loosely define a step as:
```
s_i+1 = model(s_i, await next(s_i)())
```

When there are no more actions returned by `next` the state-action machine
terminates, such as in the DONE and FATAL control states.
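A compact TypeScript sketch of that loop (illustrative types; the real machine lives in `src/core/server/saved_objects/migrations`):

```ts
// Sketch of the state-action machine loop described above. `next` returns the
// action for the current control state (or null when the machine is done),
// and `model` folds the action's response into the next control state.
interface ControlState {
  controlState: string; // e.g. 'INIT', 'DONE', 'FATAL'
}

type Action<R> = () => Promise<R>;

async function migrationStateActionMachine<S extends ControlState, R>(
  initial: S,
  next: (state: S) => Action<R> | null,
  model: (state: S, response: R) => S
): Promise<S> {
  let state = initial;
  let action = next(state);
  while (action !== null) {
    // s_i+1 = model(s_i, await next(s_i)())
    state = model(state, await action());
    action = next(state);
  }
  return state; // terminal control state: DONE or FATAL
}
```

Because each action is idempotent, re-entering the loop from INIT after a crash replays the same transitions safely.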
What follows is a list of all control states. For each control state the
following is described:
- _next action_: the next action triggered by the current control state
- _new control state_: based on the action response, the possible new control states that the machine will transition to

Since the algorithm runs once for each saved object index the steps below
always reference a single saved object index `.kibana`. When Kibana starts up,
all the steps are also repeated for the `.kibana_task_manager` index but this
is left out of the description for brevity.

## INIT
### Next action
`fetchIndices`

Fetch the saved object indices, mappings and aliases to find the source index
and determine whether we're migrating from a legacy index or a v1 migrations
index.

### New control state
1. If `.kibana` and the version specific aliases both exist and are pointing
to the same index, this version's migration has already been completed. But since
the same version could have plugins enabled at any time that would introduce
new transforms or mappings, we still check for outdated documents.
   → `OUTDATED_DOCUMENTS_SEARCH`

2. If `.kibana` is pointing to an index that belongs to a later version of
Kibana, e.g. a 7.11.0 instance found the `.kibana` alias pointing to
`.kibana_7.12.0_001`, fail the migration
   → `FATAL`

3. If the `.kibana` alias exists we're migrating from either a v1 or v2 index
and the migration source index is the index the `.kibana` alias points to.
   → `WAIT_FOR_YELLOW_SOURCE`

4. If `.kibana` is a concrete index, we're migrating from a legacy index
   → `LEGACY_SET_WRITE_BLOCK`

5. If there are no `.kibana` indices, this is a fresh deployment. Initialize a
new saved objects index
   → `CREATE_NEW_TARGET`

## CREATE_NEW_TARGET
### Next action
`createIndex`

Create the target index. This operation is idempotent: if the index already exists, we wait until its status turns yellow.

### New control state
→ `MARK_VERSION_INDEX_READY`

## LEGACY_SET_WRITE_BLOCK
### Next action
`setWriteBlock`

Set a write block on the legacy index to prevent any older Kibana instances
from writing to the index while the migration is in progress, which could cause
lost acknowledged writes.

This is the first of a series of `LEGACY_*` control states that will:
- reindex the concrete legacy `.kibana` index into a `.kibana_pre6.5.0_001` index
- delete the concrete `.kibana` _index_ so that we're able to create a `.kibana` _alias_

### New control state
1. If the write block was successfully added
   → `LEGACY_CREATE_REINDEX_TARGET`
2. If the write block failed because the index doesn't exist, it means another instance already completed the legacy pre-migration. Proceed to the next step.
   → `LEGACY_CREATE_REINDEX_TARGET`

## LEGACY_CREATE_REINDEX_TARGET
### Next action
`createIndex`

Create a new `.kibana_pre6.5.0_001` index into which we can reindex the legacy
index. (Since the task manager index was converted from a data index into a
saved objects index in 7.4 it will be reindexed into `.kibana_pre7.4.0_001`)
### New control state
→ `LEGACY_REINDEX`

## LEGACY_REINDEX
### Next action
`reindex`

Let Elasticsearch reindex the legacy index into `.kibana_pre6.5.0_001`. (For
the task manager index we specify a `preMigrationScript` to convert the
original task manager documents into valid saved objects)
### New control state
→ `LEGACY_REINDEX_WAIT_FOR_TASK`

## LEGACY_REINDEX_WAIT_FOR_TASK
### Next action
`waitForReindexTask`

Wait for up to 60s for the reindex task to complete.
### New control state
1. If the reindex task completed
   → `LEGACY_DELETE`
2. If the reindex task failed with a `target_index_had_write_block` or
`index_not_found_exception`, another instance already completed this step
   → `LEGACY_DELETE`
3. If the reindex task is still in progress
   → `LEGACY_REINDEX_WAIT_FOR_TASK`

## LEGACY_DELETE
### Next action
`updateAliases`

Use the updateAliases API to atomically remove the legacy index and create a
new `.kibana` alias that points to `.kibana_pre6.5.0_001`.
### New control state
1. If the action succeeds
   → `SET_SOURCE_WRITE_BLOCK`
2. If the action fails with `remove_index_not_a_concrete_index` or
`index_not_found_exception`, another instance has already completed this step.
   → `SET_SOURCE_WRITE_BLOCK`

## WAIT_FOR_YELLOW_SOURCE
### Next action
`waitForIndexStatusYellow`

Wait for the Elasticsearch cluster to be in "yellow" state. It means the index's primary shard is allocated and the index is ready for searching/indexing documents, but ES wasn't able to allocate the replicas.
We don't have as much data redundancy as we could have, but it's enough to start the migration.

### New control state
→ `SET_SOURCE_WRITE_BLOCK`

## SET_SOURCE_WRITE_BLOCK
### Next action
`setWriteBlock`

Set a write block on the source index to prevent any older Kibana instances from writing to the index while the migration is in progress, which could cause lost acknowledged writes.

### New control state
→ `CREATE_REINDEX_TEMP`

## CREATE_REINDEX_TEMP
### Next action
`createIndex`

This operation is idempotent: if the index already exists, we wait until its status turns yellow.

- Because we will be transforming documents before writing them into this index, we can already set the mappings to the target mappings for this version. The source index might contain documents belonging to a disabled plugin. So set `dynamic: false` mappings for any unknown saved object types.
- (Since we never query the temporary index we can potentially disable refresh to speed up indexing performance. Profile to see if gains justify complexity)

### New control state
→ `REINDEX_SOURCE_TO_TEMP_OPEN_PIT`

## REINDEX_SOURCE_TO_TEMP_OPEN_PIT
### Next action
`openPIT`

Open a PIT. Since there is a write block on the source index there is basically no overhead to keeping the PIT, so we can lean towards a larger `keep_alive` value like 10 minutes.
### New control state
→ `REINDEX_SOURCE_TO_TEMP_READ`

## REINDEX_SOURCE_TO_TEMP_READ
### Next action
`readNextBatchOfSourceDocuments`

Read the next batch of outdated documents from the source index by using search after with our PIT.

### New control state
1. If the batch contained > 0 documents
   → `REINDEX_SOURCE_TO_TEMP_TRANSFORM`
2. If there are no more documents returned
   → `REINDEX_SOURCE_TO_TEMP_CLOSE_PIT`

## REINDEX_SOURCE_TO_TEMP_TRANSFORM
### Next action
`transformRawDocs`

Transform the current batch of documents.

In order to support sharing saved objects to multiple spaces in 8.0, the
transforms will also regenerate document `_id`'s. To ensure that this step
remains idempotent, the new `_id` is deterministically generated using UUIDv5,
ensuring that each Kibana instance generates the same new `_id` for the same document.
### New control state
→ `REINDEX_SOURCE_TO_TEMP_INDEX_BULK`
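Deterministic id regeneration can be sketched with a UUIDv5 (name-based) function: hashing the same namespace and name always yields the same id, so every instance converges on identical `_id`s. An illustrative sketch using the `uuid` package — the namespace and naming scheme here are assumptions, not the migration's exact ones:

```ts
import { v5 as uuidv5 } from 'uuid';

// Hypothetical namespace UUID; the real migration uses its own fixed namespace.
const SAVED_OBJECT_NAMESPACE = '2f2d8f5e-6f9d-4a33-8b1a-6f3f4d1c2a3b';

// UUIDv5 is a pure function of (name, namespace): re-running the transform on
// the same document always regenerates the same _id, keeping the step idempotent.
function regenerateId(spaceId: string, type: string, originalId: string): string {
  return uuidv5(`${spaceId}:${type}:${originalId}`, SAVED_OBJECT_NAMESPACE);
}

// regenerateId('default', 'dashboard', 'abc123') returns the same UUID on every instance.
```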
## REINDEX_SOURCE_TO_TEMP_INDEX_BULK
### Next action
`bulkIndexTransformedDocuments`

Use the bulk API create action to write a batch of up-to-date documents. The
create action ensures that there will be only one write per reindexed document
even if multiple Kibana instances are performing this step. Use
`refresh=false` to speed up the create actions; the `UPDATE_TARGET_MAPPINGS`
step will ensure that the index is refreshed before we start serving traffic.

The following errors are ignored because they mean another instance already
completed this step:
- documents already exist in the temp index
- temp index has a write block
- temp index is not found
### New control state
1. If `currentBatch` is the last batch in `transformedDocBatches`
   → `REINDEX_SOURCE_TO_TEMP_READ`
2. If there are more batches left in `transformedDocBatches`
   → `REINDEX_SOURCE_TO_TEMP_INDEX_BULK`

## REINDEX_SOURCE_TO_TEMP_CLOSE_PIT
### Next action
`closePIT`

### New control state
→ `SET_TEMP_WRITE_BLOCK`

## SET_TEMP_WRITE_BLOCK
### Next action
`setWriteBlock`

Set a write block on the temporary index so that we can clone it.
### New control state
→ `CLONE_TEMP_TO_TARGET`

## CLONE_TEMP_TO_TARGET
### Next action
`cloneIndex`

Ask Elasticsearch to clone the temporary index into the target index. If the target index already exists (because another node already started the clone operation), wait until the clone is complete by waiting for a yellow index status.

We can't use the temporary index as our target index because one instance can complete the migration, delete a document, and then a second instance starts the reindex operation and re-creates the deleted document. By cloning the temporary index and only accepting writes/deletes from the cloned target index, we prevent lost acknowledged deletes.

### New control state
→ `OUTDATED_DOCUMENTS_SEARCH`

## OUTDATED_DOCUMENTS_SEARCH
### Next action
`searchForOutdatedDocuments`

Search for outdated saved object documents. Will return one batch of
documents.

If another instance has a disabled plugin it will reindex that plugin's
documents without transforming them. Because this instance doesn't know which
plugins were disabled by the instance that performed the
`REINDEX_SOURCE_TO_TEMP_TRANSFORM` step, we need to search for outdated documents
and transform them to ensure that everything is up to date.

### New control state
1. Found outdated documents?
   → `OUTDATED_DOCUMENTS_TRANSFORM`
2. All documents up to date
   → `UPDATE_TARGET_MAPPINGS`

## OUTDATED_DOCUMENTS_TRANSFORM
### Next action
`transformRawDocs` + `bulkOverwriteTransformedDocuments`

Once transformed, we use an index operation to overwrite the outdated document with the up-to-date version. Optimistic concurrency control ensures that we only overwrite the document once, so that any updates/writes by another instance which already completed the migration aren't overwritten and lost.

### New control state
→ `OUTDATED_DOCUMENTS_SEARCH`
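Optimistic concurrency control in Elasticsearch is expressed with `if_seq_no`/`if_primary_term`: an overwrite only succeeds if the document is unchanged since it was read. A hedged sketch with the Elasticsearch JS client — the parameter names are the client's, the surrounding logic is illustrative:

```ts
import { Client } from '@elastic/elasticsearch';

const client = new Client({ node: 'http://localhost:9200' });

// Sketch: overwrite an outdated document only if it is unchanged since we read
// it (seqNo/primaryTerm come from the search hit). If another instance already
// wrote a newer version, ES rejects the write instead of clobbering it.
async function overwriteIfUnchanged(
  index: string,
  id: string,
  seqNo: number,
  primaryTerm: number,
  transformedDoc: Record<string, unknown>
) {
  try {
    await client.index({
      index,
      id,
      body: transformedDoc,
      if_seq_no: seqNo,
      if_primary_term: primaryTerm,
    });
  } catch (e) {
    // A 409 version_conflict_engine_exception means a newer write exists, so the
    // document can be skipped; real code would rethrow any other error.
  }
}
```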
## UPDATE_TARGET_MAPPINGS
### Next action
`updateAndPickupMappings`

If another instance has some plugins disabled it will disable the mappings of that plugin's types when creating the temporary index. This action will
update the mappings and then use an update_by_query to ensure that all fields are "picked-up" and ready to be searched over.

### New control state
→ `UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK`

## UPDATE_TARGET_MAPPINGS_WAIT_FOR_TASK
### Next action
`updateAliases`

Atomically apply the `versionIndexReadyActions` using the _alias actions API. By performing the following actions we guarantee that if multiple versions of Kibana started the upgrade in parallel, only one version will succeed.

1. verify that the current alias is still pointing to the source index
2. Point the version alias and the current alias to the target index.
3. Remove the temporary index

### New control state
1. If all the actions succeed we're ready to serve traffic
   → `DONE`
2. If action (1) fails with alias_not_found_exception or action (3) fails with index_not_found_exception, another instance already completed the migration
   → `MARK_VERSION_INDEX_READY_CONFLICT`

## MARK_VERSION_INDEX_READY_CONFLICT
### Next action
`fetchIndices`

Fetch the saved object indices.

### New control state
If another instance completed a migration from the same source we need to verify that it is running the same version.

1. If the current and version aliases are pointing to the same index, the instance that completed the migration was on the same version and it's safe to start serving traffic.
   → `DONE`
2. If the other instance was running a different version we fail the migration. Once we restart, one of two things can happen: the other instance is an older version and we will restart the migration, or it's a newer version and we will refuse to start up.
   → `FATAL`

# Manual QA Test Plan
## 1. Legacy pre-migration
When upgrading from a legacy index additional steps are required before the
regular migration process can start.

We have the following potential legacy indices:
- v5.x index that wasn't upgraded -> kibana should refuse to start the migration
- v5.x index that was upgraded to v6.x: `.kibana-6` _index_ with `.kibana` _alias_
- < v6.5 `.kibana` _index_ (Saved Object Migrations were
  introduced in v6.5 https://github.com/elastic/kibana/pull/20243)
- TODO: Test versions which introduced the `kibana_index_template` template?
- < v7.4 `.kibana_task_manager` _index_ (Task Manager started
  using Saved Objects in v7.4 https://github.com/elastic/kibana/pull/39829)

Test plan:
1. Ensure that the different versions of Kibana listed above can successfully
   upgrade to 7.11.
2. Ensure that multiple Kibana nodes can migrate a legacy index in parallel
   (choose a representative legacy version to test with e.g. v6.4). Add a lot
   of Saved Objects to Kibana to increase the time it takes for a migration to
   complete which will make it easier to introduce failures.
   1. If all instances are started in parallel the upgrade should succeed
   2. If nodes are randomly restarted shortly after they start participating
      in the migration the upgrade should either succeed or never complete.
      However, if a fatal error occurs it should never result in permanent
      failure.
      1. Start one instance, wait 500 ms
      2. Start a second instance
      3. If an instance starts a saved object migration, wait X ms before
         killing the process and restarting the migration.
      4. Keep decreasing X until migrations are barely able to complete.
      5. If a migration fails with a fatal error, start a Kibana that doesn't
         get restarted. Given enough time, it should always be able to
         successfully complete the migration.

For a successful migration the following behaviour should be observed:
1. The `.kibana` index should be reindexed into a `.kibana_pre6.5.0` index
2. The `.kibana` index should be deleted
3. The `.kibana_index_template` should be deleted
4. The `.kibana_pre6.5.0` index should have a write block applied
5. Documents from `.kibana_pre6.5.0` should be migrated into `.kibana_7.11.0_001`
6. Once migration has completed, the `.kibana_current` and `.kibana_7.11.0`
   aliases should point to the `.kibana_7.11.0_001` index.

## 2. Plugins enabled/disabled
Kibana plugins can be disabled/enabled at any point in time. We need to ensure
that Saved Object documents are migrated for all the possible sequences of
enabling, disabling, before or after a version upgrade.

### Test scenario 1 (enable a plugin after migration):
1. Start an old version of Kibana (< 7.11)
2. Create a document that we know will be migrated in a later version (i.e.
   create a `dashboard`)
3. Disable the plugin to which the document belongs (i.e `dashboard` plugin)
4. Upgrade Kibana to v7.11 making sure the plugin in step (3) is still disabled.
5. Enable the plugin from step (3)
6. Restart Kibana
7. Ensure that the document from step (2) has been migrated
   (`migrationVersion` contains 7.11.0)

### Test scenario 2 (disable a plugin after migration):
1. Start an old version of Kibana (< 7.11)
2. Create a document that we know will be migrated in a later version (i.e.
   create a `dashboard`)
3. Upgrade Kibana to v7.11 making sure the plugin from step (2) is enabled.
4. Disable the plugin to which the document belongs (i.e `dashboard` plugin)
5. Restart Kibana
6. Ensure that Kibana logs a warning, but continues to start even though there
   are saved object documents which don't belong to an enabled plugin

### Test scenario 3 (multiple instances, enable a plugin after migration):
Follow the steps from 'Test scenario 1', but perform the migration with
multiple instances of Kibana

### Test scenario 4 (multiple instances, mixed plugin enabled configs):
We don't support this upgrade scenario, but it's worth making sure we don't
have data loss when there's a user error.
1. Start an old version of Kibana (< 7.11)
2. Create a document that we know will be migrated in a later version (i.e.
   create a `dashboard`)
3. Disable the plugin to which the document belongs (i.e `dashboard` plugin)
4. Upgrade Kibana to v7.11 using multiple instances of Kibana. The plugin from
   step (3) should be enabled on half of the instances and disabled on the
   other half.
5. Ensure that the document from step (2) has been migrated
   (`migrationVersion` contains 7.11.0)