* build immutable bundles for new platform plugins * only inspect workers if configured to do so * [navigation] use an index.scss file * add yarn.lock symlink * set pluginScanDirs in test so fixtures stay consistent * cleanup helpers a little * fix type error * support KBN_OPTIMIZER_MAX_WORKERS for limiting workers via env * test support for KBN_OPTIMIZER_MAX_WORKERS * expand the available memory for workers when only running one or two * add docs about KBN_OPTIMIZER_MAX_WORKERS environment variable * fix README link * update kbn/pm dist * implement bundle caching/reuse * update kbn/pm dist * don't check for cache if --no-cache is passed * update renovate config * standardize on index.scss, move console styles over * add support for --no-cache to cli * include worker config vars in optimizer version * ignore concatenated modules * update integration test * add safari to browserslist to avoid user-agent warnings in dev * update docs, clean up optimizer message/misc naming * always handle initialized messages, don't ignore states that are attached to specific events * reword caching docs, add environment var to disable caching * tweak logging and don't use optimizer.useBundleCache as that's disabled in dev * handle change notifications * batch changes for 1 second * rename CompilerState type to CompilerMsg * getChanges() no longer needs to assign changes to dirs * remove unused deps * split up run_worker.ts and share cacheKey generation logic * add a couple docs * update tests and remove unused imports * specify files when creating bundle cache key * remove one more unused import * match existing dev cli output more closely * update kbn/pm dist * set KBN_NP_PLUGINS_BUILT to avoid warning in CI * avoid extending global window type * add note to keep pluginScanDirs in sync * pass browserslistEnv in workerConfig so it is used for cache key * load commons.bundle.js in parallel too * emit initialized+success states if all bundles are cached * load bootstraps as quickly as 
possible * skip flaky suite * bump * update jest snapshots * remove hashing from cache key generation * remove unnecessary non-null assertion * improve docs and break up Optimizer#run() * remove unused import * refactor kbn/optimizer to break up observable logic, implement more helpful cache invalidation logic with logging * fix tests * add initializing phase * avoid rxjs observable constructor * remove unnecessary rxjs helper, add tests for bundle cache * update consumers of optimizer * update readme with new call style * replace "new platform" with "kibana platform" * fix a couple more renames * add support for several plain-text file formats * fix naming of OptimizerMsg => OptimizerUpdate, use "store" naming too * one more OptimizerMsg update * ensure bundles are not cached when cache config is false * test for initializing states and bundle cache events * remove unnecessary timeout change * Remove unnecessary helpers * Add tests for BundleCache class * Add tests for Bundle class * test summarizeEvent$ * missing paths are no longer listed in mtimes map * add tests for optimizer/cache_keys * Add some extra docs * Remove labeled loop * add integration test for kbn-optimizer watcher components * querystring-browser removed * tweak logging a smidge, improve info and final message * remove unused imports * remove duplication of getModuleCount() method * move type annotation that validates things * clear up the build completion message Co-authored-by: Elastic Machine <elasticmachine@users.noreply.github.com>
303 lines
7.9 KiB
Groovy
// Returns a closure that provisions a single 'tests-xl' agent, runs setup and
// `preWorkerClosure` once, then executes every closure in `workerClosures`
// in parallel, passing each one its 1-based worker number.
def withWorkers(machineName, preWorkerClosure = {}, workerClosures = [:]) {
  return {
    jobRunner('tests-xl', true) {
      withGcsArtifactUpload(machineName, {
        withPostBuildReporting {
          doSetup()
          preWorkerClosure()

          def workerCounter = 1

          // Wraps a worker closure so it gets a stable number and a staggered start.
          def buildWorker = { workerClosure ->
            def assignedNumber = workerCounter
            workerCounter++

            return {
              // This delay helps smooth out CPU load caused by ES/Kibana instances starting up at the same time
              def startupDelay = (assignedNumber - 1) * 20
              sleep(startupDelay)

              workerClosure(assignedNumber)
            }
          }

          def parallelWorkers = [:]
          workerClosures.each { workerName, workerClosure ->
            parallelWorkers[workerName] = buildWorker(workerClosure)
          }

          parallel(parallelWorkers)
        }
      })
    }
  }
}
|
|
|
|
// Returns a closure that runs `taskClosure` on a single agent matching
// `label`, with standard setup, GCS artifact upload, and post-build reporting.
def withWorker(machineName, label, Closure taskClosure) {
  return {
    jobRunner(label, false) {
      withGcsArtifactUpload(machineName) {
        withPostBuildReporting {
          doSetup()
          taskClosure()
        }
      }
    }
  }
}
|
|
|
|
// Returns a closure that runs `script` via runbld on a linux immutable
// worker, with JOB set so reporting can attribute results to `jobName`.
def intakeWorker(jobName, String script) {
  return withWorker(jobName, 'linux && immutable') {
    withEnv(["JOB=${jobName}"]) {
      runbld(script, "Execute ${jobName}")
    }
  }
}
|
|
|
|
// Runs `body`, then always performs the post-build reporting steps.
// Each step is wrapped in catchError so a failure in one doesn't prevent
// the others from running (and doesn't mask the original build result).
def withPostBuildReporting(Closure body) {
  try {
    body()
  } finally {
    catchError {
      runErrorReporter()
    }

    catchError {
      runbld.junit()
    }

    catchError {
      publishJunit()
    }
  }
}
|
|
|
|
// Returns a worker closure that, given its worker number, derives a unique
// set of test ports (61<worker>1/2/3) and runs `body` with the standard
// per-worker test environment.
// NOTE(review): the `name` parameter is not referenced in this body — kept
// for caller compatibility.
def getPostBuildWorker(name, body) {
  return { workerNumber ->
    def kibanaPort = "61${workerNumber}1"
    def esPort = "61${workerNumber}2"
    def esTransportPort = "61${workerNumber}3"

    def workerEnv = [
      "CI_WORKER_NUMBER=${workerNumber}",
      "TEST_KIBANA_HOST=localhost",
      "TEST_KIBANA_PORT=${kibanaPort}",
      "TEST_KIBANA_URL=http://elastic:changeme@localhost:${kibanaPort}",
      "TEST_ES_URL=http://elastic:changeme@localhost:${esPort}",
      "TEST_ES_TRANSPORT_PORT=${esTransportPort}",
      "IS_PIPELINE_JOB=1",
      "KBN_NP_PLUGINS_BUILT=true",
    ]

    withEnv(workerEnv) {
      body()
    }
  }
}
|
|
|
|
// Builds a post-build worker that runs one OSS functional-test ciGroup,
// retrying the group on failure via retryable().
def getOssCiGroupWorker(ciGroup) {
  def jobId = "kibana-ciGroup${ciGroup}"

  return getPostBuildWorker("ciGroup${ciGroup}", {
    withEnv([
      "CI_GROUP=${ciGroup}",
      "JOB=${jobId}",
    ]) {
      retryable(jobId) {
        runbld("./test/scripts/jenkins_ci_group.sh", "Execute ${jobId}")
      }
    }
  })
}
|
|
|
|
// Builds a post-build worker that runs one X-Pack functional-test ciGroup,
// retrying the group on failure via retryable().
def getXpackCiGroupWorker(ciGroup) {
  def jobId = "xpack-kibana-ciGroup${ciGroup}"

  return getPostBuildWorker("xpack-ciGroup${ciGroup}", {
    withEnv([
      "CI_GROUP=${ciGroup}",
      "JOB=${jobId}",
    ]) {
      retryable(jobId) {
        runbld("./test/scripts/jenkins_xpack_ci_group.sh", "Execute ${jobId}")
      }
    }
  })
}
|
|
|
|
// Allocates a Jenkins agent matching `label`, optionally backs the workspace
// with a /dev/shm ram disk, checks out the repo with retries, then runs
// `closure` inside the ./kibana checkout with CI env vars and vault
// credentials in scope.
def jobRunner(label, useRamDisk, closure) {
  node(label) {
    agentInfo.print()

    if (useRamDisk) {
      // Move to a temporary workspace, so that we can symlink the real workspace into /dev/shm
      // (the ws() step is only used to get off the real workspace while it is replaced)
      def originalWorkspace = env.WORKSPACE
      ws('/tmp/workspace') {
        sh(
          script: """
            mkdir -p /dev/shm/workspace
            mkdir -p '${originalWorkspace}' # create all of the directories leading up to the workspace, if they don't exist
            rm --preserve-root -rf '${originalWorkspace}' # then remove just the workspace, just in case there's stuff in it
            ln -s /dev/shm/workspace '${originalWorkspace}'
          """,
          label: "Move workspace to RAM - /dev/shm/workspace"
        )
      }
    }

    def scmVars

    // Try to clone from Github up to 8 times, waiting 15 secs between attempts
    retryWithDelay(8, 15) {
      scmVars = checkout scm
    }

    withEnv([
      "CI=true",
      "HOME=${env.JENKINS_HOME}",
      // ghprb* vars only exist on pull-request builds; fall back to ''
      "PR_SOURCE_BRANCH=${env.ghprbSourceBranch ?: ''}",
      "PR_TARGET_BRANCH=${env.ghprbTargetBranch ?: ''}",
      "PR_AUTHOR=${env.ghprbPullAuthorLogin ?: ''}",
      "TEST_BROWSER_HEADLESS=1",
      "GIT_BRANCH=${scmVars.GIT_BRANCH}",
    ]) {
      withCredentials([
        string(credentialsId: 'vault-addr', variable: 'VAULT_ADDR'),
        string(credentialsId: 'vault-role-id', variable: 'VAULT_ROLE_ID'),
        string(credentialsId: 'vault-secret-id', variable: 'VAULT_SECRET_ID'),
      ]) {
        // scm is configured to check out to the ./kibana directory
        dir('kibana') {
          closure()
        }
      }
    }
  }
}
|
|
|
|
// Uploads files matching `pattern` to gs://<uploadPrefix>, shared publicly
// so they can be linked from build reports.
def uploadGcsArtifact(uploadPrefix, pattern) {
  def uploadArgs = [
    credentialsId: 'kibana-ci-gcs-plugin',
    bucket: "gs://${uploadPrefix}",
    pattern: pattern,
    sharedPublicly: true,
    showInline: true
  ]

  googleStorageUpload(uploadArgs)
}
|
|
|
|
// Pulls this build's coverage artifacts down from GCS into /tmp/downloaded_coverage.
def downloadCoverageArtifacts() {
  def sourceUri = "gs://kibana-ci-artifacts/jobs/${env.JOB_NAME}/${BUILD_NUMBER}/coverage/"
  def destDir = "/tmp/downloaded_coverage"

  sh "mkdir -p '${destDir}' && gsutil -m cp -r '${sourceUri}' '${destDir}'"
}
|
|
|
|
// Uploads files matching `pattern` under this build's coverage/<prefix> GCS path.
def uploadCoverageArtifacts(prefix, pattern) {
  uploadGcsArtifact("kibana-ci-artifacts/jobs/${env.JOB_NAME}/${BUILD_NUMBER}/coverage/${prefix}", pattern)
}
|
|
|
|
// Sets GCS_UPLOAD_PREFIX for the duration of `closure` and, afterwards (even
// on failure), uploads the standard artifact patterns to a build/worker-scoped
// GCS prefix. When CODE_COVERAGE is set, also tarballs and uploads coverage output.
def withGcsArtifactUpload(workerName, closure) {
  def uploadPrefix = "kibana-ci-artifacts/jobs/${env.JOB_NAME}/${BUILD_NUMBER}/${workerName}"
  def ARTIFACT_PATTERNS = [
    'target/kibana-*',
    'target/junit/**/*',
    'test/**/screenshots/**/*.png',
    'test/functional/failure_debug/html/*.html',
    'x-pack/test/**/screenshots/**/*.png',
    'x-pack/test/functional/failure_debug/html/*.html',
    'x-pack/test/functional/apps/reporting/reports/session/*.pdf',
  ]

  withEnv([
    "GCS_UPLOAD_PREFIX=${uploadPrefix}"
  ], {
    try {
      closure()
    } finally {
      // catchError keeps a failed upload from overriding the build result
      catchError {
        ARTIFACT_PATTERNS.each { pattern ->
          uploadGcsArtifact(uploadPrefix, pattern)
        }
      }
    }
  })

  // NOTE(review): this runs after (outside) the try/finally above, so coverage
  // is only uploaded when `closure` completes without throwing — confirm
  // that's intended. The tar glob also relies on the shell expanding '**'.
  if (env.CODE_COVERAGE) {
    sh 'tar -czf kibana-coverage.tar.gz target/kibana-coverage/**/*'
    uploadGcsArtifact("kibana-ci-artifacts/jobs/${env.JOB_NAME}/${BUILD_NUMBER}/coverage/${workerName}", 'kibana-coverage.tar.gz')
  }
}
|
|
|
|
// Collects JUnit XML from the standard output location; tolerates runs that
// produced no reports and keeps full stdio attached to test results.
def publishJunit() {
  junit(testResults: 'target/junit/**/*.xml', allowEmptyResults: true, keepLongStdio: true)
}
|
|
|
|
// Sends failure notifications (infra + kibana mail) unless the build
// succeeded or was aborted.
def sendMail() {
  // If the build doesn't have a result set by this point, there haven't been any errors and it can be marked as a success
  // The e-mail plugin for the infra e-mail depends upon this being set
  currentBuild.result = currentBuild.result ?: 'SUCCESS'

  def buildStatus = buildUtils.getBuildStatus()
  def shouldNotify = buildStatus != 'SUCCESS' && buildStatus != 'ABORTED'

  if (shouldNotify) {
    node('flyweight') {
      sendInfraMail()
      sendKibanaMail()
    }
  }
}
|
|
|
|
// Notifies infra-root via the Mailer plugin; wrapped in catchError so a
// mail failure never changes the build result.
def sendInfraMail() {
  catchError {
    def mailerConfig = [
      $class: 'Mailer',
      notifyEveryUnstableBuild: true,
      recipients: 'infra-root+build@elastic.co',
      sendToIndividuals: false
    ]

    step(mailerConfig)
  }
}
|
|
|
|
// Emails the kibana build list on failure, but only when the job opted in
// via the NOTIFY_ON_FAILURE parameter. Wrapped in catchError so mail
// problems never change the build result.
def sendKibanaMail() {
  catchError {
    def buildStatus = buildUtils.getBuildStatus()
    def shouldNotify = params.NOTIFY_ON_FAILURE && buildStatus != 'SUCCESS' && buildStatus != 'ABORTED'

    if (shouldNotify) {
      emailext(
        to: 'build-kibana@elastic.co',
        subject: "${env.JOB_NAME} - Build # ${env.BUILD_NUMBER} - ${buildStatus}",
        body: '${SCRIPT,template="groovy-html.template"}',
        mimeType: 'text/html',
      )
    }
  }
}
|
|
|
|
// Runs `script` under bash (Jenkins `sh` defaults to /bin/sh) by
// prepending a bash shebang.
def bash(script, label) {
  sh(script: "#!/bin/bash\n${script}", label: label)
}
|
|
|
|
// Installs build dependencies and prepares the environment via runbld.
def doSetup() {
  runbld("./test/scripts/jenkins_setup.sh", "Setup Build Environment and Dependencies")
}
|
|
|
|
// Builds the OSS/Default Kibana distributions via runbld.
def buildOss() {
  runbld("./test/scripts/jenkins_build_kibana.sh", "Build OSS/Default Kibana")
}
|
|
|
|
// Builds the X-Pack Kibana distribution via runbld.
def buildXpack() {
  runbld("./test/scripts/jenkins_xpack_build_kibana.sh", "Build X-Pack Kibana")
}
|
|
|
|
// Reports failed tests to Github (via scripts/report_failed_tests); aborted
// builds run in dry-run mode so nothing is posted upstream.
def runErrorReporter() {
  def status = buildUtils.getBuildStatus()
  def dryRun = status == "ABORTED" ? "--no-github-update" : ""

  bash(
    """
      source src/dev/ci_setup/setup_env.sh
      node scripts/report_failed_tests ${dryRun}
    """,
    "Report failed tests, if necessary"
  )
}
|
|
|
|
// Expose this script's methods to pipelines that `load` this file.
return this
|