// "Workers" in this file will spin up an instance, do some setup etc depending on the configuration, and then execute some work that you define
// e.g. workers.base(name: 'my-worker') { sh "echo 'ready to execute some kibana scripts'" }
// Maps a t-shirt size (e.g. 's', 'xl') to the Jenkins agent label expression
// that selects a matching machine type. Fails the build for unknown sizes.
def label(size) {
  def labelsBySize = [
    'flyweight': 'flyweight',
    's': 'docker && linux && immutable',
    's-highmem': 'docker && tests-s',
    'm': 'docker && linux && immutable && gobld/machineType:n2-standard-8',
    'm-highmem': 'docker && linux && immutable && gobld/machineType:n1-highmem-8',
    'l': 'docker && tests-l',
    'xl': 'docker && tests-xl',
    'xl-highmem': 'docker && tests-xl-highmem',
    'xxl': 'docker && tests-xxl && gobld/machineType:custom-64-270336',
    'n2-standard-16': 'docker && linux && immutable && gobld/machineType:n2-standard-16',
  ]

  if (labelsBySize.containsKey(size)) {
    return labelsBySize[size]
  }

  error "unknown size '${size}'"
}
/*
  The base worker that all of the others use. Will clone the scm (assumed to be kibana), and run kibana bootstrap processes by default.

  Parameters:
    size - size of worker label to use, e.g. 's' or 'xl'
    ramDisk - Should the workspace be mounted in memory? Default: true
    bootstrapped - If true, download kibana dependencies, run kbn bootstrap, etc. Default: true
    name - Name of the worker for display purposes, filenames, etc.
    scm - Jenkins scm configuration for checking out code. Use `null` to disable checkout. Default: inherited from job
*/
// Provisions a Jenkins agent of the requested size, optionally moves the workspace onto a RAM
// disk, checks out kibana, bootstraps dependencies, and finally runs `closure` inside ./kibana
// with CI env vars and vault credentials in scope. See the comment block above for `params` keys.
def base(Map params, Closure closure) {
  // Defaults; anything in `params` overrides them. `size` is required and has no default.
  def config = [size: '', ramDisk: true, bootstrapped: true, name: 'unnamed-worker', scm: scm] + params
  if (!config.size) {
    error "You must specify an agent size, such as 'xl' or 's', when using workers.base()"
  }

  node(label(config.size)) {
    agentInfo.print()

    if (config.ramDisk) {
      // Move to a temporary workspace, so that we can symlink the real workspace into /dev/shm
      def originalWorkspace = env.WORKSPACE
      ws('/tmp/workspace') {
        sh(
          script: """
            mkdir -p /dev/shm/workspace
            mkdir -p '${originalWorkspace}' # create all of the directories leading up to the workspace, if they don't exist
            rm --preserve-root -rf '${originalWorkspace}' # then remove just the workspace, just in case there's stuff in it
            ln -s /dev/shm/workspace '${originalWorkspace}'
          """,
          label: "Move workspace to RAM - /dev/shm/workspace"
        )
      }
    }

    // Dedicated temp dir inside the workspace, exported as TMPDIR below
    sh(
      script: "mkdir -p ${env.WORKSPACE}/tmp",
      label: "Create custom temp directory"
    )

    def checkoutInfo = [:]

    if (config.scm) {
      // Try to clone from Github up to 8 times, waiting 15 secs between attempts
      retryWithDelay(8, 15) {
        kibanaCheckout()
      }

      dir("kibana") {
        checkoutInfo = getCheckoutInfo()

        // Only the first worker in the build records checkout info (and, when enabled,
        // reports the initial "pending" commit status to Github)
        if (!buildState.has('checkoutInfo')) {
          buildState.set('checkoutInfo', checkoutInfo)

          if (buildState.get('shouldSetCommitStatus')) {
            githubCommitStatus.onStart()
          }
        }
      }

      ciStats.reportGitInfo(
        checkoutInfo.branch,
        checkoutInfo.commit,
        checkoutInfo.targetBranch,
        checkoutInfo.mergeBase
      )
    }

    // NOTE(review): if config.scm is null, checkoutInfo stays empty and GIT_COMMIT/GIT_BRANCH
    // below resolve to 'null' strings — presumably acceptable for no-checkout workers; verify.
    withEnv([
      "CI=true",
      "HOME=${env.JENKINS_HOME}",
      "PR_SOURCE_BRANCH=${env.ghprbSourceBranch ?: ''}",
      "PR_TARGET_BRANCH=${env.ghprbTargetBranch ?: ''}",
      "PR_AUTHOR=${env.ghprbPullAuthorLogin ?: ''}",
      "TEST_BROWSER_HEADLESS=1",
      "GIT_COMMIT=${checkoutInfo.commit}",
      "GIT_BRANCH=${checkoutInfo.branch}",
      "TMPDIR=${env.WORKSPACE}/tmp", // For Chrome and anything else that respects it
      "BUILD_TS_REFS_DISABLE=true", // no need to build ts refs in bootstrap
    ]) {
      withCredentials([
        string(credentialsId: 'vault-addr', variable: 'VAULT_ADDR'),
        string(credentialsId: 'vault-role-id', variable: 'VAULT_ROLE_ID'),
        string(credentialsId: 'vault-secret-id', variable: 'VAULT_SECRET_ID'),
      ]) {
        // scm is configured to check out to the ./kibana directory
        dir('kibana') {
          if (config.bootstrapped) {
            kibanaPipeline.doSetup()
          }

          closure()
        }
      }
    }
  }
}
// Worker for ci processes. Extends the base worker and adds GCS artifact upload, error reporting, junit processing
def ci(Map params, Closure closure) {
  // Same defaults as base(), plus post-build error reporting enabled unless overridden
  def defaults = [ramDisk: true, bootstrapped: true, runErrorReporter: true]
  def config = defaults + params

  // Wrap the caller's work with GCS artifact upload and post-build reporting (junit, errors)
  def wrappedWork = {
    kibanaPipeline.withGcsArtifactUpload(config.name) {
      kibanaPipeline.withPostBuildReporting(config) {
        closure()
      }
    }
  }

  return base(config, wrappedWork)
}
// Worker for running the current intake jobs. Just runs a single script after bootstrap.
def intake(jobName, String script) {
  // The actual work: run the given script via runbld, notifying on failure
  def executeScript = {
    kibanaPipeline.notifyOnError {
      runbld(script, "Execute ${jobName}")
    }
  }

  // Nothing runs until the returned closure is invoked by the pipeline
  return {
    ci(name: jobName, size: 'm-highmem', ramDisk: true) {
      withEnv(["JOB=${jobName}"]) {
        executeScript()
      }
    }
  }
}
// Worker for running functional tests. Runs a setup process (e.g. the kibana build) then executes a map of closures in parallel (e.g. one for each ciGroup)
def functional(name, Closure setup, Map processes) {
  // Functional test workers always run on 'xl' agents with a 20s stagger between parallel processes
  def config = [
    name: name,
    setup: setup,
    processes: processes,
    delayBetweenProcesses: 20,
    size: 'xl',
  ]

  return { parallelProcesses(config) }
}
/*
  Creates a ci worker that can run a setup process, followed by a group of processes in parallel.

  Parameters:
    name: Name of the worker for display purposes, filenames, etc.
    setup: Closure to execute after the agent is bootstrapped, before starting the parallel work
    processes: Map of closures that will execute in parallel after setup. Each closure is passed a unique number.
    delayBetweenProcesses: Number of seconds to wait between starting the parallel processes. Useful to spread the load of heavy init processes, e.g. Elasticsearch starting up. Default: 0
    size: size of worker label to use, e.g. 's' or 'xl'
*/
def parallelProcesses(Map params) {
  def config = [name: 'parallel-worker', setup: {}, processes: [:], delayBetweenProcesses: 0, size: 'xl'] + params

  ci(size: config.size, name: config.name) {
    config.setup()

    def stagger = config.delayBetweenProcesses
    def counter = 0

    // Assigns each process a 1-based number (in map iteration order) and wraps it so that it
    // optionally sleeps before starting and exposes its number via CI_PARALLEL_PROCESS_NUMBER
    def wrap = { Closure body ->
      counter++
      def assignedNumber = counter

      return {
        if (stagger && stagger > 0) {
          // This delay helps smooth out CPU load caused by ES/Kibana instances starting up at the same time
          sleep((assignedNumber - 1) * stagger)
        }

        withEnv(["CI_PARALLEL_PROCESS_NUMBER=${assignedNumber}"]) {
          body()
        }
      }
    }

    def wrappedProcesses = config.processes.collectEntries { processName, processClosure ->
      [(processName): wrap(processClosure)]
    }

    parallel(wrappedProcesses)
  }
}
// Jenkins shared-library convention: return the script object so `load()` callers can invoke its methods
return this