Keep a small reserve of tasks to not-batch, so all threads can always have an initial task (#18696)
* Keep a small reserve of tasks to not-batch, so all threads can have an initial task * Assign no weight to new tests, but still place them at the end of the list
This commit is contained in:
parent
38905f46ce
commit
5f4436d433
|
@ -44,7 +44,8 @@ namespace Harness.Parallel.Host {
|
||||||
console.log("Discovering tests...");
|
console.log("Discovering tests...");
|
||||||
const discoverStart = +(new Date());
|
const discoverStart = +(new Date());
|
||||||
const { statSync }: { statSync(path: string): { size: number }; } = require("fs");
|
const { statSync }: { statSync(path: string): { size: number }; } = require("fs");
|
||||||
const tasks: { runner: TestRunnerKind, file: string, size: number }[] = [];
|
let tasks: { runner: TestRunnerKind, file: string, size: number }[] = [];
|
||||||
|
const newTasks: { runner: TestRunnerKind, file: string, size: number }[] = [];
|
||||||
const perfData = readSavedPerfData();
|
const perfData = readSavedPerfData();
|
||||||
let totalCost = 0;
|
let totalCost = 0;
|
||||||
let unknownValue: string | undefined;
|
let unknownValue: string | undefined;
|
||||||
|
@ -60,8 +61,10 @@ namespace Harness.Parallel.Host {
|
||||||
const hashedName = hashName(runner.kind(), file);
|
const hashedName = hashName(runner.kind(), file);
|
||||||
size = perfData[hashedName];
|
size = perfData[hashedName];
|
||||||
if (size === undefined) {
|
if (size === undefined) {
|
||||||
size = Number.MAX_SAFE_INTEGER;
|
size = 0;
|
||||||
unknownValue = hashedName;
|
unknownValue = hashedName;
|
||||||
|
newTasks.push({ runner: runner.kind(), file, size });
|
||||||
|
continue;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
tasks.push({ runner: runner.kind(), file, size });
|
tasks.push({ runner: runner.kind(), file, size });
|
||||||
|
@ -69,6 +72,7 @@ namespace Harness.Parallel.Host {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
tasks.sort((a, b) => a.size - b.size);
|
tasks.sort((a, b) => a.size - b.size);
|
||||||
|
tasks = tasks.concat(newTasks);
|
||||||
// 1 fewer batches than threads to account for unittests running on the final thread
|
// 1 fewer batches than threads to account for unittests running on the final thread
|
||||||
const batchCount = runners.length === 1 ? workerCount : workerCount - 1;
|
const batchCount = runners.length === 1 ? workerCount : workerCount - 1;
|
||||||
const packfraction = 0.9;
|
const packfraction = 0.9;
|
||||||
|
@ -174,7 +178,7 @@ namespace Harness.Parallel.Host {
|
||||||
let scheduledTotal = 0;
|
let scheduledTotal = 0;
|
||||||
batcher: while (true) {
|
batcher: while (true) {
|
||||||
for (let i = 0; i < batchCount; i++) {
|
for (let i = 0; i < batchCount; i++) {
|
||||||
if (tasks.length === 0) {
|
if (tasks.length <= workerCount) { // Keep a small reserve even in the suboptimally packed case
|
||||||
console.log(`Suboptimal packing detected: no tests remain to be stolen. Reduce packing fraction from ${packfraction} to fix.`);
|
console.log(`Suboptimal packing detected: no tests remain to be stolen. Reduce packing fraction from ${packfraction} to fix.`);
|
||||||
break batcher;
|
break batcher;
|
||||||
}
|
}
|
||||||
|
@ -213,7 +217,9 @@ namespace Harness.Parallel.Host {
|
||||||
worker.send({ type: "batch", payload });
|
worker.send({ type: "batch", payload });
|
||||||
}
|
}
|
||||||
else { // Unittest thread - send off just one test
|
else { // Unittest thread - send off just one test
|
||||||
worker.send({ type: "test", payload: tasks.pop() });
|
const payload = tasks.pop();
|
||||||
|
ts.Debug.assert(!!payload); // The reserve kept above should ensure there is always an initial task available, even in suboptimal scenarios
|
||||||
|
worker.send({ type: "test", payload });
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in a new issue