test(NODE-6756): add tags to benchmarks #751

Merged
merged 29 commits on Mar 11, 2025
Changes from 11 commits
Commits
29 commits
d5cb09c
update bsonBench
W-A-James Feb 14, 2025
5a9635b
add tags to granular benchmarks
W-A-James Feb 14, 2025
e836325
add tags to granular and spec tests
W-A-James Feb 14, 2025
ae6750a
update lockfile
W-A-James Feb 14, 2025
ca24538
lint and update lockfile
W-A-James Feb 21, 2025
54730a6
add normalized measurements to custom benchmarks
W-A-James Feb 27, 2025
e3a8def
update tag setting for granular tests
W-A-James Feb 27, 2025
4fba4e3
accomodate cpu baseline in custom benchmarks
W-A-James Feb 27, 2025
ca6bb42
support cpu baseline in granular and spec benchmarks
W-A-James Feb 27, 2025
a29eebb
update readme
W-A-James Feb 27, 2025
57e77f1
update gitignore
W-A-James Feb 27, 2025
a1ba3cc
fix error
W-A-James Feb 27, 2025
c22023d
change test name
W-A-James Feb 27, 2025
5ed4b71
small fixes
W-A-James Feb 27, 2025
90bca22
address review comments
W-A-James Feb 28, 2025
86b9cff
Use latest dbx-js-tools
W-A-James Mar 5, 2025
3703f72
use new perfSend
W-A-James Mar 6, 2025
fd50317
add new script
W-A-James Mar 6, 2025
67e0739
update ci config
W-A-James Mar 7, 2025
5e01f0e
Update .evergreen/perf_send.mjs
W-A-James Mar 7, 2025
fee984c
convert to async
W-A-James Mar 7, 2025
6c75090
use .text instead of .json
W-A-James Mar 7, 2025
9020210
fully print out request body
W-A-James Mar 7, 2025
433542b
fix perf_send
W-A-James Mar 7, 2025
c1d58ee
wip
W-A-James Mar 7, 2025
fbd7337
chore: try new url
nbbeeken Mar 11, 2025
12ed9c8
fix script response parsing
nbbeeken Mar 11, 2025
ca5a1be
move time stamps
nbbeeken Mar 11, 2025
038ad89
chore: small fixes
nbbeeken Mar 11, 2025
3,889 changes: 1,578 additions & 2,311 deletions package-lock.json

Large diffs are not rendered by default.

7 changes: 4 additions & 3 deletions package.json
@@ -102,9 +102,10 @@
"check:tsd": "npm run build:dts && tsd",
"check:web": "WEB=true mocha test/node",
"check:web-no-bigint": "WEB=true NO_BIGINT=true mocha test/node",
"check:granular-bench": "npm run build:bench && node ./test/bench/etc/run_granular_benchmarks.js",
"check:spec-bench": "npm run build:bench && node ./test/bench/lib/spec/bsonBench.js",
"check:custom-bench": "npm run build && node ./test/bench/custom/main.mjs",
"check:granular-bench": "npm run build:bench && npm run check:baseline-bench && node ./test/bench/etc/run_granular_benchmarks.js",
"check:spec-bench": "npm run build:bench && npm run check:baseline-bench && node ./test/bench/lib/spec/bsonBench.js",
"check:custom-bench": "npm run build && npm run check:baseline-bench && node ./test/bench/custom/main.mjs",
"check:baseline-bench": "node ./test/bench/etc/cpuBaseline.js",
"build:bench": "cd test/bench && npx tsc",
"build:ts": "node ./node_modules/typescript/bin/tsc",
"build:dts": "npm run build:ts && api-extractor run --typescript-compiler-folder node_modules/typescript --local && node etc/clean_definition_files.cjs",
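For orientation, each `check:*-bench` script now runs `check:baseline-bench` first, so `test/bench/etc/cpuBaseline.json` must exist before any suite starts. A minimal sketch of checking that precondition (the file path comes from the scripts above; the `hz` field is taken from `cpuBaseline.js` further down in this diff):

```js
// check_baseline.mjs — hedged sketch: confirm the CPU baseline file exists before benchmarking.
// Assumes this file lives at the repository root.
import { readFile } from 'node:fs/promises';

const baseline = JSON.parse(
  await readFile(new URL('./test/bench/etc/cpuBaseline.json', import.meta.url), 'utf8')
);

if (typeof baseline.hz !== 'number') {
  throw new Error('run `npm run check:baseline-bench` before the benchmark suites');
}
console.log(`cpu baseline: ${baseline.hz.toFixed(2)} ops/sec`);
```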
38 changes: 21 additions & 17 deletions test/bench/custom/benchmarks.mjs
@@ -1,22 +1,26 @@
/* eslint-disable strict */
import { BSON } from '../../../lib/bson.mjs';

const ObjectId_isValid = [
function objectid_isvalid_wrong_string_length() {
BSON.ObjectId.isValid('a');
},
/** wrong character at the start, could be the most short circuited code path */
function objectid_isvalid_invalid_hex_at_start() {
BSON.ObjectId.isValid('g6e84ebdc96f4c0772f0cbbf');
},
/** wrong character at the end, could be the least short circuited code path */
function objectid_isvalid_invalid_hex_at_end() {
BSON.ObjectId.isValid('66e84ebdc96f4c0772f0cbbg');
},
function objectid_isvalid_valid_hex_string() {
BSON.ObjectId.isValid('66e84ebdc96f4c0772f0cbbf');
}
];
const ObjectId_isValid = {
name: 'ObjectId_isValid',
tags: ['alerting-benchmark', 'objectid'],
benchmarks: [
function objectid_isvalid_wrong_string_length() {
BSON.ObjectId.isValid('a');
},
/** wrong character at the start, could be the most short circuited code path */
function objectid_isvalid_invalid_hex_at_start() {
BSON.ObjectId.isValid('g6e84ebdc96f4c0772f0cbbf');
},
/** wrong character at the end, could be the least short circuited code path */
function objectid_isvalid_invalid_hex_at_end() {
BSON.ObjectId.isValid('66e84ebdc96f4c0772f0cbbg');
},
function objectid_isvalid_valid_hex_string() {
BSON.ObjectId.isValid('66e84ebdc96f4c0772f0cbbf');
}
]
};

// Add benchmarks here:
export const benchmarks = [...ObjectId_isValid];
export const suites = [ObjectId_isValid];
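
Each entry in `suites` is now an object carrying a `name`, optional `tags`, and the list of benchmark functions, rather than a bare array. As a hedged sketch, a second suite would be added to `benchmarks.mjs` like this (the suite name, tag values, and workload below are illustrative, not part of the PR):

```js
// Hypothetical additional suite, reusing the BSON import already present in benchmarks.mjs.
const Double_roundtrip = {
  name: 'Double_roundtrip',
  tags: ['alerting-benchmark', 'double'], // illustrative tags
  benchmarks: [
    function double_roundtrip_single_value() {
      BSON.deserialize(BSON.serialize({ d: 1.2345 }));
    }
  ]
};

// main.mjs consumes whatever suite objects are listed in this export.
export const suites = [ObjectId_isValid, Double_roundtrip];
```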
87 changes: 69 additions & 18 deletions test/bench/custom/main.mjs
@@ -4,7 +4,7 @@ import util from 'node:util';
import fs from 'node:fs';
import os from 'node:os';
import benchmark from 'benchmark';
import { benchmarks } from './benchmarks.mjs';
import { suites } from './benchmarks.mjs';

const hw = os.cpus();
const ram = os.totalmem() / 1024 ** 3;
@@ -20,20 +20,71 @@ const systemInfo = () =>
].join('\n');
console.log(systemInfo());

const suite = new benchmark.Suite();

for (const bench of benchmarks) suite.add(bench.name, bench);

suite
.on('cycle', function logBenchmark(event) {
console.log(String(event.target));
})
.on('complete', function outputPerfSend() {
const data = Array.from(this).map(bench => ({
info: { test_name: bench.name },
metrics: [{ name: 'ops_per_sec', value: bench.hz }]
}));
console.log(util.inspect(data, { depth: Infinity, colors: true }));
fs.writeFileSync('customBenchmarkResults.json', JSON.stringify(data), 'utf8');
})
.run();
function logBenchmark(event) {
console.log(String(event.target));
}

function processBenchmarkResult(bench, tags, metadata) {
return {
info: { test_name: bench.name },
metrics: [{ name: 'ops_per_sec', value: bench.hz, metadata }],
tags
};
}

let completedSuites = 0;
function completeSuite() {
const metadata = { improvement_direction: 'up' };
if (++completedSuites >= collectedSuites.length) {
let cpuBaselineResults;
try {
cpuBaselineResults = JSON.parse(fs.readFileSync(new URL('../etc/cpuBaseline.json', import.meta.url), 'utf8'));
} catch {
throw new Error("Couldn't find baseline results");
}

const cpuBaselineResult = cpuBaselineResults.hz;
if (typeof cpuBaselineResult !== 'number') {
throw new Error("Couldn't find baseline result");
}

const data = [];
for (const { suite, suiteConfig } of collectedSuites) {
const { tags } = suiteConfig;
for (const bench of Array.from(suite)) {
const result = processBenchmarkResult(bench, tags, metadata);
result.metrics.push({
name: 'normalized_throughput',
value: bench.hz / cpuBaselineResult,
metadata
});
data.push(result);
}

data.push({
info: { test_name: 'cpuBaseline_custom' },
metrics: [{ name: 'ops_per_sec', value: cpuBaselineResult, metadata }]
});

console.log(util.inspect(data, { depth: Infinity, colors: true }));
fs.writeFileSync('customBenchmarkResults.json', JSON.stringify(data), 'utf8');
}
}
}

function processSuite(suiteModule, cycleHandler, completeHandler) {
let suite = new benchmark.Suite(suiteModule.name);
for (const b of suiteModule.benchmarks) {
suite.add(b.name, b);
}

suite = suite.on('cycle', cycleHandler).on('complete', completeHandler).run({ async: true });

return { suite, suiteConfig: suiteModule };
}

const collectedSuites = [];
for (const suite of suites) {
const newSuite = processSuite(suite, logBenchmark, completeSuite);
collectedSuites.push(newSuite);
}
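
The `complete` handler above waits until every collected suite has finished, then reports two metrics per benchmark: raw `ops_per_sec` and a `normalized_throughput` obtained by dividing the benchmark's rate by the host's CPU baseline rate. A small worked sketch of that normalization (numbers are illustrative):

```js
// Hedged sketch of the arithmetic in completeSuite(); the values are made up.
const cpuBaseline = { hz: 9.5 };            // ops/sec of the sieve workload on this host
const bench = { name: 'objectid_isvalid_valid_hex_string', hz: 28_500_000 };

// Dividing by the baseline expresses throughput in "baseline units",
// which makes results from differently powered hosts roughly comparable.
const normalizedThroughput = bench.hz / cpuBaseline.hz; // 3_000_000
console.log(normalizedThroughput);
```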
28 changes: 16 additions & 12 deletions test/bench/custom/readme.md
@@ -4,21 +4,25 @@ In this directory are tests for code paths not covered by our spec or granular (

## How to write your own

In `benchmarks.mjs` add a new test to an existing array or make a new array for a new subject area.
Try to fit the name of the function into the format of: "subject area", "method or function" "test case that is being covered" (Ex. `objectid_isvalid_bestcase_false`).
In `benchmarks.mjs` add a new test to an existing benchmark object or make a new object for a new subject area.
Try to fit the names of the variables and the benchmark functions into the format: "subject area", "method or function", "test case that is being covered" (Ex. `objectid_isvalid_bestcase_false`).
Make sure your suite is added to the `suites` export.

### Example

```js
const ObjectId_isValid = [
function objectid_isvalid_strlen() {
BSON.ObjectId.isValid('a');
},
// ...
];

export const benchmarks = [...ObjectId_isValid];
const ObjectId_isValid = {
name: 'ObjectId_isValid',
tags: ['objectid'],
benchmarks: [
function objectid_isvalid_strlen() {
BSON.ObjectId.isValid('a');
},
// ...
]
};

export const suites = [ObjectId_isValid];
```

## Output
Expand All @@ -28,9 +32,9 @@ The JSON emitted at the end of the benchmarks must follow our performance tracki
The JSON must be an array of "`Test`"s:

```ts
type Metric = { name: string, value: number }
type Metric = { name: string, value: number, metadata: { improvement_direction: 'up' | 'down' } }
type Test = {
info: { test_name: string },
info: { test_name: string, tags?: string[]},
metrics: Metric[]
}
```
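For illustration, a results array that satisfies the `Test`/`Metric` shape declared above might look like the following (test name, tag, and metric values are placeholders):

```js
// Placeholder payload following the Test[] shape from the readme above.
const results = [
  {
    info: { test_name: 'objectid_isvalid_valid_hex_string', tags: ['objectid'] },
    metrics: [
      { name: 'ops_per_sec', value: 28_500_000, metadata: { improvement_direction: 'up' } },
      { name: 'normalized_throughput', value: 3_000_000, metadata: { improvement_direction: 'up' } }
    ]
  }
];
console.log(JSON.stringify(results));
```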
3 changes: 2 additions & 1 deletion test/bench/etc/.gitignore
@@ -1,2 +1,3 @@
*Results.json
resultsCollected.json
cpuBaseline.json
resultsCollected*.json
60 changes: 60 additions & 0 deletions test/bench/etc/cpuBaseline.js
@@ -0,0 +1,60 @@
'use strict';
const fs = require('fs');
const { sep } = require('path');
const benchmark = require('benchmark');

const stableRegionMean = 42.82;
const taskSize = 3.1401000000000003 / stableRegionMean; // ~3MB worth of work scaled down by the mean of the current stable region in CI to bring this value to roughly 1

function sieveOfEratosthenes(n) {
// Create a boolean array "prime[0..n]" and initialize
// all entries as true. A value in prime[i] will
// become false if i is Not a prime
const prime = Array.from({ length: n + 1 }, () => true);

// We know 0 and 1 are not prime
prime[0] = false;
prime[1] = false;

for (let p = 2; p * p <= n; p++) {
// If prime[p] is not changed, then it is a prime
if (prime[p] === true) {
// Update all multiples of p as false
for (let i = p * p; i <= n; i += p) {
prime[i] = false;
}
}
}

// Collecting all prime numbers
const primes = [];
for (let i = 2; i <= n; i++) {
if (prime[i] === true) {
primes.push(i);
}
}

return primes;
}

new benchmark.Suite()
.add('cpuBaseline', function () {
sieveOfEratosthenes(1_000_000);
})
.on('complete', function () {
const data = {};
for (const b of Array.from(this)) {
if (b.name !== 'cpuBaseline') continue;
data.name = b.name;
data.stats = b.stats;
data.times = b.times;
data.hz = b.hz;
data.count = b.count;
data.cycles = b.cycles;
data.megabytes_per_second = taskSize / b.stats.mean;
}

console.log(data);
fs.writeFileSync(`${__dirname}${sep}cpuBaseline.json`, JSON.stringify(data));
})
.run();
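
The baseline script times a fixed sieve-of-Eratosthenes workload and derives `megabytes_per_second = taskSize / mean`, where `taskSize` is scaled so the figure lands near 1 on the CI stable region. A hedged sketch of that arithmetic with an illustrative mean:

```js
// Hedged sketch of the baseline derivation; the mean below is illustrative.
const stableRegionMean = 42.82;
const taskSize = 3.1401000000000003 / stableRegionMean; // ≈ 0.0733

const exampleMeanSeconds = 0.07;                          // time for one sieveOfEratosthenes(1_000_000) run
const megabytesPerSecond = taskSize / exampleMeanSeconds; // ≈ 1.05, i.e. close to the CI stable region
console.log(megabytesPerSecond.toFixed(3));
```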
21 changes: 21 additions & 0 deletions test/bench/etc/run_granular_benchmarks.js
@@ -28,6 +28,9 @@ const DOCUMENT_ROOT = path.resolve(`${__dirname}/../documents`);
.run()
.catch(() => null);

// Check for benchmark results
const cpuBaselineData = require(`${__dirname}${path.sep}cpuBaseline.json`);

// Run all benchmark files
const lib = await fs.readdir(BENCHMARK_PATH);
for await (const dirent of lib) {
@@ -74,13 +77,31 @@ const DOCUMENT_ROOT = path.resolve(`${__dirname}/../documents`);
collectedResults.push(...results);
}
}
const metadata = {
improvement_direction: 'up'
};

const means = collectedResults.map(result => {
const rv = { ...result };
rv.metrics = rv.metrics.filter(metric => metric.type === 'MEAN');
rv.metrics = rv.metrics.map(m => {
return { ...m, metadata };
});
rv.metrics.push({
name: 'normalized_throughput',
value: rv.metrics[0].value / cpuBaselineData.megabytes_per_second,
metadata
});
return rv;
});

means.push({
info: { test_name: 'cpuBaseline_granular' },
metrics: [
{ name: 'megabytes_per_second', value: cpuBaselineData.megabytes_per_second, metadata }
]
});

await fs.writeFile(meansFile, JSON.stringify(means));

console.log(`Means in ${meansFile}`);
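For reference, a hedged sketch of the array written to the means file after this transform; the test and MEAN metric names are placeholders, while `normalized_throughput`, `cpuBaseline_granular`, and the `metadata` object come from the code above:

```js
// Placeholder illustration of the means array produced by the block above.
const metadata = { improvement_direction: 'up' };
const means = [
  {
    info: { test_name: 'example_granular_test' },                           // placeholder name
    metrics: [
      { name: 'example_mean_metric', type: 'MEAN', value: 40.0, metadata }, // placeholder MEAN metric
      { name: 'normalized_throughput', value: 40.0 / 1.05, metadata }       // value / cpuBaselineData.megabytes_per_second
    ]
  },
  {
    info: { test_name: 'cpuBaseline_granular' },
    metrics: [{ name: 'megabytes_per_second', value: 1.05, metadata }]
  }
];
console.log(JSON.stringify(means));
```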
10 changes: 7 additions & 3 deletions test/bench/granular/binary.bench.ts
@@ -5,14 +5,16 @@ import {
BOOL,
ITERATIONS,
LIBRARY_SPEC,
WARMUP
WARMUP,
getTypeTestTags
} from './common';

async function main() {
const suite = new Suite('Binary');
const testDocs = await getTestDocs('binary');
// deserialize
for (const documentPath of testDocs) {
const tags = getTypeTestTags(documentPath);
for (const promoteBuffers of BOOL) {
suite.task({
documentPath,
@@ -22,7 +24,8 @@
operation: 'deserialize',
options: {
promoteBuffers
}
},
tags
});
}

@@ -33,7 +36,8 @@
iterations: ITERATIONS,
warmup: WARMUP,
operation: 'serialize',
options: { checkKeys: true, ignoreUndefined: false }
options: { checkKeys: true, ignoreUndefined: false },
tags
});
}
await runSuiteAndWriteResults(suite);
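`getTypeTestTags` comes from `./common`, whose implementation is not part of this diff. Purely as an illustration of the call sites above, a hypothetical version could derive tags from the test document's path; the naming convention assumed here is a guess, not the repository's actual helper:

```js
// Hypothetical sketch only — NOT the implementation in test/bench/granular/common.
function getTypeTestTags(documentPath) {
  const fileName = documentPath.split('/').pop() ?? documentPath;
  const bsonType = fileName.split('_')[0]; // assumes documents are named like "<type>_<case>.json"
  return ['granular-benchmark', bsonType];
}
```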
11 changes: 8 additions & 3 deletions test/bench/granular/boolean.bench.ts
@@ -5,7 +5,8 @@ import {
BOOL,
ITERATIONS,
WARMUP,
LIBRARY_SPEC
LIBRARY_SPEC,
getTypeTestTags
} from './common';

const OPTIONS = {
@@ -21,27 +22,31 @@
const testDocs = await getTestDocs('boolean');
// deserialize
for (const documentPath of testDocs) {
const tags = getTypeTestTags(documentPath);
for (const promoteValues of BOOL) {
suite.task({
documentPath,
library: LIBRARY_SPEC,
iterations: ITERATIONS,
warmup: WARMUP,
operation: 'deserialize',
options: { ...OPTIONS.deserialize, promoteValues }
options: { ...OPTIONS.deserialize, promoteValues },
tags
});
}
}

// serialize
for (const documentPath of testDocs) {
const tags = getTypeTestTags(documentPath);
suite.task({
documentPath,
library: LIBRARY_SPEC,
iterations: ITERATIONS,
warmup: WARMUP,
operation: 'deserialize',
options: OPTIONS.serialize
options: OPTIONS.serialize,
tags
});
}
await runSuiteAndWriteResults(suite);