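/**
 * Benchmark suite CLI entry point.
 *
 * `--command=run` (the default) executes the configured scenarios and writes
 * JSON and Markdown reports; `--command=compare` diffs two existing reports.
 */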
import { mkdir } from 'node:fs/promises'
import { basename, isAbsolute, join, resolve } from 'node:path'
import { createConfig } from './config/defaults.js'
import { runScenarios } from './harness/runner.js'
import {
  collectBootstrapObservability,
  collectProcessObservability,
} from './observability/adapters.js'
import {
  buildBenchmarkReport,
  buildComparisonText,
} from './reporting/report.js'
import { createCorrectnessAndToolScenarios } from './scenarios/correctnessTools.js'
import { createHeadlessScenarios } from './scenarios/headless.js'
import { createRestorationScenario } from './scenarios/restoration.js'
import { createStartupAndCommandScenarios } from './scenarios/startupCommand.js'
import type { BenchmarkMode, BenchmarkReport, BenchmarkRunKind } from './types.js'
import { readJsonFile, writeJsonFile, writeTextFile } from './utils/files.js'
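
/** Return the value of a `--name=value` CLI argument, if present. */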
function parseArg(name: string): string | undefined {
  const hit = process.argv.find(value => value.startsWith(`${name}=`))
  if (!hit) return undefined
  return hit.slice(name.length + 1)
}
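
/** Parse `--mode=<online|offline|hybrid>`, defaulting to `hybrid`. */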
function resolveMode(): BenchmarkMode {
  const raw = parseArg('--mode')
  if (raw === 'online' || raw === 'offline' || raw === 'hybrid') return raw
  return 'hybrid'
}
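
/** Parse `--run=<smoke|full>`, defaulting to `smoke`. */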
function resolveRunKind(): BenchmarkRunKind {
  const raw = parseArg('--run')
  if (raw === 'smoke' || raw === 'full') return raw
  return 'smoke'
}
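
/** Resolve `--out` against the project root, defaulting to `benchmark/results`. */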
function resolveOutputDir(rootDir: string): string {
  const input = parseArg('--out')
  if (!input) return join(rootDir, 'benchmark', 'results')
  return isAbsolute(input) ? input : resolve(rootDir, input)
}
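
/**
 * Run the full benchmark suite: execute every scenario, write a timestamped
 * JSON report plus `latest.json` and `latest-summary.md`, and set a failing
 * exit code when the quality gate does not pass.
 */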
async function runBench(): Promise<void> {
  const rootDir = process.cwd()
  const nowIso = new Date().toISOString()
  const mode = resolveMode()
  const runKind = resolveRunKind()
  const config = createConfig(mode, runKind)
  const outputDir = resolveOutputDir(rootDir)
  await mkdir(outputDir, { recursive: true })
  const context = {
    rootDir,
    mode,
    runKind,
    nowIso,
    outputDir,
  }
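  // Assemble the scenario suite from every scenario family.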
  const scenarios = [
    ...createStartupAndCommandScenarios(config),
    ...createHeadlessScenarios(config),
    ...createCorrectnessAndToolScenarios(config),
    createRestorationScenario(config),
  ]
  const summaries = await runScenarios({
    scenarios,
    context,
    config,
  })
  const report = buildBenchmarkReport({
    rootDir,
    generatedAt: nowIso,
    config,
    scenarios: summaries,
    observability: {
      bootstrapState: collectBootstrapObservability(),
      process: collectProcessObservability(),
    },
  })
  const reportPath = join(
    outputDir,
    `benchmark-${runKind}-${nowIso.replace(/[:.]/gu, '-')}.json`,
  )
  await writeJsonFile(reportPath, report)
  const latestPath = join(outputDir, 'latest.json')
  await writeJsonFile(latestPath, report)
  await writeTextFile(
    join(outputDir, 'latest-summary.md'),
    renderSummaryMarkdown(report, basename(reportPath)),
  )
  process.stdout.write(`Benchmark report written: ${reportPath}\n`)
  process.stdout.write(
    `Quality gate: ${report.qualityGate.passed ? 'PASS' : 'FAIL'}\n`,
  )
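  // On quality-gate failure, print each reason and mark the process failed.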
  if (!report.qualityGate.passed) {
    for (const reason of report.qualityGate.reasons) {
      process.stdout.write(`- ${reason}\n`)
    }
    process.exitCode = 1
  }
}
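
/** Render a short Markdown summary of a benchmark report. */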
function renderSummaryMarkdown(
  report: BenchmarkReport,
  fileName: string,
): string {
  const lines: string[] = []
  lines.push('# Benchmark Summary')
  lines.push('')
  lines.push(`- Report: ${fileName}`)
  lines.push(`- Mode: ${report.mode}`)
  lines.push(`- Run: ${report.runKind}`)
  lines.push(
    `- Overall score: ${report.aggregate.score.total.toFixed(2)} (latency ${report.aggregate.score.latency.toFixed(2)}, stability ${report.aggregate.score.stability.toFixed(2)}, quality ${report.aggregate.score.quality.toFixed(2)}, cost ${report.aggregate.score.cost.toFixed(2)})`,
  )
  lines.push(`- Success rate: ${report.aggregate.successRate.toFixed(2)}%`)
  lines.push('')
  lines.push('## Scenario Results')
  for (const scenario of report.scenarios) {
    const p95 = scenario.durationMs?.p95 ?? 0
    lines.push(
      `- ${scenario.id} ${scenario.name}: success ${scenario.successRate.toFixed(2)}%, p95 ${p95.toFixed(2)}ms`,
    )
  }
  lines.push('')
  lines.push(
    `- Quality gate: ${report.qualityGate.passed ? 'PASS' : 'FAIL'} (${report.qualityGate.reasons.length} issue(s))`,
  )
  if (!report.qualityGate.passed) {
    for (const reason of report.qualityGate.reasons) {
      // Two-space indent so each reason nests under the quality-gate bullet.
      lines.push(`  - ${reason}`)
    }
  }
  lines.push('')
  return `${lines.join('\n')}\n`
}
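
/**
 * Compare two benchmark reports (`--baseline` and `--current`) and write the
 * result to `comparison.md` in the output directory.
 */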
async function runCompare(): Promise<void> {
  const rootDir = process.cwd()
  const outputDir = resolveOutputDir(rootDir)
  const baselinePath = parseArg('--baseline')
  const currentPath = parseArg('--current')
  if (!baselinePath || !currentPath) {
    throw new Error('--baseline and --current are required for compare mode')
  }
  const baseline = await readJsonFile<BenchmarkReport>(
    isAbsolute(baselinePath) ? baselinePath : resolve(rootDir, baselinePath),
  )
  const current = await readJsonFile<BenchmarkReport>(
    isAbsolute(currentPath) ? currentPath : resolve(rootDir, currentPath),
  )
  const text = buildComparisonText(baseline, current)
  const outPath = join(outputDir, 'comparison.md')
  await writeTextFile(outPath, text)
  process.stdout.write(`Comparison written: ${outPath}\n`)
}
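
// Dispatch on `--command`: `compare` diffs two reports, anything else runs the suite.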
const command = parseArg('--command') ?? 'run'
if (command === 'compare') {
  await runCompare()
} else {
  await runBench()
}