From c94d6a89758e4bfbc0729cafa16e13dd38fea9a7 Mon Sep 17 00:00:00 2001 From: Manuel Trezza <5673677+mtrezza@users.noreply.github.com> Date: Sun, 9 Nov 2025 01:00:47 +0100 Subject: [PATCH 01/10] lint --- benchmark/performance.js | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/benchmark/performance.js b/benchmark/performance.js index 831a57db37..6deb53378a 100644 --- a/benchmark/performance.js +++ b/benchmark/performance.js @@ -9,6 +9,7 @@ */ const Parse = require('parse/node'); +const logger = require('../lib/Logger').logger; const { performance, PerformanceObserver } = require('perf_hooks'); const { MongoClient } = require('mongodb'); @@ -274,15 +275,15 @@ async function benchmarkUserLogin() { * Run all benchmarks */ async function runBenchmarks() { - console.error('Starting Parse Server Performance Benchmarks...'); - console.error(`Iterations per benchmark: ${ITERATIONS}`); - console.error(''); + logger.error('Starting Parse Server Performance Benchmarks...'); + logger.error(`Iterations per benchmark: ${ITERATIONS}`); + logger.error(''); let server; try { // Initialize Parse Server - console.error('Initializing Parse Server...'); + logger.error('Initializing Parse Server...'); server = await initializeParseServer(); // Wait for server to be ready @@ -291,47 +292,47 @@ async function runBenchmarks() { const results = []; // Run each benchmark with database cleanup - console.error('Running Object Create benchmark...'); + logger.error('Running Object Create benchmark...'); await cleanupDatabase(); results.push(await benchmarkObjectCreate()); - console.error('Running Object Read benchmark...'); + logger.error('Running Object Read benchmark...'); await cleanupDatabase(); results.push(await benchmarkObjectRead()); - console.error('Running Object Update benchmark...'); + logger.error('Running Object Update benchmark...'); await cleanupDatabase(); results.push(await benchmarkObjectUpdate()); - 
console.error('Running Simple Query benchmark...'); + logger.error('Running Simple Query benchmark...'); await cleanupDatabase(); results.push(await benchmarkSimpleQuery()); - console.error('Running Batch Save benchmark...'); + logger.error('Running Batch Save benchmark...'); await cleanupDatabase(); results.push(await benchmarkBatchSave()); - console.error('Running User Signup benchmark...'); + logger.error('Running User Signup benchmark...'); await cleanupDatabase(); results.push(await benchmarkUserSignup()); - console.error('Running User Login benchmark...'); + logger.error('Running User Login benchmark...'); await cleanupDatabase(); results.push(await benchmarkUserLogin()); // Output results in github-action-benchmark format - console.log(JSON.stringify(results, null, 2)); + logger.log(JSON.stringify(results, null, 2)); - console.error(''); - console.error('Benchmarks completed successfully!'); - console.error(''); - console.error('Summary:'); + logger.error(''); + logger.error('Benchmarks completed successfully!'); + logger.error(''); + logger.error('Summary:'); results.forEach(result => { - console.error(` ${result.name}: ${result.value.toFixed(2)} ${result.unit} (${result.extra})`); + logger.error(` ${result.name}: ${result.value.toFixed(2)} ${result.unit} (${result.extra})`); }); } catch (error) { - console.error('Error running benchmarks:', error); + logger.error('Error running benchmarks:', error); process.exit(1); } finally { // Cleanup From ecdc0ae03dd216241e56fee39758807186f62aef Mon Sep 17 00:00:00 2001 From: Manuel Trezza <5673677+mtrezza@users.noreply.github.com> Date: Sun, 9 Nov 2025 01:18:04 +0100 Subject: [PATCH 02/10] fix --- .github/workflows/ci-performance.yml | 10 ++-- benchmark/performance.js | 70 +++++++++++++++++----------- 2 files changed, 49 insertions(+), 31 deletions(-) diff --git a/.github/workflows/ci-performance.yml b/.github/workflows/ci-performance.yml index 4ae8c80d67..67bc690e89 100644 --- a/.github/workflows/ci-performance.yml 
+++ b/.github/workflows/ci-performance.yml @@ -224,13 +224,13 @@ jobs: const changeStr = change > 0 ? \`+\${change.toFixed(1)}%\` : \`\${change.toFixed(1)}%\`; let status = '✅'; - if (change > 20) { + if (change > 50) { status = '❌ Much Slower'; hasRegression = true; - } else if (change > 10) { + } else if (change > 25) { status = '⚠️ Slower'; hasRegression = true; - } else if (change < -10) { + } else if (change < -25) { status = '🚀 Faster'; hasImprovement = true; } @@ -281,7 +281,9 @@ jobs: echo "" >> comment.md echo "" >> comment.md echo "" >> comment.md - echo "*Benchmarks ran with ${BENCHMARK_ITERATIONS:-100} iterations per test on Node.js ${{ env.NODE_VERSION }}*" >> comment.md + echo "*Benchmarks ran with ${BENCHMARK_ITERATIONS:-300} iterations per test on Node.js ${{ env.NODE_VERSION }}*" >> comment.md + echo "" >> comment.md + echo "> **Note:** Benchmarks run on shared CI infrastructure and may show ±20% variance between runs. Only sustained regressions >25% are flagged." >> comment.md - name: Comment PR with results if: github.event_name == 'pull_request' diff --git a/benchmark/performance.js b/benchmark/performance.js index 6deb53378a..5c8941fc40 100644 --- a/benchmark/performance.js +++ b/benchmark/performance.js @@ -18,7 +18,7 @@ const MONGODB_URI = process.env.MONGODB_URI || 'mongodb://localhost:27017/parse_ const SERVER_URL = 'http://localhost:1337/parse'; const APP_ID = 'benchmark-app-id'; const MASTER_KEY = 'benchmark-master-key'; -const ITERATIONS = parseInt(process.env.BENCHMARK_ITERATIONS || '100', 10); +const ITERATIONS = parseInt(process.env.BENCHMARK_ITERATIONS || '300', 10); // Parse Server instance let parseServer; @@ -85,10 +85,18 @@ async function cleanupDatabase() { /** * Measure average time for an async operation over multiple iterations + * Uses warmup iterations, median metric, and outlier filtering for robustness */ async function measureOperation(name, operation, iterations = ITERATIONS) { + const warmupCount = 
Math.floor(iterations * 0.2); // 20% warmup iterations const times = []; + // Warmup phase - stabilize JIT compilation and caches + for (let i = 0; i < warmupCount; i++) { + await operation(); + } + + // Measurement phase for (let i = 0; i < iterations; i++) { const start = performance.now(); await operation(); @@ -96,22 +104,33 @@ async function measureOperation(name, operation, iterations = ITERATIONS) { times.push(end - start); } - // Calculate statistics + // Sort times for percentile calculations times.sort((a, b) => a - b); - const sum = times.reduce((acc, val) => acc + val, 0); - const mean = sum / times.length; - const p50 = times[Math.floor(times.length * 0.5)]; - const p95 = times[Math.floor(times.length * 0.95)]; - const p99 = times[Math.floor(times.length * 0.99)]; - const min = times[0]; - const max = times[times.length - 1]; + + // Filter outliers using Interquartile Range (IQR) method + const q1Index = Math.floor(times.length * 0.25); + const q3Index = Math.floor(times.length * 0.75); + const q1 = times[q1Index]; + const q3 = times[q3Index]; + const iqr = q3 - q1; + const lowerBound = q1 - 1.5 * iqr; + const upperBound = q3 + 1.5 * iqr; + + const filtered = times.filter(t => t >= lowerBound && t <= upperBound); + + // Calculate statistics on filtered data + const median = filtered[Math.floor(filtered.length * 0.5)]; + const p95 = filtered[Math.floor(filtered.length * 0.95)]; + const p99 = filtered[Math.floor(filtered.length * 0.99)]; + const min = filtered[0]; + const max = filtered[filtered.length - 1]; return { name, - value: mean, + value: median, // Use median instead of mean for robustness unit: 'ms', range: `${min.toFixed(2)} - ${max.toFixed(2)}`, - extra: `p50: ${p50.toFixed(2)}ms, p95: ${p95.toFixed(2)}ms, p99: ${p99.toFixed(2)}ms`, + extra: `p95: ${p95.toFixed(2)}ms, p99: ${p99.toFixed(2)}ms, n=${filtered.length}/${times.length}`, }; } @@ -275,15 +294,14 @@ async function benchmarkUserLogin() { * Run all benchmarks */ async function 
runBenchmarks() { - logger.error('Starting Parse Server Performance Benchmarks...'); - logger.error(`Iterations per benchmark: ${ITERATIONS}`); - logger.error(''); + logger.info('Starting Parse Server Performance Benchmarks...'); + logger.info(`Iterations per benchmark: ${ITERATIONS}`); let server; try { // Initialize Parse Server - logger.error('Initializing Parse Server...'); + logger.info('Initializing Parse Server...'); server = await initializeParseServer(); // Wait for server to be ready @@ -292,43 +310,41 @@ async function runBenchmarks() { const results = []; // Run each benchmark with database cleanup - logger.error('Running Object Create benchmark...'); + logger.info('Running Object Create benchmark...'); await cleanupDatabase(); results.push(await benchmarkObjectCreate()); - logger.error('Running Object Read benchmark...'); + logger.info('Running Object Read benchmark...'); await cleanupDatabase(); results.push(await benchmarkObjectRead()); - logger.error('Running Object Update benchmark...'); + logger.info('Running Object Update benchmark...'); await cleanupDatabase(); results.push(await benchmarkObjectUpdate()); - logger.error('Running Simple Query benchmark...'); + logger.info('Running Simple Query benchmark...'); await cleanupDatabase(); results.push(await benchmarkSimpleQuery()); - logger.error('Running Batch Save benchmark...'); + logger.info('Running Batch Save benchmark...'); await cleanupDatabase(); results.push(await benchmarkBatchSave()); - logger.error('Running User Signup benchmark...'); + logger.info('Running User Signup benchmark...'); await cleanupDatabase(); results.push(await benchmarkUserSignup()); - logger.error('Running User Login benchmark...'); + logger.info('Running User Login benchmark...'); await cleanupDatabase(); results.push(await benchmarkUserLogin()); // Output results in github-action-benchmark format logger.log(JSON.stringify(results, null, 2)); - logger.error(''); - logger.error('Benchmarks completed successfully!'); - 
logger.error(''); - logger.error('Summary:'); + logger.info('Benchmarks completed successfully!'); + logger.info('Summary:'); results.forEach(result => { - logger.error(` ${result.name}: ${result.value.toFixed(2)} ${result.unit} (${result.extra})`); + logger.info(` ${result.name}: ${result.value.toFixed(2)} ${result.unit} (${result.extra})`); }); } catch (error) { From a2cc0517e4b5f411a5536ed474d8387517523376 Mon Sep 17 00:00:00 2001 From: Manuel Trezza <5673677+mtrezza@users.noreply.github.com> Date: Sun, 9 Nov 2025 01:22:02 +0100 Subject: [PATCH 03/10] log --- benchmark/performance.js | 35 +++++++++++++++++++---------------- 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/benchmark/performance.js b/benchmark/performance.js index 5c8941fc40..a5a7ee69bc 100644 --- a/benchmark/performance.js +++ b/benchmark/performance.js @@ -8,6 +8,8 @@ * Run with: npm run benchmark */ +/* eslint-disable no-console */ + const Parse = require('parse/node'); const logger = require('../lib/Logger').logger; const { performance, PerformanceObserver } = require('perf_hooks'); @@ -294,14 +296,14 @@ async function benchmarkUserLogin() { * Run all benchmarks */ async function runBenchmarks() { - logger.info('Starting Parse Server Performance Benchmarks...'); - logger.info(`Iterations per benchmark: ${ITERATIONS}`); + console.log('Starting Parse Server Performance Benchmarks...'); + console.log(`Iterations per benchmark: ${ITERATIONS}`); let server; try { // Initialize Parse Server - logger.info('Initializing Parse Server...'); + console.log('Initializing Parse Server...'); server = await initializeParseServer(); // Wait for server to be ready @@ -310,45 +312,46 @@ async function runBenchmarks() { const results = []; // Run each benchmark with database cleanup - logger.info('Running Object Create benchmark...'); + console.log('Running Object Create benchmark...'); await cleanupDatabase(); results.push(await benchmarkObjectCreate()); - logger.info('Running Object Read 
benchmark...'); + console.log('Running Object Read benchmark...'); await cleanupDatabase(); results.push(await benchmarkObjectRead()); - logger.info('Running Object Update benchmark...'); + console.log('Running Object Update benchmark...'); await cleanupDatabase(); results.push(await benchmarkObjectUpdate()); - logger.info('Running Simple Query benchmark...'); + console.log('Running Simple Query benchmark...'); await cleanupDatabase(); results.push(await benchmarkSimpleQuery()); - logger.info('Running Batch Save benchmark...'); + console.log('Running Batch Save benchmark...'); await cleanupDatabase(); results.push(await benchmarkBatchSave()); - logger.info('Running User Signup benchmark...'); + console.log('Running User Signup benchmark...'); await cleanupDatabase(); results.push(await benchmarkUserSignup()); - logger.info('Running User Login benchmark...'); + console.log('Running User Login benchmark...'); await cleanupDatabase(); results.push(await benchmarkUserLogin()); - // Output results in github-action-benchmark format - logger.log(JSON.stringify(results, null, 2)); + // Output results in github-action-benchmark format (stdout) + console.log(JSON.stringify(results, null, 2)); - logger.info('Benchmarks completed successfully!'); - logger.info('Summary:'); + // Output summary to stdout for visibility + console.log('Benchmarks completed successfully!'); + console.log('Summary:'); results.forEach(result => { - logger.info(` ${result.name}: ${result.value.toFixed(2)} ${result.unit} (${result.extra})`); + console.log(` ${result.name}: ${result.value.toFixed(2)} ${result.unit} (${result.extra})`); }); } catch (error) { - logger.error('Error running benchmarks:', error); + console.error('Error running benchmarks:', error); process.exit(1); } finally { // Cleanup From a1cc5c38f9e3f9702aa5ab3f80809a22a3c2c36d Mon Sep 17 00:00:00 2001 From: Manuel Trezza <5673677+mtrezza@users.noreply.github.com> Date: Sun, 9 Nov 2025 01:24:36 +0100 Subject: [PATCH 04/10] p95 ---
benchmark/performance.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/benchmark/performance.js b/benchmark/performance.js index a5a7ee69bc..4b77a430fb 100644 --- a/benchmark/performance.js +++ b/benchmark/performance.js @@ -129,10 +129,10 @@ async function measureOperation(name, operation, iterations = ITERATIONS) { return { name, - value: median, // Use median instead of mean for robustness + value: p95, // Use p95 as primary metric (industry standard) unit: 'ms', range: `${min.toFixed(2)} - ${max.toFixed(2)}`, - extra: `p95: ${p95.toFixed(2)}ms, p99: ${p99.toFixed(2)}ms, n=${filtered.length}/${times.length}`, + extra: `median: ${median.toFixed(2)}ms, p99: ${p99.toFixed(2)}ms, n=${filtered.length}/${times.length}`, }; } From 450919aecc86d6173e7edadfc0dd236a6ad9250f Mon Sep 17 00:00:00 2001 From: Manuel Trezza <5673677+mtrezza@users.noreply.github.com> Date: Sun, 9 Nov 2025 01:26:13 +0100 Subject: [PATCH 05/10] fix --- benchmark/performance.js | 1 - 1 file changed, 1 deletion(-) diff --git a/benchmark/performance.js b/benchmark/performance.js index 4b77a430fb..d13d878c7d 100644 --- a/benchmark/performance.js +++ b/benchmark/performance.js @@ -11,7 +11,6 @@ /* eslint-disable no-console */ const Parse = require('parse/node'); -const logger = require('../lib/Logger').logger; const { performance, PerformanceObserver } = require('perf_hooks'); const { MongoClient } = require('mongodb'); From 4f1b7f89c2661f565862743e22eb058333f39c88 Mon Sep 17 00:00:00 2001 From: Manuel Trezza <5673677+mtrezza@users.noreply.github.com> Date: Sun, 9 Nov 2025 01:31:25 +0100 Subject: [PATCH 06/10] fix --- .github/workflows/ci-performance.yml | 10 +++++----- CONTRIBUTING.md | 2 +- benchmark/performance.js | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci-performance.yml b/.github/workflows/ci-performance.yml index 67bc690e89..0fe48eb04e 100644 --- a/.github/workflows/ci-performance.yml +++ b/.github/workflows/ci-performance.yml 
@@ -224,13 +224,13 @@ jobs: const changeStr = change > 0 ? \`+\${change.toFixed(1)}%\` : \`\${change.toFixed(1)}%\`; let status = '✅'; - if (change > 50) { + if (change > 100) { status = '❌ Much Slower'; hasRegression = true; - } else if (change > 25) { + } else if (change > 50) { status = '⚠️ Slower'; hasRegression = true; - } else if (change < -25) { + } else if (change < -50) { status = '🚀 Faster'; hasImprovement = true; } @@ -281,9 +281,9 @@ jobs: echo "" >> comment.md echo "" >> comment.md echo "" >> comment.md - echo "*Benchmarks ran with ${BENCHMARK_ITERATIONS:-300} iterations per test on Node.js ${{ env.NODE_VERSION }}*" >> comment.md + echo "*Benchmarks ran with ${BENCHMARK_ITERATIONS:-1000} iterations per test on Node.js ${{ env.NODE_VERSION }}*" >> comment.md echo "" >> comment.md - echo "> **Note:** Benchmarks run on shared CI infrastructure and may show ±20% variance between runs. Only sustained regressions >25% are flagged." >> comment.md + echo "> **Note:** Benchmarks run on shared CI infrastructure and may show ±30% variance between runs. Only regressions >50% are flagged as concerning." >> comment.md - name: Comment PR with results if: github.event_name == 'pull_request' diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index f79caa4236..d3e61e637c 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -341,7 +341,7 @@ Performance benchmarks are located in [`benchmark/performance.js`](benchmark/per 4. **Test locally**: Run the benchmarks locally to verify they work: ```bash npm run benchmark:quick # Quick test with 10 iterations - npm run benchmark # Full test with 100 iterations + npm run benchmark # Full test with 1000 iterations ``` For new features where no baseline exists, the CI will establish new benchmarks that future PRs will be compared against. 
diff --git a/benchmark/performance.js b/benchmark/performance.js index d13d878c7d..9bdd066779 100644 --- a/benchmark/performance.js +++ b/benchmark/performance.js @@ -19,7 +19,7 @@ const MONGODB_URI = process.env.MONGODB_URI || 'mongodb://localhost:27017/parse_ const SERVER_URL = 'http://localhost:1337/parse'; const APP_ID = 'benchmark-app-id'; const MASTER_KEY = 'benchmark-master-key'; -const ITERATIONS = parseInt(process.env.BENCHMARK_ITERATIONS || '300', 10); +const ITERATIONS = parseInt(process.env.BENCHMARK_ITERATIONS || '1000', 10); // Parse Server instance let parseServer; From 9fb692c4a39aed17cdefa7e7a00ea93d966ff978 Mon Sep 17 00:00:00 2001 From: Manuel Trezza <5673677+mtrezza@users.noreply.github.com> Date: Sun, 9 Nov 2025 01:32:40 +0100 Subject: [PATCH 07/10] median --- benchmark/performance.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/benchmark/performance.js b/benchmark/performance.js index 9bdd066779..8f40e471eb 100644 --- a/benchmark/performance.js +++ b/benchmark/performance.js @@ -128,10 +128,10 @@ async function measureOperation(name, operation, iterations = ITERATIONS) { return { name, - value: p95, // Use p95 as primary metric (industry standard) + value: median, // Use median (p50) as primary metric for stability in CI unit: 'ms', range: `${min.toFixed(2)} - ${max.toFixed(2)}`, - extra: `median: ${median.toFixed(2)}ms, p99: ${p99.toFixed(2)}ms, n=${filtered.length}/${times.length}`, + extra: `p95: ${p95.toFixed(2)}ms, p99: ${p99.toFixed(2)}ms, n=${filtered.length}/${times.length}`, }; } From 9e92063cf85dfb5d4f000a4f849ab2cfdc08dea9 Mon Sep 17 00:00:00 2001 From: Manuel Trezza <5673677+mtrezza@users.noreply.github.com> Date: Sun, 9 Nov 2025 01:47:18 +0100 Subject: [PATCH 08/10] fix --- .github/workflows/ci-performance.yml | 16 ++++++++++------ CONTRIBUTING.md | 2 +- benchmark/performance.js | 4 +++- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/.github/workflows/ci-performance.yml 
b/.github/workflows/ci-performance.yml index 0fe48eb04e..9d79b53883 100644 --- a/.github/workflows/ci-performance.yml +++ b/.github/workflows/ci-performance.yml @@ -47,6 +47,8 @@ jobs: - name: Run baseline benchmarks id: baseline + env: + NODE_ENV: production run: | echo "Checking if benchmark script exists..." if [ ! -f "benchmark/performance.js" ]; then @@ -56,8 +58,8 @@ jobs: echo "Baseline: N/A (benchmark script not in base branch)" > baseline-output.txt exit 0 fi - echo "Running baseline benchmarks..." - npm run benchmark > baseline-output.txt 2>&1 || true + echo "Running baseline benchmarks with CPU affinity..." + taskset -c 0 npm run benchmark > baseline-output.txt 2>&1 || npm run benchmark > baseline-output.txt 2>&1 || true echo "Benchmark command completed with exit code: $?" echo "Output file size: $(wc -c < baseline-output.txt) bytes" echo "--- Begin baseline-output.txt ---" @@ -111,9 +113,11 @@ jobs: - name: Run PR benchmarks id: pr-bench + env: + NODE_ENV: production run: | - echo "Running PR benchmarks..." - npm run benchmark > pr-output.txt 2>&1 || true + echo "Running PR benchmarks with CPU affinity..." + taskset -c 0 npm run benchmark > pr-output.txt 2>&1 || npm run benchmark > pr-output.txt 2>&1 || true echo "Benchmark command completed with exit code: $?" echo "Output file size: $(wc -c < pr-output.txt) bytes" echo "--- Begin pr-output.txt ---" @@ -281,9 +285,9 @@ jobs: echo "" >> comment.md echo "" >> comment.md echo "" >> comment.md - echo "*Benchmarks ran with ${BENCHMARK_ITERATIONS:-1000} iterations per test on Node.js ${{ env.NODE_VERSION }}*" >> comment.md + echo "*Benchmarks ran with ${BENCHMARK_ITERATIONS:-100000} iterations per test on Node.js ${{ env.NODE_VERSION }} (production mode, CPU pinned)*" >> comment.md echo "" >> comment.md - echo "> **Note:** Benchmarks run on shared CI infrastructure and may show ±30% variance between runs. Only regressions >50% are flagged as concerning." 
>> comment.md + echo "> **Note:** Using 100k iterations with CPU affinity for measurement stability. Thresholds: ⚠️ >50%, ❌ >100%." >> comment.md - name: Comment PR with results if: github.event_name == 'pull_request' diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index d3e61e637c..b1f7958240 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -341,7 +341,7 @@ Performance benchmarks are located in [`benchmark/performance.js`](benchmark/per 4. **Test locally**: Run the benchmarks locally to verify they work: ```bash npm run benchmark:quick # Quick test with 10 iterations - npm run benchmark # Full test with 1000 iterations + npm run benchmark # Full test with 100,000 iterations ``` For new features where no baseline exists, the CI will establish new benchmarks that future PRs will be compared against. diff --git a/benchmark/performance.js b/benchmark/performance.js index 8f40e471eb..ee99197b5e 100644 --- a/benchmark/performance.js +++ b/benchmark/performance.js @@ -19,7 +19,7 @@ const MONGODB_URI = process.env.MONGODB_URI || 'mongodb://localhost:27017/parse_ const SERVER_URL = 'http://localhost:1337/parse'; const APP_ID = 'benchmark-app-id'; const MASTER_KEY = 'benchmark-master-key'; -const ITERATIONS = parseInt(process.env.BENCHMARK_ITERATIONS || '1000', 10); +const ITERATIONS = parseInt(process.env.BENCHMARK_ITERATIONS || '100000', 10); // Parse Server instance let parseServer; @@ -41,6 +41,8 @@ async function initializeParseServer() { serverURL: SERVER_URL, silent: true, allowClientClassCreation: true, + logLevel: 'error', // Minimal logging for performance + verbose: false, }); app.use('/parse', parseServer.app); From d7616234411d2b92e815392642e2aae62b408618 Mon Sep 17 00:00:00 2001 From: Manuel Trezza <5673677+mtrezza@users.noreply.github.com> Date: Sun, 9 Nov 2025 01:58:05 +0100 Subject: [PATCH 09/10] fix --- .github/workflows/ci-performance.yml | 31 ++++++++++++++++++++++------ CONTRIBUTING.md | 2 +- benchmark/performance.js | 2 +- 3 files changed, 27 
insertions(+), 8 deletions(-) diff --git a/.github/workflows/ci-performance.yml b/.github/workflows/ci-performance.yml index 9d79b53883..438f5b1233 100644 --- a/.github/workflows/ci-performance.yml +++ b/.github/workflows/ci-performance.yml @@ -27,11 +27,31 @@ jobs: timeout-minutes: 30 steps: + - name: Checkout PR branch (for benchmark script) + uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha }} + fetch-depth: 1 + + - name: Save PR benchmark script + run: | + mkdir -p /tmp/pr-benchmark + cp -r benchmark /tmp/pr-benchmark/ || echo "No benchmark directory" + cp package.json /tmp/pr-benchmark/ || true + - name: Checkout base branch uses: actions/checkout@v4 with: ref: ${{ github.base_ref }} fetch-depth: 1 + clean: true + + - name: Restore PR benchmark script + run: | + if [ -d "/tmp/pr-benchmark/benchmark" ]; then + rm -rf benchmark + cp -r /tmp/pr-benchmark/benchmark . + fi - name: Setup Node.js uses: actions/setup-node@v4 @@ -50,15 +70,14 @@ jobs: env: NODE_ENV: production run: | - echo "Checking if benchmark script exists..." + echo "Running baseline benchmarks with CPU affinity (using PR's benchmark script)..." if [ ! -f "benchmark/performance.js" ]; then - echo "⚠️ Benchmark script not found in base branch - this is expected for new features" + echo "⚠️ Benchmark script not found - this is expected for new features" echo "Skipping baseline benchmark" echo '[]' > baseline.json - echo "Baseline: N/A (benchmark script not in base branch)" > baseline-output.txt + echo "Baseline: N/A (no benchmark script)" > baseline-output.txt exit 0 fi - echo "Running baseline benchmarks with CPU affinity..." taskset -c 0 npm run benchmark > baseline-output.txt 2>&1 || npm run benchmark > baseline-output.txt 2>&1 || true echo "Benchmark command completed with exit code: $?" 
echo "Output file size: $(wc -c < baseline-output.txt) bytes" @@ -285,9 +304,9 @@ jobs: echo "" >> comment.md echo "" >> comment.md echo "" >> comment.md - echo "*Benchmarks ran with ${BENCHMARK_ITERATIONS:-100000} iterations per test on Node.js ${{ env.NODE_VERSION }} (production mode, CPU pinned)*" >> comment.md + echo "*Benchmarks ran with ${BENCHMARK_ITERATIONS:-1000} iterations per test on Node.js ${{ env.NODE_VERSION }} (production mode, CPU pinned)*" >> comment.md echo "" >> comment.md - echo "> **Note:** Using 100k iterations with CPU affinity for measurement stability. Thresholds: ⚠️ >50%, ❌ >100%." >> comment.md + echo "> **Note:** Using 1k iterations with CPU affinity for measurement stability. Thresholds: ⚠️ >50%, ❌ >100%." >> comment.md - name: Comment PR with results if: github.event_name == 'pull_request' diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index b1f7958240..8e3db29efa 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -341,7 +341,7 @@ Performance benchmarks are located in [`benchmark/performance.js`](benchmark/per 4. **Test locally**: Run the benchmarks locally to verify they work: ```bash npm run benchmark:quick # Quick test with 10 iterations - npm run benchmark # Full test with 100,000 iterations + npm run benchmark # Full test with 1,000 iterations ``` For new features where no baseline exists, the CI will establish new benchmarks that future PRs will be compared against. 
diff --git a/benchmark/performance.js b/benchmark/performance.js index ee99197b5e..0c2dffc211 100644 --- a/benchmark/performance.js +++ b/benchmark/performance.js @@ -19,7 +19,7 @@ const MONGODB_URI = process.env.MONGODB_URI || 'mongodb://localhost:27017/parse_ const SERVER_URL = 'http://localhost:1337/parse'; const APP_ID = 'benchmark-app-id'; const MASTER_KEY = 'benchmark-master-key'; -const ITERATIONS = parseInt(process.env.BENCHMARK_ITERATIONS || '100000', 10); +const ITERATIONS = parseInt(process.env.BENCHMARK_ITERATIONS || '1000', 10); // Parse Server instance let parseServer; From 28cf8f3b7e9b73e7997abbf2850ad768bee82a7b Mon Sep 17 00:00:00 2001 From: Manuel Trezza <5673677+mtrezza@users.noreply.github.com> Date: Sun, 9 Nov 2025 02:01:22 +0100 Subject: [PATCH 10/10] fix --- .github/workflows/ci-performance.yml | 4 ++-- CONTRIBUTING.md | 2 +- benchmark/performance.js | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci-performance.yml b/.github/workflows/ci-performance.yml index 438f5b1233..3fd2200d02 100644 --- a/.github/workflows/ci-performance.yml +++ b/.github/workflows/ci-performance.yml @@ -304,9 +304,9 @@ jobs: echo "" >> comment.md echo "" >> comment.md echo "" >> comment.md - echo "*Benchmarks ran with ${BENCHMARK_ITERATIONS:-1000} iterations per test on Node.js ${{ env.NODE_VERSION }} (production mode, CPU pinned)*" >> comment.md + echo "*Benchmarks ran with ${BENCHMARK_ITERATIONS:-10000} iterations per test on Node.js ${{ env.NODE_VERSION }} (production mode, CPU pinned)*" >> comment.md echo "" >> comment.md - echo "> **Note:** Using 1k iterations with CPU affinity for measurement stability. Thresholds: ⚠️ >50%, ❌ >100%." >> comment.md + echo "> **Note:** Using 10k iterations with CPU affinity for measurement stability. Thresholds: ⚠️ >50%, ❌ >100%." 
>> comment.md - name: Comment PR with results if: github.event_name == 'pull_request' diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8e3db29efa..30050f87a6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -341,7 +341,7 @@ Performance benchmarks are located in [`benchmark/performance.js`](benchmark/per 4. **Test locally**: Run the benchmarks locally to verify they work: ```bash npm run benchmark:quick # Quick test with 10 iterations - npm run benchmark # Full test with 1,000 iterations + npm run benchmark # Full test with 10,000 iterations ``` For new features where no baseline exists, the CI will establish new benchmarks that future PRs will be compared against. diff --git a/benchmark/performance.js b/benchmark/performance.js index 0c2dffc211..7021ed35b3 100644 --- a/benchmark/performance.js +++ b/benchmark/performance.js @@ -19,7 +19,7 @@ const MONGODB_URI = process.env.MONGODB_URI || 'mongodb://localhost:27017/parse_ const SERVER_URL = 'http://localhost:1337/parse'; const APP_ID = 'benchmark-app-id'; const MASTER_KEY = 'benchmark-master-key'; -const ITERATIONS = parseInt(process.env.BENCHMARK_ITERATIONS || '1000', 10); +const ITERATIONS = parseInt(process.env.BENCHMARK_ITERATIONS || '10000', 10); // Parse Server instance let parseServer; @@ -363,7 +363,7 @@ async function runBenchmarks() { server.close(); } // Give some time for cleanup - setTimeout(() => process.exit(0), 1000); + setTimeout(() => process.exit(0), 10000); } }