From 7105e2a892b2619819f1556aebf271b952c55266 Mon Sep 17 00:00:00 2001
From: Manuel Trezza <5673677+mtrezza@users.noreply.github.com>
Date: Sat, 8 Nov 2025 20:38:04 +0100
Subject: [PATCH 01/12] feat
---
.github/workflows/performance.yml | 278 ++++++++++++++++++++++++
benchmark/performance.js | 342 ++++++++++++++++++++++++++++++
package.json | 5 +-
3 files changed, 624 insertions(+), 1 deletion(-)
create mode 100644 .github/workflows/performance.yml
create mode 100644 benchmark/performance.js
diff --git a/.github/workflows/performance.yml b/.github/workflows/performance.yml
new file mode 100644
index 0000000000..257fd061d5
--- /dev/null
+++ b/.github/workflows/performance.yml
@@ -0,0 +1,278 @@
+name: Performance Impact Check
+on:
+ pull_request:
+ branches:
+ - alpha
+ - beta
+ - release
+ - 'release-[0-9]+.x.x'
+ - next-major
+ paths-ignore:
+ - '**.md'
+ - 'docs/**'
+ - '.github/**'
+ - '!.github/workflows/performance.yml'
+
+env:
+ NODE_VERSION: 24.11.0
+ MONGODB_VERSION: 8.0.4
+
+jobs:
+ performance-check:
+ name: Run Performance Benchmarks
+ runs-on: ubuntu-latest
+ timeout-minutes: 30
+ permissions:
+ contents: write
+ pull-requests: write
+ deployments: write
+
+ steps:
+ - name: Checkout base branch
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ github.base_ref }}
+ fetch-depth: 1
+
+ - name: Setup Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: ${{ env.NODE_VERSION }}
+ cache: 'npm'
+
+ - name: Install dependencies (base)
+ run: npm ci
+
+ - name: Build Parse Server (base)
+ run: npm run build
+
+ - name: Run baseline benchmarks
+ id: baseline
+ run: |
+ npm run benchmark > baseline-output.txt 2>&1 || true
+ # Extract JSON from output (last valid JSON block)
+ grep -A 1000 '^\[' baseline-output.txt | grep -B 1000 '^\]' | head -n -0 > baseline.json || echo '[]' > baseline.json
+ echo "Baseline benchmark results:"
+ cat baseline.json
+ continue-on-error: true
+
+ - name: Upload baseline results
+ uses: actions/upload-artifact@v4
+ with:
+ name: baseline-benchmark
+ path: |
+ baseline.json
+ baseline-output.txt
+ retention-days: 7
+
+ - name: Checkout PR branch
+ uses: actions/checkout@v4
+ with:
+ ref: ${{ github.event.pull_request.head.sha }}
+ fetch-depth: 1
+ clean: true
+
+ - name: Setup Node.js (PR)
+ uses: actions/setup-node@v4
+ with:
+ node-version: ${{ env.NODE_VERSION }}
+ cache: 'npm'
+
+ - name: Install dependencies (PR)
+ run: npm ci
+
+ - name: Build Parse Server (PR)
+ run: npm run build
+
+ - name: Run PR benchmarks
+ id: pr-bench
+ run: |
+ npm run benchmark > pr-output.txt 2>&1 || true
+ # Extract JSON from output (last valid JSON block)
+ grep -A 1000 '^\[' pr-output.txt | grep -B 1000 '^\]' | head -n -0 > pr.json || echo '[]' > pr.json
+ echo "PR benchmark results:"
+ cat pr.json
+ continue-on-error: true
+
+ - name: Upload PR results
+ uses: actions/upload-artifact@v4
+ with:
+ name: pr-benchmark
+ path: |
+ pr.json
+ pr-output.txt
+ retention-days: 7
+
+ - name: Store benchmark result (baseline)
+ uses: benchmark-action/github-action-benchmark@v1
+ if: github.event_name == 'pull_request'
+ with:
+ name: Parse Server Performance (baseline)
+ tool: 'customBiggerIsBetter'
+ output-file-path: baseline.json
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ auto-push: false
+ save-data-file: false
+ comment-on-alert: false
+
+ - name: Store benchmark result (PR)
+ uses: benchmark-action/github-action-benchmark@v1
+ if: github.event_name == 'pull_request'
+ with:
+ name: Parse Server Performance
+ tool: 'customBiggerIsBetter'
+ output-file-path: pr.json
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ auto-push: false
+ save-data-file: false
+ alert-threshold: '110%'
+ comment-on-alert: true
+ fail-on-alert: false
+ alert-comment-cc-users: '@parse-community/maintainers'
+ summary-always: true
+
+ - name: Compare benchmark results
+ id: compare
+ run: |
+ node -e "
+ const fs = require('fs');
+
+ let baseline, pr;
+ try {
+ baseline = JSON.parse(fs.readFileSync('baseline.json', 'utf8'));
+ pr = JSON.parse(fs.readFileSync('pr.json', 'utf8'));
+ } catch (e) {
+ console.log('⚠️ Could not parse benchmark results');
+ process.exit(0);
+ }
+
+ if (!Array.isArray(baseline) || !Array.isArray(pr) || baseline.length === 0 || pr.length === 0) {
+ console.log('⚠️ Benchmark results are empty or invalid');
+ process.exit(0);
+ }
+
+ console.log('# Performance Comparison\n');
+ console.log('| Benchmark | Baseline | PR | Change | Status |');
+ console.log('|-----------|----------|----|---------| ------ |');
+
+ let hasRegression = false;
+ let hasImprovement = false;
+
+ baseline.forEach(baseResult => {
+ const prResult = pr.find(p => p.name === baseResult.name);
+ if (!prResult) {
+ console.log(\`| \${baseResult.name} | \${baseResult.value.toFixed(2)} ms | N/A | - | ⚠️ Missing |\`);
+ return;
+ }
+
+ const baseValue = parseFloat(baseResult.value);
+ const prValue = parseFloat(prResult.value);
+ const change = ((prValue - baseValue) / baseValue * 100);
+ const changeStr = change > 0 ? \`+\${change.toFixed(1)}%\` : \`\${change.toFixed(1)}%\`;
+
+ let status = '✅';
+ if (change > 20) {
+ status = '❌ Much Slower';
+ hasRegression = true;
+ } else if (change > 10) {
+ status = '⚠️ Slower';
+ hasRegression = true;
+ } else if (change < -10) {
+ status = '🚀 Faster';
+ hasImprovement = true;
+ }
+
+ console.log(\`| \${baseResult.name} | \${baseValue.toFixed(2)} ms | \${prValue.toFixed(2)} ms | \${changeStr} | \${status} |\`);
+ });
+
+ console.log('');
+ if (hasRegression) {
+ console.log('⚠️ **Performance regressions detected.** Please review the changes.');
+ } else if (hasImprovement) {
+ console.log('🚀 **Performance improvements detected!** Great work!');
+ } else {
+ console.log('✅ **No significant performance changes.**');
+ }
+ " | tee comparison.md
+
+ - name: Upload comparison
+ uses: actions/upload-artifact@v4
+ with:
+ name: benchmark-comparison
+ path: comparison.md
+ retention-days: 30
+
+ - name: Comment PR with results
+ if: github.event_name == 'pull_request'
+ uses: actions/github-script@v7
+ continue-on-error: true
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ script: |
+ const fs = require('fs');
+
+ let comparisonMd = '';
+ try {
+ comparisonMd = fs.readFileSync('comparison.md', 'utf8');
+ } catch (e) {
+ comparisonMd = '⚠️ Could not generate performance comparison.';
+ }
+
+ const body = `## Performance Impact Report
+
+ ${comparisonMd}
+
+
+ 📊 View detailed results
+
+ ### Baseline Results
+ \`\`\`json
+ ${fs.readFileSync('baseline.json', 'utf8')}
+ \`\`\`
+
+ ### PR Results
+ \`\`\`json
+ ${fs.readFileSync('pr.json', 'utf8')}
+ \`\`\`
+
+
+
+ *Benchmarks ran with ${process.env.BENCHMARK_ITERATIONS || '100'} iterations per test on Node.js ${process.env.NODE_VERSION}*
+ `;
+
+ // Find existing performance comment
+ const comments = await github.rest.issues.listComments({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: context.issue.number,
+ });
+
+ const existingComment = comments.data.find(comment =>
+ comment.user.login === 'github-actions[bot]' &&
+ comment.body.includes('Performance Impact Report')
+ );
+
+ if (existingComment) {
+ await github.rest.issues.updateComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ comment_id: existingComment.id,
+ body: body,
+ });
+ } else {
+ await github.rest.issues.createComment({
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ issue_number: context.issue.number,
+ body: body,
+ });
+ }
+
+ - name: Generate job summary
+ if: always()
+ run: |
+ if [ -f comparison.md ]; then
+ cat comparison.md >> $GITHUB_STEP_SUMMARY
+ else
+ echo "⚠️ Benchmark comparison not available" >> $GITHUB_STEP_SUMMARY
+ fi
diff --git a/benchmark/performance.js b/benchmark/performance.js
new file mode 100644
index 0000000000..5d472e1501
--- /dev/null
+++ b/benchmark/performance.js
@@ -0,0 +1,342 @@
+/**
+ * Performance Benchmark Suite for Parse Server
+ *
+ * This suite measures the performance of critical Parse Server operations
+ * using the Node.js Performance API. Results are output in a format
+ * compatible with github-action-benchmark.
+ *
+ * Run with: npm run benchmark
+ */
+
+const Parse = require('parse/node');
+const { performance, PerformanceObserver } = require('perf_hooks');
+const { MongoClient } = require('mongodb');
+
+// Configuration
+const MONGODB_URI = process.env.MONGODB_URI || 'mongodb://localhost:27017/parse_benchmark_test';
+const SERVER_URL = 'http://localhost:1337/parse';
+const APP_ID = 'benchmark-app-id';
+const MASTER_KEY = 'benchmark-master-key';
+const ITERATIONS = parseInt(process.env.BENCHMARK_ITERATIONS || '100', 10);
+
+// Parse Server instance
+let parseServer;
+let mongoClient;
+
+/**
+ * Initialize Parse Server for benchmarking
+ */
+async function initializeParseServer() {
+ const express = require('express');
+ const { default: ParseServer } = require('../lib/index.js');
+
+ const app = express();
+
+ parseServer = new ParseServer({
+ databaseURI: MONGODB_URI,
+ appId: APP_ID,
+ masterKey: MASTER_KEY,
+ serverURL: SERVER_URL,
+ silent: true,
+ allowClientClassCreation: true,
+ });
+
+ app.use('/parse', parseServer.app);
+
+ return new Promise((resolve) => {
+ const server = app.listen(1337, () => {
+ Parse.initialize(APP_ID);
+ Parse.masterKey = MASTER_KEY;
+ Parse.serverURL = SERVER_URL;
+ resolve(server);
+ });
+ });
+}
+
+/**
+ * Clean up database between benchmarks
+ */
+async function cleanupDatabase() {
+ if (!mongoClient) {
+ mongoClient = await MongoClient.connect(MONGODB_URI);
+ }
+ const db = mongoClient.db();
+ const collections = await db.listCollections().toArray();
+
+ for (const collection of collections) {
+ if (!collection.name.startsWith('system.')) {
+ await db.collection(collection.name).deleteMany({});
+ }
+ }
+}
+
+/**
+ * Measure average time for an async operation over multiple iterations
+ */
+async function measureOperation(name, operation, iterations = ITERATIONS) {
+ const times = [];
+
+ for (let i = 0; i < iterations; i++) {
+ const start = performance.now();
+ await operation();
+ const end = performance.now();
+ times.push(end - start);
+ }
+
+ // Calculate statistics
+ times.sort((a, b) => a - b);
+ const sum = times.reduce((acc, val) => acc + val, 0);
+ const mean = sum / times.length;
+ const p50 = times[Math.floor(times.length * 0.5)];
+ const p95 = times[Math.floor(times.length * 0.95)];
+ const p99 = times[Math.floor(times.length * 0.99)];
+ const min = times[0];
+ const max = times[times.length - 1];
+
+ return {
+ name,
+ value: mean,
+ unit: 'ms',
+ range: `${min.toFixed(2)} - ${max.toFixed(2)}`,
+ extra: `p50: ${p50.toFixed(2)}ms, p95: ${p95.toFixed(2)}ms, p99: ${p99.toFixed(2)}ms`,
+ };
+}
+
+/**
+ * Benchmark: Object Create
+ */
+async function benchmarkObjectCreate() {
+ let counter = 0;
+
+ return measureOperation('Object Create', async () => {
+ const TestObject = Parse.Object.extend('BenchmarkTest');
+ const obj = new TestObject();
+ obj.set('testField', `test-value-${counter++}`);
+ obj.set('number', counter);
+ obj.set('boolean', true);
+ await obj.save();
+ });
+}
+
+/**
+ * Benchmark: Object Read (by ID)
+ */
+async function benchmarkObjectRead() {
+ // Setup: Create test objects
+ const TestObject = Parse.Object.extend('BenchmarkTest');
+ const objects = [];
+
+ for (let i = 0; i < ITERATIONS; i++) {
+ const obj = new TestObject();
+ obj.set('testField', `read-test-${i}`);
+ objects.push(obj);
+ }
+
+ await Parse.Object.saveAll(objects);
+
+ let counter = 0;
+
+ return measureOperation('Object Read', async () => {
+ const query = new Parse.Query('BenchmarkTest');
+ await query.get(objects[counter++ % objects.length].id);
+ });
+}
+
+/**
+ * Benchmark: Object Update
+ */
+async function benchmarkObjectUpdate() {
+ // Setup: Create test objects
+ const TestObject = Parse.Object.extend('BenchmarkTest');
+ const objects = [];
+
+ for (let i = 0; i < ITERATIONS; i++) {
+ const obj = new TestObject();
+ obj.set('testField', `update-test-${i}`);
+ obj.set('counter', 0);
+ objects.push(obj);
+ }
+
+ await Parse.Object.saveAll(objects);
+
+ let counter = 0;
+
+ return measureOperation('Object Update', async () => {
+ const obj = objects[counter++ % objects.length];
+ obj.increment('counter');
+ obj.set('lastUpdated', new Date());
+ await obj.save();
+ });
+}
+
+/**
+ * Benchmark: Simple Query
+ */
+async function benchmarkSimpleQuery() {
+ // Setup: Create test data
+ const TestObject = Parse.Object.extend('BenchmarkTest');
+ const objects = [];
+
+ for (let i = 0; i < 100; i++) {
+ const obj = new TestObject();
+ obj.set('category', i % 10);
+ obj.set('value', i);
+ objects.push(obj);
+ }
+
+ await Parse.Object.saveAll(objects);
+
+ let counter = 0;
+
+ return measureOperation('Simple Query', async () => {
+ const query = new Parse.Query('BenchmarkTest');
+ query.equalTo('category', counter++ % 10);
+ await query.find();
+ });
+}
+
+/**
+ * Benchmark: Batch Save (saveAll)
+ */
+async function benchmarkBatchSave() {
+ const BATCH_SIZE = 10;
+
+ return measureOperation('Batch Save (10 objects)', async () => {
+ const TestObject = Parse.Object.extend('BenchmarkTest');
+ const objects = [];
+
+ for (let i = 0; i < BATCH_SIZE; i++) {
+ const obj = new TestObject();
+ obj.set('batchField', `batch-${i}`);
+ obj.set('timestamp', new Date());
+ objects.push(obj);
+ }
+
+ await Parse.Object.saveAll(objects);
+ }, Math.floor(ITERATIONS / BATCH_SIZE)); // Fewer iterations for batch operations
+}
+
+/**
+ * Benchmark: User Signup
+ */
+async function benchmarkUserSignup() {
+ let counter = 0;
+
+ return measureOperation('User Signup', async () => {
+ const user = new Parse.User();
+ user.set('username', `benchmark_user_${Date.now()}_${counter}`);
+ user.set('password', 'benchmark_password');
+ user.set('email', `benchmark${counter}@example.com`);
+ counter++;
+ await user.signUp();
+ }, Math.floor(ITERATIONS / 10)); // Fewer iterations for user operations
+}
+
+/**
+ * Benchmark: User Login
+ */
+async function benchmarkUserLogin() {
+ // Setup: Create test users
+ const users = [];
+
+ for (let i = 0; i < 10; i++) {
+ const user = new Parse.User();
+ user.set('username', `benchmark_login_user_${i}`);
+ user.set('password', 'benchmark_password');
+ user.set('email', `login${i}@example.com`);
+ await user.signUp();
+ users.push({ username: user.get('username'), password: 'benchmark_password' });
+ await Parse.User.logOut();
+ }
+
+ let counter = 0;
+
+ return measureOperation('User Login', async () => {
+ const userCreds = users[counter++ % users.length];
+ await Parse.User.logIn(userCreds.username, userCreds.password);
+ await Parse.User.logOut();
+ }, Math.floor(ITERATIONS / 10)); // Fewer iterations for user operations
+}
+
+/**
+ * Run all benchmarks
+ */
+async function runBenchmarks() {
+ console.error('Starting Parse Server Performance Benchmarks...');
+ console.error(`Iterations per benchmark: ${ITERATIONS}`);
+ console.error('');
+
+ let server;
+
+ try {
+ // Initialize Parse Server
+ console.error('Initializing Parse Server...');
+ server = await initializeParseServer();
+
+ // Wait for server to be ready
+ await new Promise(resolve => setTimeout(resolve, 2000));
+
+ const results = [];
+
+ // Run each benchmark with database cleanup
+ console.error('Running Object Create benchmark...');
+ await cleanupDatabase();
+ results.push(await benchmarkObjectCreate());
+
+ console.error('Running Object Read benchmark...');
+ await cleanupDatabase();
+ results.push(await benchmarkObjectRead());
+
+ console.error('Running Object Update benchmark...');
+ await cleanupDatabase();
+ results.push(await benchmarkObjectUpdate());
+
+ console.error('Running Simple Query benchmark...');
+ await cleanupDatabase();
+ results.push(await benchmarkSimpleQuery());
+
+ console.error('Running Batch Save benchmark...');
+ await cleanupDatabase();
+ results.push(await benchmarkBatchSave());
+
+ console.error('Running User Signup benchmark...');
+ await cleanupDatabase();
+ results.push(await benchmarkUserSignup());
+
+ console.error('Running User Login benchmark...');
+ await cleanupDatabase();
+ results.push(await benchmarkUserLogin());
+
+ // Output results in github-action-benchmark format
+ console.log(JSON.stringify(results, null, 2));
+
+ console.error('');
+ console.error('Benchmarks completed successfully!');
+ console.error('');
+ console.error('Summary:');
+ results.forEach(result => {
+ console.error(` ${result.name}: ${result.value.toFixed(2)} ${result.unit} (${result.extra})`);
+ });
+
+ } catch (error) {
+ console.error('Error running benchmarks:', error);
+ process.exit(1);
+ } finally {
+ // Cleanup
+ if (mongoClient) {
+ await mongoClient.close();
+ }
+ if (server) {
+ server.close();
+ }
+ // Give some time for cleanup
+ setTimeout(() => process.exit(0), 1000);
+ }
+}
+
+// Run benchmarks if executed directly
+if (require.main === module) {
+ runBenchmarks();
+}
+
+module.exports = { runBenchmarks };
diff --git a/package.json b/package.json
index add0d15339..fe043e8ee4 100644
--- a/package.json
+++ b/package.json
@@ -138,7 +138,10 @@
"prettier": "prettier --write {src,spec}/{**/*,*}.js",
"prepare": "npm run build",
"postinstall": "node -p 'require(\"./postinstall.js\")()'",
- "madge:circular": "node_modules/.bin/madge ./src --circular"
+ "madge:circular": "node_modules/.bin/madge ./src --circular",
+ "benchmark": "cross-env MONGODB_VERSION=8.0.4 MONGODB_TOPOLOGY=standalone mongodb-runner exec -t standalone --version 8.0.4 -- --port 27017 -- npm run benchmark:only",
+ "benchmark:only": "node benchmark/performance.js",
+ "benchmark:quick": "cross-env BENCHMARK_ITERATIONS=10 npm run benchmark:only"
},
"types": "types/index.d.ts",
"engines": {
From 2595892bd9634a987f560277a80d36a0170add23 Mon Sep 17 00:00:00 2001
From: Manuel Trezza <5673677+mtrezza@users.noreply.github.com>
Date: Sat, 8 Nov 2025 20:39:44 +0100
Subject: [PATCH 02/12] naming
---
.github/workflows/{performance.yml => ci-performance.yml} | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
rename .github/workflows/{performance.yml => ci-performance.yml} (99%)
diff --git a/.github/workflows/performance.yml b/.github/workflows/ci-performance.yml
similarity index 99%
rename from .github/workflows/performance.yml
rename to .github/workflows/ci-performance.yml
index 257fd061d5..5ee3a8bb1b 100644
--- a/.github/workflows/performance.yml
+++ b/.github/workflows/ci-performance.yml
@@ -1,4 +1,4 @@
-name: Performance Impact Check
+name: ci-performance
on:
pull_request:
branches:
From b05494cfc8893a5f84db475cb874486f4b9d5830 Mon Sep 17 00:00:00 2001
From: Manuel Trezza <5673677+mtrezza@users.noreply.github.com>
Date: Sat, 8 Nov 2025 20:45:05 +0100
Subject: [PATCH 03/12] fix
---
.github/workflows/ci-performance.yml | 37 +++++++++++++++-------------
1 file changed, 20 insertions(+), 17 deletions(-)
diff --git a/.github/workflows/ci-performance.yml b/.github/workflows/ci-performance.yml
index 5ee3a8bb1b..82fc1d9df5 100644
--- a/.github/workflows/ci-performance.yml
+++ b/.github/workflows/ci-performance.yml
@@ -50,8 +50,8 @@ jobs:
id: baseline
run: |
npm run benchmark > baseline-output.txt 2>&1 || true
- # Extract JSON from output (last valid JSON block)
- grep -A 1000 '^\[' baseline-output.txt | grep -B 1000 '^\]' | head -n -0 > baseline.json || echo '[]' > baseline.json
+ # Extract JSON from output (everything between first [ and last ])
+ sed -n '/^\[/,/^\]/p' baseline-output.txt > baseline.json || echo '[]' > baseline.json
echo "Baseline benchmark results:"
cat baseline.json
continue-on-error: true
@@ -88,8 +88,8 @@ jobs:
id: pr-bench
run: |
npm run benchmark > pr-output.txt 2>&1 || true
- # Extract JSON from output (last valid JSON block)
- grep -A 1000 '^\[' pr-output.txt | grep -B 1000 '^\]' | head -n -0 > pr.json || echo '[]' > pr.json
+ # Extract JSON from output (everything between first [ and last ])
+ sed -n '/^\[/,/^\]/p' pr-output.txt > pr.json || echo '[]' > pr.json
echo "PR benchmark results:"
cat pr.json
continue-on-error: true
@@ -103,24 +103,27 @@ jobs:
pr-output.txt
retention-days: 7
- - name: Store benchmark result (baseline)
- uses: benchmark-action/github-action-benchmark@v1
- if: github.event_name == 'pull_request'
- with:
- name: Parse Server Performance (baseline)
- tool: 'customBiggerIsBetter'
- output-file-path: baseline.json
- github-token: ${{ secrets.GITHUB_TOKEN }}
- auto-push: false
- save-data-file: false
- comment-on-alert: false
+ - name: Verify benchmark files exist
+ run: |
+ echo "Checking for benchmark result files..."
+ if [ ! -f baseline.json ] || [ ! -s baseline.json ]; then
+ echo "⚠️ baseline.json is missing or empty, creating empty array"
+ echo '[]' > baseline.json
+ fi
+ if [ ! -f pr.json ] || [ ! -s pr.json ]; then
+ echo "⚠️ pr.json is missing or empty, creating empty array"
+ echo '[]' > pr.json
+ fi
+ echo "baseline.json size: $(wc -c < baseline.json) bytes"
+ echo "pr.json size: $(wc -c < pr.json) bytes"
- name: Store benchmark result (PR)
uses: benchmark-action/github-action-benchmark@v1
- if: github.event_name == 'pull_request'
+ if: github.event_name == 'pull_request' && hashFiles('pr.json') != ''
+ continue-on-error: true
with:
name: Parse Server Performance
- tool: 'customBiggerIsBetter'
+ tool: 'customSmallerIsBetter'
output-file-path: pr.json
github-token: ${{ secrets.GITHUB_TOKEN }}
auto-push: false
From 66422cf31c8f8db312155236e58c3d79e5e988c6 Mon Sep 17 00:00:00 2001
From: Manuel Trezza <5673677+mtrezza@users.noreply.github.com>
Date: Sat, 8 Nov 2025 20:46:15 +0100
Subject: [PATCH 04/12] name
---
.github/workflows/ci-performance.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/ci-performance.yml b/.github/workflows/ci-performance.yml
index 82fc1d9df5..8988adf10c 100644
--- a/.github/workflows/ci-performance.yml
+++ b/.github/workflows/ci-performance.yml
@@ -19,7 +19,7 @@ env:
jobs:
performance-check:
- name: Run Performance Benchmarks
+ name: Benchmarks
runs-on: ubuntu-latest
timeout-minutes: 30
permissions:
From ae8d3b8deb96b5ee6f24c065f09c31273fd9e8a1 Mon Sep 17 00:00:00 2001
From: Manuel Trezza <5673677+mtrezza@users.noreply.github.com>
Date: Sat, 8 Nov 2025 20:48:57 +0100
Subject: [PATCH 05/12] Update ci-performance.yml
---
.github/workflows/ci-performance.yml | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/.github/workflows/ci-performance.yml b/.github/workflows/ci-performance.yml
index 8988adf10c..4113d19410 100644
--- a/.github/workflows/ci-performance.yml
+++ b/.github/workflows/ci-performance.yml
@@ -1,6 +1,6 @@
name: ci-performance
on:
- pull_request:
+ pull_request_target:
branches:
- alpha
- beta
@@ -11,7 +11,7 @@ on:
- '**.md'
- 'docs/**'
- '.github/**'
- - '!.github/workflows/performance.yml'
+ - '!.github/workflows/ci-performance.yml'
env:
NODE_VERSION: 24.11.0
@@ -23,9 +23,9 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 30
permissions:
- contents: write
+ contents: read
pull-requests: write
- deployments: write
+ issues: write
steps:
- name: Checkout base branch
From 5a16b59455e8e73c8f480421a9474e1ac3c173d6 Mon Sep 17 00:00:00 2001
From: Manuel Trezza <5673677+mtrezza@users.noreply.github.com>
Date: Sat, 8 Nov 2025 20:51:34 +0100
Subject: [PATCH 06/12] Update ci-performance.yml
---
.github/workflows/ci-performance.yml | 2 --
1 file changed, 2 deletions(-)
diff --git a/.github/workflows/ci-performance.yml b/.github/workflows/ci-performance.yml
index 4113d19410..fcf044f5fd 100644
--- a/.github/workflows/ci-performance.yml
+++ b/.github/workflows/ci-performance.yml
@@ -10,8 +10,6 @@ on:
paths-ignore:
- '**.md'
- 'docs/**'
- - '.github/**'
- - '!.github/workflows/ci-performance.yml'
env:
NODE_VERSION: 24.11.0
From e64f29e21896d2bdebb80dbc85c3c709946b370d Mon Sep 17 00:00:00 2001
From: Manuel Trezza <5673677+mtrezza@users.noreply.github.com>
Date: Sat, 8 Nov 2025 20:53:32 +0100
Subject: [PATCH 07/12] Update ci-performance.yml
---
.github/workflows/ci-performance.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/workflows/ci-performance.yml b/.github/workflows/ci-performance.yml
index fcf044f5fd..13a9f1e5ac 100644
--- a/.github/workflows/ci-performance.yml
+++ b/.github/workflows/ci-performance.yml
@@ -1,6 +1,6 @@
name: ci-performance
on:
- pull_request_target:
+ pull_request:
branches:
- alpha
- beta
From 0479b882fbb1f900bacfa80724f7700ea53d3de3 Mon Sep 17 00:00:00 2001
From: Manuel Trezza <5673677+mtrezza@users.noreply.github.com>
Date: Sat, 8 Nov 2025 21:04:56 +0100
Subject: [PATCH 08/12] fix
---
.github/workflows/ci-performance.yml | 102 ++++++++++-----------------
1 file changed, 37 insertions(+), 65 deletions(-)
diff --git a/.github/workflows/ci-performance.yml b/.github/workflows/ci-performance.yml
index 13a9f1e5ac..fc7a05a396 100644
--- a/.github/workflows/ci-performance.yml
+++ b/.github/workflows/ci-performance.yml
@@ -15,15 +15,16 @@ env:
NODE_VERSION: 24.11.0
MONGODB_VERSION: 8.0.4
+permissions:
+ contents: read
+ pull-requests: write
+ issues: write
+
jobs:
performance-check:
name: Benchmarks
runs-on: ubuntu-latest
timeout-minutes: 30
- permissions:
- contents: read
- pull-requests: write
- issues: write
steps:
- name: Checkout base branch
@@ -203,71 +204,42 @@ jobs:
path: comparison.md
retention-days: 30
+ - name: Prepare comment body
+ if: github.event_name == 'pull_request'
+ run: |
+ echo "## Performance Impact Report" > comment.md
+ echo "" >> comment.md
+ if [ -f comparison.md ]; then
+ cat comparison.md >> comment.md
+ else
+ echo "⚠️ Could not generate performance comparison." >> comment.md
+ fi
+ echo "" >> comment.md
+ echo "" >> comment.md
+ echo "<details><summary>📊 View detailed results</summary>" >> comment.md
+ echo "" >> comment.md
+ echo "" >> comment.md
+ echo "### Baseline Results" >> comment.md
+ echo "\`\`\`json" >> comment.md
+ cat baseline.json >> comment.md
+ echo "\`\`\`" >> comment.md
+ echo "" >> comment.md
+ echo "### PR Results" >> comment.md
+ echo "\`\`\`json" >> comment.md
+ cat pr.json >> comment.md
+ echo "\`\`\`" >> comment.md
+ echo "" >> comment.md
+ echo "</details>" >> comment.md
+ echo "" >> comment.md
+ echo "*Benchmarks ran with ${BENCHMARK_ITERATIONS:-100} iterations per test on Node.js ${{ env.NODE_VERSION }}*" >> comment.md
+
- name: Comment PR with results
if: github.event_name == 'pull_request'
- uses: actions/github-script@v7
+ uses: thollander/actions-comment-pull-request@v2
continue-on-error: true
with:
- github-token: ${{ secrets.GITHUB_TOKEN }}
- script: |
- const fs = require('fs');
-
- let comparisonMd = '';
- try {
- comparisonMd = fs.readFileSync('comparison.md', 'utf8');
- } catch (e) {
- comparisonMd = '⚠️ Could not generate performance comparison.';
- }
-
- const body = `## Performance Impact Report
-
- ${comparisonMd}
-
-
- 📊 View detailed results
-
- ### Baseline Results
- \`\`\`json
- ${fs.readFileSync('baseline.json', 'utf8')}
- \`\`\`
-
- ### PR Results
- \`\`\`json
- ${fs.readFileSync('pr.json', 'utf8')}
- \`\`\`
-
-
-
- *Benchmarks ran with ${process.env.BENCHMARK_ITERATIONS || '100'} iterations per test on Node.js ${process.env.NODE_VERSION}*
- `;
-
- // Find existing performance comment
- const comments = await github.rest.issues.listComments({
- owner: context.repo.owner,
- repo: context.repo.repo,
- issue_number: context.issue.number,
- });
-
- const existingComment = comments.data.find(comment =>
- comment.user.login === 'github-actions[bot]' &&
- comment.body.includes('Performance Impact Report')
- );
-
- if (existingComment) {
- await github.rest.issues.updateComment({
- owner: context.repo.owner,
- repo: context.repo.repo,
- comment_id: existingComment.id,
- body: body,
- });
- } else {
- await github.rest.issues.createComment({
- owner: context.repo.owner,
- repo: context.repo.repo,
- issue_number: context.issue.number,
- body: body,
- });
- }
+ filePath: comment.md
+ comment_tag: performance-benchmark
+ mode: recreate
- name: Generate job summary
if: always()
From a3143669146e898f256122cc91f81612789c6bb9 Mon Sep 17 00:00:00 2001
From: Manuel Trezza <5673677+mtrezza@users.noreply.github.com>
Date: Sat, 8 Nov 2025 21:09:31 +0100
Subject: [PATCH 09/12] Update ci-performance.yml
---
.github/workflows/ci-performance.yml | 44 ++++++++++++++++++++++++++--
1 file changed, 42 insertions(+), 2 deletions(-)
diff --git a/.github/workflows/ci-performance.yml b/.github/workflows/ci-performance.yml
index fc7a05a396..3cfd42e29b 100644
--- a/.github/workflows/ci-performance.yml
+++ b/.github/workflows/ci-performance.yml
@@ -48,9 +48,24 @@ jobs:
- name: Run baseline benchmarks
id: baseline
run: |
+ echo "Checking if benchmark script exists..."
+ if [ ! -f "benchmark/performance.js" ]; then
+ echo "⚠️ Benchmark script not found in base branch - this is expected for new features"
+ echo "Skipping baseline benchmark"
+ echo '[]' > baseline.json
+ echo "Baseline: N/A (benchmark script not in base branch)" > baseline-output.txt
+ exit 0
+ fi
+ echo "Running baseline benchmarks..."
npm run benchmark > baseline-output.txt 2>&1 || true
+ echo "Benchmark command completed with exit code: $?"
+ echo "Output file size: $(wc -c < baseline-output.txt) bytes"
+ echo "--- Begin baseline-output.txt ---"
+ cat baseline-output.txt
+ echo "--- End baseline-output.txt ---"
# Extract JSON from output (everything between first [ and last ])
sed -n '/^\[/,/^\]/p' baseline-output.txt > baseline.json || echo '[]' > baseline.json
+ echo "Extracted JSON size: $(wc -c < baseline.json) bytes"
echo "Baseline benchmark results:"
cat baseline.json
continue-on-error: true
@@ -86,9 +101,16 @@ jobs:
- name: Run PR benchmarks
id: pr-bench
run: |
+ echo "Running PR benchmarks..."
npm run benchmark > pr-output.txt 2>&1 || true
+ echo "Benchmark command completed with exit code: $?"
+ echo "Output file size: $(wc -c < pr-output.txt) bytes"
+ echo "--- Begin pr-output.txt ---"
+ cat pr-output.txt
+ echo "--- End pr-output.txt ---"
# Extract JSON from output (everything between first [ and last ])
sed -n '/^\[/,/^\]/p' pr-output.txt > pr.json || echo '[]' > pr.json
+ echo "Extracted JSON size: $(wc -c < pr.json) bytes"
echo "PR benchmark results:"
cat pr.json
continue-on-error: true
@@ -148,8 +170,26 @@ jobs:
process.exit(0);
}
- if (!Array.isArray(baseline) || !Array.isArray(pr) || baseline.length === 0 || pr.length === 0) {
- console.log('⚠️ Benchmark results are empty or invalid');
+ // Handle case where baseline doesn't exist (new feature)
+ if (!Array.isArray(baseline) || baseline.length === 0) {
+ if (!Array.isArray(pr) || pr.length === 0) {
+ console.log('⚠️ Benchmark results are empty or invalid');
+ process.exit(0);
+ }
+ console.log('# Performance Benchmark Results\n');
+ console.log('> ℹ️ Baseline not available - this appears to be a new feature\n');
+ console.log('| Benchmark | Value | Details |');
+ console.log('|-----------|-------|---------|');
+ pr.forEach(result => {
+ console.log(\`| \${result.name} | \${result.value.toFixed(2)} ms | \${result.extra} |\`);
+ });
+ console.log('');
+ console.log('✅ **New benchmarks established for this feature.**');
+ process.exit(0);
+ }
+
+ if (!Array.isArray(pr) || pr.length === 0) {
+ console.log('⚠️ PR benchmark results are empty or invalid');
process.exit(0);
}
From 3233bc13bc375fe677e861a891fd7664f1cc2be8 Mon Sep 17 00:00:00 2001
From: Manuel Trezza <5673677+mtrezza@users.noreply.github.com>
Date: Sat, 8 Nov 2025 21:15:32 +0100
Subject: [PATCH 10/12] fix
---
benchmark/performance.js | 34 +++++++++++++++++++++++-----------
1 file changed, 23 insertions(+), 11 deletions(-)
diff --git a/benchmark/performance.js b/benchmark/performance.js
index 5d472e1501..831a57db37 100644
--- a/benchmark/performance.js
+++ b/benchmark/performance.js
@@ -43,13 +43,21 @@ async function initializeParseServer() {
app.use('/parse', parseServer.app);
- return new Promise((resolve) => {
- const server = app.listen(1337, () => {
+ return new Promise((resolve, reject) => {
+ const server = app.listen(1337, (err) => {
+ if (err) {
+ reject(new Error(`Failed to start server: ${err.message}`));
+ return;
+ }
Parse.initialize(APP_ID);
Parse.masterKey = MASTER_KEY;
Parse.serverURL = SERVER_URL;
resolve(server);
});
+
+ server.on('error', (err) => {
+ reject(new Error(`Server error: ${err.message}`));
+ });
});
}
@@ -57,16 +65,20 @@ async function initializeParseServer() {
* Clean up database between benchmarks
*/
async function cleanupDatabase() {
- if (!mongoClient) {
- mongoClient = await MongoClient.connect(MONGODB_URI);
- }
- const db = mongoClient.db();
- const collections = await db.listCollections().toArray();
+ try {
+ if (!mongoClient) {
+ mongoClient = await MongoClient.connect(MONGODB_URI);
+ }
+ const db = mongoClient.db();
+ const collections = await db.listCollections().toArray();
- for (const collection of collections) {
- if (!collection.name.startsWith('system.')) {
- await db.collection(collection.name).deleteMany({});
+ for (const collection of collections) {
+ if (!collection.name.startsWith('system.')) {
+ await db.collection(collection.name).deleteMany({});
+ }
}
+ } catch (error) {
+ throw new Error(`Failed to cleanup database: ${error.message}`);
}
}
@@ -223,11 +235,11 @@ async function benchmarkUserSignup() {
let counter = 0;
return measureOperation('User Signup', async () => {
+ counter++;
const user = new Parse.User();
user.set('username', `benchmark_user_${Date.now()}_${counter}`);
user.set('password', 'benchmark_password');
user.set('email', `benchmark${counter}@example.com`);
- counter++;
await user.signUp();
}, Math.floor(ITERATIONS / 10)); // Fewer iterations for user operations
}
From 6a274651fbe991767e967685deea1a9d63987ad3 Mon Sep 17 00:00:00 2001
From: Manuel Trezza <5673677+mtrezza@users.noreply.github.com>
Date: Sat, 8 Nov 2025 21:18:22 +0100
Subject: [PATCH 11/12] Update CONTRIBUTING.md
---
CONTRIBUTING.md | 58 ++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 57 insertions(+), 1 deletion(-)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 01c88df10c..f79caa4236 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -21,9 +21,13 @@
- [Good to Know](#good-to-know)
- [Troubleshooting](#troubleshooting)
- [Please Do's](#please-dos)
- - [TypeScript Tests](#typescript-tests)
+ - [TypeScript Tests](#typescript-tests)
- [Test against Postgres](#test-against-postgres)
- [Postgres with Docker](#postgres-with-docker)
+ - [Performance Testing](#performance-testing)
+ - [Adding Tests](#adding-tests)
+ - [Adding Benchmarks](#adding-benchmarks)
+ - [Benchmark Guidelines](#benchmark-guidelines)
- [Breaking Changes](#breaking-changes)
- [Deprecation Policy](#deprecation-policy)
- [Feature Considerations](#feature-considerations)
@@ -298,6 +302,58 @@ RUN chmod +x /docker-entrypoint-initdb.d/setup-dbs.sh
Note that the script above will ONLY be executed during initialization of the container with no data in the database, see the official [Postgres image](https://hub.docker.com/_/postgres) for details. If you want to use the script to run again be sure there is no data in the /var/lib/postgresql/data of the container.
+### Performance Testing
+
+Parse Server includes an automated performance benchmarking system that runs on pull requests to detect performance regressions and track improvements over time.
+
+#### Adding Tests
+
+You should consider adding performance benchmarks if your contribution:
+
+- **Introduces a performance-critical feature**: Features that will be frequently used in production environments, such as new query operations, authentication methods, or data processing functions.
+- **Modifies existing critical paths**: Changes to core functionality like object CRUD operations, query execution, user authentication, file operations, or Cloud Code execution.
+- **Has potential performance impact**: Any change that affects database operations, network requests, data parsing, caching mechanisms, or algorithmic complexity.
+- **Optimizes performance**: If your PR specifically aims to improve performance, adding benchmarks helps verify the improvement and prevents future regressions.
+
+#### Adding Benchmarks
+
+Performance benchmarks are located in [`benchmark/performance.js`](benchmark/performance.js). To add a new benchmark:
+
+1. **Identify the operation to benchmark**: Determine the specific operation you want to measure (e.g., a new query type, a new API endpoint).
+
+2. **Create a benchmark function**: Follow the existing patterns in `benchmark/performance.js`:
+ ```javascript
+ async function benchmarkNewFeature() {
+ return measureOperation('Feature Name', async () => {
+ // Your operation to benchmark
+      await someOperation();
+ }, ITERATIONS);
+ }
+ ```
+
+3. **Add to benchmark suite**: Register your benchmark in the `runBenchmarks()` function:
+ ```javascript
+ console.error('Running New Feature benchmark...');
+ await cleanupDatabase();
+ results.push(await benchmarkNewFeature());
+ ```
+
+4. **Test locally**: Run the benchmarks locally to verify they work:
+ ```bash
+ npm run benchmark:quick # Quick test with 10 iterations
+ npm run benchmark # Full test with 100 iterations
+ ```
+
+For new features where no baseline exists, the CI will establish new benchmarks that future PRs will be compared against.
+
+#### Benchmark Guidelines
+
+- **Keep benchmarks focused**: Each benchmark should test a single, well-defined operation.
+- **Use realistic data**: Test with data that reflects real-world usage patterns.
+- **Clean up between runs**: Use `cleanupDatabase()` to ensure consistent test conditions.
+- **Consider iteration count**: Use fewer iterations for expensive operations (see `ITERATIONS` environment variable).
+- **Document what you're testing**: Add clear comments explaining what the benchmark measures and why it's important.
+
## Breaking Changes
Breaking changes should be avoided whenever possible. For a breaking change to be accepted, the benefits of the change have to clearly outweigh the costs of developers having to adapt their deployments. If a breaking change is only cosmetic it will likely be rejected and preferred to become obsolete organically during the course of further development, unless it is required as part of a larger change. Breaking changes should follow the [Deprecation Policy](#deprecation-policy).
From 2a0d1e8e3747cc7fd8ff4ccd182bc4bfd43ef4b4 Mon Sep 17 00:00:00 2001
From: Manuel Trezza <5673677+mtrezza@users.noreply.github.com>
Date: Sat, 8 Nov 2025 21:20:51 +0100
Subject: [PATCH 12/12] Update ci-performance.yml
---
.github/workflows/ci-performance.yml | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/.github/workflows/ci-performance.yml b/.github/workflows/ci-performance.yml
index 3cfd42e29b..4cde4d97b0 100644
--- a/.github/workflows/ci-performance.yml
+++ b/.github/workflows/ci-performance.yml
@@ -213,12 +213,12 @@ jobs:
const changeStr = change > 0 ? \`+\${change.toFixed(1)}%\` : \`\${change.toFixed(1)}%\`;
let status = '✅';
- if (change > 10) {
- status = '⚠️ Slower';
- hasRegression = true;
- } else if (change > 20) {
+ if (change > 20) {
status = '❌ Much Slower';
hasRegression = true;
+ } else if (change > 10) {
+ status = '⚠️ Slower';
+ hasRegression = true;
} else if (change < -10) {
status = '🚀 Faster';
hasImprovement = true;