diff --git a/lib/base/connection.js b/lib/base/connection.js
index ea91f1813a..b8bc90af8a 100644
--- a/lib/base/connection.js
+++ b/lib/base/connection.js
@@ -392,7 +392,7 @@ class BaseConnection extends EventEmitter {
     secureSocket.on('data', (data) => {
       this.packetParser.execute(data);
     });
-    this.write = (buffer) => secureSocket.write(buffer);
+    this.stream = secureSocket;
   }
 
   protocolError(message, code) {
diff --git a/test/common.test.cjs b/test/common.test.cjs
index edd3da1f3b..c08c8805b6 100644
--- a/test/common.test.cjs
+++ b/test/common.test.cjs
@@ -12,7 +12,7 @@ const config = {
   user: process.env.MYSQL_USER || 'root',
   password: (process.env.CI ? process.env.MYSQL_PASSWORD : '') || '',
   database: process.env.MYSQL_DATABASE || 'test',
-  compress: process.env.MYSQL_USE_COMPRESSION,
+  compress: process.env.MYSQL_USE_COMPRESSION === '1',
   port: process.env.MYSQL_PORT || 3306,
   disableEval,
 };
diff --git a/test/integration/connection/test-backpressure-load-data-infile.test.cjs b/test/integration/connection/test-backpressure-load-data-infile.test.cjs
new file mode 100644
index 0000000000..5026d0f592
--- /dev/null
+++ b/test/integration/connection/test-backpressure-load-data-infile.test.cjs
@@ -0,0 +1,110 @@
+'use strict';
+
+const { assert, log, skip, sleep, test } = require('poku');
+const common = require('../../common.test.cjs');
+const { Readable, Duplex } = require('node:stream');
+const Net = require('node:net');
+const driver = require('../../../index.js');
+
+if (common.config.compress) {
+  skip(
+    'skipping test with compression; LOAD DATA LOCAL INFILE backpressure does not work with compression'
+  );
+}
+
+class BigInput extends Readable {
+  count = 0;
+  MAX_EXPECTED_ROWS = 100_000;
+  onStart = null;
+
+  _read() {
+    if (this.onStart) {
+      this.onStart();
+      this.onStart = null;
+    }
+
+    if (this.count < this.MAX_EXPECTED_ROWS) {
+      this.count++;
+      const row = `${this.count}\n`;
+      this.push(row);
+    } else {
+      this.push(null);
+    }
+  }
+}
+
+test('load data infile backpressure on local stream', async () => {
+  const config = common.config;
+  const netStream = Net.connect(config.port, config.host);
+  netStream.setNoDelay(true);
+  await new Promise((resolve, reject) =>
+    netStream.once('connect', resolve).once('error', reject)
+  );
+
+  class NetworkInterceptor extends Duplex {
+    simulateWriteBackpressure = false;
+
+    constructor() {
+      super({ writableHighWaterMark: 65536 });
+      netStream.on('data', (data) => {
+        const continueReading = this.push(data);
+        if (!continueReading) {
+          netStream.pause();
+        }
+      });
+      netStream.on('error', (err) => this.destroy(err));
+    }
+
+    _read() {
+      netStream.resume();
+    }
+
+    _write(chunk, encoding, callback) {
+      netStream.write(chunk, encoding, (err) => {
+        if (err) {
+          callback(err);
+        } else if (!this.simulateWriteBackpressure) {
+          callback();
+        }
+      });
+    }
+  }
+
+  const interceptor = new NetworkInterceptor();
+  const connection = driver.createConnection({
+    ...config,
+    multipleStatements: true,
+    stream: interceptor,
+  });
+
+  const bigInput = new BigInput();
+  bigInput.onStart = () => (interceptor.simulateWriteBackpressure = true);
+
+  connection.query(
+    {
+      sql: `
+        set global local_infile = 1;
+        create temporary table test_load_data_backpressure (id varchar(100));
+        load data local infile "_" replace into table test_load_data_backpressure;
+      `,
+      infileStreamFactory: () => bigInput,
+    },
+    (err, result) => {
+      if (err) throw err;
+      log('Load complete', result);
+    }
+  );
+
+  await sleep(1000); // allow time for backpressure to take effect
+
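+  // the interceptor stopped acking writes as soon as BigInput produced its first
+  // row, so the connection's outgoing buffer filled past writableHighWaterMark;
+  // with working backpressure, reads from bigInput should have stopped early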
+  connection.close();
+  netStream.destroy();
+
+  assert.ok(
+    bigInput.count < bigInput.MAX_EXPECTED_ROWS,
+    `expected backpressure to stop infile stream at less than ${bigInput.MAX_EXPECTED_ROWS} rows (read ${bigInput.count} rows)`
+  );
+});
diff --git a/test/integration/connection/test-backpressure-result-streaming.test.cjs b/test/integration/connection/test-backpressure-result-streaming.test.cjs
new file mode 100644
index 0000000000..d329635123
--- /dev/null
+++ b/test/integration/connection/test-backpressure-result-streaming.test.cjs
@@ -0,0 +1,59 @@
+'use strict';
+
+const { assert, skip, sleep, test } = require('poku');
+const common = require('../../common.test.cjs');
+
+test('result event backpressure with pause/resume', async () => {
+  const connection = common.createConnection({
+    multipleStatements: true,
+  });
+
+  const mySqlVersion = await common.getMysqlVersion(connection);
+  if (mySqlVersion.major < 8) {
+    skip('MySQL >= 8.0 required to use CTE');
+  }
+
+  // in case of wrapping with TLS, get the underlying socket first so we can see the actual number of bytes read
+  const originalSocket = connection.stream;
+
+  // the full result set will be over 6 MB uncompressed; about 490 KB with compression
+  const largeQuery = `
+    SET SESSION cte_max_recursion_depth = 100000;
+    WITH RECURSIVE cte (n, s) AS (
+      SELECT 1, 'this is just to cause more bytes transferred for each row'
+      UNION ALL
+      SELECT n + 1, s
+      FROM cte
+      WHERE n < 100000
+    )
+    SELECT * FROM cte;
+  `;
+
+  let resultRowsCount = 0;
+  await new Promise((resolve, reject) =>
+    connection
+      .query(largeQuery)
+      .on('result', (row) => {
+        resultRowsCount++;
+        if (row.n === 1) {
+          connection.pause();
+          resolve();
+        }
+      })
+      .on('error', reject)
+  );
+
+  // if backpressure is not working, the bytes received will grow during this time, even though the connection is paused
+  await sleep(500);
+
+  assert.equal(resultRowsCount, 2, 'stop receiving result rows when paused');
+
+  // if backpressure is working, there should be less than 300 KB received;
+  // experimentally it appears to be around 100 KB but may vary if buffer sizes change
+  assert.ok(
+    originalSocket.bytesRead < 300000,
+    `Received ${originalSocket.bytesRead} bytes on paused connection`
+  );
+
+  connection.close();
+});
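
A note on the lib/base/connection.js hunk at the top: judging from the hunk alone, the old code shadowed the connection's write() with a closure over the secure socket, while this.stream apparently kept pointing at the plaintext socket, so stream-level signals such as pause()/resume() and write()'s buffer-full return value never reached the TLS layer. Assigning this.stream = secureSocket routes everything through a single stream. Below is a minimal sketch of that wiring under those assumptions; MiniConnection and its packetParser stub are hypothetical and only the startTLS body mirrors the patched code, not the driver's actual class.

'use strict';

const tls = require('node:tls');

// Hypothetical, simplified stand-in for the driver's connection class.
class MiniConnection {
  constructor(plainSocket) {
    this.stream = plainSocket;
    // stub parser; the real driver feeds MySQL packets through here
    this.packetParser = { execute(_data) {} };
  }

  startTLS(tlsOptions) {
    const secureSocket = tls.connect({ socket: this.stream, ...tlsOptions });
    secureSocket.on('data', (data) => this.packetParser.execute(data));
    // Re-point the stream instead of shadowing write(): pause(), resume()
    // and write() below now all act on the TLS layer, so its buffering
    // participates in backpressure.
    this.stream = secureSocket;
  }

  pause() { this.stream.pause(); }
  resume() { this.stream.resume(); }
  // Returns false when the stream's buffer is full; the caller should then
  // wait for 'drain' before writing more.
  write(buffer) { return this.stream.write(buffer); }
}

In Node streams, backpressure travels through exactly these touch points (write()'s boolean, the 'drain' event, and pause()/resume()), which is why both tests above observe it at the stream the connection actually exposes.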