node/test/parallel/test-zlib-flush-write-sync-interleaved.js
Anna Henningsen 518ffc1256
zlib: do not coalesce multiple .flush() calls
This is an approach to address the issue linked below. Previously,
when `.write()` and `.flush()` calls to a zlib stream were interleaved
synchronously (i.e. without waiting for these operations to finish),
multiple flush calls would have been coalesced into a single flushing
operation.

This patch changes behaviour so that each `.flush()` call corresponds
to one flushing operation on the underlying zlib resource, and the
order of operations is as if the `.flush()` call were a `.write()`
call.

One test had to be removed because it specifically tested the previous
behaviour.

As a drive-by fix, this also makes sure that all flush callbacks are
called. Previously, that was not the case.

Fixes: https://github.com/nodejs/node/issues/28478

PR-URL: https://github.com/nodejs/node/pull/28520
Reviewed-By: Rich Trott <rtrott@gmail.com>
Reviewed-By: Ruben Bridgewater <ruben@bridgewater.de>
Reviewed-By: Luigi Pinca <luigipinca@gmail.com>
2019-07-14 22:40:12 +02:00

58 lines
1.6 KiB
JavaScript

'use strict';
const common = require('../common');
const assert = require('assert');
const { createGzip, createGunzip, Z_PARTIAL_FLUSH } = require('zlib');
// Verify that .flush() behaves like .write() in terms of ordering, e.g. in
// a sequence like .write() + .flush() + .write() + .flush() each .flush() call
// only affects the data written before it.
// Refs: https://github.com/nodejs/node/issues/28478
const compress = createGzip();
const decompress = createGunzip();
decompress.setEncoding('utf8');

const events = [];
const compressedChunks = [];

// Interleave writes and flushes synchronously; each flush should emit exactly
// the compressed output of the chunk written just before it.
['abc', 'def', 'ghi'].forEach((chunk) => {
  compress.write(chunk, common.mustCall(() => events.push({ written: chunk })));
  compress.flush(Z_PARTIAL_FLUSH, common.mustCall(() => {
    events.push('flushed');
    const compressed = compress.read();
    if (compressed !== null)
      compressedChunks.push(compressed);
  }));
});

compress.end(common.mustCall(() => {
  events.push('compress end');
  writeToDecompress();
}));
function writeToDecompress() {
  // Feed the compressed chunks into the decompressor one at a time, in order
  // to verify that each flush produced a self-contained piece of output.
  const next = compressedChunks.shift();
  if (next === undefined) {
    decompress.end();
    return;
  }
  decompress.write(next, common.mustCall(() => {
    events.push({ read: decompress.read() });
    writeToDecompress();
  }));
}
process.on('exit', () => {
  // Every write must be followed by its own flush event, then the stream end,
  // then each original chunk read back intact from the decompressor.
  const chunks = ['abc', 'def', 'ghi'];
  const expected = [
    ...chunks.flatMap((chunk) => [{ written: chunk }, 'flushed']),
    'compress end',
    ...chunks.map((chunk) => ({ read: chunk }))
  ];
  assert.deepStrictEqual(events, expected);
});