node/test/sequential/test-net-bytes-per-incoming-chunk-overhead.js
Anna Henningsen 3217e8e66f
src: re-add Realloc() shrink after reading stream data
This would otherwise keep a lot of unused memory lying around,
and in particular add up to a page per chunk of memory overhead
for network reads, potentially opening a DoS vector if the resulting
`Buffer` objects are kept around indefinitely (e.g. stored in a list
and not concatenated until the socket finishes).

This fixes CVE-2018-7164.
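
A minimal sketch of the retention pattern described above, assuming a plain TCP consumer (illustrative only, not taken from the Node.js sources): without the Realloc() shrink, storing each received `chunk` directly can keep up to a page of extra memory alive per chunk, while copying the chunk first retains only its own bytes (provided the original chunk is not referenced elsewhere).

// Hypothetical consumer: Buffer.from(chunk) copies the data, so the stored
// buffer no longer references the allocation made for the underlying read.
const net = require('net');

const chunks = [];
const server = net.createServer((socket) => {
  socket.on('data', (chunk) => {
    chunks.push(Buffer.from(chunk)); // copy instead of retaining the read slice
  });
  socket.on('end', () => {
    console.log('received', Buffer.concat(chunks).length, 'bytes');
    server.close();
  });
});
server.listen(0);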

Refs: https://github.com/nodejs-private/security/issues/186
Refs: 7c4b09b24b
PR-URL: https://github.com/nodejs-private/node-private/pull/128
Reviewed-By: Michael Dawson <michael_dawson@ca.ibm.com>
Reviewed-By: Evan Lucas <evanlucas@me.com>
2018-06-12 20:46:15 -04:00

// Flags: --expose-gc
'use strict';

const common = require('../common');
const assert = require('assert');
const net = require('net');

// Tests that, when receiving small chunks, we do not keep the full length
// of the original allocation for the libuv read call in memory.

let client;
let baseRSS;
const receivedChunks = [];
const N = 250000;

const server = net.createServer(common.mustCall((socket) => {
  // Baseline RSS, sampled before any chunks have been retained.
  baseRSS = process.memoryUsage().rss;

  socket.setNoDelay(true);
  socket.on('data', (chunk) => {
    receivedChunks.push(chunk);
    if (receivedChunks.length < N) {
      // Ping-pong: have the client send another 1-byte chunk until the server
      // has accumulated N small chunks.
      client.write('a');
    } else {
      client.end();
      server.close();
    }
  });
})).listen(0, common.mustCall(() => {
  client = net.connect(server.address().port);
  client.setNoDelay(true);
  client.write('hello!');
}));

process.on('exit', () => {
  // Run a GC first so that buffers which are no longer referenced can be
  // released and only memory retained by the stored chunks is measured.
  global.gc();
  const bytesPerChunk =
    (process.memoryUsage().rss - baseRSS) / receivedChunks.length;
  // We should always have less than one page (usually ~ 4 kB) per chunk.
  assert(bytesPerChunk < 512, `measured ${bytesPerChunk} bytes per chunk`);
});
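
To run this test directly, outside the Node.js test runner (which picks up the `// Flags:` comment automatically), pass the flag by hand, e.g.:

node --expose-gc test/sequential/test-net-bytes-per-incoming-chunk-overhead.js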