This error occurs when a write operation to a Node.js stream takes longer than the configured timeout threshold. It typically happens when downstream consumers are slow or unresponsive, causing backpressure to accumulate and the write operation to stall.
The "Stream write timeout" error indicates that a write operation to a writable stream has exceeded an enforced time limit. This typically occurs in scenarios where: 1. The receiving end (downstream consumer) is not consuming data fast enough, causing the internal write buffer to fill up 2. Network conditions are degraded, introducing latency into the transmission 3. The process handling the stream data is blocked or overloaded 4. A timeout has been explicitly set on the socket or stream, and the operation takes too long to complete This error is different from a simple failure—it means the operation started but couldn't complete within the allowed time window. It often occurs during large file transfers, streaming large amounts of data, or when piping between processes with mismatched throughput.
Add error handlers to catch and respond to timeout errors gracefully:
const fs = require('fs');

const stream = fs.createWriteStream('large-file.txt');
// Example payload; in practice this is whatever you are writing
const data = Buffer.from('some payload');

stream.on('error', (err) => {
  if (err.code === 'ETIMEDOUT') {
    console.error('Write operation timed out - receiver too slow');
    // Pause and retry with a longer timeout, or fail gracefully
  } else {
    console.error('Stream error:', err);
  }
});

stream.write(data, (err) => {
  if (err && err.code === 'ETIMEDOUT') {
    console.error('Write callback timeout');
  }
});

For HTTP/socket operations:
// Inside an HTTP server request handler, req.socket is the underlying TCP socket
const socket = req.socket;

socket.on('timeout', () => {
  console.error('Socket timeout during write');
  socket.destroy();
});

socket.setTimeout(60000); // 60 second timeout

The key to avoiding write timeouts is to respect backpressure: pause writing when the buffer is full and resume when it drains:
const fs = require('fs');

function writeWithBackpressure(readStream, writeStream) {
  let isPaused = false;

  readStream.on('data', (chunk) => {
    const canContinue = writeStream.write(chunk);
    if (!canContinue) {
      console.log('Buffer full, pausing read');
      readStream.pause();
      isPaused = true;
    }
  });

  writeStream.on('drain', () => {
    if (isPaused) {
      console.log('Buffer drained, resuming read');
      readStream.resume();
      isPaused = false;
    }
  });

  // Close the writable side once all input has been read
  readStream.on('end', () => writeStream.end());
}

const input = fs.createReadStream('input.txt');
const output = fs.createWriteStream('output.txt');
writeWithBackpressure(input, output);

Or use automatic piping, which handles backpressure for you:
const input = fs.createReadStream('input.txt');
const output = fs.createWriteStream('output.txt');

input.pipe(output); // Automatically handles backpressure

If the timeout is too strict for your use case, increase it based on the expected operation duration:
const net = require('net');

const socket = net.createConnection({
  host: 'server.example.com',
  port: 3000
});

// Set a longer timeout for large transfers (5 minutes)
socket.setTimeout(300000);

socket.on('timeout', () => {
  console.log('Write operation exceeded 5 minute timeout');
  socket.destroy();
});

const largeData = Buffer.alloc(50 * 1024 * 1024); // example 50 MB payload
socket.write(largeData);

For HTTP requests:
const http = require('http');

const req = http.request({
  hostname: 'example.com',
  path: '/upload',
  method: 'POST',
  timeout: 300000 // 5 minutes
}, (res) => {
  console.log('Response received');
});

req.on('timeout', () => {
  console.error('Request timeout');
  req.destroy(); // abort() is deprecated; destroy() tears down the request
});

req.write(largeData); // same large payload as in the socket example
req.end();

The stream.pipeline() API provides better error handling and can help prevent timeout scenarios:
const { pipeline } = require('stream');
const fs = require('fs');
const zlib = require('zlib');

pipeline(
  fs.createReadStream('input.txt'),
  zlib.createGzip(),
  fs.createWriteStream('input.txt.gz'),
  (err) => {
    if (err) {
      if (err.code === 'ETIMEDOUT') {
        console.error('Pipeline write operation timed out');
      } else {
        console.error('Pipeline failed:', err);
      }
      // Cleanup is automatic
    } else {
      console.log('Pipeline succeeded');
    }
  }
);

Pipeline automatically:
- Respects backpressure
- Propagates errors from any stage
- Cleans up all streams on error
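On Node.js 15 and later, the promise-based variant of pipeline from 'stream/promises' gives the same guarantees with async/await. A minimal sketch of the same gzip flow as above, assuming those file names:

const { pipeline } = require('stream/promises');
const fs = require('fs');
const zlib = require('zlib');

async function compressFile() {
  try {
    await pipeline(
      fs.createReadStream('input.txt'),
      zlib.createGzip(),
      fs.createWriteStream('input.txt.gz')
    );
    console.log('Pipeline succeeded');
  } catch (err) {
    // Same error surface as the callback form, including ETIMEDOUT from sockets
    console.error('Pipeline failed:', err);
  }
}

compressFile();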
If data is backing up, increase the stream buffer size so more data can be buffered before backpressure kicks in:

const fs = require('fs');

// fs.createReadStream defaults to a 64 KB highWaterMark; most other streams default to 16 KB
const readStream = fs.createReadStream('large-file.bin', {
  highWaterMark: 256 * 1024 // 256 KB buffer
});

const writeStream = fs.createWriteStream('output.bin', {
  highWaterMark: 256 * 1024 // Larger buffer = less frequent backpressure
});

// Respect backpressure even with a larger buffer
let isPaused = false;

readStream.on('data', (chunk) => {
  const ok = writeStream.write(chunk);
  if (!ok) {
    readStream.pause();
    isPaused = true;
  }
});

writeStream.on('drain', () => {
  if (isPaused) {
    readStream.resume();
    isPaused = false;
  }
});

Note: Larger buffers use more memory but can reduce backpressure-induced stalls.
Add visibility into stream throughput and timeout conditions:
const { pipeline, Transform } = require('stream');
const fs = require('fs');

function createMonitor() {
  let bytesWritten = 0;
  const startTime = Date.now();
  return {
    logStats: () => {
      const elapsedMs = Date.now() - startTime;
      const throughputMBps = (bytesWritten / (1024 * 1024)) / (elapsedMs / 1000);
      console.log(`Throughput: ${throughputMBps.toFixed(2)} MB/s, Total: ${(bytesWritten / 1024 / 1024).toFixed(2)} MB`);
    },
    track: (bytes) => {
      bytesWritten += bytes;
    }
  };
}

const monitor = createMonitor();
let lastLog = Date.now();

pipeline(
  fs.createReadStream('large-file.bin'),
  // Pass-through Transform stream that tracks bytes as they flow by
  new Transform({
    transform(chunk, encoding, callback) {
      monitor.track(chunk.length);
      // Log stats at most once per second
      if (Date.now() - lastLog >= 1000) {
        monitor.logStats();
        lastLog = Date.now();
      }
      callback(null, chunk);
    }
  }),
  fs.createWriteStream('output.bin'),
  (err) => {
    if (err) {
      console.error('Pipeline error:', err);
    } else {
      monitor.logStats();
    }
  }
);

Backpressure and Memory Management: Stream timeouts often mask underlying backpressure issues. If writes are timing out, it usually means the receiving end can't keep up. Ignoring backpressure and retrying writes will cause memory to accumulate in Node.js process buffers, eventually leading to out-of-memory errors. Always respect the return value of stream.write() and listen for the 'drain' event.
highWaterMark Tuning: The defaults (16 KB for most binary streams, 64 KB for fs.createReadStream, and 16 objects for object-mode streams) might be too small for high-throughput scenarios. However, increasing them too much wastes memory. Monitor your application's throughput and adjust based on actual performance data. For large file transfers, 256 KB to 1 MB is often appropriate.
TCP Window Size and Network Considerations: Even with proper backpressure handling in Node.js, network-level issues can cause timeouts. TCP window scaling and network middleware (proxies, load balancers) can affect throughput. If timeouts persist despite correct code, investigate network-level bottlenecks.
Socket vs Stream Timeouts: Node.js sockets have their own timeout mechanism separate from stream write timeouts. A socket timeout will trigger a 'timeout' event and often an ETIMEDOUT error. Always set appropriate socket timeouts for network I/O, as they serve as a safety net when backpressure handling fails.
Chunked Encoding and HTTP: When streaming HTTP responses, Node.js uses chunked transfer encoding by default. Very large chunks can exceed TCP window sizes and cause delays. Consider limiting individual chunk sizes to 64KB-256KB for HTTP streaming to balance throughput with responsiveness.
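One way to cap chunk sizes is a pass-through Transform that slices oversized chunks before they reach the response. A minimal sketch; the 64 KB limit and the createChunkLimiter name are illustrative choices, not a built-in API:

const { Transform } = require('stream');

// Splits any incoming Buffer chunk into pieces no larger than maxBytes
function createChunkLimiter(maxBytes = 64 * 1024) {
  return new Transform({
    transform(chunk, encoding, callback) {
      for (let offset = 0; offset < chunk.length; offset += maxBytes) {
        this.push(chunk.subarray(offset, offset + maxBytes));
      }
      callback();
    }
  });
}

// Usage inside an HTTP handler (res is the server response stream):
// fs.createReadStream('large-file.bin').pipe(createChunkLimiter()).pipe(res);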
Production Monitoring: In production, log all timeout errors with context (bytes written, duration, throughput estimate). This data helps identify whether timeouts are caused by slow clients, network issues, or application performance problems. Tools like New Relic or Datadog can track stream performance across your infrastructure.
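As a sketch of that kind of contextual logging (the field names and the stats object are illustrative, not a standard schema; the counters come from your own tracking code):

function logStreamTimeout(err, stats) {
  const durationMs = Date.now() - stats.startTime;
  console.error(JSON.stringify({
    event: 'stream_write_timeout',
    code: err.code,
    bytesWritten: stats.bytesWritten,
    durationMs,
    throughputKBps: (stats.bytesWritten / 1024) / (durationMs / 1000)
  }));
}

// Example wiring from an error handler:
// stream.on('error', (err) => {
//   if (err.code === 'ETIMEDOUT') logStreamTimeout(err, { bytesWritten, startTime });
// });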
Graceful Degradation: Instead of failing immediately on timeout, consider implementing retry logic with exponential backoff or fallback mechanisms (e.g., switch to chunked delivery, compress data, reduce payload size).
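A minimal retry sketch with exponential backoff, assuming an operation that is safe to re-run; sendChunk() here is a hypothetical promise-returning write helper, not a Node.js API:

const delay = (ms) => new Promise((resolve) => setTimeout(resolve, ms));

// Retries a promise-returning operation on timeout, doubling the wait each attempt
async function writeWithRetry(operation, maxAttempts = 4, baseDelayMs = 500) {
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
    try {
      return await operation();
    } catch (err) {
      const retriable = err.code === 'ETIMEDOUT' && attempt < maxAttempts;
      if (!retriable) throw err;
      const waitMs = baseDelayMs * 2 ** (attempt - 1);
      console.warn(`Write timed out (attempt ${attempt}), retrying in ${waitMs} ms`);
      await delay(waitMs);
    }
  }
}

// Usage: writeWithRetry(() => sendChunk(chunk)).catch((err) => { /* fall back or fail */ });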