feature not bug

Para Dox
2025-06-02 02:02:02 +07:00
parent a610057600
commit 9cdc73b298

@@ -610,8 +610,8 @@ class RPCProxy {
         code: error.code,
       }, 'Upstream stream error');
-      // Only destroy if response hasn't been sent yet
-      if (!res.headersSent && !res.writableEnded) {
+      // Only destroy if response hasn't been sent yet and isn't already destroyed
+      if (!res.headersSent && !res.writableEnded && !res.destroyed) {
         res.destroy();
       }
     });
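
Note: the guard above reads as a small helper. The sketch below is illustrative only (the safeDestroy name is hypothetical, not code from this repo); it shows the same idea: tear the response down only when no headers have been flushed and the stream is neither ended nor already destroyed, so a late upstream error on an already-closed response becomes a no-op.

// Hypothetical helper mirroring the guard in the hunk above.
// `res` is a Node.js http.ServerResponse (or Express response).
function safeDestroy(res) {
  // headersSent: status line and headers already flushed to the client
  // writableEnded: res.end() has already been called
  // destroyed: the underlying stream is already torn down
  if (!res.headersSent && !res.writableEnded && !res.destroyed) {
    res.destroy();
  }
}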
@@ -629,10 +629,22 @@ class RPCProxy {
       // Always capture raw chunks for comparison
       chunks.push(chunk);
-      // Stream data to client
-      if (!res.writableEnded) {
+      // Stream data to client - check both writableEnded and destroyed state
+      if (!res.writableEnded && !res.destroyed) {
         // Chain writes to handle backpressure properly
         writeQueue = writeQueue.then(() => new Promise((resolve) => {
+          // Double-check stream state before writing
+          if (res.destroyed || res.writableEnded) {
+            logger.debug({
+              requestId,
+              destroyed: res.destroyed,
+              writableEnded: res.writableEnded,
+              chunkSize: chunk.length,
+            }, 'Stream destroyed/ended before write, skipping chunk');
+            resolve();
+            return;
+          }
           try {
             const canContinue = res.write(chunk, (err) => {
               // Log per-chunk overhead
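
For context, this hunk tightens the promise-chained write queue: every chunk's write is appended to writeQueue, so writes (and any drain waits) are serialized, and the stream state is re-checked inside the queued task because the client can disconnect between the moment a chunk is enqueued and the moment its turn to write arrives. A minimal standalone sketch of that pattern, with assumed names (queueWrite is not from this codebase):

// Sketch of the promise-chained write queue; `res` is any Node.js Writable
// such as http.ServerResponse.
let writeQueue = Promise.resolve();

function queueWrite(res, chunk) {
  writeQueue = writeQueue.then(() => new Promise((resolve) => {
    // Re-check: the client may have gone away while this chunk waited
    // behind earlier writes in the queue.
    if (res.destroyed || res.writableEnded) {
      resolve();
      return;
    }
    const canContinue = res.write(chunk);
    if (canContinue) {
      resolve();
    } else {
      // Backpressure: hold the next chunk until the socket drains.
      // (A production version also listens for 'error'/'close' so the
      // queue cannot stall if the client disconnects mid-drain, as the
      // later hunks in this commit do.)
      res.once('drain', resolve);
    }
  }));
  return writeQueue;
}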
@@ -645,22 +657,49 @@ class RPCProxy {
                 }, 'High chunk processing overhead');
               }
               if (err) {
+                // Check if it's the specific "destroyed" error
+                if (err.message === 'Cannot call write after a stream was destroyed') {
+                  logger.debug({
+                    requestId,
+                    error: err.message,
+                    chunkSize: chunk.length,
+                    destroyed: res.destroyed,
+                    writableEnded: res.writableEnded,
+                  }, 'Stream was destroyed during write (expected race condition)');
+                } else {
                 logger.error({
                   requestId,
                   error: err.message,
                   chunkSize: chunk.length,
                 }, 'Error in write callback');
                 }
+              }
               resolve();
             });
-            if (!canContinue) {
+            if (!canContinue && !res.destroyed) {
               // Wait for drain event if write buffer is full
               logger.debug({
                 requestId,
                 chunkSize: chunk.length,
               }, 'Backpressure detected, waiting for drain');
-              res.once('drain', resolve);
+              // Set up drain listener with error handling
+              const drainHandler = () => resolve();
+              const errorHandler = (err) => {
+                res.removeListener('drain', drainHandler);
+                logger.debug({
+                  requestId,
+                  error: err.message,
+                }, 'Stream error while waiting for drain');
+                resolve();
+              };
+              res.once('drain', drainHandler);
+              res.once('error', errorHandler);
+              // Clean up error handler if drain happens first
+              res.once('drain', () => res.removeListener('error', errorHandler));
             } else {
               resolve();
             }
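
Two things happen in this hunk: the 'Cannot call write after a stream was destroyed' error is downgraded to a debug log, since it is an expected race when the client disconnects mid-write, and the bare res.once('drain', resolve) is replaced with a drain/error listener pair so the write queue cannot hang if the response errors while waiting for drain. A reduced sketch of that second pattern (waitForDrain is an assumed name, not from this repo):

function waitForDrain(res) {
  return new Promise((resolve) => {
    const onDrain = () => {
      res.removeListener('error', onError);
      resolve();
    };
    const onError = () => {
      res.removeListener('drain', onDrain);
      resolve(); // resolve rather than reject so the write queue keeps moving
    };
    res.once('drain', onDrain);
    res.once('error', onError);
  });
}

Node's built-in events.once(res, 'drain') is close to this, but it rejects when an 'error' is emitted first, which is why a manual pairing that resolves in both cases fits a keep-flowing write queue better.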
@@ -674,6 +713,13 @@ class RPCProxy {
             resolve(); // Continue even on error
           }
         }));
+      } else {
+        logger.debug({
+          requestId,
+          destroyed: res.destroyed,
+          writableEnded: res.writableEnded,
+          chunkSize: chunk.length,
+        }, 'Skipping chunk write - stream not writable');
       }
     });
@@ -708,11 +754,13 @@ class RPCProxy {
     }
     // End the response
-    if (!res.writableEnded) {
+    if (!res.writableEnded && !res.destroyed) {
       try {
         // If there's still data in the write buffer, wait for it to drain
         if (res.writableHighWaterMark && res.writableLength > 0) {
           res.once('drain', () => {
+            // Check again before ending
+            if (!res.writableEnded && !res.destroyed) {
             res.end(() => {
               const transferCompleteHR = Number(process.hrtime.bigint() - hrStartTime) / 1000000;
               logger.debug({
@@ -723,6 +771,13 @@ class RPCProxy {
                 transferCompleteHrMs: transferCompleteHR,
               }, 'Ended streaming response after drain');
             });
+            } else {
+              logger.debug({
+                requestId,
+                destroyed: res.destroyed,
+                writableEnded: res.writableEnded,
+              }, 'Response already ended/destroyed after drain');
+            }
           });
         } else {
           res.end(() => {
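
These two hunks apply the same state re-check to the end of the stream: if buffered data is still flushing, wait for 'drain', then confirm the response is still writable before calling res.end(), because the client can disconnect while the buffer empties. A condensed sketch of that logic, under an assumed safeEnd name and mirroring the diff's reliance on a pending 'drain' event when writableLength > 0:

function safeEnd(res, onDone) {
  if (res.writableEnded || res.destroyed) {
    return; // already ended or torn down, nothing left to flush
  }
  if (res.writableLength > 0) {
    // As in the diff, this assumes a prior write returned false, so a
    // 'drain' event is still expected on this response.
    res.once('drain', () => {
      if (!res.writableEnded && !res.destroyed) {
        res.end(onDone);
      }
    });
  } else {
    res.end(onDone);
  }
}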
@@ -741,8 +796,17 @@ class RPCProxy {
           requestId,
           error: endError.message,
           clientClosed: isClientClosed(),
+          destroyed: res.destroyed,
+          writableEnded: res.writableEnded,
         }, 'Error ending response');
       }
+    } else {
+      logger.debug({
+        requestId,
+        destroyed: res.destroyed,
+        writableEnded: res.writableEnded,
+        responseSize: rawData.length,
+      }, 'Response already ended/destroyed, skipping end call');
     }
     // Log if client closed very early