feat: 增加批量处理和数据库离线恢复机制以提升可靠性

- 新增 BatchProcessor 类实现消息批量插入,提高数据库写入性能
- 在 consumer 中禁用 autoCommit 并实现手动提交,确保数据一致性
- 添加数据库健康检查机制,在数据库离线时暂停消费并自动恢复
- 支持 0x0E 命令字处理,扩展消息类型识别范围
- 增加数据库连接重试逻辑,解决 Windows 环境端口冲突问题
- 更新环境变量配置,优化 Kafka 消费者参数
- 添加相关单元测试验证批量处理和可靠性功能
This commit is contained in:
2026-02-04 20:36:33 +08:00
parent 339db6f95f
commit 680bf6a957
16 changed files with 557 additions and 43 deletions

View File

@@ -3,12 +3,13 @@ import { logger } from '../utils/logger.js';
const { ConsumerGroup } = kafka;
const createOneConsumer = ({ kafkaConfig, onMessage, onError, instanceIndex }) => {
const createOneConsumer = ({ kafkaConfig, onMessage, onError, instanceIndex, healthCheck }) => {
const kafkaHost = kafkaConfig.brokers.join(',');
const clientId = instanceIndex === 0 ? kafkaConfig.clientId : `${kafkaConfig.clientId}-${instanceIndex}`;
const id = `${clientId}-${process.pid}-${Date.now()}`;
const maxInFlight = Number.isFinite(kafkaConfig.maxInFlight) ? kafkaConfig.maxInFlight : 50;
let inFlight = 0;
let isPausedForHealth = false;
const consumer = new ConsumerGroup(
{
@@ -19,7 +20,7 @@ const createOneConsumer = ({ kafkaConfig, onMessage, onError, instanceIndex }) =
fromOffset: 'earliest',
protocol: ['roundrobin'],
outOfRangeOffset: 'latest',
autoCommit: true,
autoCommit: false,
autoCommitIntervalMs: kafkaConfig.autoCommitIntervalMs,
fetchMaxBytes: kafkaConfig.fetchMaxBytes,
fetchMinBytes: kafkaConfig.fetchMinBytes,
@@ -30,7 +31,7 @@ const createOneConsumer = ({ kafkaConfig, onMessage, onError, instanceIndex }) =
);
const tryResume = () => {
if (inFlight < maxInFlight) {
if (!isPausedForHealth && inFlight < maxInFlight) {
consumer.resume();
}
};
@@ -40,9 +41,48 @@ const createOneConsumer = ({ kafkaConfig, onMessage, onError, instanceIndex }) =
if (inFlight >= maxInFlight) {
consumer.pause();
}
Promise.resolve(onMessage(message))
.catch((error) => {
return Promise.resolve(onMessage(message))
.then(() => {
consumer.commit((err) => {
if (err) {
logger.error('Kafka commit failed', { error: err.message });
}
});
})
.catch(async (error) => {
logger.error('Kafka message handling failed', { error: error?.message });
let shouldCommit = true;
if (!isPausedForHealth && healthCheck && await healthCheck.shouldPause(error)) {
shouldCommit = false;
isPausedForHealth = true;
consumer.pause();
logger.warn('Pausing consumer due to dependency failure. Entering recovery mode...');
const checkInterval = setInterval(async () => {
try {
const isHealthy = await healthCheck.check();
if (isHealthy) {
clearInterval(checkInterval);
isPausedForHealth = false;
consumer.resume();
logger.info('Dependency recovered. Resuming consumer.');
}
} catch (err) {
logger.error('Health check failed', { error: err.message });
}
}, 60000);
}
if (shouldCommit) {
consumer.commit((err) => {
if (err) {
logger.error('Kafka commit failed (error case)', { error: err.message });
}
});
}
if (onError) {
onError(error, message);
}
@@ -63,13 +103,13 @@ const createOneConsumer = ({ kafkaConfig, onMessage, onError, instanceIndex }) =
return consumer;
};
export const createKafkaConsumers = ({ kafkaConfig, onMessage, onError }) => {
export const createKafkaConsumers = ({ kafkaConfig, onMessage, onError, healthCheck }) => {
const instances = Number.isFinite(kafkaConfig.consumerInstances) ? kafkaConfig.consumerInstances : 1;
const count = Math.max(1, instances);
return Array.from({ length: count }, (_, idx) =>
createOneConsumer({ kafkaConfig, onMessage, onError, instanceIndex: idx })
createOneConsumer({ kafkaConfig, onMessage, onError, instanceIndex: idx, healthCheck })
);
};
export const createKafkaConsumer = ({ kafkaConfig, onMessage, onError }) =>
createKafkaConsumers({ kafkaConfig, onMessage, onError })[0];
export const createKafkaConsumer = ({ kafkaConfig, onMessage, onError, healthCheck }) =>
createKafkaConsumers({ kafkaConfig, onMessage, onError, healthCheck })[0];