feat(processor): implement fault-tolerant batch DB writes and add failure stats

Add failure handling for batch database writes: when a batch write fails, automatically fall back to writing the rows one by one.
Record the failed rows and count the number of failures; update the related tests and the stats module accordingly.
Implements the newly added requirement spec for batch-write fault tolerance.
2026-01-20 08:22:55 +08:00
parent 41301f9ce5
commit b90faf4aa4
5 changed files with 111 additions and 44 deletions
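
The fallback path described in the message could look roughly like the sketch below. This is a minimal illustration, not the actual implementation from this commit: db.writeBatch, db.writeRow, and the failedRows sink are hypothetical names; only incDbWritten and incDbWriteFailed appear in the diff.

// Sketch: batch insert first, fall back to row-by-row on failure.
// db.writeBatch / db.writeRow / failedRows are assumed names.
async function writeWithFallback(db, stats, rows, failedRows) {
  try {
    await db.writeBatch(rows);          // fast path: one batch insert
    stats.incDbWritten(rows.length);
    return;
  } catch (batchErr) {
    // Batch failed: retry row by row so one bad row does not
    // discard the whole batch.
    for (const row of rows) {
      try {
        await db.writeRow(row);
        stats.incDbWritten(1);
      } catch (rowErr) {
        failedRows.push({ row, error: rowErr.message }); // keep the failed data
        stats.incDbWriteFailed(1);                       // count the failure
      }
    }
  }
}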


@@ -8,16 +8,19 @@ describe('StatsCounters', () => {
     stats.incDbWritten(3);
     stats.incFiltered(2);
     stats.incKafkaPulled(5);
+    stats.incDbWriteFailed(4);
     const first = stats.snapshotAndResetMinute();
     assert.equal(first.dbWritten, 3n);
     assert.equal(first.filtered, 2n);
     assert.equal(first.kafkaPulled, 5n);
+    assert.equal(first.dbWriteFailed, 4n);
     const second = stats.snapshotAndResetMinute();
     assert.equal(second.dbWritten, 0n);
     assert.equal(second.filtered, 0n);
     assert.equal(second.kafkaPulled, 0n);
+    assert.equal(second.dbWriteFailed, 0n);
   });
 });
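
For reference, a counters object consistent with the test above might look like this sketch. The method names and BigInt snapshot fields come from the test; the class shape itself is an assumption, not the file changed in this commit.

// Sketch of a StatsCounters shape that satisfies the test above.
class StatsCounters {
  constructor() {
    this.minute = { dbWritten: 0n, filtered: 0n, kafkaPulled: 0n, dbWriteFailed: 0n };
  }
  incDbWritten(n) { this.minute.dbWritten += BigInt(n); }
  incFiltered(n) { this.minute.filtered += BigInt(n); }
  incKafkaPulled(n) { this.minute.kafkaPulled += BigInt(n); }
  incDbWriteFailed(n) { this.minute.dbWriteFailed += BigInt(n); }
  snapshotAndResetMinute() {
    const snapshot = { ...this.minute };                 // return current minute's counts
    this.minute = { dbWritten: 0n, filtered: 0n, kafkaPulled: 0n, dbWriteFailed: 0n };
    return snapshot;                                     // next snapshot starts from zero
  }
}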
@@ -27,6 +30,7 @@ describe('StatsReporter', () => {
     stats.incDbWritten(7);
     stats.incFiltered(8);
     stats.incKafkaPulled(9);
+    stats.incDbWriteFailed(2);
     const calls = { push: [] };
     const redis = {
@@ -39,13 +43,15 @@ describe('StatsReporter', () => {
     const reporter = new StatsReporter({ redis, stats });
     reporter.flushOnce();
-    assert.equal(calls.push.length, 3);
+    assert.equal(calls.push.length, 4);
     assert.equal(calls.push[0].level, 'info');
     assert.equal(calls.push[1].level, 'info');
     assert.equal(calls.push[2].level, 'info');
+    assert.equal(calls.push[3].level, 'info');
     assert.match(calls.push[0].message, /^\[STATS\] \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} 数据库写入量: 7条$/);
-    assert.match(calls.push[1].message, /^\[STATS\] \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} 数据过滤量: 8条$/);
-    assert.match(calls.push[2].message, /^\[STATS\] \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} Kafka拉取量: 9条$/);
+    assert.match(calls.push[1].message, /^\[STATS\] \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} 数据库写入失败量: 2条$/);
+    assert.match(calls.push[2].message, /^\[STATS\] \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} 数据过滤量: 8条$/);
+    assert.match(calls.push[3].message, /^\[STATS\] \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} Kafka拉取量: 9条$/);
   });
 });
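
On the reporter side, the test expects four [STATS] lines per flush, in the order: DB written, DB write failures, filtered, Kafka pulled. A sketch consistent with those expectations follows; the timestamp helper is an assumption, and since the redis mock is truncated in the diff, redis.push({ level, message }) is only a placeholder for whatever method the real client exposes.

// Sketch of StatsReporter.flushOnce() matching the four expected messages.
class StatsReporter {
  constructor({ redis, stats }) {
    this.redis = redis;
    this.stats = stats;
  }
  flushOnce() {
    const s = this.stats.snapshotAndResetMinute();
    const ts = formatTimestamp(new Date());            // "YYYY-MM-DD HH:mm:ss.SSS"
    const lines = [
      `[STATS] ${ts} 数据库写入量: ${s.dbWritten}条`,
      `[STATS] ${ts} 数据库写入失败量: ${s.dbWriteFailed}条`,
      `[STATS] ${ts} 数据过滤量: ${s.filtered}条`,
      `[STATS] ${ts} Kafka拉取量: ${s.kafkaPulled}条`,
    ];
    for (const message of lines) {
      this.redis.push({ level: 'info', message });     // placeholder for the real redis call
    }
  }
}

function formatTimestamp(d) {
  const pad = (n, w = 2) => String(n).padStart(w, '0');
  return `${d.getFullYear()}-${pad(d.getMonth() + 1)}-${pad(d.getDate())} ` +
    `${pad(d.getHours())}:${pad(d.getMinutes())}:${pad(d.getSeconds())}.${pad(d.getMilliseconds(), 3)}`;
}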