/*
 * 变更说明:
 * - 新增配置项以支持旧/新明细表的独立写入开关及目标表名。
 * - 重构 DatabaseManager, 抽象通用批量 COPY 写入内核, 支持不同目标表的复用。
 * - 新增双明细写入编排器, 支持旧/新表独立执行、重试及 fallback。
 * - 调整 HeartbeatProcessor.processBatch(), 确保 room_status 独立执行。
 * - 错误表仅记录新表写入失败, 旧表失败不再写入错误表。
 * - 重新定义消费暂停策略, 基于当前启用的关键 sink 判断。
 * - 补充按 sink 维度的统计项与启动日志。
 * - 新增 G4 热表相关的数据库规范与处理逻辑, 确保系统在双写模式下的稳定性与可扩展性。
 */
import assert from 'node:assert/strict';
import { describe, it } from 'node:test';

import { HeartbeatProcessor } from '../src/processor/heartbeatProcessor.js';
import { StatsCounters, StatsReporter } from '../src/stats/statsManager.js';
describe('StatsCounters', () => {
|
|
it('snapshots and resets minute counters atomically', () => {
|
|
const stats = new StatsCounters();
|
|
stats.incDbWritten(3);
|
|
stats.incFiltered(2);
|
|
stats.incKafkaPulled(5);
|
|
stats.incDbWriteFailed(4);
|
|
|
|
const first = stats.snapshotAndResetMinute();
|
|
assert.equal(first.dbWritten, 3n);
|
|
assert.equal(first.filtered, 2n);
|
|
assert.equal(first.kafkaPulled, 5n);
|
|
assert.equal(first.dbWriteFailed, 4n);
|
|
|
|
const second = stats.snapshotAndResetMinute();
|
|
assert.equal(second.dbWritten, 0n);
|
|
assert.equal(second.filtered, 0n);
|
|
assert.equal(second.kafkaPulled, 0n);
|
|
assert.equal(second.dbWriteFailed, 0n);
|
|
});
|
|
});
describe('StatsReporter', () => {
|
|
it('writes three [STATS] info logs to redis console', () => {
|
|
const stats = new StatsCounters();
|
|
stats.incDbWritten(7);
|
|
stats.incFiltered(8);
|
|
stats.incKafkaPulled(9);
|
|
stats.incDbWriteFailed(2);
|
|
|
|
const calls = { push: [] };
|
|
const redis = {
|
|
isEnabled: () => true,
|
|
pushConsoleLog: ({ level, message, metadata }) => {
|
|
calls.push.push({ level, message, metadata });
|
|
},
|
|
};
|
|
|
|
const reporter = new StatsReporter({ redis, stats });
|
|
reporter.flushOnce();
|
|
|
|
assert.equal(calls.push.length, 9);
|
|
for (const c of calls.push) assert.equal(c.level, 'info');
|
|
assert.match(calls.push[0].message, /Legacy写入量: 7条$/);
|
|
assert.match(calls.push[1].message, /Legacy写入失败量: 2条$/);
|
|
assert.match(calls.push[2].message, /G4Hot写入量: 0条$/);
|
|
assert.match(calls.push[3].message, /G4Hot写入失败量: 0条$/);
|
|
assert.match(calls.push[4].message, /RoomStatus写入量: 0条$/);
|
|
assert.match(calls.push[5].message, /RoomStatus失败量: 0条$/);
|
|
assert.match(calls.push[6].message, /G4Hot错误表插入量: 0条$/);
|
|
assert.match(calls.push[7].message, /数据过滤量: 8条$/);
|
|
assert.match(calls.push[8].message, /Kafka拉取量: 9条$/);
|
|
});
|
|
});
describe('HeartbeatProcessor db write error logging', () => {
|
|
it('emits [ERROR] warn log with raw data', () => {
|
|
const calls = { warn: [] };
|
|
const redis = {
|
|
isEnabled: () => true,
|
|
pushConsoleLog: ({ level, message }) => {
|
|
if (level === 'warn') calls.warn.push(message);
|
|
},
|
|
};
|
|
|
|
const processor = new HeartbeatProcessor({ batchSize: 1, batchTimeout: 10 }, {}, { redis });
|
|
processor._emitDbWriteError(new Error('boom'), [{ a: 1 }]);
|
|
|
|
assert.equal(calls.warn.length >= 1, true);
|
|
assert.match(calls.warn[0], /^\[ERROR\] \d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3} db_write_failed: /);
|
|
assert.match(calls.warn[0], /"errorId":"db_write_failed"/);
|
|
assert.match(calls.warn[0], /"rawData":\{"a":1\}/);
|
|
});
|
|
});