feat: add batch processing and a database offline-recovery mechanism to improve reliability
- Add a BatchProcessor class for batched message inserts, improving database write throughput
- Disable autoCommit in the consumer and commit offsets manually to guarantee data consistency
- Add a database health-check mechanism that pauses consumption while the database is offline and resumes automatically
- Support the 0x0E command word, extending the range of recognized message types
- Add database connection retry logic to work around port conflicts on Windows
- Update environment variable configuration and tune Kafka consumer parameters
- Add unit tests covering the batch processing and reliability features
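How the new pieces are assumed to fit together (a hedged sketch only, not code from this commit; module paths and option shapes are inferred from the tests below, and every configuration value is a placeholder):

// Sketch: wiring the consumer, the batch processor and the health check together.
// Everything below is illustrative; real values would come from environment variables.
import { createKafkaConsumers } from './src/kafka/consumer.js';
import { BatchProcessor } from './src/db/batchProcessor.js';
import { buildRowsFromPayload } from './src/processor/index.js';

const config = { db: { schema: 'rcu', table: 'device_actions' } };               // placeholder schema/table
const dbManager = { insertRows: async ({ rows }) => rows.length };               // stand-in for the real DB manager
const healthCheck = { shouldPause: async () => false, check: async () => true }; // stand-in health check
const kafkaConfig = {
  brokers: ['localhost:9092'],
  groupId: 'rcu-action',
  clientId: 'rcu-action',
  topic: 'rcu-actions',
  autoCommitIntervalMs: 5000
};

const batch = new BatchProcessor(dbManager, config, { batchSize: 200, flushInterval: 1000 });

createKafkaConsumers({
  kafkaConfig,
  onMessage: async (message) => {
    const rows = buildRowsFromPayload(JSON.parse(message.value)); // one Kafka message -> N DB rows
    await batch.add({ rows });                                    // resolves once the batch is flushed
  },
  onError: (err) => console.error('message processing failed:', err),
  healthCheck                                                     // decides pause-and-recover vs. skip-bad-data
});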
bls-rcu-action-backend/tests/batch_processor.test.js (Normal file, 97 lines added)
@@ -0,0 +1,97 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { BatchProcessor } from '../src/db/batchProcessor.js';

describe('BatchProcessor', () => {
  let dbManager;
  let config;
  let batchProcessor;

  beforeEach(() => {
    vi.useFakeTimers();
    dbManager = {
      insertRows: vi.fn().mockResolvedValue(true)
    };
    config = {
      db: { schema: 'test_schema', table: 'test_table' }
    };
    batchProcessor = new BatchProcessor(dbManager, config, { batchSize: 3, flushInterval: 1000 });
  });

  afterEach(() => {
    vi.useRealTimers();
  });

  it('should buffer items and not flush until batch size is reached', async () => {
    const p1 = batchProcessor.add({ rows: ['r1'] });
    const p2 = batchProcessor.add({ rows: ['r2'] });

    expect(dbManager.insertRows).not.toHaveBeenCalled();

    const p3 = batchProcessor.add({ rows: ['r3'] });

    // Wait for microtasks
    await Promise.resolve();

    expect(dbManager.insertRows).toHaveBeenCalledTimes(1);
    expect(dbManager.insertRows).toHaveBeenCalledWith({
      schema: 'test_schema',
      table: 'test_table',
      rows: ['r1', 'r2', 'r3']
    });

    await expect(p1).resolves.toBe(1);
    await expect(p2).resolves.toBe(1);
    await expect(p3).resolves.toBe(1);
  });

  it('should flush when timer expires', async () => {
    const p1 = batchProcessor.add({ rows: ['r1'] });

    expect(dbManager.insertRows).not.toHaveBeenCalled();

    vi.advanceTimersByTime(1000);

    // Wait for microtasks
    await Promise.resolve();

    expect(dbManager.insertRows).toHaveBeenCalledTimes(1);
    expect(dbManager.insertRows).toHaveBeenCalledWith({
      schema: 'test_schema',
      table: 'test_table',
      rows: ['r1']
    });

    await expect(p1).resolves.toBe(1);
  });

  it('should handle db error and reject all pending promises', async () => {
    dbManager.insertRows.mockRejectedValue(new Error('DB Fail'));

    const p1 = batchProcessor.add({ rows: ['r1'] });
    const p2 = batchProcessor.add({ rows: ['r2'] });
    const p3 = batchProcessor.add({ rows: ['r3'] }); // Triggers flush

    await expect(p1).rejects.toThrow('DB Fail');
    await expect(p2).rejects.toThrow('DB Fail');
    await expect(p3).rejects.toThrow('DB Fail');
  });

  it('should handle mixed batch sizes', async () => {
    // 3 items with different row counts
    const p1 = batchProcessor.add({ rows: ['r1', 'r2'] });
    const p2 = batchProcessor.add({ rows: [] }); // Empty rows
    const p3 = batchProcessor.add({ rows: ['r3'] });

    await Promise.resolve();

    expect(dbManager.insertRows).toHaveBeenCalledWith({
      schema: 'test_schema',
      table: 'test_table',
      rows: ['r1', 'r2', 'r3']
    });

    await expect(p1).resolves.toBe(2);
    await expect(p2).resolves.toBe(0);
    await expect(p3).resolves.toBe(1);
  });
});
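The tests above pin down BatchProcessor's contract: add() buffers items, a flush happens once batchSize items are buffered or flushInterval elapses, each caller's promise resolves with its own row count, and a failed insert rejects every pending caller. A minimal sketch that satisfies that contract (an assumption for illustration; the shipped src/db/batchProcessor.js may differ in detail):

// Minimal BatchProcessor sketch consistent with the tests above.
export class BatchProcessor {
  constructor(dbManager, config, { batchSize = 100, flushInterval = 1000 } = {}) {
    this.dbManager = dbManager;
    this.config = config;
    this.batchSize = batchSize;
    this.flushInterval = flushInterval;
    this.pending = [];   // entries of { rows, resolve, reject }
    this.timer = null;
  }

  add(item) {
    return new Promise((resolve, reject) => {
      this.pending.push({ rows: item.rows, resolve, reject });
      if (this.pending.length >= this.batchSize) {
        this.flush();                                              // size threshold reached
      } else if (!this.timer) {
        this.timer = setTimeout(() => this.flush(), this.flushInterval); // time threshold
      }
    });
  }

  async flush() {
    if (this.timer) { clearTimeout(this.timer); this.timer = null; }
    const batch = this.pending;
    this.pending = [];
    if (batch.length === 0) return;
    const rows = batch.flatMap((entry) => entry.rows);
    try {
      await this.dbManager.insertRows({
        schema: this.config.db.schema,
        table: this.config.db.table,
        rows
      });
      batch.forEach((entry) => entry.resolve(entry.rows.length)); // each caller gets its own row count
    } catch (err) {
      batch.forEach((entry) => entry.reject(err));                // a failed flush rejects every pending add()
    }
  }
}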
bls-rcu-action-backend/tests/consumer_reliability.test.js (Normal file, 124 lines added)
@@ -0,0 +1,124 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';
import { createKafkaConsumers } from '../src/kafka/consumer.js';
import kafka from 'kafka-node';

// Mock kafka-node
vi.mock('kafka-node', () => {
  return {
    ConsumerGroup: vi.fn(),
    default: { ConsumerGroup: vi.fn() }
  };
});

describe('Consumer Reliability', () => {
  let mockConsumer;
  let onMessage;
  let onError;
  let healthCheck;

  const kafkaConfig = {
    brokers: ['localhost:9092'],
    groupId: 'test-group',
    clientId: 'test-client',
    topic: 'test-topic',
    autoCommitIntervalMs: 5000
  };

  beforeEach(() => {
    vi.clearAllMocks();

    mockConsumer = {
      on: vi.fn(),
      commit: vi.fn(),
      pause: vi.fn(),
      resume: vi.fn(),
      close: vi.fn()
    };

    kafka.ConsumerGroup.mockImplementation(function() {
      return mockConsumer;
    });

    onMessage = vi.fn().mockResolvedValue(true);
    onError = vi.fn();
    healthCheck = {
      shouldPause: vi.fn().mockResolvedValue(false),
      check: vi.fn().mockResolvedValue(true)
    };
  });

  it('should initialize with autoCommit: false', () => {
    createKafkaConsumers({ kafkaConfig, onMessage, onError });
    expect(kafka.ConsumerGroup).toHaveBeenCalledWith(
      expect.objectContaining({ autoCommit: false }),
      expect.anything()
    );
  });

  it('should commit offset after successful message processing', async () => {
    createKafkaConsumers({ kafkaConfig, onMessage, onError });

    // Simulate 'message' event
    const message = { value: 'test' };
    const messageHandler = mockConsumer.on.mock.calls.find(call => call[0] === 'message')[1];

    await messageHandler(message);

    expect(onMessage).toHaveBeenCalledWith(message);
    expect(mockConsumer.commit).toHaveBeenCalled();
  });

  it('should NOT commit if processing fails and health check says pause', async () => {
    onMessage.mockRejectedValue(new Error('Fail'));
    healthCheck.shouldPause.mockResolvedValue(true);
    createKafkaConsumers({ kafkaConfig, onMessage, onError, healthCheck });

    const messageHandler = mockConsumer.on.mock.calls.find(call => call[0] === 'message')[1];
    await messageHandler({ value: 'test' });

    expect(mockConsumer.commit).not.toHaveBeenCalled();
    expect(onError).toHaveBeenCalled();
  });

  it('should commit if processing fails but health check says continue (Data Error)', async () => {
    onMessage.mockRejectedValue(new Error('Data Error'));
    healthCheck.shouldPause.mockResolvedValue(false); // Do not pause, it's just bad data

    createKafkaConsumers({ kafkaConfig, onMessage, onError, healthCheck });

    const messageHandler = mockConsumer.on.mock.calls.find(call => call[0] === 'message')[1];
    await messageHandler({ value: 'bad_data' });

    expect(mockConsumer.commit).toHaveBeenCalled(); // Should commit to move past bad data
    expect(onError).toHaveBeenCalled(); // Should still report error
  });

  it('should pause and enter recovery mode if healthCheck.shouldPause returns true', async () => {
    vi.useFakeTimers();

    onMessage.mockRejectedValue(new Error('DB Error'));
    healthCheck.shouldPause.mockResolvedValue(true);
    healthCheck.check.mockResolvedValueOnce(false).mockResolvedValueOnce(true); // Fail once, then succeed

    createKafkaConsumers({ kafkaConfig, onMessage, onError, healthCheck });
    const messageHandler = mockConsumer.on.mock.calls.find(call => call[0] === 'message')[1];

    // Trigger error
    await messageHandler({ value: 'fail' });

    expect(mockConsumer.pause).toHaveBeenCalled();
    expect(healthCheck.shouldPause).toHaveBeenCalled();

    // Fast-forward time for interval check (1st check - fails)
    await vi.advanceTimersByTimeAsync(60000);
    expect(healthCheck.check).toHaveBeenCalledTimes(1);
    expect(mockConsumer.resume).not.toHaveBeenCalled();

    // Fast-forward time for interval check (2nd check - succeeds)
    await vi.advanceTimersByTimeAsync(60000);
    expect(healthCheck.check).toHaveBeenCalledTimes(2);
    expect(mockConsumer.resume).toHaveBeenCalled();

    vi.useRealTimers();
  });
});
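These tests encode the manual-commit policy: commit only after onMessage succeeds, commit anyway when the failure is just bad data (healthCheck.shouldPause resolves false), and otherwise pause the ConsumerGroup and poll healthCheck.check on an interval until the database is reachable again, then resume. A hedged sketch of that message handler (the real src/kafka/consumer.js may structure this differently; RECOVERY_INTERVAL_MS is an assumption inferred from the 60-second steps in the test above):

// Sketch of the handler assumed to live inside createKafkaConsumers.
const RECOVERY_INTERVAL_MS = 60000; // assumed recovery poll interval

function attachMessageHandler(consumer, { onMessage, onError, healthCheck }) {
  consumer.on('message', async (message) => {
    try {
      await onMessage(message);
      consumer.commit();                         // manual commit only after successful processing
    } catch (err) {
      onError(err);
      const mustPause = healthCheck ? await healthCheck.shouldPause(err) : false;
      if (!mustPause) {
        consumer.commit();                       // bad data: commit anyway so the consumer moves on
        return;
      }
      consumer.pause();                          // DB offline: stop consuming, keep the offset uncommitted
      const timer = setInterval(async () => {
        if (await healthCheck.check()) {         // DB reachable again
          clearInterval(timer);
          consumer.resume();                     // the uncommitted message will be redelivered
        }
      }, RECOVERY_INTERVAL_MS);
    }
  });
}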
@@ -1,7 +1,19 @@
import { describe, it, expect } from 'vitest';
import { describe, it, expect, vi } from 'vitest';
import { buildRowsFromPayload } from '../src/processor/index.js';
import projectMetadata from '../src/cache/projectMetadata.js';

// Mock config to ensure loop name generation is enabled
vi.mock('../src/config/config.js', async (importOriginal) => {
  const actual = await importOriginal();
  return {
    ...actual,
    config: {
      ...actual.config,
      enableLoopNameAutoGeneration: true,
    },
  };
});

describe('Processor Logic', () => {
  const basePayload = {
    ts_ms: 1700000000000,
@@ -227,3 +239,58 @@ describe('Processor Logic', () => {
    expect(rows[1].loop_name).toBe('[1强电继电器(输出状态)-10-2]');
  });
});

describe('Processor Logic - 0x0E Support', () => {
  const basePayload = {
    ts_ms: 1700000000000,
    hotel_id: 1001,
    room_id: '8001',
    device_id: 'dev_001',
    direction: '上报', // '上报' = upstream report (device -> platform)
    cmd_word: '0x0E',
    frame_id: 1,
    udp_raw: 'AA552000543353413610CD63088151000000000000000001180003000114005ECB',
    sys_lock_status: 0,
    report_count: 0,
    fault_count: 0
  };

  it('should handle 0x0E Status Report with device list (same as 0x36)', () => {
    const payload = {
      ...basePayload,
      direction: '上报',
      cmd_word: '0x0E',
      report_count: 2,
      device_list: [
        { dev_type: 1, dev_addr: 10, dev_loop: 1, dev_data: 100 },
        { dev_type: 1, dev_addr: 11, dev_loop: 2, dev_data: 0 }
      ]
    };

    const rows = buildRowsFromPayload(payload);
    expect(rows).toHaveLength(2);
    expect(rows[0].action_type).toBe('设备回路状态'); // '设备回路状态' = device loop status
    expect(rows[0].dev_addr).toBe(10);
    expect(rows[0].cmd_word).toBe('0x0e'); // Normalized
    expect(rows[1].dev_addr).toBe(11);
    expect(rows[0].details.device_list).toHaveLength(2);
  });

  it('should handle 0x0E Fault Report', () => {
    const payload = {
      ...basePayload,
      direction: '上报',
      cmd_word: '0x0E',
      fault_count: 1,
      fault_list: [
        { dev_type: 1, dev_addr: 10, dev_loop: 1, error_type: 2, error_data: 5 }
      ]
    };

    const rows = buildRowsFromPayload(payload);
    expect(rows).toHaveLength(1);
    expect(rows[0].action_type).toBe('设备回路状态');
    expect(rows[0].error_type).toBe(2);
    expect(rows[0].cmd_word).toBe('0x0e');
  });
});
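What the new tests assert about the processor: 0x0E frames go through the same device-list/fault-list path as 0x36, each entry becomes a row with action_type '设备回路状态' (device loop status), and cmd_word is normalized to lowercase. A sketch of that dispatch, with helper and constant names invented for illustration (the real buildRowsFromPayload in src/processor/index.js is organized differently and also fills loop_name and other columns):

// Illustration only: names below are invented, not the actual processor code.
const STATUS_REPORT_CMDS = new Set(['0x36', '0x0e']); // 0x0E is now handled like 0x36

function normalizeCmdWord(cmdWord) {
  return String(cmdWord).toLowerCase(); // '0x0E' -> '0x0e', matching the test expectations
}

function buildDeviceLoopRows(payload) {
  const cmd = normalizeCmdWord(payload.cmd_word);
  if (!STATUS_REPORT_CMDS.has(cmd)) return [];
  const devices = payload.device_list ?? [];
  const faults = payload.fault_list ?? [];
  return [...devices, ...faults].map((entry) => ({
    action_type: '设备回路状态',                       // "device loop status"
    cmd_word: cmd,
    dev_addr: entry.dev_addr,
    dev_loop: entry.dev_loop,
    error_type: entry.error_type,                      // present only for fault entries
    details: { device_list: devices.length ? devices : undefined }
  }));
}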