feat: implement Redis integration and optimize Kafka message processing
- Add a Redis integration module supporting heartbeat writes and a console-log queue
- Improve the Kafka consumer implementation with multi-instance support and automatic reconnection
- Improve the message processor to support batch processing and multi-layer decoding
- Update the database schema, adjusting column types and constraints
- Add configuration options and environment-variable support for Redis and Kafka
- Add test cases and documentation
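Note: the diff below covers only the DatabaseManager and schema portion of this commit. As a rough, illustrative sketch of the Redis wiring described in the first bullet (heartbeat writes plus a console-log queue), and not code taken from this commit, something along these lines would apply; the ioredis client, the key names, and the REDIS_URL environment variable are assumptions:

// Illustrative only; client choice, key names, and env var are hypothetical.
import Redis from 'ioredis';

const redis = new Redis(process.env.REDIS_URL ?? 'redis://127.0.0.1:6379');

// Store the latest heartbeat per device with a TTL, and queue a console-log entry.
async function recordHeartbeat(event) {
  await redis.set(`hb:last:${event.device_id}`, JSON.stringify(event), 'EX', 300);
  await redis.lpush('console:logs', JSON.stringify({ ts: Date.now(), device_id: event.device_id }));
}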
@@ -83,9 +83,9 @@ class DatabaseManager {
         ts_ms bigint NOT NULL,
         hotel_id int2 NOT NULL,
-        room_id int4 NOT NULL,
+        room_id varchar(50) NOT NULL,
         device_id varchar(64) NOT NULL,
-        ip inet NOT NULL,
+        ip varchar(21) NOT NULL,
         power_state int2 NOT NULL,
         guest_type int2 NOT NULL,
         cardless_state int2 NOT NULL,
@@ -93,7 +93,7 @@ class DatabaseManager {
         pms_state int2 NOT NULL,
         carbon_state int2 NOT NULL,
         device_count int2 NOT NULL,
-        comm_seq int2 NOT NULL,
+        comm_seq int4 NOT NULL,
         extra jsonb,
@@ -101,14 +101,14 @@ class DatabaseManager {
         CONSTRAINT chk_ts_ms_positive CHECK (ts_ms > 0),
         CONSTRAINT chk_hotel_id_range CHECK (hotel_id >= 0 AND hotel_id <= 32767),
-        CONSTRAINT chk_room_id_range CHECK (room_id >= 0),
+        CONSTRAINT chk_room_id_len CHECK (char_length(room_id) > 0 AND char_length(room_id) <= 50),
         CONSTRAINT chk_power_state_range CHECK (power_state >= 0 AND power_state <= 32767),
         CONSTRAINT chk_guest_type_range CHECK (guest_type >= 0 AND guest_type <= 32767),
         CONSTRAINT chk_cardless_state_range CHECK (cardless_state >= 0 AND cardless_state <= 32767),
         CONSTRAINT chk_pms_state_range CHECK (pms_state >= 0 AND pms_state <= 32767),
         CONSTRAINT chk_carbon_state_range CHECK (carbon_state >= 0 AND carbon_state <= 32767),
         CONSTRAINT chk_device_count_range CHECK (device_count >= 0 AND device_count <= 32767),
-        CONSTRAINT chk_comm_seq_range CHECK (comm_seq >= 0 AND comm_seq <= 32767)
+        CONSTRAINT chk_comm_seq_range CHECK (comm_seq >= 0)
       )
       PARTITION BY RANGE (ts_ms);
@@ -194,6 +194,8 @@ class DatabaseManager {
       await this.pool.query(legacyTableQuery);
       await this.pool.query(v2SchemaQuery);
+      await this.ensureIpColumnVarchar();
+      await this.ensureRoomIdColumnVarchar();
       console.log('数据库表初始化成功');
     } catch (error) {
       console.error('数据库表初始化失败:', error);
@@ -201,6 +203,116 @@ class DatabaseManager {
     }
   }

+  async ensureRoomIdColumnVarchar() {
+    const res = await this.pool.query(
+      `
+      SELECT format_type(a.atttypid, a.atttypmod) AS type
+      FROM pg_attribute a
+      JOIN pg_class c ON c.oid = a.attrelid
+      JOIN pg_namespace n ON n.oid = c.relnamespace
+      WHERE n.nspname = 'heartbeat'
+        AND c.relname = 'heartbeat_events'
+        AND a.attname = 'room_id'
+        AND a.attnum > 0
+        AND NOT a.attisdropped
+      `
+    );
+
+    const type = String(res?.rows?.[0]?.type ?? '').toLowerCase();
+    if (!type) return;
+    if (type.startsWith('character varying')) return;
+
+    await this.pool.query('ALTER TABLE heartbeat.heartbeat_events DROP CONSTRAINT IF EXISTS chk_room_id_range');
+    await this.pool.query('ALTER TABLE heartbeat.heartbeat_events DROP CONSTRAINT IF EXISTS chk_room_id_len');
+
+    await this.pool.query(
+      `ALTER TABLE heartbeat.heartbeat_events
+       ALTER COLUMN room_id TYPE varchar(50)
+       USING room_id::text`
+    );
+
+    await this.pool.query(
+      'ALTER TABLE heartbeat.heartbeat_events ADD CONSTRAINT chk_room_id_len CHECK (char_length(room_id) > 0 AND char_length(room_id) <= 50)'
+    );
+
+    const parts = await this.pool.query(
+      `
+      SELECT c.relname AS partition
+      FROM pg_inherits i
+      JOIN pg_class c ON c.oid = i.inhrelid
+      JOIN pg_class p ON p.oid = i.inhparent
+      JOIN pg_namespace n ON n.oid = p.relnamespace
+      WHERE n.nspname = 'heartbeat'
+        AND p.relname = 'heartbeat_events'
+      ORDER BY c.relname
+      `
+    );
+
+    for (const row of parts.rows ?? []) {
+      const name = row?.partition;
+      if (!name) continue;
+      await this.pool.query(
+        `ALTER TABLE heartbeat.${this.escapeIdentifier(name)}
+         ALTER COLUMN room_id TYPE varchar(50)
+         USING room_id::text`
+      );
+    }
+  }
+
+  async ensureIpColumnVarchar() {
+    const res = await this.pool.query(
+      `
+      SELECT format_type(a.atttypid, a.atttypmod) AS type
+      FROM pg_attribute a
+      JOIN pg_class c ON c.oid = a.attrelid
+      JOIN pg_namespace n ON n.oid = c.relnamespace
+      WHERE n.nspname = 'heartbeat'
+        AND c.relname = 'heartbeat_events'
+        AND a.attname = 'ip'
+        AND a.attnum > 0
+        AND NOT a.attisdropped
+      `
+    );
+
+    const type = String(res?.rows?.[0]?.type ?? '').toLowerCase();
+    if (!type) return;
+    if (type.startsWith('character varying')) return;
+    if (!type.startsWith('inet')) return;
+
+    await this.pool.query(
+      `ALTER TABLE heartbeat.heartbeat_events
+       ALTER COLUMN ip TYPE varchar(21)
+       USING ip::text`
+    );
+
+    const parts = await this.pool.query(
+      `
+      SELECT c.relname AS partition
+      FROM pg_inherits i
+      JOIN pg_class c ON c.oid = i.inhrelid
+      JOIN pg_class p ON p.oid = i.inhparent
+      JOIN pg_namespace n ON n.oid = p.relnamespace
+      WHERE n.nspname = 'heartbeat'
+        AND p.relname = 'heartbeat_events'
+      ORDER BY c.relname
+      `
+    );
+
+    for (const row of parts.rows ?? []) {
+      const name = row?.partition;
+      if (!name) continue;
+      await this.pool.query(
+        `ALTER TABLE heartbeat.${this.escapeIdentifier(name)}
+         ALTER COLUMN ip TYPE varchar(21)
+         USING ip::text`
+      );
+    }
+  }
+
+  escapeIdentifier(id) {
+    return `"${String(id).replace(/"/g, '""')}"`;
+  }
+
   getPartitionConfig() {
     const cfg = this.config.partitionMaintenance ?? {};
     return {
@@ -218,7 +330,7 @@ class DatabaseManager {
     const startOffset = Number(startDayOffset ?? 0);
     const endOffset = Number(endDayOffset ?? 0);
     await this.pool.query(
-      'SELECT heartbeat.ensure_partitions(current_date + $1::int, current_date + $2::int)',
+      "SELECT heartbeat.ensure_partitions(((now() AT TIME ZONE 'Asia/Shanghai')::date) + $1::int, ((now() AT TIME ZONE 'Asia/Shanghai')::date) + $2::int)",
       [startOffset, endOffset]
     );
   }
@@ -245,9 +357,6 @@ class DatabaseManager {
         console.error('[db] 分区预创建维护失败:', err);
       }
     }, intervalMs);
-
-    // Do not block process exit because of this timer
-    this.partitionMaintenanceTimer.unref?.();
   }

   stopPartitionMaintenance() {
@@ -289,11 +398,6 @@ class DatabaseManager {
     }
     if (events.length === 0) return;

-    const tsValues = events.map((e) => Number(e.ts_ms)).filter((n) => Number.isFinite(n));
-    if (tsValues.length > 0) {
-      await this.ensurePartitionsForTsRange(Math.min(...tsValues), Math.max(...tsValues));
-    }
-
     const columns = [
       'ts_ms',
       'hotel_id',
@@ -338,21 +442,59 @@ class DatabaseManager {
     const sql = `INSERT INTO heartbeat.heartbeat_events (${columns.join(', ')}) VALUES ${placeholders}`;

-    try {
-      await this.pool.query(sql, values);
-    } catch (error) {
-      // Fallback: if the write still fails because of a missing partition, ensure partitions for "now through the next N days" and retry once
-      if (this.isMissingPartitionError(error)) {
-        console.warn('[db] 检测到缺分区写入失败,执行兜底预创建并重试一次');
-        await this.ensurePartitionsForRange({
-          startDayOffset: -7,
-          endDayOffset: this.getPartitionFutureDays(),
-        });
-        await this.pool.query(sql, values);
-        return;
+    const runInsertOnce = async () => {
+      const tsValues = events.map((e) => Number(e.ts_ms)).filter((n) => Number.isFinite(n));
+      if (tsValues.length > 0) {
+        await this.ensurePartitionsForTsRange(Math.min(...tsValues), Math.max(...tsValues));
+      }
+
+      const client = await this.pool.connect();
+      try {
+        await client.query('BEGIN');
+        const res = await client.query(sql, values);
+        const insertedCount = Number(res?.rowCount ?? 0);
+        if (insertedCount !== events.length) {
+          throw new Error(`insert rowCount mismatch: expect=${events.length} actual=${insertedCount}`);
+        }
+        await client.query('COMMIT');
+        return { insertedCount };
+      } catch (error) {
+        try {
+          await client.query('ROLLBACK');
+        } catch (rollbackError) {
+          console.error('[db] rollback failed:', rollbackError);
+        }
+        throw error;
+      } finally {
+        client.release();
+      }
+    };
+
+    const retryAttempts = Number(this.config?.retryAttempts ?? 0);
+    const retryDelay = Math.max(250, Number(this.config?.retryDelay ?? 1000));
+    const maxAttempts = retryAttempts > 0 ? retryAttempts : 1;
+
+    let lastError = null;
+    for (let attempt = 1; attempt <= maxAttempts; attempt += 1) {
+      try {
+        return await runInsertOnce();
+      } catch (error) {
+        lastError = error;
+        if (this.isMissingPartitionError(error)) {
+          console.warn('[db] 检测到缺分区写入失败,执行兜底预创建并重试一次');
+          await this.ensurePartitionsForRange({
+            startDayOffset: -7,
+            endDayOffset: this.getPartitionFutureDays(),
+          });
+        }
+        if (attempt < maxAttempts) {
+          await new Promise((r) => setTimeout(r, retryDelay));
+          continue;
+        }
       }
-      throw error;
     }
+
+    throw lastError;
   }

   async insertHeartbeatData(data) {
@@ -381,8 +523,9 @@ class DatabaseManager {
         values: values.flat()
       };

-      await this.pool.query(query);
+      const res = await this.pool.query(query);
       console.log(`成功插入 ${data.length} 条心跳数据`);
+      return { insertedCount: Number(res?.rowCount ?? data.length) };
     } catch (error) {
       console.error('插入心跳数据失败:', error);
       throw error;
@@ -430,4 +573,4 @@ class DatabaseManager {
   }
 }

 export { DatabaseManager };
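Design note: with this change the batch-insert path runs each attempt inside an explicit transaction, checks rowCount against the batch size, retries up to config.retryAttempts times with config.retryDelay between attempts, and pre-creates partitions when a missing-partition error is detected; the ensure_partitions call also switches from current_date to the Asia/Shanghai calendar date so daily partitions line up with local time. A minimal usage sketch follows, assuming a constructor that takes a config object and a module path of ./database.js (both assumptions, not shown in this diff):

// Illustrative usage only; module path, constructor shape, and event fields are assumptions.
import { DatabaseManager } from './database.js';

const db = new DatabaseManager({
  retryAttempts: 3,         // upper bound of the retry loop added in this commit
  retryDelay: 1000,         // delay in ms between attempts (floored at 250 in the code above)
  partitionMaintenance: {}, // read by getPartitionConfig()
});

// insertHeartbeatData now resolves with { insertedCount } (see the change above).
const { insertedCount } = await db.insertHeartbeatData([
  { ts_ms: Date.now(), hotel_id: 1, room_id: '0101', device_id: 'dev-0001', ip: '10.0.0.8' },
]);
console.log(`inserted ${insertedCount} heartbeat rows`);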