feat: implement Redis integration and optimize Kafka message handling

- Add a Redis integration module supporting heartbeat writes and a console-log queue (sketched below)
- Rework the Kafka consumer to support multiple instances and automatic reconnection
- Improve the message handler with batch processing and multi-layer decoding
- Update the database schema: adjust column types and constraints
- Add configuration options and environment-variable support for Redis and Kafka
- Add test cases and documentation
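
The Redis and Kafka modules referenced above live in files that are not among the hunks shown below (those cover the PostgreSQL schema and DB scripts). As a rough illustration of the first bullet only, a heartbeat write plus a bounded console-log queue might look like the following; the key names, TTL, and queue length are assumptions, not the module's actual interface (using ioredis):

    // Minimal sketch of the heartbeat write + console-log queue described above.
    // Key names, TTL and queue length are illustrative assumptions only.
    const Redis = require('ioredis');

    const redis = new Redis(process.env.REDIS_URL || 'redis://127.0.0.1:6379');

    async function writeHeartbeat(deviceId, payload) {
      // Keep only the latest heartbeat per device, with a short TTL.
      await redis.set(`hb:last:${deviceId}`, JSON.stringify(payload), 'EX', 120);
    }

    async function pushConsoleLog(line) {
      // Bounded list acting as a console-log queue (newest first, max 1000 entries).
      await redis.lpush('console:logs', line);
      await redis.ltrim('console:logs', 0, 999);
    }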
2026-01-14 17:58:45 +08:00
parent eb94aaf92b
commit 910f1c353f
28 changed files with 1691 additions and 177 deletions

View File

@@ -13,9 +13,9 @@ CREATE TABLE IF NOT EXISTS heartbeat.heartbeat_events (
   ts_ms bigint NOT NULL,
   hotel_id int2 NOT NULL,
-  room_id int4 NOT NULL,
+  room_id varchar(50) NOT NULL,
   device_id varchar(64) NOT NULL,
-  ip inet NOT NULL,
+  ip varchar(21) NOT NULL,
   power_state int2 NOT NULL,
   guest_type int2 NOT NULL,
   cardless_state int2 NOT NULL,
@@ -23,7 +23,7 @@ CREATE TABLE IF NOT EXISTS heartbeat.heartbeat_events (
   pms_state int2 NOT NULL,
   carbon_state int2 NOT NULL,
   device_count int2 NOT NULL,
-  comm_seq int2 NOT NULL,
+  comm_seq int4 NOT NULL,
   -- Flexible fields: power metrics / air-conditioning etc. (may be split into structured columns later; kept in extra for now)
   extra jsonb,
@@ -33,14 +33,14 @@ CREATE TABLE IF NOT EXISTS heartbeat.heartbeat_events (
   -- CHECK constraints: only non-negative + upper-bound checks for now (so future enum extensions do not break inserts)
   CONSTRAINT chk_ts_ms_positive CHECK (ts_ms > 0),
   CONSTRAINT chk_hotel_id_range CHECK (hotel_id >= 0 AND hotel_id <= 32767),
-  CONSTRAINT chk_room_id_range CHECK (room_id >= 0),
+  CONSTRAINT chk_room_id_len CHECK (char_length(room_id) > 0 AND char_length(room_id) <= 50),
   CONSTRAINT chk_power_state_range CHECK (power_state >= 0 AND power_state <= 32767),
   CONSTRAINT chk_guest_type_range CHECK (guest_type >= 0 AND guest_type <= 32767),
   CONSTRAINT chk_cardless_state_range CHECK (cardless_state >= 0 AND cardless_state <= 32767),
   CONSTRAINT chk_pms_state_range CHECK (pms_state >= 0 AND pms_state <= 32767),
   CONSTRAINT chk_carbon_state_range CHECK (carbon_state >= 0 AND carbon_state <= 32767),
   CONSTRAINT chk_device_count_range CHECK (device_count >= 0 AND device_count <= 32767),
-  CONSTRAINT chk_comm_seq_range CHECK (comm_seq >= 0 AND comm_seq <= 32767)
+  CONSTRAINT chk_comm_seq_range CHECK (comm_seq >= 0)
 )
 PARTITION BY RANGE (ts_ms);
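
The column changes above line up with the new test payload later in this commit: ip now stores an address:port pair such as '192.168.0.1:12345', and varchar(21) is exactly the IPv4 worst case (15 characters of dotted quad, a colon, and a 5-digit port); room_id becomes free-form text with a length check, and comm_seq is widened to int4 so sequence numbers beyond 32767 no longer violate the old upper bound. For a deployment where the table already exists, an in-place migration along these lines would be needed; this is only a sketch (the script later in this commit performs the ip part of it), not part of the repo's scripts:

    // One-off migration sketch for an already-created table (assumption: run as
    // the table owner; constraint and column names match the schema file above).
    const { Client } = require('pg');

    async function migrate() {
      const client = new Client(); // picks up PG* environment variables
      await client.connect();
      await client.query(`ALTER TABLE heartbeat.heartbeat_events
        ALTER COLUMN room_id TYPE varchar(50) USING room_id::text`);
      await client.query(`ALTER TABLE heartbeat.heartbeat_events
        ALTER COLUMN ip TYPE varchar(21) USING ip::text`);
      await client.query(`ALTER TABLE heartbeat.heartbeat_events
        ALTER COLUMN comm_seq TYPE int4`);
      await client.query(`ALTER TABLE heartbeat.heartbeat_events
        DROP CONSTRAINT IF EXISTS chk_room_id_range`);
      await client.query(`ALTER TABLE heartbeat.heartbeat_events
        ADD CONSTRAINT chk_room_id_len
        CHECK (char_length(room_id) > 0 AND char_length(room_id) <= 50)`);
      await client.end();
    }

    migrate().catch((err) => { console.error(err); process.exit(1); });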

View File

@@ -15,10 +15,10 @@ function getEnv(name, fallback) {
 function buildClientConfig(database) {
   const db = config.db;
   return {
-    host: getEnv('PGHOST', db.host),
-    port: Number(getEnv('PGPORT', db.port)),
-    user: getEnv('PGUSER', db.user),
-    password: getEnv('PGPASSWORD', db.password),
+    host: getEnv('POSTGRES_HOST', getEnv('PGHOST', db.host)),
+    port: Number(getEnv('POSTGRES_PORT', getEnv('PGPORT', db.port))),
+    user: getEnv('POSTGRES_USER', getEnv('PGUSER', db.user)),
+    password: getEnv('POSTGRES_PASSWORD', getEnv('PGPASSWORD', db.password)),
     database,
   };
 }
@@ -36,7 +36,7 @@ async function main() {
   const schemaFile = path.join(scriptsDir, '010_heartbeat_schema.sql');
   const partitionFile = path.join(scriptsDir, '020_partitioning_auto_daily.sql');
-  const targetDb = getEnv('PGTARGETDB', config.db.database);
+  const targetDb = getEnv('POSTGRES_DATABASE', getEnv('PGTARGETDB', config.db.database));
   console.log(`[db] Connecting to target db: ${targetDb}`);
   const targetClient = new Client(buildClientConfig(targetDb));
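
Both hunks in this file add a POSTGRES_*-prefixed variable that takes precedence over the older PG*-style one, falling back to config.db last. getEnv itself is not shown in this diff; a plausible reading of the fallback chain, assuming getEnv returns the fallback when the variable is unset or empty (a sketch, the real helper may differ):

    // Hypothetical getEnv matching how it is used above.
    function getEnv(name, fallback) {
      const value = process.env[name];
      return value === undefined || value === '' ? fallback : value;
    }

    // Resolution order for the host given the nested calls above:
    //   POSTGRES_HOST  ->  PGHOST  ->  config.db.host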

View File

@@ -15,6 +15,29 @@ async function main() {
   // Pre-create today's partition so an INSERT does not fail outright when no partition exists
   await client.query('SELECT heartbeat.ensure_partitions(current_date, current_date)');
+  const ipType = await client.query(
+    `
+    SELECT format_type(a.atttypid, a.atttypmod) AS type
+    FROM pg_attribute a
+    JOIN pg_class c ON c.oid = a.attrelid
+    JOIN pg_namespace n ON n.oid = c.relnamespace
+    WHERE n.nspname = 'heartbeat'
+      AND c.relname = 'heartbeat_events'
+      AND a.attname = 'ip'
+      AND a.attnum > 0
+      AND NOT a.attisdropped
+    `
+  );
+  const type = String(ipType?.rows?.[0]?.type ?? '').toLowerCase();
+  if (type.startsWith('inet')) {
+    await client.query(
+      `ALTER TABLE heartbeat.heartbeat_events
+        ALTER COLUMN ip TYPE varchar(21)
+        USING ip::text`
+    );
+  }
   const ts = Date.now();
   await client.query(
     `INSERT INTO heartbeat.heartbeat_events (
@@ -27,7 +50,7 @@ async function main() {
     1,
     101,
     'dev-1',
-    '192.168.0.1',
+    '192.168.0.1:12345',
     1,
     0,
     0,
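
Two things worth noting in this file: the sample row now stores an address:port string, matching the varchar(21) column, and the block added near the top of main() reads the current type of the ip column from pg_attribute and only rewrites it while it is still inet; format_type() yields strings such as 'inet' or 'character varying(21)', hence the startsWith check on the lowercased value. The same lookup could also be expressed against information_schema (a sketch, not part of the script):

    // Equivalent column-type lookup via information_schema; assumes the same
    // connected pg client as in the surrounding script.
    async function readIpColumnType(client) {
      const res = await client.query(
        `SELECT data_type, character_maximum_length
           FROM information_schema.columns
          WHERE table_schema = 'heartbeat'
            AND table_name = 'heartbeat_events'
            AND column_name = 'ip'`
      );
      // data_type is 'inet' before the in-place ALTER and 'character varying'
      // (with character_maximum_length = 21) afterwards.
      return res.rows[0];
    }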
@@ -51,11 +74,33 @@ async function main() {
       ORDER BY c.relname`
   );
+  const parent = await client.query(
+    `
+    SELECT c.relkind AS kind
+    FROM pg_class c
+    JOIN pg_namespace n ON n.oid = c.relnamespace
+    WHERE n.nspname = 'heartbeat'
+      AND c.relname = 'heartbeat_events'
+    `
+  );
+  const parentIndexes = await client.query(
+    `
+    SELECT indexname
+    FROM pg_indexes
+    WHERE schemaname = 'heartbeat'
+      AND tablename = 'heartbeat_events'
+    ORDER BY indexname
+    `
+  );
   const cnt = await client.query(
     'SELECT count(*)::int AS n FROM heartbeat.heartbeat_events'
   );
+  console.log('parentKind:', parent.rows?.[0]?.kind);
   console.log('partitions:', partitions.rows.map((r) => r.partition));
+  console.log('parentIndexes:', parentIndexes.rows.map((r) => r.indexname));
   console.log('rows:', cnt.rows[0].n);
   await client.end();
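
The added queries inspect the partitioned parent directly: in pg_class, relkind is 'p' for a partitioned table, and pg_indexes shows the index definitions attached to the parent (each partition gets its own copies automatically). If these checks were meant to fail the script rather than just print, they could be turned into assertions along these lines (a sketch, not part of the repo's script; variable names match the code above, thresholds are assumptions):

    // Assertion-style version of the printed checks.
    const assert = require('node:assert');

    assert.strictEqual(parent.rows?.[0]?.kind, 'p', 'heartbeat_events should be a partitioned table');
    assert.ok(partitions.rows.length >= 1, 'at least one daily partition should exist');
    assert.ok(cnt.rows[0].n >= 1, 'the test INSERT should be visible');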