feat: remove obsolete test report; add database init script and integration plan for the room status snapshot table

This commit is contained in:
2026-02-10 09:06:37 +08:00
parent 680bf6a957
commit 21cf140c68
5 changed files with 258 additions and 89 deletions

View File

@@ -1,44 +0,0 @@
2026-01-30T16:54:47: Error [ERR_MODULE_NOT_FOUND]: Cannot find package 'node-cron' imported from R:\nodejsROOT\bls\rcu-action\dist\index.js
2026-01-30T16:54:47: at Object.getPackageJSONURL (node:internal/modules/package_json_reader:316:9)
2026-01-30T16:54:47: at packageResolve (node:internal/modules/esm/resolve:768:81)
2026-01-30T16:54:47: at moduleResolve (node:internal/modules/esm/resolve:858:18)
2026-01-30T16:54:47: at defaultResolve (node:internal/modules/esm/resolve:990:11)
2026-01-30T16:54:47: at #cachedDefaultResolve (node:internal/modules/esm/loader:737:20)
2026-01-30T16:54:47: at ModuleLoader.resolve (node:internal/modules/esm/loader:714:38)
2026-01-30T16:54:47: at ModuleLoader.getModuleJobForImport (node:internal/modules/esm/loader:293:38)
2026-01-30T16:54:47: at #link (node:internal/modules/esm/module_job:208:49)
2026-01-30T16:54:47: at process.processTicksAndRejections (node:internal/process/task_queues:103:5) {
2026-01-30T16:54:47: code: 'ERR_MODULE_NOT_FOUND'
2026-01-30T16:54:47: }
2026-01-30T16:56:12: {"level":"error","message":"Error initializing schema and table:","timestamp":1769763372054,"context":{"errno":-4058,"code":"ENOENT","syscall":"open","path":"R:\\nodejsROOT\\bls\\scripts\\init_db.sql"}}
2026-01-30T16:56:12: {"level":"error","message":"Service bootstrap failed","timestamp":1769763372055,"context":{"error":"ENOENT: no such file or directory, open 'R:\\nodejsROOT\\bls\\scripts\\init_db.sql'"}}
2026-01-30T16:56:12: {"level":"error","message":"Error initializing schema and table:","timestamp":1769763372929,"context":{"errno":-4058,"code":"ENOENT","syscall":"open","path":"R:\\nodejsROOT\\bls\\scripts\\init_db.sql"}}
2026-01-30T16:56:12: {"level":"error","message":"Service bootstrap failed","timestamp":1769763372929,"context":{"error":"ENOENT: no such file or directory, open 'R:\\nodejsROOT\\bls\\scripts\\init_db.sql'"}}
2026-01-30T16:56:13: {"level":"error","message":"Error initializing schema and table:","timestamp":1769763373801,"context":{"errno":-4058,"code":"ENOENT","syscall":"open","path":"R:\\nodejsROOT\\bls\\scripts\\init_db.sql"}}
2026-01-30T16:56:13: {"level":"error","message":"Service bootstrap failed","timestamp":1769763373801,"context":{"error":"ENOENT: no such file or directory, open 'R:\\nodejsROOT\\bls\\scripts\\init_db.sql'"}}
2026-01-30T16:56:14: {"level":"error","message":"Error initializing schema and table:","timestamp":1769763374671,"context":{"errno":-4058,"code":"ENOENT","syscall":"open","path":"R:\\nodejsROOT\\bls\\scripts\\init_db.sql"}}
2026-01-30T16:56:14: {"level":"error","message":"Service bootstrap failed","timestamp":1769763374671,"context":{"error":"ENOENT: no such file or directory, open 'R:\\nodejsROOT\\bls\\scripts\\init_db.sql'"}}
2026-01-30T16:56:15: {"level":"error","message":"Error initializing schema and table:","timestamp":1769763375539,"context":{"errno":-4058,"code":"ENOENT","syscall":"open","path":"R:\\nodejsROOT\\bls\\scripts\\init_db.sql"}}
2026-01-30T16:56:15: {"level":"error","message":"Service bootstrap failed","timestamp":1769763375539,"context":{"error":"ENOENT: no such file or directory, open 'R:\\nodejsROOT\\bls\\scripts\\init_db.sql'"}}
2026-01-30T16:56:16: {"level":"error","message":"Error initializing schema and table:","timestamp":1769763376418,"context":{"errno":-4058,"code":"ENOENT","syscall":"open","path":"R:\\nodejsROOT\\bls\\scripts\\init_db.sql"}}
2026-01-30T16:56:16: {"level":"error","message":"Service bootstrap failed","timestamp":1769763376419,"context":{"error":"ENOENT: no such file or directory, open 'R:\\nodejsROOT\\bls\\scripts\\init_db.sql'"}}
2026-01-30T16:56:17: {"level":"error","message":"Error initializing schema and table:","timestamp":1769763377290,"context":{"errno":-4058,"code":"ENOENT","syscall":"open","path":"R:\\nodejsROOT\\bls\\scripts\\init_db.sql"}}
2026-01-30T16:56:17: {"level":"error","message":"Service bootstrap failed","timestamp":1769763377291,"context":{"error":"ENOENT: no such file or directory, open 'R:\\nodejsROOT\\bls\\scripts\\init_db.sql'"}}
2026-01-30T16:56:18: {"level":"error","message":"Error initializing schema and table:","timestamp":1769763378161,"context":{"errno":-4058,"code":"ENOENT","syscall":"open","path":"R:\\nodejsROOT\\bls\\scripts\\init_db.sql"}}
2026-01-30T16:56:18: {"level":"error","message":"Service bootstrap failed","timestamp":1769763378162,"context":{"error":"ENOENT: no such file or directory, open 'R:\\nodejsROOT\\bls\\scripts\\init_db.sql'"}}
2026-01-30T16:56:19: {"level":"error","message":"Error initializing schema and table:","timestamp":1769763379035,"context":{"errno":-4058,"code":"ENOENT","syscall":"open","path":"R:\\nodejsROOT\\bls\\scripts\\init_db.sql"}}
2026-01-30T16:56:19: {"level":"error","message":"Service bootstrap failed","timestamp":1769763379035,"context":{"error":"ENOENT: no such file or directory, open 'R:\\nodejsROOT\\bls\\scripts\\init_db.sql'"}}
2026-01-30T16:56:19: {"level":"error","message":"Error initializing schema and table:","timestamp":1769763379920,"context":{"errno":-4058,"code":"ENOENT","syscall":"open","path":"R:\\nodejsROOT\\bls\\scripts\\init_db.sql"}}
2026-01-30T16:56:19: {"level":"error","message":"Service bootstrap failed","timestamp":1769763379921,"context":{"error":"ENOENT: no such file or directory, open 'R:\\nodejsROOT\\bls\\scripts\\init_db.sql'"}}
2026-01-30T16:56:20: {"level":"error","message":"Error initializing schema and table:","timestamp":1769763380801,"context":{"errno":-4058,"code":"ENOENT","syscall":"open","path":"R:\\nodejsROOT\\bls\\scripts\\init_db.sql"}}
2026-01-30T16:56:20: {"level":"error","message":"Service bootstrap failed","timestamp":1769763380802,"context":{"error":"ENOENT: no such file or directory, open 'R:\\nodejsROOT\\bls\\scripts\\init_db.sql'"}}
2026-01-30T16:56:21: {"level":"error","message":"Error initializing schema and table:","timestamp":1769763381675,"context":{"errno":-4058,"code":"ENOENT","syscall":"open","path":"R:\\nodejsROOT\\bls\\scripts\\init_db.sql"}}
2026-01-30T16:56:21: {"level":"error","message":"Service bootstrap failed","timestamp":1769763381675,"context":{"error":"ENOENT: no such file or directory, open 'R:\\nodejsROOT\\bls\\scripts\\init_db.sql'"}}
2026-01-30T16:56:22: {"level":"error","message":"Error initializing schema and table:","timestamp":1769763382560,"context":{"errno":-4058,"code":"ENOENT","syscall":"open","path":"R:\\nodejsROOT\\bls\\scripts\\init_db.sql"}}
2026-01-30T16:56:22: {"level":"error","message":"Service bootstrap failed","timestamp":1769763382561,"context":{"error":"ENOENT: no such file or directory, open 'R:\\nodejsROOT\\bls\\scripts\\init_db.sql'"}}
2026-01-30T16:56:23: {"level":"error","message":"Error initializing schema and table:","timestamp":1769763383432,"context":{"errno":-4058,"code":"ENOENT","syscall":"open","path":"R:\\nodejsROOT\\bls\\scripts\\init_db.sql"}}
2026-01-30T16:56:23: {"level":"error","message":"Service bootstrap failed","timestamp":1769763383433,"context":{"error":"ENOENT: no such file or directory, open 'R:\\nodejsROOT\\bls\\scripts\\init_db.sql'"}}
2026-01-30T16:56:24: {"level":"error","message":"Error initializing schema and table:","timestamp":1769763384307,"context":{"errno":-4058,"code":"ENOENT","syscall":"open","path":"R:\\nodejsROOT\\bls\\scripts\\init_db.sql"}}
2026-01-30T16:56:24: {"level":"error","message":"Service bootstrap failed","timestamp":1769763384307,"context":{"error":"ENOENT: no such file or directory, open 'R:\\nodejsROOT\\bls\\scripts\\init_db.sql'"}}
2026-01-30T16:56:25: {"level":"error","message":"Error initializing schema and table:","timestamp":1769763385185,"context":{"errno":-4058,"code":"ENOENT","syscall":"open","path":"R:\\nodejsROOT\\bls\\scripts\\init_db.sql"}}
2026-01-30T16:56:25: {"level":"error","message":"Service bootstrap failed","timestamp":1769763385185,"context":{"error":"ENOENT: no such file or directory, open 'R:\\nodejsROOT\\bls\\scripts\\init_db.sql'"}}

View File

@@ -92,3 +92,5 @@ ACK (to be completed)
- For example: `dev_type=35` (name: TimeCtrl), `addr=14`, `loop=21` -> `[35TimeCtrl-14-21]`
- Finally, write the found (or generated) `loop_name` into the `rcu_action_events` table.
- Note: all database lookups must go through the in-memory cache.
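
As a rough illustration of the fallback naming described above (a sketch only; the helper name and the cache shape are assumptions, not the actual implementation):

```js
// Look up loop_name in the in-memory cache first; otherwise build the
// fallback form [<dev_type><TypeName>-<addr>-<loop>], e.g. [35TimeCtrl-14-21].
function resolveLoopName(cache, devType, devTypeName, addr, loop) {
  const cached = cache.get(`${devType}:${addr}:${loop}`); // hypothetical cache key
  if (cached) return cached;
  return `[${devType}${devTypeName}-${addr}-${loop}]`;
}
```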

docs/room_status_moment.sql (new file, +141 lines)
View File

@@ -0,0 +1,141 @@
-- ============================================================================
-- Database initialization script
-- Description: create the log_platform database (for reference), the room_status schema, and the room_status_moment partitioned table
-- Corresponds to project requirement: project.md #L57-65
-- ============================================================================
-- Note: in PostgreSQL, CREATE DATABASE cannot be executed inside a transaction block.
-- It is usually best to create the database manually first, then run the rest of this script.
-- CREATE DATABASE log_platform;
-- Run the following after switching to the log_platform database:
-- 1. Create the schema
CREATE SCHEMA IF NOT EXISTS room_status;
-- 2. Create the main table (declarative partitioning)
-- Per requirements L57-65, the table is LIST-partitioned by hotel_id to allow scaling beyond 100k rows later
CREATE TABLE IF NOT EXISTS room_status.room_status_moment (
    -- Basic identity fields
    guid UUID NOT NULL,
    ts_ms INT8 NOT NULL DEFAULT (EXTRACT(EPOCH FROM CURRENT_TIMESTAMP) * 1000)::BIGINT,
    hotel_id INT2 NOT NULL,
    room_id TEXT NOT NULL,
    device_id TEXT NOT NULL,
    -- Device status fields
    sys_lock_status INT2,
    online_status INT2,
    launcher_version TEXT,
    app_version TEXT,
    config_version TEXT,
    register_ts_ms INT8,
    upgrade_ts_ms INT8,
    config_ts_ms INT8,
    ip TEXT,
    -- Room business status fields
    pms_status INT2,
    power_state INT2,
    cardless_state INT2,
    service_mask INT8,
    insert_card INT2,
    bright_g INT2,
    agreement_ver TEXT,
    -- Air conditioning
    air_address TEXT[],
    air_state INT2[],
    air_model INT2[],
    air_speed INT2[],
    air_set_temp INT2[],
    air_now_temp INT2[],
    air_solenoid_valve INT2[],
    -- Energy metering
    elec_address TEXT[],
    elec_voltage DOUBLE PRECISION[],
    elec_ampere DOUBLE PRECISION[],
    elec_power DOUBLE PRECISION[],
    elec_phase DOUBLE PRECISION[],
    elec_energy DOUBLE PRECISION[],
    elec_sum_energy DOUBLE PRECISION[],
    -- Energy saving and peripherals
    carbon_state INT2,
    dev_loops JSONB,
    energy_carbon_sum DOUBLE PRECISION,
    energy_nocard_sum DOUBLE PRECISION,
    external_device JSONB DEFAULT '{}',
    faulty_device_count JSONB DEFAULT '{}',
    -- Constraint: the primary key of a partitioned table must include the partition key (hotel_id)
    PRIMARY KEY (hotel_id, room_id, device_id, guid)
) PARTITION BY LIST (hotel_id);
-- 3. Create indexes (on frequently queried fields)
-- Note: an index created on the partitioned table is automatically created on every partition
CREATE INDEX IF NOT EXISTS idx_room_status_moment_hotel_room ON room_status.room_status_moment (hotel_id, room_id);
CREATE INDEX IF NOT EXISTS idx_room_status_moment_device_id ON room_status.room_status_moment (device_id);
CREATE INDEX IF NOT EXISTS idx_room_status_moment_sys_lock ON room_status.room_status_moment (sys_lock_status);
CREATE INDEX IF NOT EXISTS idx_room_status_moment_online ON room_status.room_status_moment (online_status);
CREATE INDEX IF NOT EXISTS idx_room_status_moment_pms ON room_status.room_status_moment (pms_status);
CREATE INDEX IF NOT EXISTS idx_room_status_moment_power ON room_status.room_status_moment (power_state);
CREATE INDEX IF NOT EXISTS idx_room_status_moment_cardless ON room_status.room_status_moment (cardless_state);
CREATE INDEX IF NOT EXISTS idx_room_status_moment_insert_card ON room_status.room_status_moment (insert_card);
CREATE INDEX IF NOT EXISTS idx_room_status_moment_carbon ON room_status.room_status_moment (carbon_state);
-- 3.1 Unique index (to support UPSERT)
-- A unique constraint on (hotel_id, room_id, device_id) is required before ON CONFLICT can be used
CREATE UNIQUE INDEX IF NOT EXISTS idx_room_status_unique_device
    ON room_status.room_status_moment (hotel_id, room_id, device_id);
-- 4. Example: create the first partition (hotel_id = 1)
-- In production, partitions can be created dynamically per hotel_id
CREATE TABLE IF NOT EXISTS room_status.room_status_moment_h1
    PARTITION OF room_status.room_status_moment
    FOR VALUES IN (1);
-- 5. Table and column comments
COMMENT ON TABLE room_status.room_status_moment IS 'Room real-time status table - stores the latest real-time status of devices and room business state';
COMMENT ON COLUMN room_status.room_status_moment.guid IS 'Primary key guid (UUID, 32 hex characters)';
COMMENT ON COLUMN room_status.room_status_moment.ts_ms IS 'Last update time';
COMMENT ON COLUMN room_status.room_status_moment.hotel_id IS 'Hotel ID';
COMMENT ON COLUMN room_status.room_status_moment.room_id IS 'Room ID';
COMMENT ON COLUMN room_status.room_status_moment.device_id IS 'Device ID';
COMMENT ON COLUMN room_status.room_status_moment.sys_lock_status IS 'System lock status';
COMMENT ON COLUMN room_status.room_status_moment.online_status IS 'Device online status';
COMMENT ON COLUMN room_status.room_status_moment.launcher_version IS 'Device launcher version';
COMMENT ON COLUMN room_status.room_status_moment.app_version IS 'Device app version';
COMMENT ON COLUMN room_status.room_status_moment.config_version IS 'Device config version';
COMMENT ON COLUMN room_status.room_status_moment.register_ts_ms IS 'Last registration time';
COMMENT ON COLUMN room_status.room_status_moment.upgrade_ts_ms IS 'Last upgrade time';
COMMENT ON COLUMN room_status.room_status_moment.config_ts_ms IS 'Last config push time';
COMMENT ON COLUMN room_status.room_status_moment.ip IS 'Current public IP address';
COMMENT ON COLUMN room_status.room_status_moment.pms_status IS 'PMS status';
COMMENT ON COLUMN room_status.room_status_moment.power_state IS 'Power (card power) state';
COMMENT ON COLUMN room_status.room_status_moment.cardless_state IS 'Occupied/unoccupied state';
COMMENT ON COLUMN room_status.room_status_moment.service_mask IS 'Service status mask';
COMMENT ON COLUMN room_status.room_status_moment.insert_card IS 'Card-inserted state';
COMMENT ON COLUMN room_status.room_status_moment.air_address IS 'AC addresses';
COMMENT ON COLUMN room_status.room_status_moment.air_state IS 'AC states';
COMMENT ON COLUMN room_status.room_status_moment.air_model IS 'AC modes';
COMMENT ON COLUMN room_status.room_status_moment.air_speed IS 'AC fan speeds';
COMMENT ON COLUMN room_status.room_status_moment.air_set_temp IS 'AC set temperatures';
COMMENT ON COLUMN room_status.room_status_moment.air_now_temp IS 'Current room temperatures';
COMMENT ON COLUMN room_status.room_status_moment.air_solenoid_valve IS 'AC solenoid valve states';
COMMENT ON COLUMN room_status.room_status_moment.elec_address IS 'Energy meter addresses';
COMMENT ON COLUMN room_status.room_status_moment.elec_voltage IS 'Energy meter voltage';
COMMENT ON COLUMN room_status.room_status_moment.elec_ampere IS 'Energy meter current';
COMMENT ON COLUMN room_status.room_status_moment.elec_power IS 'Energy meter power';
COMMENT ON COLUMN room_status.room_status_moment.elec_phase IS 'Current phase';
COMMENT ON COLUMN room_status.room_status_moment.elec_energy IS 'Energy meter consumption';
COMMENT ON COLUMN room_status.room_status_moment.elec_sum_energy IS 'Energy meter cumulative consumption';
COMMENT ON COLUMN room_status.room_status_moment.carbon_state IS 'Carbon-saving (eco) mode state';
COMMENT ON COLUMN room_status.room_status_moment.bright_g IS 'Brightness value';
COMMENT ON COLUMN room_status.room_status_moment.agreement_ver IS 'Protocol version';
COMMENT ON COLUMN room_status.room_status_moment.dev_loops IS 'Loop states';
COMMENT ON COLUMN room_status.room_status_moment.energy_carbon_sum IS 'Cumulative energy saved by carbon-saving mode';
COMMENT ON COLUMN room_status.room_status_moment.energy_nocard_sum IS 'Cumulative energy saved while unoccupied (no card)';
COMMENT ON COLUMN room_status.room_status_moment.external_device IS 'Peripheral device management (array)';
COMMENT ON COLUMN room_status.room_status_moment.faulty_device_count IS 'Faulty device counts';

View File

@@ -1,45 +0,0 @@
# Test Report
## Basic Information
- Run date: 2026-01-29
- How it was run: started from the console with `npm run dev`, left running for about 60 seconds, then terminated with Ctrl + C
- Test goal: verify the Kafka consumption and database-insert pipeline and locate why no data was being written
## Key Console Logs
```
{"level":"error","message":"Message processing failed","timestamp":1769734880590,"context":{"error":"[\n {\n \"expected\": \"number\",\n \"code\": \"invalid_type\",\n \"path\": [\n \"hotel_id\"\n ],\n \"message\": \"Invalid input: expected number, received string\"\n }\n]","type":"PARSE_ERROR","stack":"ZodError: ...","rawPayload":"{\"ts_ms\":1769692878011,\"hotel_id\":\"2147\",\"room_id\":\"8209\",\"device_id\":\"099008129081\",\"direction\":\"上报\",\"cmd_word\":\"36\",\"frame_id\":52496,...}","validationIssues":[{"expected":"number","code":"invalid_type","path":["hotel_id"],"message":"Invalid input: expected number, received string"}]}}
```
## Conclusion
- Direct cause of the missing DB writes: the Kafka message failed Zod validation at the parse stage. `hotel_id` arrived as a string instead of the Number required by the docs, raising `PARSE_ERROR`, so the database insert path never ran.
## Consistency Check Against the Documented Format
Compared against [kafka_format.md](file:///e:/Project_Class/BLS/Web_BLS_RCUAction_Server/docs/kafka_format.md):
- `hotel_id`: the doc requires Number, but the observed value is a string (e.g. `"2147"`); inconsistent.
- `cmd_word`: the doc requires `"0x36"`/`"0x0F"`, but `"36"` was observed; inconsistent.
- `control_list`: the doc requires Array/optional, but `null` was observed; inconsistent.
- Other key fields such as `ts_ms`, `room_id`, `device_id`, `direction`, and `udp_raw` are all present.
## Enhanced Console Error Output
To make troubleshooting easier, the following modules now write detailed error output to the PowerShell console:
- Kafka processing errors: output `type`, `stack`, `rawPayload`, `validationIssues`, `dbContext`
- Database insert errors: output `schema`, `table`, `rowsLength`
- Redis enqueue and retry errors: output detailed error information
Changed files:
- [index.js](file:///e:/Project_Class/BLS/Web_BLS_RCUAction_Server/bls-rcu-action-backend/src/index.js)
- [databaseManager.js](file:///e:/Project_Class/BLS/Web_BLS_RCUAction_Server/bls-rcu-action-backend/src/db/databaseManager.js)
- [errorQueue.js](file:///e:/Project_Class/BLS/Web_BLS_RCUAction_Server/bls-rcu-action-backend/src/redis/errorQueue.js)
## Suggested Fixes
The following are optional ways to resolve the missing-DB-writes issue; whether to apply them is up to you (see the Zod sketch after this list):
1. Make the upstream output strictly follow the doc:
- change `hotel_id` to a Number
- change `cmd_word` to `"0x36"` / `"0x0F"`
- send `control_list` as `[]` or omit the field instead of sending `null`
2. Relax downstream validation and coerce types:
- accept `hotel_id` as a string and convert it to a Number
- keep accepting the `cmd_word = "36"` form
- accept `null` for `control_list`/`device_list`/`fault_list` and convert it to an empty array
The current code already tolerates `cmd_word="36"` and `control_list=null`, but `hotel_id` is still strictly required to be a Number per the doc.
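
A minimal sketch of option 2 above, assuming the project's Zod schema could be adjusted roughly like this (field list abridged; names follow the observed payload):

```js
import { z } from "zod";

// Coerce/normalize at the boundary instead of rejecting the message outright.
const kafkaPayloadSchema = z.object({
  ts_ms: z.number(),
  hotel_id: z.coerce.number().int(),                                          // accepts "2147" as well as 2147
  room_id: z.string(),
  device_id: z.string(),
  cmd_word: z.string().transform((s) => (s.startsWith("0x") ? s : `0x${s}`)), // "36" -> "0x36"
  control_list: z.array(z.unknown()).nullish().transform((v) => v ?? []),     // null/missing -> []
});
```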

View File

@@ -0,0 +1,115 @@
# Room Status Moment Integration Plan
## 1. Background
We need to integrate a new database table, `room_status.room_status_moment` (a snapshot table), into the existing Kafka processing pipeline.
The table stores the latest status of each room/device.
The existing logic (batch inserts into `rcu_action_events`) must remain unchanged.
## 2. Database Configuration
The new table lives in a separate database (possibly `log_platform`, or a new `room_status` schema inside the existing database).
We will add support for a dedicated `ROOM_STATUS` connection pool to keep this flexible.
**Environment variables:**
```env
# Existing database configuration
DB_HOST=...
...
# New Room Status database configuration (if not provided, fall back to the existing database, but still use a dedicated pool)
ROOM_STATUS_DB_HOST=...
ROOM_STATUS_DB_PORT=...
ROOM_STATUS_DB_USER=...
ROOM_STATUS_DB_PASSWORD=...
ROOM_STATUS_DB_DATABASE=log_platform   # <-- target database name used in the SQL script
ROOM_STATUS_DB_SCHEMA=room_status
```
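
A minimal sketch of the fallback behaviour, assuming the entry is added to `src/config/config.js` as the `roomStatusDb` item proposed in section 5; the exact shape of the existing config module is an assumption:

```js
// Added to src/config/config.js (sketch). ROOM_STATUS_* variables override the
// base DB_* settings; when absent, we reuse the existing database coordinates
// but still create a dedicated pool for room_status (see roomStatusManager).
export const roomStatusDb = {
  host: process.env.ROOM_STATUS_DB_HOST ?? process.env.DB_HOST,
  port: Number(process.env.ROOM_STATUS_DB_PORT ?? process.env.DB_PORT ?? 5432),
  user: process.env.ROOM_STATUS_DB_USER ?? process.env.DB_USER,
  password: process.env.ROOM_STATUS_DB_PASSWORD ?? process.env.DB_PASSWORD,
  database: process.env.ROOM_STATUS_DB_DATABASE ?? process.env.DB_DATABASE,
  schema: process.env.ROOM_STATUS_DB_SCHEMA ?? 'room_status',
};
```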
## 3. Field Mapping Strategy
Target table: `room_status.room_status_moment`
Unique key: `(hotel_id, room_id, device_id)`
| Source field (Kafka) | Target field | Update logic |
| :--- | :--- | :--- |
| `hotel_id` | `hotel_id` | Primary/index key |
| `room_id` | `room_id` | Primary/index key |
| `device_id` | `device_id` | Primary/index key |
| `ts_ms` | `ts_ms` | Always updated to the latest value |
| `sys_lock_status` | `sys_lock_status` | Direct mapping (when present) |
| `device_list` (0x36) <br> `control_list` (0x0F) | `dev_loops` (JSONB) | **Merge strategy** (see the sketch below the table): <br> Key: `001002003` (Type(3)+Addr(3)+Loop(3)) <br> Value: `dev_data` (int) <br> Operation: `old_json || new_json` (new values merged over old) |
| `fault_list` (0x36) | `faulty_device_count` (JSONB) | **Replace strategy**: <br> 0x36 reports the complete fault list, so we overwrite the field. <br> Content: a list of `{dev_type, dev_addr, dev_loop, error_type, error_data}` |
| `fault_list` -> item `error_type=1` | `online_status` | If `error_data=1` -> offline (0) <br> If `error_data=0` -> online (1) <br> *The exact mapping convention still needs to be verified* |
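
A sketch of building the `dev_loops` JSON object from a `device_list`/`control_list` batch using the key format above; the item field names follow the fault/control item shape in this table, and the helper name is illustrative:

```js
// Build { "TypeAddrLoop": dev_data, ... } from a list of loop items,
// using the 9-digit key Type(3)+Addr(3)+Loop(3), e.g. (35, 14, 21) -> "035014021".
function buildDevLoops(items) {
  const pad3 = (n) => String(n).padStart(3, '0');
  const loops = {};
  for (const it of items ?? []) {
    const key = pad3(it.dev_type) + pad3(it.dev_addr) + pad3(it.dev_loop);
    loops[key] = it.dev_data; // later items win, matching the old_json || new_json merge
  }
  return loops;
}
```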
**Note on online status**:
The doc describes it as: "0x01: 0 = online, 1 = offline".
The `online_status` column is INT2.
Convention: typically 1 = online, 0 = offline.
Logic (see the sketch below):
- fault type 0x01 with data 0 (online) -> set `online_status` = 1
- fault type 0x01 with data 1 (offline) -> set `online_status` = 0
- otherwise -> do not update `online_status`
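
A sketch of that derivation; the helper name is illustrative, not existing code:

```js
// Map a fault_list item to online_status; returns null for anything other than
// the 0x01 online/offline fault so the column is left untouched by the upsert.
function onlineStatusFromFault(item) {
  if (item.error_type !== 1) return null;
  return item.error_data === 0 ? 1 : 0; // doc: 0 = online, 1 = offline; column: 1 = online
}
```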
## 4. Upsert Logic (PostgreSQL)
We will use the `INSERT ... ON CONFLICT DO UPDATE` syntax.
```sql
INSERT INTO room_status.room_status_moment (
    guid, ts_ms, hotel_id, room_id, device_id,
    sys_lock_status, online_status,
    dev_loops, faulty_device_count
) VALUES (
    $guid, $ts_ms, $hotel_id, $room_id, $device_id,
    $sys_lock_status, $online_status,
    $dev_loops::jsonb, $faulty_device_count::jsonb
)
ON CONFLICT (hotel_id, room_id, device_id)
DO UPDATE SET
    ts_ms = EXCLUDED.ts_ms,
    -- only update sys_lock_status when the new value is not NULL
    sys_lock_status = COALESCE(EXCLUDED.sys_lock_status, room_status.room_status_moment.sys_lock_status),
    -- only update online_status when the new value is not NULL
    online_status = COALESCE(EXCLUDED.online_status, room_status.room_status_moment.online_status),
    -- merge dev_loops
    dev_loops = CASE
        WHEN EXCLUDED.dev_loops IS NULL THEN room_status.room_status_moment.dev_loops
        ELSE COALESCE(room_status.room_status_moment.dev_loops, '{}'::jsonb) || EXCLUDED.dev_loops
    END,
    -- replace faulty_device_count when present
    faulty_device_count = COALESCE(EXCLUDED.faulty_device_count, room_status.room_status_moment.faulty_device_count)
WHERE
    -- optional optimization: only write when the timestamp is not older.
    -- Note: for JSON merges it is hard to detect equality without computing it,
    -- so we rely on the change in ts_ms to indicate data freshness.
    EXCLUDED.ts_ms >= room_status.room_status_moment.ts_ms
;
```
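
For reference, a hedged sketch of issuing this statement from Node with the `pg` driver (assumed to be the driver in use), with the named pseudo-placeholders replaced by positional `$1..$9` parameters:

```js
import { randomUUID } from 'node:crypto';

// `pool` is a pg.Pool connected to the room_status database; `row` is a StatusRow.
export async function upsertRoomStatus(pool, row) {
  const sql = `
    INSERT INTO room_status.room_status_moment (
      guid, ts_ms, hotel_id, room_id, device_id,
      sys_lock_status, online_status, dev_loops, faulty_device_count
    ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8::jsonb, $9::jsonb)
    ON CONFLICT (hotel_id, room_id, device_id) DO UPDATE SET
      ts_ms = EXCLUDED.ts_ms,
      sys_lock_status = COALESCE(EXCLUDED.sys_lock_status, room_status.room_status_moment.sys_lock_status),
      online_status = COALESCE(EXCLUDED.online_status, room_status.room_status_moment.online_status),
      dev_loops = CASE
        WHEN EXCLUDED.dev_loops IS NULL THEN room_status.room_status_moment.dev_loops
        ELSE COALESCE(room_status.room_status_moment.dev_loops, '{}'::jsonb) || EXCLUDED.dev_loops
      END,
      faulty_device_count = COALESCE(EXCLUDED.faulty_device_count, room_status.room_status_moment.faulty_device_count)
    WHERE EXCLUDED.ts_ms >= room_status.room_status_moment.ts_ms`;
  await pool.query(sql, [
    randomUUID(), row.ts_ms, row.hotel_id, row.room_id, row.device_id,
    row.sys_lock_status ?? null, row.online_status ?? null,
    row.dev_loops ? JSON.stringify(row.dev_loops) : null,
    row.faulty_device_count ? JSON.stringify(row.faulty_device_count) : null,
  ]);
}
```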
## 5. Architecture Changes
1. **`src/config/config.js`**: add a `roomStatusDb` config entry.
2. **`src/db/roomStatusManager.js`**: new singleton class that manages the connection pool for `log_platform` (see the sketch after this list).
3. **`src/db/statusBatchProcessor.js`**: dedicated batch processor for `room_status_moment`.
    * **Reason**: the upsert logic is complex and differs from the append-only log pattern used for `rcu_action_events`.
    * It needs to aggregate updates per device within a batch to reduce database load (deduplication).
4. **`src/processor/statusExtractor.js`**: helper that converts a `KafkaPayload` into a `StatusRow` data structure.
5. **`src/index.js`**: wire in the new processor logic.
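
A sketch of the singleton manager in item 2, assuming the `pg` driver and the `roomStatusDb` config entry from section 2:

```js
// src/db/roomStatusManager.js (sketch)
import pg from 'pg';
import { roomStatusDb } from '../config/config.js';

let pool = null;

// Lazily create one dedicated pool for the room_status database and reuse it.
export function getRoomStatusPool() {
  if (!pool) {
    pool = new pg.Pool({
      host: roomStatusDb.host,
      port: roomStatusDb.port,
      user: roomStatusDb.user,
      password: roomStatusDb.password,
      database: roomStatusDb.database,
    });
  }
  return pool;
}

// Graceful shutdown helper.
export async function closeRoomStatusPool() {
  if (pool) {
    await pool.end();
    pool = null;
  }
}
```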
## 6. Deduplication Strategy (In-Memory Batch Aggregation)
Since `room_status_moment` is a snapshot table, if we receive 10 updates for the same device within 1 second:
- We only need to write the **last** state (or the merged state).
- `StatusBatchProcessor` should maintain a map: `Map<Key(hotel,room,device), LatestData>`
- On flush, the map values are converted into batched upsert operations.
- **Constraint**: `dev_loops` updates that target different loops may need to be merged cumulatively.
- **Optimization** (see the sketch after this list):
    - If `dev_loops` is a partial update, we cannot simply take the last message.
    - However, within a short batching window (e.g. 500 ms) we can merge the updates in memory before sending them to the database.
    - Structure: `Map<Key, MergedState>`
    - Logic: `MergedState.dev_loops = Object.assign({}, old.dev_loops, new.dev_loops)`
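
A sketch of the merge-then-flush behaviour; the class shape is illustrative, not the final `StatusBatchProcessor` API:

```js
// Aggregates snapshot updates per (hotel, room, device) inside a short window,
// merging dev_loops keys, then hands the merged rows to a flush callback.
export class StatusBatchProcessor {
  constructor(flushFn, windowMs = 500) {
    this.flushFn = flushFn;      // e.g. rows => Promise resolving after the batched upserts
    this.windowMs = windowMs;
    this.pending = new Map();    // Map<"hotel|room|device", mergedState>
    this.timer = null;
  }

  add(update) {
    const key = `${update.hotel_id}|${update.room_id}|${update.device_id}`;
    const prev = this.pending.get(key) ?? {};
    this.pending.set(key, {
      ...prev,
      ...update,
      // dev_loops may be a partial update: merge loop keys instead of replacing the object
      dev_loops: { ...(prev.dev_loops ?? {}), ...(update.dev_loops ?? {}) },
    });
    if (!this.timer) this.timer = setTimeout(() => this.flush(), this.windowMs);
  }

  async flush() {
    clearTimeout(this.timer);
    this.timer = null;
    const rows = [...this.pending.values()];
    this.pending.clear();
    if (rows.length > 0) await this.flushFn(rows);
  }
}
```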
## 7. Execution Plan
1. Add the config entry and the DB manager.
2. Implement `StatusExtractor` (convert the Kafka payload into snapshot data).
3. Implement `StatusBatchProcessor` (with the in-memory merge logic).
4. Update `processKafkaMessage` so it returns both `LogRows` (existing) and `StatusUpdate` (new); see the sketch after this list.
5. Handle the dispatch in the main loop.
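
A sketch of steps 4-5; `extractLogRows` stands in for the existing extraction logic and `extractStatusUpdate` for the planned `StatusExtractor`, both hypothetical names:

```js
import { extractStatusUpdate } from './processor/statusExtractor.js'; // planned new helper (hypothetical)
import { extractLogRows } from './processor/logExtractor.js';         // placeholder for the existing logic

// Step 4: return both outputs from a single parse of the Kafka payload.
export function processKafkaMessage(payload) {
  return {
    logRows: extractLogRows(payload),           // existing rcu_action_events rows (unchanged path)
    statusUpdate: extractStatusUpdate(payload), // room_status_moment snapshot, or null if not applicable
  };
}

// Step 5: dispatch in the consumer loop (sketch).
// const { logRows, statusUpdate } = processKafkaMessage(payload);
// logBatchProcessor.add(logRows);
// if (statusUpdate) statusBatchProcessor.add(statusUpdate);
```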