feat: 升级心跳数据库为高吞吐日分区模型(v2)

- 新增 heartbeat 数据库与表结构文档,描述心跳明细表设计及字段约束。
- 新增 OpenSpec 符合性说明文档,指出与规范的一致点及偏差。
- 新增 Kafka 心跳数据推送说明文档,定义消息格式与推送方式。
- 更新数据库创建脚本,支持 UTF-8 编码与中文排序规则。
- 更新心跳表结构脚本,定义主表及索引,采用 ts_ms 日分区。
- 实现自动分区机制,确保按天创建分区以支持高吞吐写入。
- 添加数据库应用脚本,自动执行 SQL 文件并验证表结构。
- 添加运行时烟雾测试脚本,验证数据库连接与基本操作。
- 添加完整的烟雾测试脚本,验证数据插入与分区创建。
This commit is contained in:
2026-01-12 19:53:27 +08:00
parent adc3bfd87d
commit eb94aaf92b
18 changed files with 978 additions and 11 deletions

View File

@@ -0,0 +1,21 @@
-- 001_create_database.sql
-- NOTE: the current project convention is to NOT create a new database; the
-- heartbeat tables live inside the existing database (log_platform by default).
-- This file is therefore kept only as a reference for a future standalone
-- database, and the apply script does not depend on it.
-- Recommended approach (common on Linux),
-- if the server has the zh_CN.utf8 locale installed:
-- CREATE DATABASE heartbeat
--     WITH ENCODING 'UTF8'
--     LC_COLLATE 'zh_CN.utf8'
--     LC_CTYPE 'zh_CN.utf8'
--     TEMPLATE template0;
-- ICU approach (PostgreSQL 15+ with ICU enabled; adjust the locale to what the
-- server actually provides):
-- CREATE DATABASE heartbeat
--     WITH ENCODING 'UTF8'
--     LOCALE_PROVIDER icu
--     ICU_LOCALE 'zh-Hans-CN'
--     TEMPLATE template0;
-- Fallback (no Chinese locale available): create a plain UTF8 database first,
-- then use ICU / custom collations on the columns that need Chinese ordering:
-- CREATE DATABASE heartbeat WITH ENCODING 'UTF8';

View File

@@ -0,0 +1,61 @@
-- 010_heartbeat_schema.sql
-- Run inside the heartbeat database.
-- Creates the heartbeat schema and the partitioned heartbeat_events table
-- together with its parent-level indexes.
BEGIN;
CREATE SCHEMA IF NOT EXISTS heartbeat;
-- Main table, RANGE-partitioned by ts_ms (epoch milliseconds; one partition per day).
-- NOTE: in PostgreSQL the PRIMARY KEY of a partitioned table must include the
-- partition key, hence the composite key (ts_ms, id) — this keeps the DDL
-- both creatable and executable.
CREATE TABLE IF NOT EXISTS heartbeat.heartbeat_events (
    id bigserial,
    ts_ms bigint NOT NULL,
    hotel_id int2 NOT NULL,
    room_id int4 NOT NULL,
    device_id varchar(64) NOT NULL,
    ip inet NOT NULL,
    power_state int2 NOT NULL,
    guest_type int2 NOT NULL,
    cardless_state int2 NOT NULL,
    service_mask bigint NOT NULL,
    pms_state int2 NOT NULL,
    carbon_state int2 NOT NULL,
    device_count int2 NOT NULL,
    comm_seq int2 NOT NULL,
    -- Flexible payload (power metrics / HVAC state, etc.); kept in `extra`
    -- for now, may be split into typed columns later.
    extra jsonb,
    CONSTRAINT heartbeat_events_pk PRIMARY KEY (ts_ms, id),
    -- CHECK constraints: only "non-negative + upper bound" for now, so that
    -- future enum-value extensions do not make inserts fail.
    CONSTRAINT chk_ts_ms_positive CHECK (ts_ms > 0),
    CONSTRAINT chk_hotel_id_range CHECK (hotel_id >= 0 AND hotel_id <= 32767),
    CONSTRAINT chk_room_id_range CHECK (room_id >= 0),
    CONSTRAINT chk_power_state_range CHECK (power_state >= 0 AND power_state <= 32767),
    CONSTRAINT chk_guest_type_range CHECK (guest_type >= 0 AND guest_type <= 32767),
    CONSTRAINT chk_cardless_state_range CHECK (cardless_state >= 0 AND cardless_state <= 32767),
    CONSTRAINT chk_pms_state_range CHECK (pms_state >= 0 AND pms_state <= 32767),
    CONSTRAINT chk_carbon_state_range CHECK (carbon_state >= 0 AND carbon_state <= 32767),
    CONSTRAINT chk_device_count_range CHECK (device_count >= 0 AND device_count <= 32767),
    CONSTRAINT chk_comm_seq_range CHECK (comm_seq >= 0 AND comm_seq <= 32767)
)
PARTITION BY RANGE (ts_ms);
-- Required indexes, created on the partitioned parent; PostgreSQL propagates
-- them automatically to every existing and future partition.
CREATE INDEX IF NOT EXISTS idx_heartbeat_events_hotel_id ON heartbeat.heartbeat_events (hotel_id);
CREATE INDEX IF NOT EXISTS idx_heartbeat_events_power_state ON heartbeat.heartbeat_events (power_state);
CREATE INDEX IF NOT EXISTS idx_heartbeat_events_guest_type ON heartbeat.heartbeat_events (guest_type);
CREATE INDEX IF NOT EXISTS idx_heartbeat_events_device_id ON heartbeat.heartbeat_events (device_id);
-- Requirement: BRIN index on service_mask.
-- NOTE(review): BRIN pays off on columns physically correlated with insertion
-- order; if service_mask lacks that correlation the benefit may be limited.
CREATE INDEX IF NOT EXISTS idx_heartbeat_events_service_mask_brin ON heartbeat.heartbeat_events USING BRIN (service_mask);
-- High-value extra index (not in the mandatory list): the common query pattern
-- is hotel_id + time range. Comment out if the extra write cost is unwanted.
CREATE INDEX IF NOT EXISTS idx_heartbeat_events_hotel_ts ON heartbeat.heartbeat_events (hotel_id, ts_ms);
COMMIT;

View File

@@ -0,0 +1,91 @@
-- 020_partitioning_auto_daily.sql
-- Run inside the heartbeat database.
-- Goal: daily RANGE partitions on ts_ms (epoch ms) plus helper functions that
-- pre-create partitions (invoked at install time and from a scheduled job).
BEGIN;
-- Drop the DEFAULT partition left over from the previous design, if present.
-- The current design pre-creates daily partitions and deliberately has no
-- DEFAULT catch-all, so stray rows cannot bypass per-day management.
-- (DROP TABLE IF EXISTS replaces the old DO / to_regclass probe — same effect,
-- no dynamic SQL.)
DROP TABLE IF EXISTS heartbeat.heartbeat_events_default;
-- Convert a calendar date to the epoch-millisecond timestamp of that day's
-- 00:00 in the Asia/Shanghai time zone.
CREATE OR REPLACE FUNCTION heartbeat.day_start_ms_shanghai(p_day date)
RETURNS bigint
LANGUAGE sql
IMMUTABLE
AS $$
    -- timezone(zone, ts) is the function form of "ts AT TIME ZONE zone":
    -- interpret local midnight as Shanghai time, then take the Unix epoch.
    SELECT (EXTRACT(EPOCH FROM timezone('Asia/Shanghai', p_day::timestamp)) * 1000)::bigint;
$$;
-- Canonical partition name for a given day: heartbeat_events_YYYYMMDD.
CREATE OR REPLACE FUNCTION heartbeat.partition_name_for_day(p_day date)
RETURNS text
LANGUAGE sql
IMMUTABLE
AS $$
    SELECT 'heartbeat_events_' || to_char(p_day, 'YYYYMMDD');
$$;
-- Create the partition for a single day (idempotent).
--
-- Indexes are intentionally NOT created here: the indexes defined on the
-- partitioned parent in 010_heartbeat_schema.sql are propagated automatically
-- by PostgreSQL to every newly created partition. The previous version also
-- created per-partition copies (idx_<part>_*), which duplicated each index
-- and doubled the write amplification on this high-throughput table.
CREATE OR REPLACE FUNCTION heartbeat.create_daily_partition(p_day date)
RETURNS void
LANGUAGE plpgsql
AS $$
DECLARE
    start_ms bigint;  -- inclusive lower bound: Shanghai 00:00 of p_day, epoch ms
    end_ms bigint;    -- exclusive upper bound: start of the next day
    part_name text;
BEGIN
    start_ms := heartbeat.day_start_ms_shanghai(p_day);
    end_ms := start_ms + 86400000;  -- 24h in milliseconds
    part_name := heartbeat.partition_name_for_day(p_day);
    -- IF NOT EXISTS makes this safe to re-run and removes the
    -- check-then-create race of a separate to_regclass() probe.
    EXECUTE format(
        'CREATE TABLE IF NOT EXISTS heartbeat.%I PARTITION OF heartbeat.heartbeat_events FOR VALUES FROM (%s) TO (%s);',
        part_name, start_ms, end_ms
    );
END;
$$;
-- Ensure a daily partition exists for every day in [p_start_day, p_end_day]
-- (both endpoints included). Raises when the range is reversed.
CREATE OR REPLACE FUNCTION heartbeat.ensure_partitions(p_start_day date, p_end_day date)
RETURNS void
LANGUAGE plpgsql
AS $$
DECLARE
    cur_day date := p_start_day;
BEGIN
    IF p_end_day < p_start_day THEN
        RAISE EXCEPTION 'p_end_day (%) must be >= p_start_day (%)', p_end_day, p_start_day;
    END IF;
    WHILE cur_day <= p_end_day LOOP
        PERFORM heartbeat.create_daily_partition(cur_day);
        cur_day := cur_day + 1;
    END LOOP;
END;
$$;
-- Install-time pre-creation: yesterday through 7 days ahead, so inserts never
-- hit a missing partition even across a date rollover.
SELECT heartbeat.ensure_partitions(current_date - 1, current_date + 7);
COMMIT;

107
scripts/db/apply.js Normal file
View File

@@ -0,0 +1,107 @@
import fs from 'node:fs/promises';
import path from 'node:path';
import { fileURLToPath } from 'node:url';
import { Client } from 'pg';
import config from '../../src/config/config.js';
// ESM has no __dirname builtin; derive it from this module's URL.
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);

// Read an environment variable, falling back to `fallback` when it is unset.
// Note: `??` keeps empty-string values; only null/undefined trigger the fallback.
function getEnv(name, fallback) {
  const value = process.env[name];
  return value ?? fallback;
}
// Build a pg Client configuration for the given database name.
// Standard PG* environment variables override the values from config.js.
function buildClientConfig(database) {
  const defaults = config.db;
  return {
    host: getEnv('PGHOST', defaults.host),
    port: Number(getEnv('PGPORT', defaults.port)),
    user: getEnv('PGUSER', defaults.user),
    password: getEnv('PGPASSWORD', defaults.password),
    database,
  };
}
// Read a SQL file and execute its entire contents as a single query batch.
// Empty (or whitespace-only) files are skipped silently.
async function runSqlFile(client, filePath) {
  const sql = await fs.readFile(filePath, 'utf8');
  if (sql.trim() === '') {
    return;
  }
  await client.query(sql);
}
// Apply the heartbeat DDL scripts (schema + daily partitioning) to the target
// database, then verify the partitioned table and list its parent indexes.
//
// Connection parameters come from config.db, overridable via the standard PG*
// environment variables; PGTARGETDB overrides the target database name.
async function main() {
  const scriptsDir = __dirname;
  const schemaFile = path.join(scriptsDir, '010_heartbeat_schema.sql');
  const partitionFile = path.join(scriptsDir, '020_partitioning_auto_daily.sql');
  const targetDb = getEnv('PGTARGETDB', config.db.database);
  console.log(`[db] Connecting to target db: ${targetDb}`);
  const targetClient = new Client(buildClientConfig(targetDb));
  await targetClient.connect();
  try {
    // Log the connected database's encoding / collation so operators can spot
    // a misconfigured locale before any DDL is applied.
    const dbMeta = await targetClient.query(
      `SELECT
current_database() AS db,
pg_encoding_to_char(encoding) AS encoding,
datcollate,
datctype,
datlocprovider
FROM pg_database
WHERE datname = current_database()`
    );
    if (dbMeta.rowCount === 1) {
      const m = dbMeta.rows[0];
      console.log(
        `[db] ${m.db} meta: encoding=${m.encoding} collate=${m.datcollate} ctype=${m.datctype} provider=${m.datlocprovider}`
      );
      if (String(m.encoding).toUpperCase() !== 'UTF8') {
        console.warn(`[db] WARN: ${m.db} encoding is not UTF8`);
      }
      // Heuristic only: collation names vary widely, so just look for a
      // "zh"/"chinese" substring and warn otherwise.
      const coll = String(m.datcollate ?? '').toLowerCase();
      if (coll && !coll.includes('zh') && !coll.includes('chinese')) {
        console.warn(
          `[db] WARN: ${m.db} collation is not obviously Chinese; if required, use ICU collation per-column or rebuild DB with zh locale`
        );
      }
    }
    // Order matters: the schema script must run before the partitioning script,
    // which references heartbeat.heartbeat_events.
    console.log(`[db] Applying: ${path.basename(schemaFile)}`);
    await runSqlFile(targetClient, schemaFile);
    console.log(`[db] Applying: ${path.basename(partitionFile)}`);
    await runSqlFile(targetClient, partitionFile);
    // Verify the parent table exists; to_regclass() returns NULL when missing.
    const tableCheck = await targetClient.query(
      "SELECT to_regclass('heartbeat.heartbeat_events') AS reg"
    );
    if (!tableCheck.rows?.[0]?.reg) {
      throw new Error('heartbeat.heartbeat_events was not created');
    }
    // List indexes on the partitioned parent for operator inspection.
    const indexCheck = await targetClient.query(
      `SELECT indexname
FROM pg_indexes
WHERE schemaname = 'heartbeat'
AND tablename = 'heartbeat_events'
ORDER BY indexname`
    );
    console.log('[db] Parent table indexes:');
    for (const row of indexCheck.rows) {
      console.log(` - ${row.indexname}`);
    }
    console.log('[db] Done');
  } finally {
    // Always close the connection, even when a step above throws.
    await targetClient.end();
  }
}
// Entry point: report failures and exit non-zero so CI/scripts notice.
main().then(null, (err) => {
  console.error('[db] Failed:', err);
  process.exit(1);
});

View File

@@ -0,0 +1,15 @@
import config from '../../src/config/config.js';
import { DatabaseManager } from '../../src/db/databaseManager.js';
// Minimal runtime smoke test: verify that DatabaseManager can open and then
// close a connection using the configured credentials. No data is touched.
async function main() {
  const manager = new DatabaseManager(config.db);
  await manager.connect();
  console.log('runtime smoke: connected');
  await manager.disconnect();
  console.log('runtime smoke: disconnected');
}

// Report failures and exit non-zero so CI/scripts notice.
main().then(null, (err) => {
  console.error('runtime smoke failed:', err);
  process.exit(1);
});

67
scripts/db/smokeTest.js Normal file
View File

@@ -0,0 +1,67 @@
import { Client } from 'pg';
import config from '../../src/config/config.js';
// End-to-end smoke test: ensure today's partition exists, insert one heartbeat
// row, then print the partition list and the total row count.
async function main() {
  const client = new Client({
    host: config.db.host,
    port: config.db.port,
    user: config.db.user,
    password: config.db.password,
    database: config.db.database,
  });
  await client.connect();
  // try/finally guarantees the connection is closed even when a query throws;
  // the original leaked the connection on any failure (cf. apply.js, which
  // already used finally).
  try {
    // Pre-create today's partition so the INSERT cannot fail with
    // "no partition of relation found".
    await client.query('SELECT heartbeat.ensure_partitions(current_date, current_date)');
    const ts = Date.now();
    await client.query(
      `INSERT INTO heartbeat.heartbeat_events (
ts_ms, hotel_id, room_id, device_id, ip,
power_state, guest_type, cardless_state, service_mask,
pms_state, carbon_state, device_count, comm_seq, extra
) VALUES ($1,$2,$3,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13,$14)`,
      [
        ts,
        1, // hotel_id
        101, // room_id
        'dev-1',
        '192.168.0.1',
        1, // power_state
        0, // guest_type
        0, // cardless_state
        5, // service_mask
        0, // pms_state
        0, // carbon_state
        1, // device_count
        1, // comm_seq
        { source: 'smoke-test' }, // extra (stored as jsonb)
      ]
    );
    // List every partition currently attached to the parent table.
    const partitions = await client.query(
      `SELECT c.relname AS partition
FROM pg_inherits i
JOIN pg_class c ON c.oid = i.inhrelid
JOIN pg_class p ON p.oid = i.inhparent
JOIN pg_namespace n ON n.oid = p.relnamespace
WHERE n.nspname = 'heartbeat'
AND p.relname = 'heartbeat_events'
ORDER BY c.relname`
    );
    const cnt = await client.query(
      'SELECT count(*)::int AS n FROM heartbeat.heartbeat_events'
    );
    console.log('partitions:', partitions.rows.map((r) => r.partition));
    console.log('rows:', cnt.rows[0].n);
  } finally {
    await client.end();
  }
}
// Run the smoke test; surface failures with a non-zero exit code.
main().then(null, (err) => {
  console.error('smoke test failed:', err);
  process.exit(1);
});