feat: 移除运行时代码中的数据库初始化与分区维护逻辑

- 新增备份 SQL 脚本 `01_init_schema.sql` 和 `02_create_partitions.sql`,用于数据库结构初始化和分区预创建。
- 新增 Node.js 脚本 `run_init.js` 和 `run_ensure_partitions.js`,支持通过外部程序调用进行数据库初始化和分区维护。
- 确保数据库初始化脚本支持幂等重复执行。
- 更新文档,说明新的执行顺序和使用方法。
- 移除运行时相关的数据库初始化和分区维护配置,简化服务职责。
- 保留写入失败时的缺分区兜底逻辑,确保服务稳定性。
This commit is contained in:
2026-03-04 11:47:22 +08:00
parent b4967f4c35
commit 3b98c6239b
11 changed files with 732 additions and 390 deletions

View File

@@ -0,0 +1,297 @@
-- =============================================================
-- 01_init_schema.sql
-- Target database: log_platform (or whichever database you actually use)
-- How to run:
--   psql -h <host> -p <port> -U <user> -d <database> -f 01_init_schema.sql
--   or automatically via run_init.js
-- Idempotent: everything uses IF NOT EXISTS / OR REPLACE, safe to re-run
-- =============================================================
BEGIN;
-- ----------------------------------------------------------
-- 1. Extension & schema
-- ----------------------------------------------------------
-- pgcrypto supplies gen_random_uuid(), used by the guid column default below.
CREATE EXTENSION IF NOT EXISTS pgcrypto;
CREATE SCHEMA IF NOT EXISTS heartbeat;
-- ----------------------------------------------------------
-- 2. Main table heartbeat.heartbeat_events, RANGE-partitioned on ts_ms
-- ----------------------------------------------------------
CREATE TABLE IF NOT EXISTS heartbeat.heartbeat_events (
-- 32-char lowercase hex id (a uuid with the dashes stripped), checked below
guid varchar(32) NOT NULL DEFAULT replace(gen_random_uuid()::text, '-', ''),
-- event time in epoch milliseconds; this is the partition key
ts_ms bigint NOT NULL,
-- server-side write time in epoch milliseconds
write_ts_ms bigint DEFAULT (EXTRACT(EPOCH FROM clock_timestamp()) * 1000)::bigint,
hotel_id int2 NOT NULL,
room_id varchar(50) NOT NULL,
device_id varchar(64) NOT NULL,
-- NOTE(review): 21 chars presumably fits "IPv4:port" (15 + 1 + 5) — confirm
ip varchar(21) NOT NULL,
power_state int2 NOT NULL,
guest_type int2 NOT NULL,
cardless_state int2 NOT NULL,
-- bit mask; bit 0 gets its own expression index further down
service_mask bigint NOT NULL,
pms_state int2 NOT NULL,
carbon_state int2 NOT NULL,
device_count int2 NOT NULL,
comm_seq int4 NOT NULL,
insert_card int2,
bright_g int2,
version int4,
-- Per-device reading arrays. NOTE(review): the names suggest these are
-- parallel arrays (same index = same device) — confirm against the writer.
elec_address text[],
air_address text[],
voltage double precision[],
ampere double precision[],
power double precision[],
phase text[],
energy double precision[],
sum_energy double precision[],
state int2[],
model int2[],
speed int2[],
set_temp int2[],
now_temp int2[],
solenoid_valve int2[],
-- free-form extension payload
extra jsonb,
-- On a partitioned table the PK must include the partition key (ts_ms)
CONSTRAINT heartbeat_events_pk PRIMARY KEY (ts_ms, guid),
CONSTRAINT chk_guid_32_hex CHECK (guid ~ '^[0-9a-f]{32}$'),
CONSTRAINT chk_ts_ms_positive CHECK (ts_ms > 0),
-- int2 fields are constrained to the non-negative smallint range
CONSTRAINT chk_hotel_id_range CHECK (hotel_id >= 0 AND hotel_id <= 32767),
CONSTRAINT chk_room_id_len CHECK (char_length(room_id) > 0 AND char_length(room_id) <= 50),
CONSTRAINT chk_power_state_range CHECK (power_state >= 0 AND power_state <= 32767),
CONSTRAINT chk_guest_type_range CHECK (guest_type >= 0 AND guest_type <= 32767),
CONSTRAINT chk_cardless_state_range CHECK (cardless_state >= 0 AND cardless_state <= 32767),
CONSTRAINT chk_pms_state_range CHECK (pms_state >= 0 AND pms_state <= 32767),
CONSTRAINT chk_carbon_state_range CHECK (carbon_state >= 0 AND carbon_state <= 32767),
CONSTRAINT chk_device_count_range CHECK (device_count >= 0 AND device_count <= 32767),
CONSTRAINT chk_comm_seq_range CHECK (comm_seq >= 0)
)
PARTITION BY RANGE (ts_ms);
-- Backfill columns (migration-safe: silently skipped when the column exists)
ALTER TABLE heartbeat.heartbeat_events ADD COLUMN IF NOT EXISTS write_ts_ms bigint;
ALTER TABLE heartbeat.heartbeat_events ALTER COLUMN write_ts_ms SET DEFAULT (EXTRACT(EPOCH FROM clock_timestamp()) * 1000)::bigint;
ALTER TABLE heartbeat.heartbeat_events ADD COLUMN IF NOT EXISTS elec_address text[];
ALTER TABLE heartbeat.heartbeat_events ADD COLUMN IF NOT EXISTS air_address text[];
ALTER TABLE heartbeat.heartbeat_events ADD COLUMN IF NOT EXISTS voltage double precision[];
ALTER TABLE heartbeat.heartbeat_events ADD COLUMN IF NOT EXISTS ampere double precision[];
ALTER TABLE heartbeat.heartbeat_events ADD COLUMN IF NOT EXISTS power double precision[];
ALTER TABLE heartbeat.heartbeat_events ADD COLUMN IF NOT EXISTS phase text[];
ALTER TABLE heartbeat.heartbeat_events ADD COLUMN IF NOT EXISTS energy double precision[];
ALTER TABLE heartbeat.heartbeat_events ADD COLUMN IF NOT EXISTS sum_energy double precision[];
ALTER TABLE heartbeat.heartbeat_events ADD COLUMN IF NOT EXISTS state int2[];
ALTER TABLE heartbeat.heartbeat_events ADD COLUMN IF NOT EXISTS model int2[];
ALTER TABLE heartbeat.heartbeat_events ADD COLUMN IF NOT EXISTS speed int2[];
ALTER TABLE heartbeat.heartbeat_events ADD COLUMN IF NOT EXISTS set_temp int2[];
ALTER TABLE heartbeat.heartbeat_events ADD COLUMN IF NOT EXISTS now_temp int2[];
ALTER TABLE heartbeat.heartbeat_events ADD COLUMN IF NOT EXISTS solenoid_valve int2[];
ALTER TABLE heartbeat.heartbeat_events ADD COLUMN IF NOT EXISTS insert_card int2;
ALTER TABLE heartbeat.heartbeat_events ADD COLUMN IF NOT EXISTS bright_g int2;
ALTER TABLE heartbeat.heartbeat_events ADD COLUMN IF NOT EXISTS version int4;
-- ip / room_id type migration (legacy inet / non-varchar -> varchar).
-- Each branch only fires when the old column type is still present, so
-- re-running this block is a no-op.
DO $$
BEGIN
-- ip: inet -> varchar(21), keeping the textual form of the address
IF EXISTS (
SELECT 1 FROM pg_attribute a
JOIN pg_class c ON c.oid = a.attrelid
JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE n.nspname = 'heartbeat' AND c.relname = 'heartbeat_events'
AND a.attname = 'ip' AND format_type(a.atttypid, a.atttypmod) = 'inet'
) THEN
ALTER TABLE heartbeat.heartbeat_events ALTER COLUMN ip TYPE varchar(21) USING ip::text;
END IF;
-- room_id: any non-varchar type -> varchar(50). The length constraints are
-- dropped first and re-added afterwards so the type change cannot conflict
-- with a constraint defined against the old type.
IF EXISTS (
SELECT 1 FROM pg_attribute a
JOIN pg_class c ON c.oid = a.attrelid
JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE n.nspname = 'heartbeat' AND c.relname = 'heartbeat_events'
AND a.attname = 'room_id'
AND format_type(a.atttypid, a.atttypmod) NOT LIKE 'character varying%'
) THEN
ALTER TABLE heartbeat.heartbeat_events DROP CONSTRAINT IF EXISTS chk_room_id_range;
ALTER TABLE heartbeat.heartbeat_events DROP CONSTRAINT IF EXISTS chk_room_id_len;
ALTER TABLE heartbeat.heartbeat_events ALTER COLUMN room_id TYPE varchar(50) USING room_id::text;
ALTER TABLE heartbeat.heartbeat_events ADD CONSTRAINT chk_room_id_len
CHECK (char_length(room_id) > 0 AND char_length(room_id) <= 50);
END IF;
END $$;
-- ----------------------------------------------------------
-- 3. Indexes on the parent table (inherited by every child partition)
-- ----------------------------------------------------------
-- Single-column btree indexes for common equality filters
CREATE INDEX IF NOT EXISTS idx_heartbeat_events_hotel_id
ON heartbeat.heartbeat_events (hotel_id);
CREATE INDEX IF NOT EXISTS idx_heartbeat_events_power_state
ON heartbeat.heartbeat_events (power_state);
CREATE INDEX IF NOT EXISTS idx_heartbeat_events_guest_type
ON heartbeat.heartbeat_events (guest_type);
CREATE INDEX IF NOT EXISTS idx_heartbeat_events_device_id
ON heartbeat.heartbeat_events (device_id);
-- BRIN: very small; NOTE(review): only effective if service_mask correlates
-- with physical insertion order — verify against real data
CREATE INDEX IF NOT EXISTS idx_heartbeat_events_service_mask_brin
ON heartbeat.heartbeat_events USING BRIN (service_mask);
-- Expression index on the lowest bit of service_mask, for (service_mask & 1) predicates
CREATE INDEX IF NOT EXISTS idx_service_mask_first_bit
ON heartbeat.heartbeat_events ((service_mask & 1));
-- Composite index for per-hotel time-range scans
CREATE INDEX IF NOT EXISTS idx_heartbeat_events_hotel_ts
ON heartbeat.heartbeat_events (hotel_id, ts_ms);
-- GIN indexes for array containment / overlap queries
CREATE INDEX IF NOT EXISTS idx_heartbeat_events_elec_address_gin
ON heartbeat.heartbeat_events USING GIN (elec_address);
CREATE INDEX IF NOT EXISTS idx_heartbeat_events_air_address_gin
ON heartbeat.heartbeat_events USING GIN (air_address);
CREATE INDEX IF NOT EXISTS idx_heartbeat_events_state_gin
ON heartbeat.heartbeat_events USING GIN (state);
CREATE INDEX IF NOT EXISTS idx_heartbeat_events_model_gin
ON heartbeat.heartbeat_events USING GIN (model);
-- ----------------------------------------------------------
-- 4. Partition helper functions
-- ----------------------------------------------------------
-- Epoch milliseconds of 00:00 on the given calendar day in Asia/Shanghai.
-- timezone(zone, ts) is the function form of "ts AT TIME ZONE zone".
CREATE OR REPLACE FUNCTION heartbeat.day_start_ms_shanghai(p_day date)
RETURNS bigint
LANGUAGE sql IMMUTABLE
AS $$
SELECT (EXTRACT(EPOCH FROM timezone('Asia/Shanghai', p_day::timestamp)) * 1000)::bigint;
$$;
-- Name of the daily partition for a given day, e.g. heartbeat_events_20260304.
CREATE OR REPLACE FUNCTION heartbeat.partition_name_for_day(p_day date)
RETURNS text
LANGUAGE sql IMMUTABLE
AS $$
SELECT 'heartbeat_events_' || to_char(p_day, 'YYYYMMDD');
$$;
-- Move a partition plus all of its indexes, its TOAST table, and the TOAST
-- indexes into the given tablespace. Idempotent: relations already in the
-- target tablespace are filtered out by the loops below.
-- p_schema / p_partition: the leaf partition to relocate
-- p_tablespace: target tablespace, defaults to 'ts_hot'
-- Raises an exception when the partition does not exist.
CREATE OR REPLACE FUNCTION heartbeat.relocate_partition_to_tablespace(
p_schema text,
p_partition text,
p_tablespace text DEFAULT 'ts_hot'
)
RETURNS void
LANGUAGE plpgsql
AS $$
DECLARE
v_part_oid oid;
v_toast_oid oid;
r record;
BEGIN
-- Resolve the partition OID (relkind 'r': leaf partitions are plain tables)
SELECT c.oid INTO v_part_oid
FROM pg_class c
JOIN pg_namespace n ON n.oid = c.relnamespace
WHERE n.nspname = p_schema
AND c.relname = p_partition
AND c.relkind = 'r';
IF v_part_oid IS NULL THEN
RAISE EXCEPTION 'partition %.% not found', p_schema, p_partition;
END IF;
-- 1) Partition table -> target tablespace
EXECUTE format('ALTER TABLE %I.%I SET TABLESPACE %I', p_schema, p_partition, p_tablespace);
-- 2) Every index of the partition -> target tablespace.
-- reltablespace = 0/NULL means pg_default; COALESCE makes that comparable.
FOR r IN
SELECT idxn.nspname AS index_schema, i.relname AS index_name
FROM pg_index x
JOIN pg_class i ON i.oid = x.indexrelid
JOIN pg_namespace idxn ON idxn.oid = i.relnamespace
LEFT JOIN pg_tablespace ts ON ts.oid = i.reltablespace
WHERE x.indrelid = v_part_oid
AND COALESCE(ts.spcname, 'pg_default') <> p_tablespace
LOOP
EXECUTE format('ALTER INDEX %I.%I SET TABLESPACE %I', r.index_schema, r.index_name, p_tablespace);
END LOOP;
-- 3) TOAST table + its indexes -> target tablespace (when one exists)
SELECT reltoastrelid INTO v_toast_oid FROM pg_class WHERE oid = v_part_oid;
IF v_toast_oid IS NOT NULL AND v_toast_oid <> 0 THEN
-- regclass output is already schema-qualified and quoted, hence %s not %I
EXECUTE format('ALTER TABLE %s SET TABLESPACE %I', v_toast_oid::regclass, p_tablespace);
FOR r IN
SELECT idxn.nspname AS index_schema, i.relname AS index_name
FROM pg_index x
JOIN pg_class i ON i.oid = x.indexrelid
JOIN pg_namespace idxn ON idxn.oid = i.relnamespace
LEFT JOIN pg_tablespace ts ON ts.oid = i.reltablespace
WHERE x.indrelid = v_toast_oid
AND COALESCE(ts.spcname, 'pg_default') <> p_tablespace
LOOP
EXECUTE format('ALTER INDEX %I.%I SET TABLESPACE %I', r.index_schema, r.index_name, p_tablespace);
END LOOP;
END IF;
-- 4) Refresh planner statistics after the physical move
EXECUTE format('ANALYZE %I.%I', p_schema, p_partition);
END;
$$;
-- Create the partition for a single day (idempotent) and move it into the
-- ts_hot tablespace when that tablespace exists.
-- p_day: the Asia/Shanghai calendar day the partition covers.
-- A fixed 86,400,000 ms day length is safe here: Asia/Shanghai has no DST,
-- so every civil day is exactly 24 hours.
CREATE OR REPLACE FUNCTION heartbeat.create_daily_partition(p_day date)
RETURNS void
LANGUAGE plpgsql
AS $$
DECLARE
start_ms bigint;
end_ms bigint;
part_name text;
BEGIN
start_ms := heartbeat.day_start_ms_shanghai(p_day);
end_ms := start_ms + 86400000;
part_name := heartbeat.partition_name_for_day(p_day);
EXECUTE format(
'CREATE TABLE IF NOT EXISTS heartbeat.%I PARTITION OF heartbeat.heartbeat_events FOR VALUES FROM (%s) TO (%s)',
part_name, start_ms, end_ms
);
-- Robustness: only relocate when the ts_hot tablespace actually exists.
-- Previously a missing tablespace (e.g. on a dev machine) aborted the
-- whole partition-creation run; now the partition simply stays in the
-- default tablespace and a NOTICE is emitted.
IF EXISTS (SELECT 1 FROM pg_tablespace WHERE spcname = 'ts_hot') THEN
PERFORM heartbeat.relocate_partition_to_tablespace('heartbeat', part_name, 'ts_hot');
ELSE
RAISE NOTICE 'tablespace ts_hot not found; partition heartbeat.% left in default tablespace', part_name;
END IF;
END;
$$;
-- Ensure a daily partition exists for every day in [p_start_day, p_end_day]
-- (both endpoints included; idempotent). Raises when the range is inverted.
CREATE OR REPLACE FUNCTION heartbeat.ensure_partitions(p_start_day date, p_end_day date)
RETURNS void
LANGUAGE plpgsql
AS $$
DECLARE
cur_day date := p_start_day;
BEGIN
IF p_end_day < p_start_day THEN
RAISE EXCEPTION 'p_end_day (%) must be >= p_start_day (%)', p_end_day, p_start_day;
END IF;
-- Walk the range one day at a time instead of materialising a series.
WHILE cur_day <= p_end_day LOOP
PERFORM heartbeat.create_daily_partition(cur_day);
cur_day := cur_day + 1;
END LOOP;
END;
$$;
-- ----------------------------------------------------------
-- 5. Drop the legacy compatibility DEFAULT partition (if present)
-- ----------------------------------------------------------
-- NOTE(review): DROP TABLE discards any rows still stored in the old
-- DEFAULT partition — confirm it is empty (or already migrated) before
-- running this against production data.
DO $$
BEGIN
IF to_regclass('heartbeat.heartbeat_events_default') IS NOT NULL THEN
EXECUTE 'DROP TABLE heartbeat.heartbeat_events_default';
END IF;
END $$;
COMMIT;

View File

@@ -0,0 +1,15 @@
-- =============================================================
-- 02_create_partitions.sql
-- Purpose: pre-create partitions (yesterday + the next N days)
-- Prerequisite: 01_init_schema.sql has been run (ensure_partitions exists)
-- How to run:
--   psql -h <host> -p <port> -U <user> -d <database> -f 02_create_partitions.sql
--   or automatically via run_ensure_partitions.js (supports a custom range)
-- =============================================================
-- Default range: yesterday through 30 days ahead (Asia/Shanghai calendar days).
-- Adjust -1 / 30 to change the range: negative = days in the past, positive = future.
SELECT heartbeat.ensure_partitions(
((now() AT TIME ZONE 'Asia/Shanghai')::date) - 1,
((now() AT TIME ZONE 'Asia/Shanghai')::date) + 30
);

97
SQL_Script/README.md Normal file
View File

@@ -0,0 +1,97 @@
# SQL_Script
数据库初始化与分区维护脚本。与主服务完全独立,可被任何工具(psql / Node.js / cron / CI)调用。
## 文件说明
| 文件 | 说明 |
|------|------|
| `01_init_schema.sql` | 建库(Extension + Schema + 主表 + 索引 + 分区辅助函数),幂等,可重复执行 |
| `02_create_partitions.sql` | 预创建分区(昨天 ~ 未来 30 天),直接用 psql 执行 |
| `run_init.js` | Node.js 脚本:执行 `01_init_schema.sql` |
| `run_ensure_partitions.js` | Node.js 脚本:调用 `ensure_partitions()` 预创建指定日期范围分区 |
---
## 执行顺序(首次部署)
```bash
# 第一步:初始化 Schema建表 + 函数)
node SQL_Script/run_init.js
# 第二步:预创建分区(默认昨天 ~ 未来 30 天)
node SQL_Script/run_ensure_partitions.js
```
之后每天由外部 cron / 调度任务调用 `run_ensure_partitions.js` 即可。
---
## run_init.js 用法
```bash
# 最简方式(从 .env 读取连接参数)
node SQL_Script/run_init.js
# 指定连接参数
node SQL_Script/run_init.js \
--host=10.8.8.109 \
--port=5433 \
--user=log_admin \
--password=yourpassword \
--database=log_platform
```
---
## run_ensure_partitions.js 用法
```bash
# 默认:昨天 到 未来 30 天
node SQL_Script/run_ensure_partitions.js
# 按 offset(相对今天的天数偏移)
node SQL_Script/run_ensure_partitions.js --start-offset=-1 --end-offset=60
# 按具体日期
node SQL_Script/run_ensure_partitions.js --start-date=2026-03-01 --end-date=2026-06-30
```
---
## psql 直接执行
```bash
# Schema 初始化
psql -h 10.8.8.109 -p 5433 -U log_admin -d log_platform -f SQL_Script/01_init_schema.sql
# 预创建分区(默认昨天 ~ 未来 30 天)
psql -h 10.8.8.109 -p 5433 -U log_admin -d log_platform -f SQL_Script/02_create_partitions.sql
```
---
## 连接参数优先级
```
命令行参数 > 环境变量 > 根目录 .env 文件 > 默认值
```
支持的环境变量:
| 环境变量 | 说明 |
|----------|------|
| `POSTGRES_HOST` | 主机,默认 `127.0.0.1` |
| `POSTGRES_PORT` | 端口,默认 `5432` |
| `POSTGRES_USER` | 用户名,默认 `postgres` |
| `POSTGRES_PASSWORD` | 密码 |
| `POSTGRES_DATABASE` | 数据库名,默认 `log_platform` |
---
## 定时分区维护cron 示例)
```cron
# 每天凌晨 01:00 自动创建未来 30 天分区
0 1 * * * cd /path/to/Web_BLS_Heartbeat_Server && node SQL_Script/run_ensure_partitions.js >> /var/log/partition_cron.log 2>&1
```

View File

@@ -0,0 +1,138 @@
#!/usr/bin/env node
/**
* run_ensure_partitions.js
* 调用 heartbeat.ensure_partitions() 预创建指定日期范围的分区
*
* 用法:
* # 默认:昨天 到 未来 30 天
* node SQL_Script/run_ensure_partitions.js
*
* # 按 offset相对今天的天数偏移
* node SQL_Script/run_ensure_partitions.js --start-offset=-1 --end-offset=60
*
* # 按具体日期YYYY-MM-DD
* node SQL_Script/run_ensure_partitions.js --start-date=2026-03-01 --end-date=2026-06-30
*
* 连接参数优先级:命令行参数 > 环境变量 > .env 文件
*
* 命令行参数(均可选):
* --host=<host>
* --port=<port>
* --user=<user>
* --password=<password>
* --database=<database>
* --start-offset=<n> 相对今天的起始天数偏移,负数=过去,默认 -1
* --end-offset=<n> 相对今天的结束天数偏移,正数=未来,默认 30
* --start-date=<YYYY-MM-DD> 直接指定起始日期(优先于 start-offset)
* --end-date=<YYYY-MM-DD> 直接指定结束日期(优先于 end-offset)
*/
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
import { Client } from 'pg';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// ----------------------------------------------------------
// 1. Load .env
// ----------------------------------------------------------
/**
 * Load the first .env file found — current working directory first, then the
 * project root next to this script — into process.env.
 * Pre-existing environment variables are never overwritten. Blank lines,
 * comment lines, and lines without a leading "key=" are skipped; one pair of
 * surrounding single or double quotes is stripped from values.
 */
function loadEnv() {
  const candidates = [
    path.resolve(process.cwd(), '.env'),
    path.resolve(__dirname, '../.env'),
  ];
  for (const envPath of candidates) {
    if (!fs.existsSync(envPath)) continue;
    for (const rawLine of fs.readFileSync(envPath, 'utf8').split(/\r?\n/)) {
      const trimmed = rawLine.trim();
      if (trimmed === '' || trimmed.startsWith('#')) continue;
      const eq = trimmed.indexOf('=');
      if (eq <= 0) continue;
      const key = trimmed.slice(0, eq).trim();
      let value = trimmed.slice(eq + 1).trim();
      const quoted =
        (value.startsWith('"') && value.endsWith('"')) ||
        (value.startsWith("'") && value.endsWith("'"));
      if (quoted) value = value.slice(1, -1);
      if (process.env[key] === undefined) process.env[key] = value;
    }
    console.log(`[env] 已加载 ${envPath}`);
    return;
  }
}
// ----------------------------------------------------------
// 2. Parse command-line arguments
// ----------------------------------------------------------
/**
 * Collect `--key=value` tokens from process.argv into a plain object.
 * Tokens not matching that exact shape are ignored.
 */
function parseArgs() {
  const pattern = /^--([^=]+)=(.*)$/;
  return process.argv.slice(2).reduce((acc, token) => {
    const match = pattern.exec(token);
    if (match) acc[match[1]] = match[2];
    return acc;
  }, {});
}
/**
 * Return the Asia/Shanghai calendar date (YYYY-MM-DD) that is `offsetDays`
 * days away from "today" in Shanghai; negative offsets go into the past.
 *
 * Day arithmetic is done on the underlying epoch milliseconds with a fixed
 * 24 h day, which is exact for Asia/Shanghai (no DST). The previous
 * Date#setDate approach shifted days in the *runtime's* local time zone,
 * so a host-zone DST transition could move the instant by ±1 h and land on
 * the wrong Shanghai date.
 */
function shanghaiDateWithOffset(offsetDays) {
  const todayShanghai = new Date().toLocaleDateString('en-CA', { timeZone: 'Asia/Shanghai' }); // YYYY-MM-DD
  const midnightShanghaiMs = new Date(`${todayShanghai}T00:00:00+08:00`).getTime();
  const target = new Date(midnightShanghaiMs + offsetDays * 86400000);
  return target.toLocaleDateString('en-CA', { timeZone: 'Asia/Shanghai' });
}
// ----------------------------------------------------------
// 3. Main flow
// ----------------------------------------------------------
/**
 * Entry point: resolve connection settings and the partition date range,
 * then call heartbeat.ensure_partitions() on the target database.
 *
 * Signals failure via process.exitCode rather than process.exit(): exit()
 * terminates immediately and would skip the `finally` client.end(), leaving
 * the connection unclosed. With exitCode the process still ends with code 1
 * once the event loop drains.
 */
async function main() {
  loadEnv();
  const args = parseArgs();
  const env = process.env;
  // Connection parameters: CLI args > POSTGRES_* > PG* > defaults.
  const config = {
    host: args.host ?? env.POSTGRES_HOST ?? env.PGHOST ?? '127.0.0.1',
    port: Number(args.port ?? env.POSTGRES_PORT ?? env.PGPORT ?? 5432),
    user: args.user ?? env.POSTGRES_USER ?? env.PGUSER ?? 'postgres',
    password: args.password ?? env.POSTGRES_PASSWORD ?? env.PGPASSWORD ?? '',
    database: args.database ?? env.POSTGRES_DATABASE ?? env.PGTARGETDB ?? 'log_platform',
  };
  // Partition range: explicit dates win over day offsets (defaults -1 .. +30).
  const startDate = args['start-date']
    ? args['start-date']
    : shanghaiDateWithOffset(Number(args['start-offset'] ?? -1));
  const endDate = args['end-date']
    ? args['end-date']
    : shanghaiDateWithOffset(Number(args['end-offset'] ?? 30));
  console.log('[partition] 连接数据库:', {
    host: config.host,
    port: config.port,
    user: config.user,
    database: config.database,
  });
  console.log(`[partition] 预创建分区范围: ${startDate} ~ ${endDate}`);
  const client = new Client(config);
  try {
    await client.connect();
    console.log('[partition] 数据库连接成功,开始预创建分区...');
    // Dates go in as bind parameters and are cast server-side.
    await client.query(
      'SELECT heartbeat.ensure_partitions($1::date, $2::date)',
      [startDate, endDate]
    );
    console.log(`[partition] ✅ 分区预创建完成 (${startDate} ~ ${endDate})`);
  } catch (err) {
    console.error('[partition] ❌ 分区预创建失败:', err.message);
    // Defer the non-zero exit so `finally` can close the connection first.
    process.exitCode = 1;
  } finally {
    await client.end();
  }
}
main();

112
SQL_Script/run_init.js Normal file
View File

@@ -0,0 +1,112 @@
#!/usr/bin/env node
/**
* run_init.js
* 执行数据库 Schema 初始化01_init_schema.sql
*
* 用法:
* node SQL_Script/run_init.js
*
* 连接参数优先级:命令行参数 > 环境变量 > .env 文件
*
* 命令行参数(均可选):
* --host=<host> PostgreSQL 主机,默认 POSTGRES_HOST 或 127.0.0.1
* --port=<port> 端口,默认 POSTGRES_PORT 或 5432
* --user=<user> 用户,默认 POSTGRES_USER 或 postgres
* --password=<password> 密码,默认 POSTGRES_PASSWORD
* --database=<database> 数据库名,默认 POSTGRES_DATABASE 或 log_platform
*/
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
import { Client } from 'pg';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// ----------------------------------------------------------
// 1. Load .env
// ----------------------------------------------------------
/**
 * Populate process.env from the first .env file found, checking the current
 * working directory and then the project root one level above this script.
 * Pre-existing environment variables always win; malformed lines, blanks,
 * and '#' comments are ignored, and one pair of surrounding quotes is
 * stripped from each value.
 */
function loadEnv() {
  const searchPaths = [
    path.resolve(process.cwd(), '.env'),
    path.resolve(__dirname, '../.env'),
  ];
  const found = searchPaths.find((candidate) => fs.existsSync(candidate));
  if (found === undefined) return;
  for (const rawLine of fs.readFileSync(found, 'utf8').split(/\r?\n/)) {
    const line = rawLine.trim();
    if (!line || line.startsWith('#')) continue;
    const eq = line.indexOf('=');
    if (eq <= 0) continue;
    const key = line.slice(0, eq).trim();
    let value = line.slice(eq + 1).trim();
    const isQuoted =
      (value.startsWith('"') && value.endsWith('"')) ||
      (value.startsWith("'") && value.endsWith("'"));
    if (isQuoted) value = value.slice(1, -1);
    if (process.env[key] === undefined) process.env[key] = value;
  }
  console.log(`[env] 已加载 ${found}`);
}
// ----------------------------------------------------------
// 2. Parse command-line arguments
// ----------------------------------------------------------
/**
 * Turn `--key=value` CLI tokens into an object map; anything else is ignored.
 */
function parseArgs() {
  const parsed = {};
  for (const token of process.argv.slice(2)) {
    const match = /^--([^=]+)=(.*)$/.exec(token);
    if (match !== null) parsed[match[1]] = match[2];
  }
  return parsed;
}
// ----------------------------------------------------------
// 3. Main flow
// ----------------------------------------------------------
/**
 * Entry point: resolve connection settings, read 01_init_schema.sql from the
 * directory this script lives in, and execute it against the target database.
 *
 * Signals failure via process.exitCode rather than process.exit(): exit()
 * terminates immediately and would skip the `finally` client.end(). Setting
 * exitCode still makes the process end with code 1 after cleanup.
 */
async function main() {
  loadEnv();
  const args = parseArgs();
  const env = process.env;
  // Connection parameters: CLI args > POSTGRES_* > PG* > defaults.
  const config = {
    host: args.host ?? env.POSTGRES_HOST ?? env.PGHOST ?? '127.0.0.1',
    port: Number(args.port ?? env.POSTGRES_PORT ?? env.PGPORT ?? 5432),
    user: args.user ?? env.POSTGRES_USER ?? env.PGUSER ?? 'postgres',
    password: args.password ?? env.POSTGRES_PASSWORD ?? env.PGPASSWORD ?? '',
    database: args.database ?? env.POSTGRES_DATABASE ?? env.PGTARGETDB ?? 'log_platform',
  };
  console.log('[init] 连接数据库:', {
    host: config.host,
    port: config.port,
    user: config.user,
    database: config.database,
  });
  const sqlPath = path.resolve(__dirname, '01_init_schema.sql');
  if (!fs.existsSync(sqlPath)) {
    console.error(`[init] 找不到 SQL 文件: ${sqlPath}`);
    process.exitCode = 1;
    return;
  }
  const sql = fs.readFileSync(sqlPath, 'utf8');
  const client = new Client(config);
  try {
    await client.connect();
    console.log('[init] 数据库连接成功,开始执行初始化 SQL...');
    // The file is sent as one multi-statement query; its own BEGIN/COMMIT
    // makes the schema initialisation transactional.
    await client.query(sql);
    console.log('[init] ✅ Schema 初始化完成');
  } catch (err) {
    console.error('[init] ❌ 初始化失败:', err.message);
    // Defer the non-zero exit so `finally` can close the connection first.
    process.exitCode = 1;
  } finally {
    await client.end();
  }
}
main();