From 1eccc2e3aaf17879a321d851e6a795ce01c44922 Mon Sep 17 00:00:00 2001 From: XuJiacheng Date: Mon, 2 Mar 2026 11:49:02 +0800 Subject: [PATCH] =?UTF-8?q?refactor:=20=E9=87=8D=E6=9E=84=E5=88=86?= =?UTF-8?q?=E5=8C=BA=E7=B4=A2=E5=BC=95=E7=AD=96=E7=95=A5=EF=BC=8C=E7=A7=BB?= =?UTF-8?q?=E9=99=A4=E6=98=BE=E5=BC=8F=E5=88=9B=E5=BB=BA=E7=B4=A2=E5=BC=95?= =?UTF-8?q?=E7=9A=84=E6=96=B9=E6=B3=95=E5=B9=B6=E6=9B=B4=E6=96=B0=E7=9B=B8?= =?UTF-8?q?=E5=85=B3=E6=B5=81=E7=A8=8B?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- bls-onoffline-backend/dist/index.js | 66 +---------------- .../proposal.md | 11 +++ .../specs/onoffline/spec.md | 11 +++ .../tasks.md | 6 ++ bls-onoffline-backend/src/db/initializer.js | 1 - .../src/db/partitionManager.js | 73 ------------------- bls-onoffline-backend/src/index.js | 4 +- 7 files changed, 32 insertions(+), 140 deletions(-) create mode 100644 bls-onoffline-backend/openspec/changes/2026-03-03-refactor-partition-indexes/proposal.md create mode 100644 bls-onoffline-backend/openspec/changes/2026-03-03-refactor-partition-indexes/specs/onoffline/spec.md create mode 100644 bls-onoffline-backend/openspec/changes/2026-03-03-refactor-partition-indexes/tasks.md diff --git a/bls-onoffline-backend/dist/index.js b/bls-onoffline-backend/dist/index.js index 0911b58..ab89c16 100644 --- a/bls-onoffline-backend/dist/index.js +++ b/bls-onoffline-backend/dist/index.js @@ -193,65 +193,6 @@ class PartitionManager { const endMs = end.getTime(); return { startMs, endMs, partitionSuffix }; } - async ensurePartitionIndexes(client, schema, table, partitionSuffix) { - const startedAt = Date.now(); - const partitionName = `${schema}.${table}_${partitionSuffix}`; - const indexBase = `${table}_${partitionSuffix}`; - const indexSpecs = [ - { name: `idx_${indexBase}_ts`, column: "ts_ms" }, - { name: `idx_${indexBase}_hid`, column: "hotel_id" }, - { name: `idx_${indexBase}_mac`, column: "mac" }, - { name: `idx_${indexBase}_did`, column: 
"device_id" }, - { name: `idx_${indexBase}_rid`, column: "room_id" }, - { name: `idx_${indexBase}_cs`, column: "current_status" } - ]; - for (const spec of indexSpecs) { - await client.query(`CREATE INDEX IF NOT EXISTS ${spec.name} ON ${partitionName} (${spec.column});`); - } - await client.query(`ANALYZE ${partitionName};`); - const elapsedMs = Date.now() - startedAt; - if (elapsedMs > 1e3) { - logger.warn(`Partition index ensure slow`, { partitionName, elapsedMs }); - } - } - async ensureIndexesForExistingPartitions() { - const startedAt = Date.now(); - const client = await dbManager.pool.connect(); - try { - const schema = config.db.schema; - const table = config.db.table; - const res = await client.query( - ` - SELECT c.relname AS relname - FROM pg_inherits i - JOIN pg_class p ON i.inhparent = p.oid - JOIN pg_namespace pn ON pn.oid = p.relnamespace - JOIN pg_class c ON i.inhrelid = c.oid - WHERE pn.nspname = $1 AND p.relname = $2 - ORDER BY c.relname; - `, - [schema, table] - ); - const suffixes = /* @__PURE__ */ new Set(); - const pattern = new RegExp(`^${table}_(\\d{8})$`); - for (const row of res.rows) { - const relname = row?.relname; - if (typeof relname !== "string") continue; - const match = relname.match(pattern); - if (!match) continue; - suffixes.add(match[1]); - } - for (const suffix of suffixes) { - await this.ensurePartitionIndexes(client, schema, table, suffix); - } - const elapsedMs = Date.now() - startedAt; - if (elapsedMs > 5e3) { - logger.warn("Ensure existing partition indexes slow", { schema, table, partitions: suffixes.size, elapsedMs }); - } - } finally { - client.release(); - } - } /** * Ensure partitions exist for the past M days and next N days. * @param {number} daysAhead - Number of days to pre-create. 
@@ -284,7 +225,6 @@ class PartitionManager { `; await client.query(createSql); } - await this.ensurePartitionIndexes(client, schema, table, partitionSuffix); } logger.info("Partition check completed."); } catch (err) { @@ -329,7 +269,6 @@ class PartitionManager { FOR VALUES FROM (${startMs}) TO (${endMs}); `); } - await this.ensurePartitionIndexes(client, schema, table, partitionSuffix); } } finally { client.release(); @@ -345,7 +284,6 @@ class DatabaseInitializer { await this.ensureDatabaseExists(); await this.ensureSchemaAndTable(); await partitionManager.ensurePartitions(30); - await partitionManager.ensureIndexesForExistingPartitions(); console.log("Database initialization completed successfully."); logger.info("Database initialization completed successfully."); } @@ -875,9 +813,9 @@ const bootstrap = async () => { const metrics = metricCollector.getAndReset(); const flushAvgMs = metrics.batch_flush_count > 0 ? (metrics.batch_flush_ms_sum / metrics.batch_flush_count).toFixed(1) : "0.0"; const dbAvgMs = metrics.db_insert_count > 0 ? 
(metrics.db_insert_ms_sum / metrics.db_insert_count).toFixed(1) : "0.0"; - const report = `[Minute Metrics] Pulled: ${metrics.kafka_pulled}, Parse Error: ${metrics.parse_error}, Inserted: ${metrics.db_inserted}, Failed: ${metrics.db_failed}, FlushAvgMs: ${flushAvgMs}, DbAvgMs: ${dbAvgMs}, PulledByPartition: ${JSON.stringify(metrics.keyed?.kafka_pulled_by_partition || {})}, InsertedByPartition: ${JSON.stringify(metrics.keyed?.db_inserted_by_partition || {})}, FailedByPartition: ${JSON.stringify(metrics.keyed?.db_failed_by_partition || {})}, InsertedByDay: ${JSON.stringify(metrics.keyed?.db_inserted_by_day || {})}, DbMsByDay: ${JSON.stringify(metrics.keyed?.db_insert_ms_sum_by_day || {})}`; + const report = `[Metrics] Pulled:${metrics.kafka_pulled} ParseErr:${metrics.parse_error} Inserted:${metrics.db_inserted} Failed:${metrics.db_failed} FlushAvg:${flushAvgMs}ms DbAvg:${dbAvgMs}ms`; console.log(report); - logger.info(report, metrics); + logger.info(report); try { await redisIntegration.info("Minute Metrics", metrics); } catch (err) { diff --git a/bls-onoffline-backend/openspec/changes/2026-03-03-refactor-partition-indexes/proposal.md b/bls-onoffline-backend/openspec/changes/2026-03-03-refactor-partition-indexes/proposal.md new file mode 100644 index 0000000..67e0e57 --- /dev/null +++ b/bls-onoffline-backend/openspec/changes/2026-03-03-refactor-partition-indexes/proposal.md @@ -0,0 +1,11 @@ +# Proposal: Refactor Partition Indexes + +## Goal +利用 PostgreSQL 默认的支持,改变每日分区创立时的索引策略,不再在代码中对每个分区单独创建索引。 + +## Context +当前 `PartitionManager` 在动态创建子分区后,会隐式调用查询在子分区上创建六个单列索引。由于我们使用的是 PostgreSQL 11+,且我们在初始化脚本中的主分区表 `onoffline.onoffline_record` 上已经创建了所有的索引,此主表上的索引会自动应用于所有的子分区,不需要我们在创建分区时另外手动添加。 + +## Proposed Changes +1. 在 `src/db/partitionManager.js` 中移除子分区显式创建索引的方法 `ensurePartitionIndexes` 以及针对已有子分区的循环索引检查函数 `ensureIndexesForExistingPartitions`。 +2. 
在更新分区流程 `ensurePartitions` 以及 `ensurePartitionsForTimestamps` 中,移除对 `ensurePartitionIndexes` 的调用。 \ No newline at end of file diff --git a/bls-onoffline-backend/openspec/changes/2026-03-03-refactor-partition-indexes/specs/onoffline/spec.md b/bls-onoffline-backend/openspec/changes/2026-03-03-refactor-partition-indexes/specs/onoffline/spec.md new file mode 100644 index 0000000..e42cc97 --- /dev/null +++ b/bls-onoffline-backend/openspec/changes/2026-03-03-refactor-partition-indexes/specs/onoffline/spec.md @@ -0,0 +1,11 @@ +# Spec Delta: onoffline-backend + +## MODIFIED Requirements + +### Requirement: 数据库分区策略 +系统 SHALL 使用 Range Partitioning 按天分区,并自动维护未来 30 天的分区表,子表依赖 PostgreSQL 原生机制继承主表索引。 + +#### Scenario: 分区预创建 +- **GIVEN** 系统启动或每日凌晨 +- **WHEN** 运行分区维护任务 +- **THEN** 确保数据库中存在未来 30 天的分区表,无需对子表显式创建单列索引 diff --git a/bls-onoffline-backend/openspec/changes/2026-03-03-refactor-partition-indexes/tasks.md b/bls-onoffline-backend/openspec/changes/2026-03-03-refactor-partition-indexes/tasks.md new file mode 100644 index 0000000..b205407 --- /dev/null +++ b/bls-onoffline-backend/openspec/changes/2026-03-03-refactor-partition-indexes/tasks.md @@ -0,0 +1,6 @@ +# Tasks: Refactor Partition Indexes + +- [x] refactor `src/db/partitionManager.js`: remove `ensurePartitionIndexes` and `ensureIndexesForExistingPartitions`. +- [x] refactor `src/db/partitionManager.js`: update `ensurePartitions` and `ensurePartitionsForTimestamps` to remove calls to `ensurePartitionIndexes`. +- [x] refactor `src/db/initializer.js` (and any other occurrences) to reflect the removal. +- [x] update openspec requirements to clarify that index propagation relies on PostgreSQL parent-table indexes. \ No newline at end of file diff --git a/bls-onoffline-backend/src/db/initializer.js b/bls-onoffline-backend/src/db/initializer.js index 9f2b44c..1f83cc2 100644 --- a/bls-onoffline-backend/src/db/initializer.js +++ b/bls-onoffline-backend/src/db/initializer.js @@ -23,7 +23,6 @@ class DatabaseInitializer { // 3. 
Ensure Partitions for the next month await partitionManager.ensurePartitions(30); - await partitionManager.ensureIndexesForExistingPartitions(); console.log('Database initialization completed successfully.'); logger.info('Database initialization completed successfully.'); diff --git a/bls-onoffline-backend/src/db/partitionManager.js b/bls-onoffline-backend/src/db/partitionManager.js index c55d819..a28d029 100644 --- a/bls-onoffline-backend/src/db/partitionManager.js +++ b/bls-onoffline-backend/src/db/partitionManager.js @@ -26,75 +26,6 @@ class PartitionManager { return { startMs, endMs, partitionSuffix }; } - async ensurePartitionIndexes(client, schema, table, partitionSuffix) { - const startedAt = Date.now(); - const partitionName = `${schema}.${table}_${partitionSuffix}`; - const indexBase = `${table}_${partitionSuffix}`; - - const indexSpecs = [ - { name: `idx_${indexBase}_ts`, column: 'ts_ms' }, - { name: `idx_${indexBase}_hid`, column: 'hotel_id' }, - { name: `idx_${indexBase}_mac`, column: 'mac' }, - { name: `idx_${indexBase}_did`, column: 'device_id' }, - { name: `idx_${indexBase}_rid`, column: 'room_id' }, - { name: `idx_${indexBase}_cs`, column: 'current_status' } - ]; - - for (const spec of indexSpecs) { - await client.query(`CREATE INDEX IF NOT EXISTS ${spec.name} ON ${partitionName} (${spec.column});`); - } - - await client.query(`ANALYZE ${partitionName};`); - - const elapsedMs = Date.now() - startedAt; - if (elapsedMs > 1000) { - logger.warn(`Partition index ensure slow`, { partitionName, elapsedMs }); - } - } - - async ensureIndexesForExistingPartitions() { - const startedAt = Date.now(); - const client = await dbManager.pool.connect(); - try { - const schema = config.db.schema; - const table = config.db.table; - - const res = await client.query( - ` - SELECT c.relname AS relname - FROM pg_inherits i - JOIN pg_class p ON i.inhparent = p.oid - JOIN pg_namespace pn ON pn.oid = p.relnamespace - JOIN pg_class c ON i.inhrelid = c.oid - WHERE pn.nspname = 
$1 AND p.relname = $2 - ORDER BY c.relname; - `, - [schema, table] - ); - - const suffixes = new Set(); - const pattern = new RegExp(`^${table}_(\\d{8})$`); - for (const row of res.rows) { - const relname = row?.relname; - if (typeof relname !== 'string') continue; - const match = relname.match(pattern); - if (!match) continue; - suffixes.add(match[1]); - } - - for (const suffix of suffixes) { - await this.ensurePartitionIndexes(client, schema, table, suffix); - } - - const elapsedMs = Date.now() - startedAt; - if (elapsedMs > 5000) { - logger.warn('Ensure existing partition indexes slow', { schema, table, partitions: suffixes.size, elapsedMs }); - } - } finally { - client.release(); - } - } - /** * Ensure partitions exist for the past M days and next N days. * @param {number} daysAhead - Number of days to pre-create. @@ -132,8 +63,6 @@ class PartitionManager { `; await client.query(createSql); } - - await this.ensurePartitionIndexes(client, schema, table, partitionSuffix); } logger.info('Partition check completed.'); } catch (err) { @@ -185,8 +114,6 @@ class PartitionManager { FOR VALUES FROM (${startMs}) TO (${endMs}); `); } - - await this.ensurePartitionIndexes(client, schema, table, partitionSuffix); } } finally { client.release(); diff --git a/bls-onoffline-backend/src/index.js b/bls-onoffline-backend/src/index.js index 0964284..0451cba 100644 --- a/bls-onoffline-backend/src/index.js +++ b/bls-onoffline-backend/src/index.js @@ -78,9 +78,9 @@ const bootstrap = async () => { const metrics = metricCollector.getAndReset(); const flushAvgMs = metrics.batch_flush_count > 0 ? (metrics.batch_flush_ms_sum / metrics.batch_flush_count).toFixed(1) : '0.0'; const dbAvgMs = metrics.db_insert_count > 0 ? 
(metrics.db_insert_ms_sum / metrics.db_insert_count).toFixed(1) : '0.0'; - const report = `[Minute Metrics] Pulled: ${metrics.kafka_pulled}, Parse Error: ${metrics.parse_error}, Inserted: ${metrics.db_inserted}, Failed: ${metrics.db_failed}, FlushAvgMs: ${flushAvgMs}, DbAvgMs: ${dbAvgMs}, PulledByPartition: ${JSON.stringify(metrics.keyed?.kafka_pulled_by_partition || {})}, InsertedByPartition: ${JSON.stringify(metrics.keyed?.db_inserted_by_partition || {})}, FailedByPartition: ${JSON.stringify(metrics.keyed?.db_failed_by_partition || {})}, InsertedByDay: ${JSON.stringify(metrics.keyed?.db_inserted_by_day || {})}, DbMsByDay: ${JSON.stringify(metrics.keyed?.db_insert_ms_sum_by_day || {})}`; + const report = `[Metrics] Pulled:${metrics.kafka_pulled} ParseErr:${metrics.parse_error} Inserted:${metrics.db_inserted} Failed:${metrics.db_failed} FlushAvg:${flushAvgMs}ms DbAvg:${dbAvgMs}ms`; console.log(report); - logger.info(report, metrics); + logger.info(report); try { await redisIntegration.info('Minute Metrics', metrics);