feat: 添加MySQL2依赖并实现云端数据库集成

添加mysql2依赖以支持MySQL数据库连接
实现云端数据库配置和连接池管理
新增数据验证脚本用于检查酒店和房间数据
优化数据服务以支持云端数据缓存和同步
更新环境变量和配置以支持新功能
This commit is contained in:
2026-03-13 09:27:55 +08:00
parent a34e180db6
commit a13414ffad
11 changed files with 699 additions and 21 deletions

View File

@@ -13,16 +13,16 @@ POSTGRES_PASSWORD=YourActualStrongPasswordForPostgres!
POSTGRES_MAX_CONNECTIONS=2 POSTGRES_MAX_CONNECTIONS=2
POSTGRES_IDLE_TIMEOUT_MS=30000 POSTGRES_IDLE_TIMEOUT_MS=30000
# 启用的酒店ID列表 # 启用的酒店ID列表
ENABLED_HOTEL_IDS=1001,1068,1085,1865,2000-2500 ENABLED_HOTEL_IDS=1000-2500 #1001,1003,1068,1085,1865,2000-2500
# 接口启用配置 (true/false) # 接口启用配置 (true/false)
ENABLE_API_HOTEL_LIST=true # 酒店列表 ENABLE_API_HOTEL_LIST=true # 酒店列表
ENABLE_API_HOST_LIST=true # 房间列表 ENABLE_API_HOST_LIST=true # 房间列表
ENABLE_API_ROOM_TYPE_INFO=true # 房型列表 ENABLE_API_ROOM_TYPE_INFO=true # 房型列表
ENABLE_API_ROOM_TYPE_MODAL_INFO=false # 回路列表 ENABLE_API_ROOM_TYPE_MODAL_INFO=true # 回路列表
CLOUD_DB_HOST=blv-cloud-db.mysql.rds.aliyuncs.com CLOUD_DB_HOST=blv-cloud-db.mysql.rds.aliyuncs.com
CLOUD_DB_PORT=3307 CLOUD_DB_PORT=3307
CLOUD_DB_DATABASE=tbl_room_type_circuit_powers CLOUD_DB_DATABASE=blv_rcu_db
CLOUD_DB_USER=blv_rcu CLOUD_DB_USER=blv_rcu
CLOUD_DB_PASSWORD=fnadiaJDIJ7546 CLOUD_DB_PASSWORD=fnadiaJDIJ7546

View File

@@ -0,0 +1,123 @@
/*
Navicat Premium Dump SQL
Source Server : FnOS 109
Source Server Type : PostgreSQL
Source Server Version : 150014 (150014)
Source Host : 10.8.8.109:5433
Source Catalog : log_platform
Source Schema : temporary_project
Target Server Type : PostgreSQL
Target Server Version : 150014 (150014)
File Encoding : 65001
Date: 12/03/2026 18:30:38
*/
-- ----------------------------
-- Table structure for loops_default
-- ----------------------------
-- Default partition of "temporary_project"."loops" (attached after the parent
-- table is created below). One row per room circuit ("loop"). The columns from
-- "type" onward are enriched from the cloud MySQL table
-- tbl_room_type_circuit_powers and stay NULL when no cloud match exists.
-- NOTE(review): units for power/rate/temperature/height/area/heat_loss are not
-- visible in this dump — confirm against the producing service.
DROP TABLE IF EXISTS "temporary_project"."loops_default";
CREATE TABLE "temporary_project"."loops_default" (
  "guid" varchar(32) COLLATE "pg_catalog"."default" NOT NULL,  -- 32-char UUID with dashes stripped
  "id" int4 NOT NULL,                                          -- loop id from the source API
  "loop_name" varchar(255) COLLATE "pg_catalog"."default",
  "room_type_id" int4 NOT NULL,                                -- partition key of the parent table
  "loop_address" varchar(255) COLLATE "pg_catalog"."default",
  "loop_type" varchar(50) COLLATE "pg_catalog"."default",
  -- cloud-synced fields (nullable) below:
  "type" varchar(254) COLLATE "pg_catalog"."default",
  "name" varchar(254) COLLATE "pg_catalog"."default",
  "power" float8,
  "rate" float8,
  "temperature" float8,
  "air_type" varchar(254) COLLATE "pg_catalog"."default",
  "air_brand" varchar(254) COLLATE "pg_catalog"."default",
  "air_model" varchar(254) COLLATE "pg_catalog"."default",
  "height" float8,
  "area" float8,
  "heat_loss" float8,
  "remark" varchar(254) COLLATE "pg_catalog"."default"
)
TABLESPACE "ts_hot"
;
-- ----------------------------
-- Table structure for loops
-- ----------------------------
-- Partitioned parent table, LIST-partitioned by room_type_id. Column layout is
-- identical to loops_default above; until per-room-type partitions are added,
-- every row lands in the DEFAULT partition attached at the end of this block.
DROP TABLE IF EXISTS "temporary_project"."loops";
CREATE TABLE "temporary_project"."loops" (
  "guid" varchar(32) COLLATE "pg_catalog"."default" NOT NULL,  -- 32-char UUID with dashes stripped
  "id" int4 NOT NULL,                                          -- loop id from the source API
  "loop_name" varchar(255) COLLATE "pg_catalog"."default",
  "room_type_id" int4 NOT NULL,                                -- LIST partition key
  "loop_address" varchar(255) COLLATE "pg_catalog"."default",
  "loop_type" varchar(50) COLLATE "pg_catalog"."default",
  -- cloud-synced fields (nullable) below:
  "type" varchar(254) COLLATE "pg_catalog"."default",
  "name" varchar(254) COLLATE "pg_catalog"."default",
  "power" float8,
  "rate" float8,
  "temperature" float8,
  "air_type" varchar(254) COLLATE "pg_catalog"."default",
  "air_brand" varchar(254) COLLATE "pg_catalog"."default",
  "air_model" varchar(254) COLLATE "pg_catalog"."default",
  "height" float8,
  "area" float8,
  "heat_loss" float8,
  "remark" varchar(254) COLLATE "pg_catalog"."default"
)
PARTITION BY LIST (
  "room_type_id" "pg_catalog"."int4_ops"
)
TABLESPACE "ts_hot"
;
-- Route rows that match no explicit LIST partition into loops_default.
ALTER TABLE "temporary_project"."loops" ATTACH PARTITION "temporary_project"."loops_default" DEFAULT;
-- ----------------------------
-- Indexes structure for table loops_default
-- ----------------------------
-- Per-partition secondary indexes. Their definitions match the parent-table
-- indexes created later in this file, so PostgreSQL attaches these instead of
-- building duplicates when the parent indexes are created.
CREATE INDEX "loops_default_id_idx" ON "temporary_project"."loops_default" USING btree (
  "id" "pg_catalog"."int4_ops" ASC NULLS LAST
) TABLESPACE "ts_hot";
CREATE INDEX "loops_default_loop_address_idx" ON "temporary_project"."loops_default" USING btree (
  "loop_address" COLLATE "pg_catalog"."default" "pg_catalog"."text_ops" ASC NULLS LAST
) TABLESPACE "ts_hot";
CREATE INDEX "loops_default_loop_name_idx" ON "temporary_project"."loops_default" USING btree (
  "loop_name" COLLATE "pg_catalog"."default" "pg_catalog"."text_ops" ASC NULLS LAST
) TABLESPACE "ts_hot";
CREATE INDEX "loops_default_loop_type_idx" ON "temporary_project"."loops_default" USING btree (
  "loop_type" COLLATE "pg_catalog"."default" "pg_catalog"."text_ops" ASC NULLS LAST
) TABLESPACE "ts_hot";
CREATE INDEX "loops_default_room_type_id_idx" ON "temporary_project"."loops_default" USING btree (
  "room_type_id" "pg_catalog"."int4_ops" ASC NULLS LAST
) TABLESPACE "ts_hot";
-- ----------------------------
-- Primary Key structure for table loops_default
-- ----------------------------
-- Composite PK mirrors the parent table's PK (guid, id, room_type_id);
-- room_type_id must be included because it is the partition key.
ALTER TABLE "temporary_project"."loops_default" ADD CONSTRAINT "loops_default_pkey" PRIMARY KEY ("guid", "id", "room_type_id") USING INDEX TABLESPACE "ts_hot";
-- ----------------------------
-- Indexes structure for table loops
-- ----------------------------
-- Parent-table indexes: PostgreSQL recursively attaches the matching
-- loops_default_* indexes created above rather than rebuilding them.
-- TABLESPACE "ts_hot" is specified here for consistency with the partition's
-- indexes (it was missing in the original dump), so that indexes created on
-- any FUTURE partition default to the same tablespace as everything else.
CREATE INDEX "idx_loops_address" ON "temporary_project"."loops" USING btree (
  "loop_address" COLLATE "pg_catalog"."default" "pg_catalog"."text_ops" ASC NULLS LAST
) TABLESPACE "ts_hot";
CREATE INDEX "idx_loops_id" ON "temporary_project"."loops" USING btree (
  "id" "pg_catalog"."int4_ops" ASC NULLS LAST
) TABLESPACE "ts_hot";
CREATE INDEX "idx_loops_name" ON "temporary_project"."loops" USING btree (
  "loop_name" COLLATE "pg_catalog"."default" "pg_catalog"."text_ops" ASC NULLS LAST
) TABLESPACE "ts_hot";
CREATE INDEX "idx_loops_room_type_id" ON "temporary_project"."loops" USING btree (
  "room_type_id" "pg_catalog"."int4_ops" ASC NULLS LAST
) TABLESPACE "ts_hot";
CREATE INDEX "idx_loops_type" ON "temporary_project"."loops" USING btree (
  "loop_type" COLLATE "pg_catalog"."default" "pg_catalog"."text_ops" ASC NULLS LAST
) TABLESPACE "ts_hot";
-- ----------------------------
-- Primary Key structure for table loops
-- ----------------------------
-- Must include the partition key (room_type_id); the existing
-- loops_default_pkey satisfies this constraint on the attached partition.
ALTER TABLE "temporary_project"."loops" ADD CONSTRAINT "loops_pkey" PRIMARY KEY ("guid", "id", "room_type_id") USING INDEX TABLESPACE "ts_hot";

View File

@@ -11,6 +11,7 @@
"dependencies": { "dependencies": {
"axios": "^1.6.0", "axios": "^1.6.0",
"dotenv": "^16.0.0", "dotenv": "^16.0.0",
"mysql2": "^3.0.0",
"pg": "^8.0.0", "pg": "^8.0.0",
"pino": "^8.0.0", "pino": "^8.0.0",
"uuid": "^9.0.0" "uuid": "^9.0.0"
@@ -1037,8 +1038,8 @@
"version": "25.1.0", "version": "25.1.0",
"resolved": "https://registry.npmmirror.com/@types/node/-/node-25.1.0.tgz", "resolved": "https://registry.npmmirror.com/@types/node/-/node-25.1.0.tgz",
"integrity": "sha512-t7frlewr6+cbx+9Ohpl0NOTKXZNV9xHRmNOvql47BFJKcEG1CxtxlPEEe+gR9uhVWM4DwhnvTF110mIL4yP9RA==", "integrity": "sha512-t7frlewr6+cbx+9Ohpl0NOTKXZNV9xHRmNOvql47BFJKcEG1CxtxlPEEe+gR9uhVWM4DwhnvTF110mIL4yP9RA==",
"dev": true,
"license": "MIT", "license": "MIT",
"peer": true,
"dependencies": { "dependencies": {
"undici-types": "~7.16.0" "undici-types": "~7.16.0"
} }
@@ -1160,6 +1161,15 @@
"node": ">=8.0.0" "node": ">=8.0.0"
} }
}, },
"node_modules/aws-ssl-profiles": {
"version": "1.1.2",
"resolved": "https://registry.npmmirror.com/aws-ssl-profiles/-/aws-ssl-profiles-1.1.2.tgz",
"integrity": "sha512-NZKeq9AfyQvEeNlN0zSYAaWrmBffJh3IELMZfRpJVWgrpEbtEpnjvzqBPf+mxoI287JohRDoa+/nsfqqiZmF6g==",
"license": "MIT",
"engines": {
"node": ">= 6.0.0"
}
},
"node_modules/axios": { "node_modules/axios": {
"version": "1.13.4", "version": "1.13.4",
"resolved": "https://registry.npmmirror.com/axios/-/axios-1.13.4.tgz", "resolved": "https://registry.npmmirror.com/axios/-/axios-1.13.4.tgz",
@@ -1737,6 +1747,15 @@
"node": ">=0.4.0" "node": ">=0.4.0"
} }
}, },
"node_modules/denque": {
"version": "2.1.0",
"resolved": "https://registry.npmmirror.com/denque/-/denque-2.1.0.tgz",
"integrity": "sha512-HVQE3AAb/pxF8fQAoiqpvg9i3evqug3hoiwakOyZAwJm+6vZehbkYXZ0l4JxS+I3QxM97v5aaRNhj8v5oBhekw==",
"license": "Apache-2.0",
"engines": {
"node": ">=0.10"
}
},
"node_modules/detect-newline": { "node_modules/detect-newline": {
"version": "3.1.0", "version": "3.1.0",
"resolved": "https://registry.npmmirror.com/detect-newline/-/detect-newline-3.1.0.tgz", "resolved": "https://registry.npmmirror.com/detect-newline/-/detect-newline-3.1.0.tgz",
@@ -2111,6 +2130,15 @@
"url": "https://github.com/sponsors/ljharb" "url": "https://github.com/sponsors/ljharb"
} }
}, },
"node_modules/generate-function": {
"version": "2.3.1",
"resolved": "https://registry.npmmirror.com/generate-function/-/generate-function-2.3.1.tgz",
"integrity": "sha512-eeB5GfMNeevm/GRYq20ShmsaGcmI81kIX2K9XQx5miC8KdHaC6Jm0qQ8ZNeGOi7wYB8OsdxKs+Y2oVuTFuVwKQ==",
"license": "MIT",
"dependencies": {
"is-property": "^1.0.2"
}
},
"node_modules/gensync": { "node_modules/gensync": {
"version": "1.0.0-beta.2", "version": "1.0.0-beta.2",
"resolved": "https://registry.npmmirror.com/gensync/-/gensync-1.0.0-beta.2.tgz", "resolved": "https://registry.npmmirror.com/gensync/-/gensync-1.0.0-beta.2.tgz",
@@ -2318,6 +2346,22 @@
"node": ">=10.17.0" "node": ">=10.17.0"
} }
}, },
"node_modules/iconv-lite": {
"version": "0.7.2",
"resolved": "https://registry.npmmirror.com/iconv-lite/-/iconv-lite-0.7.2.tgz",
"integrity": "sha512-im9DjEDQ55s9fL4EYzOAv0yMqmMBSZp6G0VvFyTMPKWxiSBHUj9NW/qqLmXUwXrrM7AvqSlTCfvqRb0cM8yYqw==",
"license": "MIT",
"dependencies": {
"safer-buffer": ">= 2.1.2 < 3.0.0"
},
"engines": {
"node": ">=0.10.0"
},
"funding": {
"type": "opencollective",
"url": "https://opencollective.com/express"
}
},
"node_modules/ieee754": { "node_modules/ieee754": {
"version": "1.2.1", "version": "1.2.1",
"resolved": "https://registry.npmmirror.com/ieee754/-/ieee754-1.2.1.tgz", "resolved": "https://registry.npmmirror.com/ieee754/-/ieee754-1.2.1.tgz",
@@ -2483,6 +2527,12 @@
"node": ">=0.12.0" "node": ">=0.12.0"
} }
}, },
"node_modules/is-property": {
"version": "1.0.2",
"resolved": "https://registry.npmmirror.com/is-property/-/is-property-1.0.2.tgz",
"integrity": "sha512-Ks/IoX00TtClbGQr4TWXemAnktAQvYB7HzcCxDGqEZU6oCmb2INHuOoKxbtR+HFkmYWBKv/dOZtGRiAjDhj92g==",
"license": "MIT"
},
"node_modules/is-stream": { "node_modules/is-stream": {
"version": "2.0.1", "version": "2.0.1",
"resolved": "https://registry.npmmirror.com/is-stream/-/is-stream-2.0.1.tgz", "resolved": "https://registry.npmmirror.com/is-stream/-/is-stream-2.0.1.tgz",
@@ -3274,6 +3324,27 @@
"node": ">=8" "node": ">=8"
} }
}, },
"node_modules/long": {
"version": "5.3.2",
"resolved": "https://registry.npmmirror.com/long/-/long-5.3.2.tgz",
"integrity": "sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==",
"license": "Apache-2.0"
},
"node_modules/lru.min": {
"version": "1.1.4",
"resolved": "https://registry.npmmirror.com/lru.min/-/lru.min-1.1.4.tgz",
"integrity": "sha512-DqC6n3QQ77zdFpCMASA1a3Jlb64Hv2N2DciFGkO/4L9+q/IpIAuRlKOvCXabtRW6cQf8usbmM6BE/TOPysCdIA==",
"license": "MIT",
"engines": {
"bun": ">=1.0.0",
"deno": ">=1.30.0",
"node": ">=8.0.0"
},
"funding": {
"type": "github",
"url": "https://github.com/sponsors/wellwelwel"
}
},
"node_modules/make-dir": { "node_modules/make-dir": {
"version": "4.0.0", "version": "4.0.0",
"resolved": "https://registry.npmmirror.com/make-dir/-/make-dir-4.0.0.tgz", "resolved": "https://registry.npmmirror.com/make-dir/-/make-dir-4.0.0.tgz",
@@ -3391,6 +3462,40 @@
"dev": true, "dev": true,
"license": "MIT" "license": "MIT"
}, },
"node_modules/mysql2": {
"version": "3.19.1",
"resolved": "https://registry.npmmirror.com/mysql2/-/mysql2-3.19.1.tgz",
"integrity": "sha512-yn4zh+Uxu5J3Zvi6Ao96lJ7BSBRkspHflWQAmOPND+htbpIKDQw99TTvPzgihKO/QyMickZopO4OsnixnpcUwA==",
"license": "MIT",
"dependencies": {
"aws-ssl-profiles": "^1.1.2",
"denque": "^2.1.0",
"generate-function": "^2.3.1",
"iconv-lite": "^0.7.2",
"long": "^5.3.2",
"lru.min": "^1.1.4",
"named-placeholders": "^1.1.6",
"sql-escaper": "^1.3.3"
},
"engines": {
"node": ">= 8.0"
},
"peerDependencies": {
"@types/node": ">= 8"
}
},
"node_modules/named-placeholders": {
"version": "1.1.6",
"resolved": "https://registry.npmmirror.com/named-placeholders/-/named-placeholders-1.1.6.tgz",
"integrity": "sha512-Tz09sEL2EEuv5fFowm419c1+a/jSMiBjI9gHxVLrVdbUkkNUUfjsVYs9pVZu5oCon/kmRh9TfLEObFtkVxmY0w==",
"license": "MIT",
"dependencies": {
"lru.min": "^1.1.0"
},
"engines": {
"node": ">=8.0.0"
}
},
"node_modules/natural-compare": { "node_modules/natural-compare": {
"version": "1.4.0", "version": "1.4.0",
"resolved": "https://registry.npmmirror.com/natural-compare/-/natural-compare-1.4.0.tgz", "resolved": "https://registry.npmmirror.com/natural-compare/-/natural-compare-1.4.0.tgz",
@@ -4190,6 +4295,12 @@
"node": ">=10" "node": ">=10"
} }
}, },
"node_modules/safer-buffer": {
"version": "2.1.2",
"resolved": "https://registry.npmmirror.com/safer-buffer/-/safer-buffer-2.1.2.tgz",
"integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==",
"license": "MIT"
},
"node_modules/secure-json-parse": { "node_modules/secure-json-parse": {
"version": "2.7.0", "version": "2.7.0",
"resolved": "https://registry.npmmirror.com/secure-json-parse/-/secure-json-parse-2.7.0.tgz", "resolved": "https://registry.npmmirror.com/secure-json-parse/-/secure-json-parse-2.7.0.tgz",
@@ -4316,6 +4427,21 @@
"dev": true, "dev": true,
"license": "BSD-3-Clause" "license": "BSD-3-Clause"
}, },
"node_modules/sql-escaper": {
"version": "1.3.3",
"resolved": "https://registry.npmmirror.com/sql-escaper/-/sql-escaper-1.3.3.tgz",
"integrity": "sha512-BsTCV265VpTp8tm1wyIm1xqQCS+Q9NHx2Sr+WcnUrgLrQ6yiDIvHYJV5gHxsj1lMBy2zm5twLaZao8Jd+S8JJw==",
"license": "MIT",
"engines": {
"bun": ">=1.0.0",
"deno": ">=2.0.0",
"node": ">=12.0.0"
},
"funding": {
"type": "github",
"url": "https://github.com/mysqljs/sql-escaper?sponsor=1"
}
},
"node_modules/stack-utils": { "node_modules/stack-utils": {
"version": "2.0.6", "version": "2.0.6",
"resolved": "https://registry.npmmirror.com/stack-utils/-/stack-utils-2.0.6.tgz", "resolved": "https://registry.npmmirror.com/stack-utils/-/stack-utils-2.0.6.tgz",
@@ -4514,7 +4640,6 @@
"version": "7.16.0", "version": "7.16.0",
"resolved": "https://registry.npmmirror.com/undici-types/-/undici-types-7.16.0.tgz", "resolved": "https://registry.npmmirror.com/undici-types/-/undici-types-7.16.0.tgz",
"integrity": "sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==", "integrity": "sha512-Zz+aZWSj8LE6zoxD+xrjh4VfkIG8Ya6LvYkZqtUQGJPZjYl53ypCaUwWqo7eI0x66KBGeRo+mlBEkMSeSZ38Nw==",
"dev": true,
"license": "MIT" "license": "MIT"
}, },
"node_modules/update-browserslist-db": { "node_modules/update-browserslist-db": {

View File

@@ -15,6 +15,7 @@
"dependencies": { "dependencies": {
"axios": "^1.6.0", "axios": "^1.6.0",
"dotenv": "^16.0.0", "dotenv": "^16.0.0",
"mysql2": "^3.0.0",
"pg": "^8.0.0", "pg": "^8.0.0",
"pino": "^8.0.0", "pino": "^8.0.0",
"uuid": "^9.0.0" "uuid": "^9.0.0"

View File

@@ -11,6 +11,13 @@ module.exports = {
max: parseInt(process.env.POSTGRES_MAX_CONNECTIONS, 10) || 6, max: parseInt(process.env.POSTGRES_MAX_CONNECTIONS, 10) || 6,
idleTimeoutMillis: parseInt(process.env.POSTGRES_IDLE_TIMEOUT_MS, 10) || 30000, idleTimeoutMillis: parseInt(process.env.POSTGRES_IDLE_TIMEOUT_MS, 10) || 30000,
}, },
cloudDbConfig: {
host: process.env.CLOUD_DB_HOST || 'blv-cloud-db.mysql.rds.aliyuncs.com',
port: parseInt(process.env.CLOUD_DB_PORT, 10) || 3307,
database: process.env.CLOUD_DB_DATABASE || 'tbl_room_type_circuit_powers',
user: process.env.CLOUD_DB_USER || 'blv_rcu',
password: process.env.CLOUD_DB_PASSWORD || 'fnadiaJDIJ7546',
},
enabledHotelIds: parseHotelIds(process.env.ENABLED_HOTEL_IDS), enabledHotelIds: parseHotelIds(process.env.ENABLED_HOTEL_IDS),
apiBaseUrl: process.env.API_BASE_URL || 'http://www.boonlive-rcu.com:7000/api/values', apiBaseUrl: process.env.API_BASE_URL || 'http://www.boonlive-rcu.com:7000/api/values',
apiToggles: { apiToggles: {

View File

@@ -0,0 +1,105 @@
const mysql = require('mysql2/promise');
const { cloudDbConfig } = require('../config');
const logger = require('../utils/logger');
let cloudPool = null;
/**
 * Initializes the cloud MySQL connection pool and verifies connectivity by
 * acquiring (and immediately releasing) one connection.
 *
 * Fixes vs. original:
 * - idempotent: a second call no longer silently replaces (and thereby leaks
 *   the connections of) an already-created pool;
 * - on failure the half-created pool reference is reset to null, so a later
 *   call can retry initialization instead of getCloudClient() reusing a
 *   broken pool.
 *
 * @throws the underlying mysql2 error when pool creation or the test
 *         connection fails.
 */
const initCloudDb = async () => {
  if (cloudPool) {
    logger.warn('Cloud MySQL pool already initialized; skipping re-initialization');
    return;
  }
  try {
    cloudPool = mysql.createPool({
      host: cloudDbConfig.host,
      port: cloudDbConfig.port,
      database: cloudDbConfig.database,
      user: cloudDbConfig.user,
      password: cloudDbConfig.password,
      waitForConnections: true,
      connectionLimit: 5,
      queueLimit: 0,
      connectTimeout: 10000,
      enableKeepAlive: true,
      keepAliveInitialDelay: 10000
    });
    // Pool lifecycle logging; 'enqueue' at warn level because it signals the
    // pool is saturated and callers are queueing.
    cloudPool.on('connection', () => {
      logger.info('New cloud MySQL connection created');
    });
    cloudPool.on('acquire', () => {
      logger.debug('Cloud MySQL connection acquired from pool');
    });
    cloudPool.on('release', () => {
      logger.debug('Cloud MySQL connection released back to pool');
    });
    cloudPool.on('enqueue', () => {
      logger.warn('Cloud MySQL connection queue waiting for available connection');
    });
    // Smoke test so misconfiguration fails fast at startup, not on the
    // first real query.
    const connection = await cloudPool.getConnection();
    connection.release();
    logger.info('Cloud MySQL database connection initialized successfully');
  } catch (error) {
    cloudPool = null;
    logger.error({ error }, 'Failed to initialize cloud MySQL database connection');
    throw error;
  }
};
/**
 * Hands out a pooled cloud MySQL connection, lazily creating the pool on
 * first use. Callers must release() the connection when finished.
 */
const getCloudClient = async () => {
  if (cloudPool === null) {
    await initCloudDb();
  }
  return cloudPool.getConnection();
};
/**
 * Executes a parameterized statement against the cloud pool with
 * exponential-backoff retries.
 *
 * @param {string} sql     statement with `?` placeholders (prepared via execute)
 * @param {Array}  params  bound values for the placeholders
 * @param {number} retries total attempts before the last error is rethrown
 * @param {number} delay   initial backoff in ms; doubled after each failure
 * @returns {Promise<Array>} result rows of the first successful attempt
 * @throws the last query/connection error once all attempts are exhausted
 */
const queryCloudWithRetry = async (sql, params = [], retries = 3, delay = 1000) => {
  let connection;
  for (let i = 0; i < retries; i++) {
    try {
      connection = await getCloudClient();
      const [rows] = await connection.execute(sql, params);
      // The finally block below still runs on return and releases the
      // connection back to the pool.
      return rows;
    } catch (error) {
      logger.error({ error, sql, params, attempt: i + 1, retries }, `Error executing cloud database query (attempt ${i + 1}/${retries})`);
      // A failed connection may be poisoned (e.g. half-open socket), so
      // destroy it instead of releasing; null the local reference so the
      // finally block does not touch the destroyed connection.
      if (connection) {
        try {
          connection.destroy();
        } catch (e) {
          // Ignore destroy errors
        }
        connection = null;
      }
      if (i < retries - 1) {
        logger.info(`Retrying in ${delay}ms...`);
        await new Promise(resolve => setTimeout(resolve, delay));
        delay *= 2; // exponential backoff
      } else {
        // Out of attempts: surface the final error to the caller.
        throw error;
      }
    } finally {
      // Only reached with a live connection on the success path.
      if (connection) {
        connection.release();
      }
    }
  }
};
/**
 * Convenience wrapper around queryCloudWithRetry using the default retry
 * policy (3 attempts, 1 s initial backoff).
 */
const queryCloud = async (sql, params = []) => queryCloudWithRetry(sql, params, 3, 1000);
/**
 * Drains and closes the cloud MySQL pool. Safe to call when the pool was
 * never initialized, and idempotent.
 *
 * Fix vs. original: clears the module-level pool reference after end(), so a
 * subsequent getCloudClient() lazily re-creates the pool instead of handing
 * out connections from a closed one.
 */
const closeCloudDb = async () => {
  if (cloudPool) {
    await cloudPool.end();
    cloudPool = null;
    logger.info('Cloud MySQL database connection closed');
  }
};
// Public surface of the cloud DB module.
module.exports = { initCloudDb, getCloudClient, queryCloud, closeCloudDb };

View File

@@ -0,0 +1,64 @@
const { query, close } = require('../db/utils');
const logger = require('../utils/logger');
/**
 * Ad-hoc diagnostic script: verifies hotels 1003 and 2170 exist in the local
 * PostgreSQL store, dumps a sample of their room types, and cross-checks the
 * matching rows in the cloud MySQL table tbl_room_type_circuit_powers.
 *
 * Fix vs. original: closeCloudDb() ran only on the success path, so a failed
 * query left the cloud pool open (which can keep the process alive). Both
 * pools are now closed in `finally`.
 */
const checkHotels = async () => {
  // Required lazily so the cloud pool is only created when this script runs.
  const { queryCloud, closeCloudDb } = require('../db/cloudDb');
  try {
    logger.info('Checking hotels 1003 and 2170...');
    // 1. Do the hotels exist locally?
    const hotel1003Res = await query('SELECT * FROM temporary_project.hotels WHERE hotel_id = $1', [1003]);
    logger.info(`Hotel 1003 exists: ${hotel1003Res.rows.length > 0}`);
    if (hotel1003Res.rows.length > 0) {
      logger.info(`Hotel 1003 data: ${JSON.stringify(hotel1003Res.rows[0])}`);
    }
    const hotel2170Res = await query('SELECT * FROM temporary_project.hotels WHERE hotel_id = $1', [2170]);
    logger.info(`Hotel 2170 exists: ${hotel2170Res.rows.length > 0}`);
    if (hotel2170Res.rows.length > 0) {
      logger.info(`Hotel 2170 data: ${JSON.stringify(hotel2170Res.rows[0])}`);
    }
    // 2. Sample room types. room_type.hotel_id references hotels.id (internal
    // PK), not the public hotel_id code, hence the lookup of internalId.
    if (hotel1003Res.rows.length > 0) {
      const internalId = hotel1003Res.rows[0].id;
      const roomTypesRes = await query('SELECT * FROM temporary_project.room_type WHERE hotel_id = $1 LIMIT 5', [internalId]);
      logger.info(`Room types for hotel 1003: ${roomTypesRes.rows.length}`);
      roomTypesRes.rows.forEach((row, index) => {
        logger.info(`Room type ${index + 1}: ${JSON.stringify(row)}`);
      });
    }
    if (hotel2170Res.rows.length > 0) {
      const internalId = hotel2170Res.rows[0].id;
      const roomTypesRes = await query('SELECT * FROM temporary_project.room_type WHERE hotel_id = $1 LIMIT 5', [internalId]);
      logger.info(`Room types for hotel 2170: ${roomTypesRes.rows.length}`);
      roomTypesRes.rows.forEach((row, index) => {
        logger.info(`Room type ${index + 1}: ${JSON.stringify(row)}`);
      });
    }
    // 3. Cross-check the cloud database (keyed by the public hotel code).
    const cloud1003Res = await queryCloud('SELECT * FROM tbl_room_type_circuit_powers WHERE hotel_rcu_code = ? LIMIT 5', [1003]);
    logger.info(`Cloud data for hotel 1003: ${cloud1003Res.length}`);
    cloud1003Res.forEach((row, index) => {
      logger.info(`Cloud row ${index + 1}: ${JSON.stringify(row)}`);
    });
    const cloud2170Res = await queryCloud('SELECT * FROM tbl_room_type_circuit_powers WHERE hotel_rcu_code = ? LIMIT 5', [2170]);
    logger.info(`Cloud data for hotel 2170: ${cloud2170Res.length}`);
    cloud2170Res.forEach((row, index) => {
      logger.info(`Cloud row ${index + 1}: ${JSON.stringify(row)}`);
    });
  } catch (error) {
    logger.error({ error }, 'Error during hotel check');
  } finally {
    // Close both pools regardless of outcome so the process can exit cleanly.
    await closeCloudDb();
    await close();
  }
};
checkHotels();

View File

@@ -0,0 +1,51 @@
const { query, close } = require('../db/utils');
const logger = require('../utils/logger');
/**
 * Ad-hoc diagnostic script: samples the local rooms table and compares the
 * room_type_code values in the cloud MySQL table tbl_room_type_circuit_powers
 * with the local room_type ids for hotel 2170.
 *
 * Fix vs. original: closeCloudDb() ran only on the success path, so a failed
 * query left the cloud pool open. Both pools are now closed in `finally`.
 */
const checkRooms = async () => {
  // Required lazily so the cloud pool is only created when this script runs.
  const { queryCloud, closeCloudDb } = require('../db/cloudDb');
  try {
    logger.info('Checking rooms table structure...');
    // 1. Sample the local rooms table
    const roomsRes = await query('SELECT * FROM temporary_project.rooms LIMIT 5');
    logger.info(`Rooms data:`);
    roomsRes.rows.forEach((row, index) => {
      logger.info(`Room ${index + 1}: ${JSON.stringify(row)}`);
    });
    // 2. Distinct room_type_code values present in the cloud DB
    const cloudRes = await queryCloud('SELECT DISTINCT room_type_code FROM tbl_room_type_circuit_powers LIMIT 10');
    logger.info(`Distinct room_type_code in cloud DB:`);
    cloudRes.forEach((row, index) => {
      logger.info(`Room type code ${index + 1}: ${row.room_type_code}`);
    });
    // 3. room_type_code values for hotel 2170 in the cloud DB
    const cloud2170Res = await queryCloud('SELECT DISTINCT room_type_code FROM tbl_room_type_circuit_powers WHERE hotel_rcu_code = ?', [2170]);
    logger.info(`Room type codes for hotel 2170 in cloud DB:`);
    cloud2170Res.forEach((row, index) => {
      logger.info(`Room type code ${index + 1}: ${row.room_type_code}`);
    });
    // 4. Local room_type ids for hotel 2170. room_type.hotel_id references
    // hotels.id (internal PK), hence the internalId lookup.
    const hotel2170Res = await query('SELECT * FROM temporary_project.hotels WHERE hotel_id = $1', [2170]);
    if (hotel2170Res.rows.length > 0) {
      const internalId = hotel2170Res.rows[0].id;
      const roomTypesRes = await query('SELECT id, room_type_name FROM temporary_project.room_type WHERE hotel_id = $1', [internalId]);
      logger.info(`Room type IDs for hotel 2170 in local DB:`);
      roomTypesRes.rows.forEach((row, index) => {
        logger.info(`Room type ${index + 1}: id=${row.id}, name=${row.room_type_name}`);
      });
    }
  } catch (error) {
    logger.error({ error }, 'Error during rooms check');
  } finally {
    // Close both pools regardless of outcome so the process can exit cleanly.
    await closeCloudDb();
    await close();
  }
};
checkRooms();

View File

@@ -7,6 +7,7 @@ const logger = require('../utils/logger');
const fs = require('fs'); const fs = require('fs');
const path = require('path'); const path = require('path');
const { query, close } = require('../db/utils'); // Changed from db, all, close const { query, close } = require('../db/utils'); // Changed from db, all, close
const { initCloudDb, closeCloudDb } = require('../db/cloudDb');
const mdPath = path.resolve(__dirname, '../../../project_management/project.md'); // Adjust relative path to e:\Project_Class\BLS\Web_BLS_SQL_Temp\project_management\project.md const mdPath = path.resolve(__dirname, '../../../project_management/project.md'); // Adjust relative path to e:\Project_Class\BLS\Web_BLS_SQL_Temp\project_management\project.md
const endpoints = parseApiEndpoints(mdPath); const endpoints = parseApiEndpoints(mdPath);
@@ -24,6 +25,7 @@ const main = async () => {
// Phase 1: Init // Phase 1: Init
await initDB(); await initDB();
await initCloudDb();
// Phase 2: Concurrent Data Fetch // Phase 2: Concurrent Data Fetch
logger.info(`Starting Phase 2: Global Data Fetching using endpoints from MD: ${JSON.stringify(endpoints)}`); logger.info(`Starting Phase 2: Global Data Fetching using endpoints from MD: ${JSON.stringify(endpoints)}`);
@@ -45,6 +47,12 @@ const main = async () => {
await saveRoomsTransaction(rooms); await saveRoomsTransaction(rooms);
await saveRoomTypesTransaction(roomTypes); await saveRoomTypesTransaction(roomTypes);
logger.info('Phase 2 Completed: Data saved.'); logger.info('Phase 2 Completed: Data saved.');
// Fetch all cloud data at once for caching
if (apiToggles.roomTypeModalInfo) {
const { fetchAllCloudData } = require('../services/dataService');
await fetchAllCloudData();
}
} catch (error) { } catch (error) {
logger.error({ error }, 'Phase 2 failed. Exiting.'); logger.error({ error }, 'Phase 2 failed. Exiting.');
throw error; throw error;
@@ -117,6 +125,7 @@ const main = async () => {
logger.info(summary); logger.info(summary);
await close(); await close();
await closeCloudDb();
process.exit(0); process.exit(0);
} catch (error) { } catch (error) {

View File

@@ -0,0 +1,59 @@
const { query, close } = require('../db/utils');
const logger = require('../utils/logger');
/**
 * Standalone verification script for temporary_project.loops: prints the
 * total row count, how many rows carry cloud-synced fields, a 10-row sample,
 * and per-hotel loop counts for hotels 1003 and 2170.
 *
 * Fix vs. original: the per-hotel count queries joined
 * `rt.hotel_id = h.hotel_id`, but everywhere else in this codebase
 * room_type.hotel_id references hotels.id (the internal PK), not the public
 * hotel_id code — so both counts always came back 0. The join now uses h.id.
 */
const verifyData = async () => {
  try {
    logger.info('Starting data verification...');
    // 1. Total number of loops
    const countRes = await query('SELECT COUNT(*) as total FROM temporary_project.loops');
    logger.info(`Total loops in database: ${countRes.rows[0].total}`);
    // 2. Loops enriched with cloud data (any synced metric present)
    const cloudDataRes = await query(`
SELECT COUNT(*) as total FROM temporary_project.loops
WHERE power IS NOT NULL OR rate IS NOT NULL OR temperature IS NOT NULL
`);
    logger.info(`Loops with cloud data: ${cloudDataRes.rows[0].total}`);
    // 3. Sample of the first 10 loops
    const sampleRes = await query(`
SELECT id, loop_name, room_type_id, loop_address, loop_type,
type, name, power, rate, temperature, air_type, air_brand, air_model, height, area, heat_loss, remark
FROM temporary_project.loops
LIMIT 10
`);
    logger.info(`Sample loops data:`);
    sampleRes.rows.forEach((row, index) => {
      logger.info(`Row ${index + 1}: ${JSON.stringify(row)}`);
    });
    // 4. Per-hotel loop counts (filter on the PUBLIC hotel code, join on the
    // INTERNAL hotels.id key)
    const hotel1003Res = await query(`
SELECT COUNT(*) as total
FROM temporary_project.loops l
JOIN temporary_project.room_type rt ON l.room_type_id = rt.id
JOIN temporary_project.hotels h ON rt.hotel_id = h.id
WHERE h.hotel_id = 1003
`);
    logger.info(`Loops for hotel 1003: ${hotel1003Res.rows[0].total}`);
    const hotel2170Res = await query(`
SELECT COUNT(*) as total
FROM temporary_project.loops l
JOIN temporary_project.room_type rt ON l.room_type_id = rt.id
JOIN temporary_project.hotels h ON rt.hotel_id = h.id
WHERE h.hotel_id = 2170
`);
    logger.info(`Loops for hotel 2170: ${hotel2170Res.rows[0].total}`);
    logger.info('Data verification completed.');
  } catch (error) {
    logger.error({ error }, 'Error during data verification');
  } finally {
    await close();
  }
};
verifyData();

View File

@@ -1,10 +1,53 @@
const { v4: uuidv4 } = require('uuid'); const { v4: uuidv4 } = require('uuid');
const { getClient } = require('../db/utils'); const { getClient } = require('../db/utils');
const { queryCloud } = require('../db/cloudDb');
const logger = require('../utils/logger'); const logger = require('../utils/logger');
// 存储云端数据的缓存
let cloudDataCache = null;
// Generate 32-char UUID (no dashes) // Generate 32-char UUID (no dashes)
const generateGuid = () => uuidv4().replace(/-/g, ''); const generateGuid = () => uuidv4().replace(/-/g, '');
// 一次性获取所有云端数据并缓存
const fetchAllCloudData = async () => {
try {
logger.info('Fetching all cloud data...');
const startTime = Date.now();
const cloudRows = await queryCloud('SELECT * FROM tbl_room_type_circuit_powers');
// 构建索引,方便快速查询
const cache = {};
cloudRows.forEach(row => {
const key = `${row.hotel_rcu_code}-${row.room_type_code}-${row.address}`;
cache[key] = row;
});
cloudDataCache = cache;
const duration = Date.now() - startTime;
logger.info(`Fetched ${cloudRows.length} cloud data rows in ${duration}ms`);
logger.info(`Cached ${Object.keys(cache).length} unique cloud data entries`);
return cache;
} catch (error) {
logger.error({ error }, 'Error fetching cloud data');
throw error;
}
};
// 获取云端数据(使用缓存)
const getCloudData = (hotelCode, roomTypeCode, address) => {
if (!cloudDataCache) {
logger.warn('Cloud data cache not initialized');
return null;
}
const key = `${hotelCode}-${roomTypeCode}-${address}`;
return cloudDataCache[key] || null;
};
const validateSchema = (data, requiredFields) => { const validateSchema = (data, requiredFields) => {
if (!data || !Array.isArray(data)) { if (!data || !Array.isArray(data)) {
throw new Error('Invalid data format: expected array'); throw new Error('Invalid data format: expected array');
@@ -109,20 +152,111 @@ const saveRoomTypesTransaction = async (data) => {
}; };
const saveLoopsTransaction = async (data) => { const saveLoopsTransaction = async (data) => {
logger.info(`saveLoopsTransaction called with ${data ? data.length : 0} items`);
if (!data || data.length === 0) {
logger.warn('saveLoopsTransaction: No data to save');
return;
}
validateSchema(data, ['id', 'roomTypeID', 'modalAddress', 'type', 'name']); validateSchema(data, ['id', 'roomTypeID', 'modalAddress', 'type', 'name']);
return saveEntitiesTransaction( logger.info('saveLoopsTransaction: Schema validation passed');
'loops',
data, const client = await getClient();
'id', let successCount = 0;
item => item.id, let errorCount = 0;
'INSERT INTO temporary_project.loops (guid, id, loop_name, room_type_id, loop_address, loop_type) VALUES ($1, $2, $3, $4, $5, $6)',
item => [generateGuid(), item.id, item.name, item.roomTypeID, item.modalAddress, item.type] try {
await client.query('BEGIN');
logger.info('saveLoopsTransaction: Transaction started');
for (let i = 0; i < data.length; i++) {
const item = data[i];
logger.info(`Processing loop item ${i + 1}/${data.length}: id=${item.id}, roomTypeID=${item.roomTypeID}, modalAddress=${item.modalAddress}`);
try {
// 1. Delete existing
const deleteResult = await client.query('DELETE FROM temporary_project.loops WHERE id = $1', [item.id]);
logger.info(`Deleted ${deleteResult.rowCount} existing loop(s) with id=${item.id}`);
// 2. Get room type info to find hotel_id
const roomTypeRes = await client.query(
'SELECT rt.hotel_id, h.hotel_id as hotel_code FROM temporary_project.room_type rt JOIN temporary_project.hotels h ON rt.hotel_id = h.id WHERE rt.id = $1',
[item.roomTypeID]
); );
logger.info(`Found ${roomTypeRes.rows.length} room type(s) for room_type_id=${item.roomTypeID}`);
let cloudData = null;
if (roomTypeRes.rows.length > 0) {
const hotelCode = roomTypeRes.rows[0].hotel_code;
logger.info(`Hotel code for room type: ${hotelCode}`);
// 3. Get cloud data from cache
const cloudRow = getCloudData(hotelCode, item.roomTypeID, item.modalAddress);
if (cloudRow) {
cloudData = cloudRow;
logger.info(`Found cloud data for loop ${item.id} from cache: ${JSON.stringify(cloudData)}`);
} else {
logger.warn(`No cloud data found for loop ${item.id} (hotel: ${hotelCode}, room type: ${item.roomTypeID}, address: ${item.modalAddress})`);
}
} else {
logger.warn(`No room type found for room_type_id=${item.roomTypeID}, skipping cloud data sync`);
}
// 4. Insert with cloud data if available
const insertResult = await client.query(
`INSERT INTO temporary_project.loops (
guid, id, loop_name, room_type_id, loop_address, loop_type,
type, name, power, rate, temperature, air_type, air_brand, air_model, height, area, heat_loss, remark
) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18)`,
[
generateGuid(),
item.id,
item.name,
item.roomTypeID,
item.modalAddress,
item.type,
cloudData?.type || null,
cloudData?.name || null,
cloudData?.power || null,
cloudData?.rate || null,
cloudData?.temperature || null,
cloudData?.air_type || null,
cloudData?.air_brand || null,
cloudData?.air_model || null,
cloudData?.height || null,
cloudData?.area || null,
cloudData?.heat_loss || null,
cloudData?.remark || null
]
);
logger.info(`Inserted loop ${item.id}, rowCount=${insertResult.rowCount}`);
successCount++;
} catch (itemError) {
logger.error({ itemError, item }, `Error processing loop item ${item.id}`);
errorCount++;
}
}
await client.query('COMMIT');
logger.info(`saveLoopsTransaction: Transaction committed. Success: ${successCount}, Errors: ${errorCount}`);
} catch (e) {
await client.query('ROLLBACK');
logger.error({ e }, 'saveLoopsTransaction: Transaction rolled back due to error');
throw e;
} finally {
client.release();
}
}; };
module.exports = { module.exports = {
saveHotelsTransaction, saveHotelsTransaction,
saveRoomsTransaction, saveRoomsTransaction,
saveRoomTypesTransaction, saveRoomTypesTransaction,
saveLoopsTransaction saveLoopsTransaction,
fetchAllCloudData
}; };