// Files
// Web_BLS_Upgrade_Server/bls-upgrade-backend/dist/index.js
//
// 561 lines
// 19 KiB
// JavaScript
// Raw Normal View History
// NOTE: the lines above are residue from the web file viewer this source was
// copied from; they are commented out so the file parses as JavaScript.

import dotenv from "dotenv";
import { Pool } from "pg";
import kafka from "kafka-node";
dotenv.config();
// Runtime configuration assembled from environment variables with built-in
// fallbacks. All numeric values are parsed with an explicit radix of 10.
// NOTE(review): several fallbacks below embed real-looking secrets (the G5
// DB password and SASL credentials) directly in the bundle; in production
// these should be supplied exclusively via the environment.
const config = {
  port: process.env.PORT || 3001,
  logLevel: process.env.LOG_LEVEL || "info",
  kafka: {
    brokers: process.env.KAFKA_BROKERS || "kafka.blv-oa.com:9092",
    clientId: process.env.KAFKA_CLIENT_ID || "bls-upgrade-producer",
    groupId: process.env.KAFKA_GROUP_ID || "bls-upgrade-consumer",
    testGroupId: process.env.KAFKA_TEST_GROUP_ID || "",
    topics: process.env.KAFKA_TOPICS || "blwlog4Nodejs-rcu-upgrade-topic",
    fromOffset: process.env.KAFKA_FROM_OFFSET || "latest",
    autoCommit: process.env.KAFKA_AUTO_COMMIT === "true",
    autoCommitIntervalMs: parseInt(process.env.KAFKA_AUTO_COMMIT_INTERVAL_MS || "5000", 10),
    saslEnabled: process.env.KAFKA_SASL_ENABLED === "true",
    saslMechanism: process.env.KAFKA_SASL_MECHANISM || "plain",
    saslUsername: process.env.KAFKA_SASL_USERNAME || "blwmomo",
    saslPassword: process.env.KAFKA_SASL_PASSWORD || "blwmomo",
    sslEnabled: process.env.KAFKA_SSL_ENABLED === "true",
    consumerInstances: parseInt(process.env.KAFKA_CONSUMER_INSTANCES || "1", 10),
    maxInFlight: parseInt(process.env.KAFKA_MAX_IN_FLIGHT || "5000", 10),
    batchSize: parseInt(process.env.KAFKA_BATCH_SIZE || "1000", 10),
    batchTimeoutMs: parseInt(process.env.KAFKA_BATCH_TIMEOUT_MS || "20", 10),
    commitIntervalMs: parseInt(process.env.KAFKA_COMMIT_INTERVAL_MS || "200", 10),
    commitOnAttempt: process.env.KAFKA_COMMIT_ON_ATTEMPT === "true",
    fetchMaxBytes: parseInt(process.env.KAFKA_FETCH_MAX_BYTES || "10485760", 10),
    fetchMaxWaitMs: parseInt(process.env.KAFKA_FETCH_MAX_WAIT_MS || "100", 10),
    fetchMinBytes: parseInt(process.env.KAFKA_FETCH_MIN_BYTES || "1", 10)
  },
  database: {
    g5: {
      host: process.env.POSTGRES_HOST_G5 || "10.8.8.80",
      port: parseInt(process.env.POSTGRES_PORT_G5 || "5434", 10),
      database: process.env.POSTGRES_DATABASE_G5 || "log_platform",
      user: process.env.POSTGRES_USER_G5 || "log_admin",
      password: process.env.POSTGRES_PASSWORD_G5 || "H3IkLUt8K!x",
      idleTimeoutMs: parseInt(process.env.POSTGRES_IDLE_TIMEOUT_MS_G5 || "30000", 10),
      maxConnections: parseInt(process.env.POSTGRES_MAX_CONNECTIONS_G5 || "2", 10),
      schema: process.env.DB_SCHEMA_G5 || "rcu_upgrade",
      table: process.env.DB_TABLE_G5 || "rcu_upgrade_events_g5"
    }
  },
  redis: {
    host: process.env.REDIS_HOST || "10.8.8.109",
    port: parseInt(process.env.REDIS_PORT || "6379", 10),
    password: process.env.REDIS_PASSWORD || "",
    db: parseInt(process.env.REDIS_DB || "15", 10),
    connectTimeoutMs: parseInt(process.env.REDIS_CONNECT_TIMEOUT_MS || "5000", 10),
    projectName: process.env.REDIS_PROJECT_NAME || "bls-onoffline"
  },
  performance: {
    // Cap database writes at one flush per second.
    dbWriteIntervalMs: 1000,
    // Maximum rows buffered before a forced flush.
    batchSize: 1000
  }
};
// Minimal leveled logger that writes one JSON object per line to stdout.
class Logger {
  constructor() {
    // Threshold comes from the shared runtime configuration.
    this.logLevel = config.logLevel;
  }
  // Emit a structured log line when `level` is at or above the configured
  // threshold (severity order: debug < info < warn < error).
  log(level, message, data = {}) {
    const severity = ["debug", "info", "warn", "error"];
    const shouldEmit = severity.indexOf(level) >= severity.indexOf(this.logLevel);
    if (!shouldEmit) return;
    const entry = {
      timestamp: new Date().toISOString(),
      level,
      message,
      data
    };
    console.log(JSON.stringify(entry));
  }
  debug(message, data = {}) {
    this.log("debug", message, data);
  }
  info(message, data = {}) {
    this.log("info", message, data);
  }
  warn(message, data = {}) {
    this.log("warn", message, data);
  }
  error(message, data = {}) {
    this.log("error", message, data);
  }
}
// Module-wide singleton logger shared by every component in this bundle.
const logger = new Logger();
// Manages named PostgreSQL connection pools (currently only "g5").
class DatabaseManager {
  constructor() {
    // Map of pool name -> pg Pool instance; populated by init().
    this.pools = {};
  }
  // Create the G5 pool, verify connectivity with an initial connect, and
  // install a pool-level error handler so idle-client errors are logged.
  init() {
    this.pools.g5 = new Pool({
      host: config.database.g5.host,
      port: config.database.g5.port,
      database: config.database.g5.database,
      user: config.database.g5.user,
      password: config.database.g5.password,
      max: config.database.g5.maxConnections,
      idleTimeoutMillis: config.database.g5.idleTimeoutMs
    });
    this.pools.g5.connect((err, client, release) => {
      if (err) {
        logger.error("Error connecting to G5 database:", { error: err.message });
        // pg may still supply the release callback on failure; invoke it
        // defensively so the checkout slot is not leaked.
        if (typeof release === "function") release();
      } else {
        logger.info("Successfully connected to G5 database");
        release();
      }
    });
    this.pools.g5.on("error", (err) => {
      logger.error("Unexpected error on G5 database connection pool:", { error: err.message });
    });
  }
  // Return the pool registered under `dbName`, or undefined if unknown.
  getPool(dbName) {
    return this.pools[dbName];
  }
  // Run a parameterized query on the named pool.
  // Uses pool.query(), which checks a client out and releases it
  // automatically — equivalent to the manual connect/try/finally-release
  // pattern, per node-postgres documentation.
  async query(dbName, text, params) {
    const pool = this.getPool(dbName);
    if (!pool) {
      throw new Error(`Database pool ${dbName} not initialized`);
    }
    return pool.query(text, params);
  }
  // Drain and close every pool (used on shutdown).
  async close() {
    await Promise.all(
      Object.values(this.pools).map((pool) => pool.end())
    );
  }
}
// Module-wide singleton used by the data processor and the shutdown path.
const databaseManager = new DatabaseManager();
// Tracks per-partition completion of Kafka messages so offsets can be
// committed strictly in order: an offset becomes committable only once every
// lower offset on the same partition has finished processing.
class OffsetTracker {
  constructor() {
    // key "topic-partition" -> { nextCommitOffset, done: Set of finished offsets }
    this.partitions = new Map();
  }
  // Fetch-or-create the tracking record for a topic/partition pair.
  ensureState(topic, partition) {
    const key = `${topic}-${partition}`;
    let state = this.partitions.get(key);
    if (state === undefined) {
      state = { nextCommitOffset: null, done: new Set() };
      this.partitions.set(key, state);
    }
    return state;
  }
  // Register an in-flight offset; the commit watermark is pinned to the
  // lowest offset seen so far. Non-numeric offsets are ignored (but the
  // partition record is still created).
  add(topic, partition, offset) {
    const state = this.ensureState(topic, partition);
    const candidate = Number(offset);
    if (!Number.isFinite(candidate)) return;
    if (state.nextCommitOffset === null || candidate < state.nextCommitOffset) {
      state.nextCommitOffset = candidate;
    }
  }
  // Mark an offset as fully processed. Returns the new commit watermark when
  // the contiguous run starting at the previous watermark advanced,
  // otherwise null (unknown partition, bad offset, or a gap remains).
  markDone(topic, partition, offset) {
    const state = this.partitions.get(`${topic}-${partition}`);
    if (!state) return null;
    const finished = Number(offset);
    if (!Number.isFinite(finished)) return null;
    state.done.add(finished);
    if (state.nextCommitOffset === null) {
      state.nextCommitOffset = finished;
    }
    const before = state.nextCommitOffset;
    // Set.delete doubles as the membership test while consuming the run.
    while (state.done.delete(state.nextCommitOffset)) {
      state.nextCommitOffset += 1;
    }
    return state.nextCommitOffset === before ? null : state.nextCommitOffset;
  }
  // Drop all tracking state (used when the consumer group rebalances).
  clear() {
    this.partitions.clear();
  }
}
// kafka-node ships as CommonJS; destructure ConsumerGroup from its default export.
const { ConsumerGroup } = kafka;
// Wraps a kafka-node ConsumerGroup: applies backpressure once too many
// messages are in flight (pause/resume), and — when auto-commit is off —
// commits offsets manually and in order via an OffsetTracker.
class KafkaConsumer {
  constructor() {
    this.consumer = null;
    // Computes the highest contiguous processed offset per partition.
    this.tracker = new OffsetTracker();
    // "topic-partition" -> latest commit payload awaiting the next flush.
    this.pendingCommits = new Map();
    this.commitTimer = null;
    // Messages currently being processed (drives pause/resume).
    this.inFlight = 0;
    this.maxInFlight = Number.isFinite(config.kafka.maxInFlight) ? config.kafka.maxInFlight : 5e3;
    this.commitIntervalMs = Number.isFinite(config.kafka.commitIntervalMs) ? config.kafka.commitIntervalMs : 200;
  }
  // Build the ConsumerGroup and wire up all event handlers.
  init() {
    const kafkaConfig = {
      kafkaHost: config.kafka.brokers,
      clientId: config.kafka.clientId,
      groupId: config.kafka.groupId,
      fromOffset: config.kafka.fromOffset,
      protocol: ["roundrobin"],
      outOfRangeOffset: "latest",
      autoCommit: config.kafka.autoCommit,
      autoCommitIntervalMs: config.kafka.autoCommitIntervalMs,
      fetchMaxBytes: config.kafka.fetchMaxBytes,
      fetchMaxWaitMs: config.kafka.fetchMaxWaitMs,
      fetchMinBytes: config.kafka.fetchMinBytes,
      sasl: config.kafka.saslEnabled ? {
        mechanism: config.kafka.saslMechanism,
        username: config.kafka.saslUsername,
        password: config.kafka.saslPassword
      } : void 0,
      ssl: config.kafka.sslEnabled,
      connectTimeout: 1e4,
      requestTimeout: 1e4
    };
    logger.info("Initializing Kafka consumer with config:", {
      kafkaHost: config.kafka.brokers,
      clientId: config.kafka.clientId,
      groupId: config.kafka.groupId,
      topics: config.kafka.topics,
      fromOffset: config.kafka.fromOffset,
      saslEnabled: config.kafka.saslEnabled
    });
    const topics = config.kafka.topics.split(",").map((topic) => topic.trim()).filter(Boolean);
    this.consumer = new ConsumerGroup(kafkaConfig, topics);
    this.consumer.on("connect", () => {
      logger.info("Kafka consumer connected", {
        groupId: config.kafka.groupId,
        topics
      });
    });
    // On rebalance the partition assignment changes, so all local offset
    // bookkeeping becomes stale and must be discarded.
    this.consumer.on("rebalancing", () => {
      logger.info("Kafka consumer rebalancing");
      this.tracker.clear();
      this.pendingCommits.clear();
      if (this.commitTimer) {
        clearTimeout(this.commitTimer);
        this.commitTimer = null;
      }
    });
    this.consumer.on("rebalanced", () => {
      logger.info("Kafka consumer rebalanced");
    });
    this.consumer.on("message", (message) => {
      logger.debug("Received Kafka message:", { messageId: message.offset });
      this.inFlight += 1;
      this.tracker.add(message.topic, message.partition, message.offset);
      // Backpressure: stop fetching while too many messages are in flight.
      if (this.inFlight >= this.maxInFlight && this.consumer.pause) {
        this.consumer.pause();
      }
      Promise.resolve(this.onMessage(message)).then(() => {
        if (!config.kafka.autoCommit) {
          const commitOffset = this.tracker.markDone(message.topic, message.partition, message.offset);
          if (commitOffset !== null) {
            const key = `${message.topic}-${message.partition}`;
            this.pendingCommits.set(key, {
              topic: message.topic,
              partition: message.partition,
              offset: commitOffset,
              metadata: "m"
            });
            this.scheduleCommitFlush();
          }
        }
      }).catch((err) => {
        // Processing failed: skip the commit so the message is redelivered
        // after a restart/rebalance.
        logger.error("Kafka message handling failed, skip commit", {
          error: err.message,
          topic: message.topic,
          partition: message.partition,
          offset: message.offset
        });
      }).finally(() => {
        this.inFlight -= 1;
        if (this.inFlight < this.maxInFlight && this.consumer.resume) {
          this.consumer.resume();
        }
      });
    });
    this.consumer.on("error", (err) => {
      logger.error("Kafka consumer error:", { error: err.message, stack: err.stack });
    });
    this.consumer.on("offsetOutOfRange", (topic) => {
      logger.warn("Kafka offset out of range:", { topic: topic.topic, partition: topic.partition });
    });
    logger.info("Kafka consumer initialized");
    this.consumer.on("close", () => {
      logger.info("Kafka consumer closed");
    });
  }
  // Hook replaced by the application; invoked once per received message.
  onMessage(message) {
  }
  // Debounce: at most one offset-commit request per commitIntervalMs.
  scheduleCommitFlush() {
    if (this.commitTimer) return;
    this.commitTimer = setTimeout(() => {
      this.commitTimer = null;
      this.flushCommits();
    }, this.commitIntervalMs);
  }
  // Send every pending offset commit in a single request.
  flushCommits() {
    if (!this.consumer || this.pendingCommits.size === 0) return;
    const batch = this.pendingCommits;
    this.pendingCommits = new Map();
    this.consumer.sendOffsetCommitRequest(Array.from(batch.values()), (err) => {
      if (err) {
        // Requeue the failed commits, but never clobber a newer offset queued
        // for the same partition while this request was in flight (the
        // previous unconditional set() could roll a partition's pending
        // commit backwards).
        for (const [k, v] of batch.entries()) {
          const current = this.pendingCommits.get(k);
          if (!current || current.offset < v.offset) {
            this.pendingCommits.set(k, v);
          }
        }
        logger.error("Failed to commit Kafka offsets", {
          error: err.message,
          groupId: config.kafka.groupId,
          count: batch.size
        });
        // Retry on the normal cadence instead of waiting for the next message.
        this.scheduleCommitFlush();
        return;
      }
      logger.info("Kafka offsets committed", {
        groupId: config.kafka.groupId,
        count: batch.size,
        commits: Array.from(batch.values())
      });
    });
  }
  // Flush outstanding commits (best effort) and close the consumer.
  close() {
    return new Promise((resolve) => {
      if (this.commitTimer) {
        clearTimeout(this.commitTimer);
        this.commitTimer = null;
      }
      this.flushCommits();
      if (!this.consumer) {
        resolve();
        return;
      }
      this.consumer.close(true, () => {
        logger.info("Kafka consumer closed");
        resolve();
      });
    });
  }
}
// Buffers validated Kafka payloads and bulk-inserts them into the G5
// PostgreSQL table, throttled by config.performance (max batch size and
// minimum interval between writes). Transient DB errors are retried.
class DataProcessor {
  constructor() {
    // Pending rows: { data, meta, resolve, reject } — resolve/reject settle
    // the per-message write acknowledgement awaited in processMessage().
    this.batch = [];
    this.lastWriteTime = 0;
    this.flushTimer = null;
    this.dbRetryDelayMs = 1e3;
    this.dbRetryMaxAttempts = 3;
  }
  // Parse a Kafka message, queue the row for batched insertion, and resolve
  // only once its batch has been written (so the caller can commit the
  // offset). Throws on parse/validation/write failure.
  async processMessage(message) {
    try {
      const rawValue = Buffer.isBuffer(message.value) ? message.value.toString("utf8") : String(message.value);
      const payload = JSON.parse(rawValue);
      const processedData = this.validateAndTransform(payload);
      const writeAck = new Promise((resolve, reject) => {
        this.batch.push({
          data: processedData,
          meta: {
            topic: message.topic,
            partition: message.partition,
            offset: message.offset
          },
          resolve,
          reject
        });
      });
      // Guard against an unhandled rejection: if checkAndWriteBatch() below
      // throws before `await writeAck` is reached, writeAck has already been
      // rejected with the same error and nothing else would observe it.
      writeAck.catch(() => {});
      logger.debug("Message accepted into batch", {
        topic: message.topic,
        partition: message.partition,
        offset: message.offset,
        currentBatchSize: this.batch.length
      });
      this.scheduleFlush();
      await this.checkAndWriteBatch();
      await writeAck;
    } catch (error) {
      logger.error("Error processing message:", { error: error.message });
      throw error;
    }
  }
  // Normalize a raw payload into the column layout of the target table.
  // Strings are stripped of NUL bytes (PostgreSQL rejects them in text).
  validateAndTransform(data) {
    const cleanString = (str) => {
      if (str === null || str === void 0) return null;
      return String(str).replace(/\0/g, "");
    };
    const processed = {
      ts_ms: typeof data.ts_ms === "number" ? data.ts_ms : parseInt(data.ts_ms, 10) || 0,
      hotel_id: this.validateHotelId(data.hotel_id),
      room_id: cleanString(data.room_id) || "",
      device_id: cleanString(data.device_id) || "",
      is_send: parseInt(data.is_send, 10) || 0,
      udp_raw: data.udp_raw ? cleanString(Buffer.from(data.udp_raw).toString()) : null,
      extra: data.extra ? JSON.stringify(data.extra) : null,
      ip: cleanString(data.remote_endpoint) || "",
      md5: cleanString(data.md5) || "",
      // toNullableInt keeps a legitimate 0 (e.g. Kafka partition 0); the old
      // `parseInt(x) || null` form silently turned 0 into null.
      partition: this.toNullableInt(data.partition),
      file_type: this.toNullableInt(data.file_type),
      file_path: cleanString(data.file_path) || "",
      upgrade_state: this.toNullableInt(data.upgrade_state),
      app_version: cleanString(data.app_version) || ""
    };
    return processed;
  }
  // Parse an optional integer column: null when missing or unparseable,
  // otherwise the parsed value (0 included).
  toNullableInt(value) {
    const n = parseInt(value, 10);
    return Number.isNaN(n) ? null : n;
  }
  // Clamp hotel_id into smallint range; invalid or out-of-range ids become 0.
  validateHotelId(hotelId) {
    const id = parseInt(hotelId, 10);
    if (isNaN(id) || id < -32768 || id > 32767) {
      return 0;
    }
    return id;
  }
  // Flush when the batch is full or the minimum write interval has elapsed.
  async checkAndWriteBatch() {
    const now = Date.now();
    const timeSinceLastWrite = now - this.lastWriteTime;
    if (this.batch.length >= config.performance.batchSize || timeSinceLastWrite >= config.performance.dbWriteIntervalMs) {
      await this.writeBatch();
    }
  }
  // Ensure a time-based flush happens even if no further messages arrive.
  scheduleFlush() {
    if (this.flushTimer) return;
    this.flushTimer = setTimeout(() => {
      this.flushTimer = null;
      this.writeBatch().catch((error) => {
        logger.error("Error in scheduled batch flush:", { error: error.message });
      });
    }, config.performance.dbWriteIntervalMs);
  }
  // Heuristic: is this a transient connectivity error worth retrying?
  // Covers common socket error codes, PostgreSQL connection SQLSTATEs
  // (08xxx, 57P03 = cannot_connect_now), and driver message fragments.
  isRetryableDbError(err) {
    const code = err == null ? void 0 : err.code;
    if (typeof code === "string") {
      const retryableCodes = new Set([
        "ECONNREFUSED",
        "ECONNRESET",
        "EPIPE",
        "ETIMEDOUT",
        "ENOTFOUND",
        "EHOSTUNREACH",
        "ENETUNREACH",
        "57P03",
        "08006",
        "08001",
        "08000",
        "08003"
      ]);
      if (retryableCodes.has(code)) return true;
    }
    const message = typeof (err == null ? void 0 : err.message) === "string" ? err.message.toLowerCase() : "";
    return message.includes("connection timeout") || message.includes("connection terminated") || message.includes("connection refused") || message.includes("econnrefused") || message.includes("econnreset") || message.includes("etimedout") || message.includes("could not connect") || message.includes("the database system is starting up");
  }
  // Run the query, retrying up to dbRetryMaxAttempts times on transient
  // connectivity errors with a fixed delay between attempts.
  async executeQueryWithRetry(query, params) {
    let attempt = 0;
    while (true) {
      try {
        return await databaseManager.query("g5", query, params);
      } catch (error) {
        attempt += 1;
        if (!this.isRetryableDbError(error) || attempt > this.dbRetryMaxAttempts) {
          throw error;
        }
        logger.warn("Retrying G5 batch write after transient DB error", {
          attempt,
          maxAttempts: this.dbRetryMaxAttempts,
          error: error.message
        });
        await new Promise((resolve) => setTimeout(resolve, this.dbRetryDelayMs));
      }
    }
  }
  // Flush all buffered rows in one multi-row INSERT. Resolves each row's
  // write acknowledgement on success; rejects them all (and rethrows) on
  // failure so offsets are not committed.
  async writeBatch() {
    if (this.batch.length === 0) return;
    if (this.flushTimer) {
      clearTimeout(this.flushTimer);
      this.flushTimer = null;
    }
    const batch = [...this.batch];
    this.batch = [];
    this.lastWriteTime = Date.now();
    // batch is guaranteed non-empty here, so first/last metadata exist.
    const first = batch[0].meta;
    const last = batch[batch.length - 1].meta;
    try {
      logger.info("Flushing batch to G5 database", {
        batchSize: batch.length,
        first,
        last
      });
      const values = batch.map((item) => [
        item.data.ts_ms,
        item.data.hotel_id,
        item.data.room_id,
        item.data.device_id,
        item.data.is_send,
        item.data.udp_raw,
        item.data.extra,
        item.data.ip,
        item.data.md5,
        item.data.partition,
        item.data.file_type,
        item.data.file_path,
        item.data.upgrade_state,
        item.data.app_version
      ]);
      // One placeholder group of 14 parameters per row.
      const query = `
      INSERT INTO ${config.database.g5.schema}.${config.database.g5.table}
      (ts_ms, hotel_id, room_id, device_id, is_send, udp_raw, extra, ip, md5, partition, file_type, file_path, upgrade_state, app_version)
      VALUES ${values.map((_, i) => `($${i * 14 + 1}, $${i * 14 + 2}, $${i * 14 + 3}, $${i * 14 + 4}, $${i * 14 + 5}, $${i * 14 + 6}, $${i * 14 + 7}, $${i * 14 + 8}, $${i * 14 + 9}, $${i * 14 + 10}, $${i * 14 + 11}, $${i * 14 + 12}, $${i * 14 + 13}, $${i * 14 + 14})`).join(", ")}
      `;
      const params = values.flat();
      await this.executeQueryWithRetry(query, params);
      logger.info("Batch write success", {
        batchSize: batch.length,
        first,
        last
      });
      batch.forEach((item) => item.resolve());
    } catch (error) {
      logger.error("Error writing batch to database:", {
        error: error.message,
        batchSize: batch.length,
        first,
        last
      });
      batch.forEach((item) => item.reject(error));
      throw error;
    }
  }
  // Drain any remaining buffered rows (used on shutdown).
  async flush() {
    if (this.batch.length > 0) {
      await this.writeBatch();
    }
  }
}
// Module-wide singleton processor shared by the consumer and shutdown path.
const dataProcessor = new DataProcessor();
// Application entry point: wires Kafka consumption into the data processor
// and coordinates graceful shutdown on SIGINT/SIGTERM.
class App {
  constructor() {
    this.consumer = null;
    this.isShuttingDown = false;
  }
  // Initialize DB pools, start the Kafka consumer, and install signal handlers.
  async init() {
    databaseManager.init();
    this.consumer = new KafkaConsumer();
    this.consumer.onMessage = (message) => dataProcessor.processMessage(message);
    this.consumer.init();
    process.on("SIGINT", async () => {
      await this.shutdown();
    });
    process.on("SIGTERM", async () => {
      await this.shutdown();
    });
    logger.info(`BLS Upgrade Backend service started on port ${config.port}`);
  }
  // Flush outstanding work and exit. Previously any error thrown during
  // teardown left the process hanging with no exit; failures now log and
  // exit non-zero instead.
  async shutdown() {
    if (this.isShuttingDown) return;
    this.isShuttingDown = true;
    logger.info("Shutting down BLS Upgrade Backend service...");
    try {
      // Flush buffered rows first so their offsets can still be committed
      // when the consumer closes below.
      await dataProcessor.flush();
      if (this.consumer) {
        await this.consumer.close();
      }
      await databaseManager.close();
      logger.info("Service shutdown completed");
      process.exit(0);
    } catch (error) {
      logger.error("Error during shutdown:", { error: error.message });
      process.exit(1);
    }
  }
}
// Bootstrap: construct and start the application. init() is async — without
// the catch below a startup failure became a floating, unhandled rejection;
// now it is logged and the process exits non-zero.
const app = new App();
app.init().catch((error) => {
  logger.error("Failed to start service:", { error: error.message });
  process.exit(1);
});