feat: 实现RCU升级后端服务初始版本

- 添加Kafka消费者组件用于消费升级事件数据
- 实现数据处理器进行数据验证和转换
- 添加数据库写入组件支持批量写入G5数据库
- 配置环境变量管理连接参数
- 添加日志记录和错误处理机制
- 实现优雅关闭和流控功能
This commit is contained in:
2026-03-17 19:23:51 +08:00
parent 23ebc6f059
commit a954db752f
21 changed files with 5919 additions and 0 deletions

View File

@@ -0,0 +1,59 @@
import dotenv from 'dotenv';
dotenv.config();
/**
 * Runtime configuration for the RCU upgrade backend.
 * Every value can be overridden through environment variables; the
 * literals below are fallback defaults for development.
 *
 * NOTE(review): real-looking database/SASL credentials are committed here
 * as defaults — consider removing them from source control and supplying
 * them via the environment only.
 */

// Parse an integer environment variable with an explicit radix, falling
// back to `fallback` when the variable is unset or empty. Mirrors the
// original `parseInt(env || fallback)` semantics, plus the radix that was
// previously missing.
const envInt = (raw, fallback) => Number.parseInt(raw || fallback, 10);

export const config = {
  port: process.env.PORT || 3001,
  logLevel: process.env.LOG_LEVEL || 'info',
  kafka: {
    brokers: process.env.KAFKA_BROKERS || 'kafka.blv-oa.com:9092',
    clientId: process.env.KAFKA_CLIENT_ID || 'bls-upgrade-producer',
    groupId: process.env.KAFKA_GROUP_ID || 'bls-upgrade-consumer',
    testGroupId: process.env.KAFKA_TEST_GROUP_ID || '',
    // Comma-separated list; split by the consumer at init time.
    topics: process.env.KAFKA_TOPICS || 'blwlog4Nodejs-rcu-upgrade-topic',
    fromOffset: process.env.KAFKA_FROM_OFFSET || 'latest',
    autoCommit: process.env.KAFKA_AUTO_COMMIT === 'true',
    autoCommitIntervalMs: envInt(process.env.KAFKA_AUTO_COMMIT_INTERVAL_MS, '5000'),
    saslEnabled: process.env.KAFKA_SASL_ENABLED === 'true',
    saslMechanism: process.env.KAFKA_SASL_MECHANISM || 'plain',
    saslUsername: process.env.KAFKA_SASL_USERNAME || 'blwmomo',
    saslPassword: process.env.KAFKA_SASL_PASSWORD || 'blwmomo',
    sslEnabled: process.env.KAFKA_SSL_ENABLED === 'true',
    consumerInstances: envInt(process.env.KAFKA_CONSUMER_INSTANCES, '1'),
    // Flow-control ceiling for messages being processed concurrently.
    maxInFlight: envInt(process.env.KAFKA_MAX_IN_FLIGHT, '5000'),
    batchSize: envInt(process.env.KAFKA_BATCH_SIZE, '1000'),
    batchTimeoutMs: envInt(process.env.KAFKA_BATCH_TIMEOUT_MS, '20'),
    commitIntervalMs: envInt(process.env.KAFKA_COMMIT_INTERVAL_MS, '200'),
    commitOnAttempt: process.env.KAFKA_COMMIT_ON_ATTEMPT === 'true',
    fetchMaxBytes: envInt(process.env.KAFKA_FETCH_MAX_BYTES, '10485760'),
    fetchMaxWaitMs: envInt(process.env.KAFKA_FETCH_MAX_WAIT_MS, '100'),
    fetchMinBytes: envInt(process.env.KAFKA_FETCH_MIN_BYTES, '1')
  },
  database: {
    g5: {
      host: process.env.POSTGRES_HOST_G5 || '10.8.8.80',
      port: envInt(process.env.POSTGRES_PORT_G5, '5434'),
      database: process.env.POSTGRES_DATABASE_G5 || 'log_platform',
      user: process.env.POSTGRES_USER_G5 || 'log_admin',
      password: process.env.POSTGRES_PASSWORD_G5 || 'H3IkLUt8K!x',
      idleTimeoutMs: envInt(process.env.POSTGRES_IDLE_TIMEOUT_MS_G5, '30000'),
      maxConnections: envInt(process.env.POSTGRES_MAX_CONNECTIONS_G5, '2'),
      schema: process.env.DB_SCHEMA_G5 || 'rcu_upgrade',
      table: process.env.DB_TABLE_G5 || 'rcu_upgrade_events_g5'
    }
  },
  redis: {
    host: process.env.REDIS_HOST || '10.8.8.109',
    port: envInt(process.env.REDIS_PORT, '6379'),
    password: process.env.REDIS_PASSWORD || '',
    db: envInt(process.env.REDIS_DB, '15'),
    connectTimeoutMs: envInt(process.env.REDIS_CONNECT_TIMEOUT_MS, '5000'),
    projectName: process.env.REDIS_PROJECT_NAME || 'bls-onoffline'
  },
  performance: {
    dbWriteIntervalMs: 1000, // cap database writes at one batch per second
    batchSize: 1000 // max rows per batched INSERT
  }
};
export default config;

View File

@@ -0,0 +1,64 @@
import { Pool } from 'pg';
import config from '../config/config.js';
import logger from '../utils/logger.js';
/**
 * Owns the PostgreSQL connection pools used by this service.
 * Only the "g5" pool exists today; `init()` must run once at startup
 * before any call to `query()`.
 */
class DatabaseManager {
  constructor() {
    // Logical name ("g5") -> pg.Pool instance.
    this.pools = {};
  }

  /**
   * Create the G5 pool from config, fire a one-off connectivity probe and
   * attach a pool-level error listener. Connection failures are logged
   * rather than thrown, so startup is not aborted here.
   */
  init() {
    const g5 = config.database.g5;
    const pool = new Pool({
      host: g5.host,
      port: g5.port,
      database: g5.database,
      user: g5.user,
      password: g5.password,
      max: g5.maxConnections,
      idleTimeoutMillis: g5.idleTimeoutMs
    });
    this.pools.g5 = pool;

    // Connectivity probe using pg's callback-style connect.
    pool.connect((err, client, release) => {
      if (err) {
        logger.error('Error connecting to G5 database:', { error: err.message });
      } else {
        logger.info('Successfully connected to G5 database');
        release();
      }
    });

    // Without this listener an idle-client error would be unhandled.
    pool.on('error', (err) => {
      logger.error('Unexpected error on G5 database connection pool:', { error: err.message });
    });
  }

  /** Look up a pool by logical name (e.g. "g5"); undefined when absent. */
  getPool(dbName) {
    return this.pools[dbName];
  }

  /**
   * Run a parameterised query against the named pool, always returning
   * the client to the pool afterwards.
   * @throws {Error} when the named pool was never initialised.
   */
  async query(dbName, text, params) {
    const pool = this.getPool(dbName);
    if (!pool) {
      throw new Error(`Database pool ${dbName} not initialized`);
    }
    const client = await pool.connect();
    try {
      return await client.query(text, params);
    } finally {
      client.release();
    }
  }

  /** Drain and close every pool (graceful-shutdown path). */
  async close() {
    const allPools = Object.values(this.pools);
    await Promise.all(allPools.map((pool) => pool.end()));
  }
}

// Process-wide singleton: one set of pools shared by all modules.
const databaseManager = new DatabaseManager();
export default databaseManager;

View File

@@ -0,0 +1,58 @@
import config from './config/config.js';
import databaseManager from './db/databaseManager.js';
import KafkaConsumer from './kafka/consumer.js';
import dataProcessor from './processor/index.js';
import logger from './utils/logger.js';
/**
 * Application lifecycle wrapper: wires the database pools, the Kafka
 * consumer and the data processor together, and coordinates graceful
 * shutdown on process signals.
 */
class App {
  constructor() {
    this.consumer = null;
    // Prevents SIGINT and SIGTERM from both running the shutdown path.
    this.isShuttingDown = false;
  }

  /**
   * Boot the service: database first, then the Kafka consumer whose
   * messages are forwarded to the data processor. Installs SIGINT and
   * SIGTERM handlers for graceful shutdown.
   */
  async init() {
    databaseManager.init();

    this.consumer = new KafkaConsumer();
    this.consumer.onMessage = (message) => dataProcessor.processMessage(message);
    this.consumer.init();

    const handleSignal = async () => {
      await this.shutdown();
    };
    process.on('SIGINT', handleSignal);
    process.on('SIGTERM', handleSignal);

    logger.info(`BLS Upgrade Backend service started on port ${config.port}`);
  }

  /**
   * Graceful shutdown: flush buffered rows, stop the consumer, close DB
   * pools, then exit. Safe to invoke more than once.
   * NOTE(review): messages consumed while flush() runs stay uncommitted
   * and are re-delivered on restart (at-least-once) — confirm acceptable.
   */
  async shutdown() {
    if (this.isShuttingDown) return;
    this.isShuttingDown = true;

    logger.info('Shutting down BLS Upgrade Backend service...');
    await dataProcessor.flush();
    if (this.consumer) {
      await this.consumer.close();
    }
    await databaseManager.close();
    logger.info('Service shutdown completed');
    process.exit(0);
  }
}
// Start the application. init() is async: attach a catch handler so a
// startup failure is logged and exits non-zero, instead of surfacing as
// an unhandled promise rejection (the original left the promise floating).
const app = new App();
app.init().catch((err) => {
  logger.error('Failed to start BLS Upgrade Backend service', { error: err.message });
  process.exit(1);
});

View File

@@ -0,0 +1,191 @@
import kafka from 'kafka-node';
import config from '../config/config.js';
import logger from '../utils/logger.js';
import { OffsetTracker } from './offsetTracker.js';
const { ConsumerGroup } = kafka;
/**
 * Kafka consumer wrapper around kafka-node's ConsumerGroup.
 *
 * Responsibilities:
 *  - hand every message to `onMessage` (assigned by the application,
 *    see src/index.js);
 *  - flow control: pause the consumer group once `maxInFlight` messages
 *    are being processed, resume as they complete;
 *  - manual offset commits (when autoCommit is disabled): an offset only
 *    becomes eligible for commit after `onMessage` resolves, and the
 *    OffsetTracker keeps the committed position contiguous; commits are
 *    batched and flushed at most every `commitIntervalMs`.
 */
class KafkaConsumer {
  constructor() {
    this.consumer = null;
    // Per-partition completion bookkeeping so commits stay contiguous.
    this.tracker = new OffsetTracker();
    // "topic-partition" -> latest commit payload awaiting flush.
    this.pendingCommits = new Map();
    this.commitTimer = null;
    // Messages currently inside onMessage().
    this.inFlight = 0;
    this.maxInFlight = Number.isFinite(config.kafka.maxInFlight) ? config.kafka.maxInFlight : 5000;
    this.commitIntervalMs = Number.isFinite(config.kafka.commitIntervalMs) ? config.kafka.commitIntervalMs : 200;
  }

  /**
   * Build the ConsumerGroup, subscribe to the configured topics and wire
   * up every event handler. Must be called once before use.
   */
  init() {
    const kafkaConfig = {
      kafkaHost: config.kafka.brokers,
      clientId: config.kafka.clientId,
      groupId: config.kafka.groupId,
      fromOffset: config.kafka.fromOffset,
      protocol: ['roundrobin'],
      // If the stored offset no longer exists, jump to the newest data.
      outOfRangeOffset: 'latest',
      autoCommit: config.kafka.autoCommit,
      autoCommitIntervalMs: config.kafka.autoCommitIntervalMs,
      fetchMaxBytes: config.kafka.fetchMaxBytes,
      fetchMaxWaitMs: config.kafka.fetchMaxWaitMs,
      fetchMinBytes: config.kafka.fetchMinBytes,
      // SASL/SSL blocks are only attached when enabled in config.
      sasl: config.kafka.saslEnabled ? {
        mechanism: config.kafka.saslMechanism,
        username: config.kafka.saslUsername,
        password: config.kafka.saslPassword
      } : undefined,
      ssl: config.kafka.sslEnabled,
      connectTimeout: 10000,
      requestTimeout: 10000
    };
    logger.info('Initializing Kafka consumer with config:', {
      kafkaHost: config.kafka.brokers,
      clientId: config.kafka.clientId,
      groupId: config.kafka.groupId,
      topics: config.kafka.topics,
      fromOffset: config.kafka.fromOffset,
      saslEnabled: config.kafka.saslEnabled
    });
    const topics = config.kafka.topics.split(',').map(topic => topic.trim()).filter(Boolean);
    this.consumer = new ConsumerGroup(kafkaConfig, topics);
    this.consumer.on('connect', () => {
      logger.info('Kafka consumer connected', {
        groupId: config.kafka.groupId,
        topics
      });
    });
    // On rebalance our partition assignment may change, so all local
    // offset bookkeeping is stale and must be dropped.
    this.consumer.on('rebalancing', () => {
      logger.info('Kafka consumer rebalancing');
      this.tracker.clear();
      this.pendingCommits.clear();
      if (this.commitTimer) {
        clearTimeout(this.commitTimer);
        this.commitTimer = null;
      }
    });
    this.consumer.on('rebalanced', () => {
      logger.info('Kafka consumer rebalanced');
    });
    this.consumer.on('message', (message) => {
      logger.debug('Received Kafka message:', { messageId: message.offset });
      this.inFlight += 1;
      this.tracker.add(message.topic, message.partition, message.offset);
      // Back-pressure: stop fetching once too many messages are pending.
      if (this.inFlight >= this.maxInFlight && this.consumer.pause) {
        this.consumer.pause();
      }
      Promise.resolve(this.onMessage(message))
        .then(() => {
          if (!config.kafka.autoCommit) {
            // markDone returns the new contiguous commit offset, or null
            // when this completion did not advance the watermark.
            const commitOffset = this.tracker.markDone(message.topic, message.partition, message.offset);
            if (commitOffset !== null) {
              const key = `${message.topic}-${message.partition}`;
              this.pendingCommits.set(key, {
                topic: message.topic,
                partition: message.partition,
                offset: commitOffset,
                metadata: 'm'
              });
              this.scheduleCommitFlush();
            }
          }
        })
        .catch((err) => {
          // Processing failed: deliberately do NOT commit, so the message
          // is re-delivered after a restart/rebalance (at-least-once).
          logger.error('Kafka message handling failed, skip commit', {
            error: err.message,
            topic: message.topic,
            partition: message.partition,
            offset: message.offset
          });
        })
        .finally(() => {
          this.inFlight -= 1;
          if (this.inFlight < this.maxInFlight && this.consumer.resume) {
            this.consumer.resume();
          }
        });
    });
    this.consumer.on('error', (err) => {
      logger.error('Kafka consumer error:', { error: err.message, stack: err.stack });
    });
    this.consumer.on('offsetOutOfRange', (topic) => {
      logger.warn('Kafka offset out of range:', { topic: topic.topic, partition: topic.partition });
    });
    logger.info('Kafka consumer initialized');
    this.consumer.on('close', () => {
      logger.info('Kafka consumer closed');
    });
  }

  // Message-handler hook; assigned by the application (see src/index.js).
  onMessage(message) {
  }

  // Debounce: at most one commit flush per commitIntervalMs.
  scheduleCommitFlush() {
    if (this.commitTimer) return;
    this.commitTimer = setTimeout(() => {
      this.commitTimer = null;
      this.flushCommits();
    }, this.commitIntervalMs);
  }

  /**
   * Send every pending offset commit in a single request.
   *
   * On failure the batch is merged back into `pendingCommits` for retry,
   * but only for partitions that did NOT record a newer offset while the
   * request was in flight. (The previous code restored the failed batch
   * unconditionally, which could overwrite a newer pending offset with an
   * older one and rewind the committed position.) A retry flush is also
   * scheduled so a failed commit is retried even if no further messages
   * arrive.
   */
  flushCommits() {
    if (!this.consumer || this.pendingCommits.size === 0) return;
    const batch = this.pendingCommits;
    this.pendingCommits = new Map();
    this.consumer.sendOffsetCommitRequest(Array.from(batch.values()), (err) => {
      if (err) {
        for (const [key, payload] of batch.entries()) {
          const newer = this.pendingCommits.get(key);
          if (!newer || newer.offset < payload.offset) {
            this.pendingCommits.set(key, payload);
          }
        }
        logger.error('Failed to commit Kafka offsets', {
          error: err.message,
          groupId: config.kafka.groupId,
          count: batch.size
        });
        this.scheduleCommitFlush();
        return;
      }
      logger.info('Kafka offsets committed', {
        groupId: config.kafka.groupId,
        count: batch.size,
        commits: Array.from(batch.values())
      });
    });
  }

  /**
   * Flush outstanding commits and close the consumer group.
   * NOTE(review): flushCommits() is fire-and-forget, so close() may race
   * the final commit request; anything uncommitted is simply re-delivered
   * on the next start (at-least-once) — confirm that is acceptable.
   */
  close() {
    return new Promise((resolve) => {
      if (this.commitTimer) {
        clearTimeout(this.commitTimer);
        this.commitTimer = null;
      }
      this.flushCommits();
      if (!this.consumer) {
        resolve();
        return;
      }
      this.consumer.close(true, () => {
        logger.info('Kafka consumer closed');
        resolve();
      });
    });
  }
}
export default KafkaConsumer;

View File

@@ -0,0 +1,53 @@
/**
 * Tracks which Kafka message offsets have finished processing, per
 * topic/partition, so the consumer can commit only a contiguous prefix.
 * `markDone` returns the next offset eligible for commit (one past the
 * highest contiguous completed offset) whenever the watermark advances,
 * or null when it does not.
 */
class OffsetTracker {
  constructor() {
    // "topic-partition" -> { nextCommitOffset, done: Set<number> }.
    this.partitions = new Map();
  }

  /**
   * Register an offset that has been received. Creates the partition
   * record on first sight; the lowest offset seen becomes the initial
   * commit watermark. Non-numeric offsets are ignored (after the record
   * is created, matching the original behavior).
   */
  add(topic, partition, offset) {
    const key = `${topic}-${partition}`;
    let state = this.partitions.get(key);
    if (state === undefined) {
      state = { nextCommitOffset: null, done: new Set() };
      this.partitions.set(key, state);
    }
    const value = Number(offset);
    if (!Number.isFinite(value)) return;
    if (state.nextCommitOffset === null || value < state.nextCommitOffset) {
      state.nextCommitOffset = value;
    }
  }

  /**
   * Mark an offset as fully processed and advance the contiguous
   * watermark as far as possible.
   * @returns {number|null} the new commit offset when it advanced,
   *   otherwise null (unknown partition, invalid offset, or a gap
   *   remains below this offset).
   */
  markDone(topic, partition, offset) {
    const state = this.partitions.get(`${topic}-${partition}`);
    if (state === undefined) return null;
    const value = Number(offset);
    if (!Number.isFinite(value)) return null;
    state.done.add(value);
    state.nextCommitOffset ??= value;
    const before = state.nextCommitOffset;
    // delete() returns true while the next expected offset is completed,
    // so this consumes the contiguous run and frees the Set entries.
    while (state.done.delete(state.nextCommitOffset)) {
      state.nextCommitOffset += 1;
    }
    return state.nextCommitOffset === before ? null : state.nextCommitOffset;
  }

  /** Drop all bookkeeping (used on consumer-group rebalance). */
  clear() {
    this.partitions.clear();
  }
}
export { OffsetTracker };

View File

@@ -0,0 +1,234 @@
import config from '../config/config.js';
import databaseManager from '../db/databaseManager.js';
import logger from '../utils/logger.js';
/**
 * Batches validated upgrade events and writes them to the G5 PostgreSQL
 * database. A write is triggered when the batch reaches
 * `config.performance.batchSize`, or at the latest after
 * `config.performance.dbWriteIntervalMs` via a timer. Each message gets a
 * per-item "write ack" promise so the Kafka layer only commits offsets
 * for rows that actually reached the database.
 */
class DataProcessor {
  constructor() {
    // Pending items: { data, meta, resolve, reject }.
    this.batch = [];
    this.lastWriteTime = 0;
    this.flushTimer = null;
    // Retry policy for transient database errors during a batch write.
    this.dbRetryDelayMs = 1000;
    this.dbRetryMaxAttempts = 3;
  }

  /**
   * Parse, validate and enqueue one Kafka message.
   * Resolves once the row is written to the database; rejects on
   * parse/validation/write failure (the consumer then skips the commit).
   */
  async processMessage(message) {
    try {
      const rawValue = Buffer.isBuffer(message.value)
        ? message.value.toString('utf8')
        : String(message.value);
      const payload = JSON.parse(rawValue);
      const processedData = this.validateAndTransform(payload);
      const writeAck = new Promise((resolve, reject) => {
        this.batch.push({
          data: processedData,
          meta: {
            topic: message.topic,
            partition: message.partition,
            offset: message.offset
          },
          resolve,
          reject
        });
      });
      logger.debug('Message accepted into batch', {
        topic: message.topic,
        partition: message.partition,
        offset: message.offset,
        currentBatchSize: this.batch.length
      });
      this.scheduleFlush();
      // Trigger an immediate write if size/interval thresholds are met.
      // A failure here is swallowed deliberately: this message's own item
      // is part of the written batch, so the same error is surfaced via
      // `writeAck` below. (Previously the error was rethrown here, which
      // left the rejected `writeAck` un-awaited — an unhandled rejection.)
      try {
        await this.checkAndWriteBatch();
      } catch {
        // surfaced through writeAck
      }
      await writeAck;
    } catch (error) {
      logger.error('Error processing message:', { error: error.message });
      throw error;
    }
  }

  /**
   * Normalise a raw event into a row matching the G5 table columns:
   * - strips NUL bytes from strings;
   * - clamps hotel_id to the int2 range;
   * - optional integer columns (partition, file_type, upgrade_state)
   *   become null only when missing/unparsable — a legitimate 0 is kept
   *   (the previous `parseInt(x) || null` silently turned 0 into null).
   */
  validateAndTransform(data) {
    const cleanString = (str) => {
      if (str === null || str === undefined) return null;
      return String(str).replace(/\0/g, '');
    };
    // Integer or null; never coerces a valid 0 to null.
    const toNullableInt = (value) => {
      const parsed = Number.parseInt(value, 10);
      return Number.isNaN(parsed) ? null : parsed;
    };
    const processed = {
      ts_ms: typeof data.ts_ms === 'number' ? data.ts_ms : (Number.parseInt(data.ts_ms, 10) || 0),
      hotel_id: this.validateHotelId(data.hotel_id),
      room_id: cleanString(data.room_id) || '',
      device_id: cleanString(data.device_id) || '',
      is_send: Number.parseInt(data.is_send, 10) || 0,
      udp_raw: data.udp_raw ? cleanString(Buffer.from(data.udp_raw).toString()) : null,
      extra: data.extra ? JSON.stringify(data.extra) : null,
      ip: cleanString(data.remote_endpoint) || '',
      md5: cleanString(data.md5) || '',
      partition: toNullableInt(data.partition),
      file_type: toNullableInt(data.file_type),
      file_path: cleanString(data.file_path) || '',
      upgrade_state: toNullableInt(data.upgrade_state),
      app_version: cleanString(data.app_version) || ''
    };
    return processed;
  }

  /**
   * Clamp hotel_id to the Postgres int2 range (-32768..32767); anything
   * unparsable or out of range becomes 0.
   */
  validateHotelId(hotelId) {
    const id = Number.parseInt(hotelId, 10);
    if (Number.isNaN(id) || id < -32768 || id > 32767) {
      return 0;
    }
    return id;
  }

  /** Write now if the batch is full or the write interval has elapsed. */
  async checkAndWriteBatch() {
    const now = Date.now();
    const timeSinceLastWrite = now - this.lastWriteTime;
    if (this.batch.length >= config.performance.batchSize || timeSinceLastWrite >= config.performance.dbWriteIntervalMs) {
      await this.writeBatch();
    }
  }

  /** Arm a one-shot timer so a partial batch is flushed within the interval. */
  scheduleFlush() {
    if (this.flushTimer) return;
    this.flushTimer = setTimeout(() => {
      this.flushTimer = null;
      this.writeBatch().catch((error) => {
        logger.error('Error in scheduled batch flush:', { error: error.message });
      });
    }, config.performance.dbWriteIntervalMs);
  }

  /**
   * Heuristic: is this a transient connectivity error worth retrying?
   * Checks Node socket error codes, Postgres class-08/57P03 SQLSTATEs,
   * and common pg driver message fragments.
   */
  isRetryableDbError(err) {
    const code = err?.code;
    if (typeof code === 'string') {
      const retryableCodes = new Set([
        'ECONNREFUSED',
        'ECONNRESET',
        'EPIPE',
        'ETIMEDOUT',
        'ENOTFOUND',
        'EHOSTUNREACH',
        'ENETUNREACH',
        '57P03',
        '08006',
        '08001',
        '08000',
        '08003'
      ]);
      if (retryableCodes.has(code)) return true;
    }
    const message = typeof err?.message === 'string' ? err.message.toLowerCase() : '';
    return (
      message.includes('connection timeout') ||
      message.includes('connection terminated') ||
      message.includes('connection refused') ||
      message.includes('econnrefused') ||
      message.includes('econnreset') ||
      message.includes('etimedout') ||
      message.includes('could not connect') ||
      message.includes('the database system is starting up')
    );
  }

  /**
   * Run the query on G5, retrying up to dbRetryMaxAttempts times on
   * transient errors with a fixed delay; rethrows everything else.
   */
  async executeQueryWithRetry(query, params) {
    let attempt = 0;
    while (true) {
      try {
        return await databaseManager.query('g5', query, params);
      } catch (error) {
        attempt += 1;
        if (!this.isRetryableDbError(error) || attempt > this.dbRetryMaxAttempts) {
          throw error;
        }
        logger.warn('Retrying G5 batch write after transient DB error', {
          attempt,
          maxAttempts: this.dbRetryMaxAttempts,
          error: error.message
        });
        await new Promise(resolve => setTimeout(resolve, this.dbRetryDelayMs));
      }
    }
  }

  /**
   * Flush the current batch with a single multi-row INSERT. Each item's
   * ack promise is resolved on success or rejected on failure, then the
   * error is rethrown so the scheduled-flush path can log it.
   */
  async writeBatch() {
    if (this.batch.length === 0) return;
    if (this.flushTimer) {
      clearTimeout(this.flushTimer);
      this.flushTimer = null;
    }
    // Swap the batch out first so new messages accumulate separately
    // while this write is in flight.
    const batch = [...this.batch];
    this.batch = [];
    this.lastWriteTime = Date.now();
    try {
      logger.info('Flushing batch to G5 database', {
        batchSize: batch.length,
        first: batch[0]?.meta,
        last: batch[batch.length - 1]?.meta
      });
      // Row-major value matrix in table column order.
      const values = batch.map(item => [
        item.data.ts_ms,
        item.data.hotel_id,
        item.data.room_id,
        item.data.device_id,
        item.data.is_send,
        item.data.udp_raw,
        item.data.extra,
        item.data.ip,
        item.data.md5,
        item.data.partition,
        item.data.file_type,
        item.data.file_path,
        item.data.upgrade_state,
        item.data.app_version
      ]);
      // Parameterised multi-row INSERT: 14 placeholders per row.
      const query = `
        INSERT INTO ${config.database.g5.schema}.${config.database.g5.table}
        (ts_ms, hotel_id, room_id, device_id, is_send, udp_raw, extra, ip, md5, partition, file_type, file_path, upgrade_state, app_version)
        VALUES ${values.map((_, i) => `($${i * 14 + 1}, $${i * 14 + 2}, $${i * 14 + 3}, $${i * 14 + 4}, $${i * 14 + 5}, $${i * 14 + 6}, $${i * 14 + 7}, $${i * 14 + 8}, $${i * 14 + 9}, $${i * 14 + 10}, $${i * 14 + 11}, $${i * 14 + 12}, $${i * 14 + 13}, $${i * 14 + 14})`).join(', ')}
      `;
      const params = values.flat();
      await this.executeQueryWithRetry(query, params);
      logger.info('Batch write success', {
        batchSize: batch.length,
        first: batch[0]?.meta,
        last: batch[batch.length - 1]?.meta
      });
      batch.forEach(item => item.resolve());
    } catch (error) {
      logger.error('Error writing batch to database:', {
        error: error.message,
        batchSize: batch.length,
        first: batch[0]?.meta,
        last: batch[batch.length - 1]?.meta
      });
      batch.forEach(item => item.reject(error));
      throw error;
    }
  }

  /** Final drain used during graceful shutdown; rethrows write failures. */
  async flush() {
    if (this.batch.length > 0) {
      await this.writeBatch();
    }
  }
}
const dataProcessor = new DataProcessor();
export default dataProcessor;

View File

@@ -0,0 +1,13 @@
// Test-consumer bootstrap: runs the normal service under a disposable
// Kafka consumer group so manual testing never disturbs the production
// group's committed offsets.
import dotenv from 'dotenv';
dotenv.config();
// Unique group id per run unless KAFKA_TEST_GROUP_ID pins one explicitly.
const baseGroupId = process.env.KAFKA_GROUP_ID || 'bls-upgrade-consumer';
const testGroupId = process.env.KAFKA_TEST_GROUP_ID || `${baseGroupId}-test-${Date.now()}`;
process.env.KAFKA_GROUP_ID = testGroupId;
// Default to replaying the topic from the beginning when testing.
process.env.KAFKA_FROM_OFFSET = process.env.KAFKA_FROM_OFFSET || 'earliest';
console.log(`[test-consumer] groupId=${process.env.KAFKA_GROUP_ID}, fromOffset=${process.env.KAFKA_FROM_OFFSET}`);
// Dynamic import AFTER the env mutations above: the config module reads
// process.env at load time, so the order here is load-bearing.
await import('./index.js');

View File

@@ -0,0 +1,44 @@
import config from '../config/config.js';
// Severity order, lowest to highest; messages below the configured
// threshold are dropped.
const LOG_LEVELS = ['debug', 'info', 'warn', 'error'];

/**
 * Minimal structured logger: emits one JSON line per record on stdout
 * with timestamp, level, message and an optional data object.
 */
class Logger {
  constructor() {
    // Threshold level, sourced from config (LOG_LEVEL env, default 'info').
    this.logLevel = config.logLevel;
  }

  /**
   * Emit a record when `level` is at or above the configured threshold.
   * An unknown configured level (indexOf === -1) lets everything through,
   * matching the original comparison semantics.
   */
  log(level, message, data = {}) {
    if (LOG_LEVELS.indexOf(level) < LOG_LEVELS.indexOf(this.logLevel)) return;
    const record = {
      timestamp: new Date().toISOString(),
      level,
      message,
      data
    };
    console.log(JSON.stringify(record));
  }

  debug(message, data = {}) { this.log('debug', message, data); }

  info(message, data = {}) { this.log('info', message, data); }

  warn(message, data = {}) { this.log('warn', message, data); }

  error(message, data = {}) { this.log('error', message, data); }
}
// Process-wide singleton logger instance.
export default new Logger();