Web_BLS_RCUAction_Server/bls-rcu-action-backend/src/index.js
XuJiacheng 0e6c5c3cc3 feat: enhance Kafka consumer configuration and refine action_type classification logic
- Support multiple Kafka consumer instances for greater concurrent processing capacity
- Add Kafka configuration parameters: max in-flight messages, fetch bytes, wait time, etc.
- Refine the action_type classification logic: dev_type now maps to "用户操作" (user operation) or "设备回路状态" (device circuit state); sketched below
- Temporarily support automatic conversion of hex-format udp_raw to base64 for storage; sketched below
- Add an extra field for passing through extended data from upstream
- Improve the lookup path for the database initialization script
- Update the PM2 configuration: new application name and port
- Clean up development-environment log files; add the dist directory to .gitignore
- Update related documentation
2026-01-30 20:09:46 +08:00
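The two data-shaping rules from the commit message can be pictured with a minimal sketch. Everything below is illustrative: the helper names, the hex-detection test, and the dev_type table are assumptions, not the repository's actual code. In the real service this logic presumably lives in processKafkaMessage, imported from ./processor/index.js in the file below.

// Hypothetical sketch of the commit's data-shaping rules -- names and the
// dev_type table are assumptions, not the actual implementation.

// Temporary measure: hex-formatted udp_raw is converted to base64 before storage.
const normalizeUdpRaw = (udpRaw) => {
  const isHex = typeof udpRaw === 'string'
    && udpRaw.length % 2 === 0
    && /^[0-9a-fA-F]+$/.test(udpRaw);
  return isHex ? Buffer.from(udpRaw, 'hex').toString('base64') : udpRaw;
};

// action_type is derived from dev_type: some device types indicate a
// user-initiated operation ("用户操作"); everything else is reported as
// device circuit state ("设备回路状态").
const USER_OPERATION_DEV_TYPES = new Set([1, 2]); // placeholder codes
const classifyActionType = (devType) =>
  USER_OPERATION_DEV_TYPES.has(devType) ? '用户操作' : '设备回路状态';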

214 lines
7.2 KiB
JavaScript

import cron from 'node-cron';
import { config } from './config/config.js';
import dbManager from './db/databaseManager.js';
import dbInitializer from './db/initializer.js';
import partitionManager from './db/partitionManager.js';
import { createKafkaConsumers } from './kafka/consumer.js';
import { processKafkaMessage } from './processor/index.js';
import { createRedisClient } from './redis/redisClient.js';
import { RedisIntegration } from './redis/redisIntegration.js';
import { buildErrorQueueKey, enqueueError, startErrorRetryWorker } from './redis/errorQueue.js';
import { MetricCollector } from './utils/metricCollector.js';
import { logger } from './utils/logger.js';
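// Bootstraps the full pipeline: database and partition setup, metric
// reporting, Redis heartbeat, Kafka consumers, the error retry worker,
// and graceful shutdown.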
const bootstrap = async () => {
// 0. Initialize Database (Create DB, Schema, Table, Partitions)
await dbInitializer.initialize();
// Metric Collector
const metricCollector = new MetricCollector();
// 1. Setup Partition Maintenance Cron Job (Every day at 00:00)
cron.schedule('0 0 * * *', async () => {
logger.info('Running scheduled partition maintenance...');
try {
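// Pre-create partitions ahead of time; the argument is assumed to be a
// rolling day horizon.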
await partitionManager.ensurePartitions(30);
} catch (err) {
logger.error('Scheduled partition maintenance failed', err);
}
});
// Note: the metric reporting cron job (1.1 below) is registered after
// redisIntegration is initialized, since it reports through Redis.
// dbManager is the singleton instance exported by ./db/databaseManager.js;
// partitionManager uses its pool directly, so nothing needs instantiating here.
const redisClient = await createRedisClient(config.redis);
const redisIntegration = new RedisIntegration(
redisClient,
config.redis.projectName,
config.redis.apiBaseUrl
);
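// Periodic heartbeat -- presumably a liveness signal published via Redis.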
redisIntegration.startHeartbeat();
// 1.1 Setup Metric Reporting Cron Job (Every minute)
cron.schedule('* * * * *', async () => {
const metrics = metricCollector.getAndReset();
const report = `[Minute Metrics] Pulled: ${metrics.kafka_pulled}, Parse Error: ${metrics.parse_error}, Inserted: ${metrics.db_inserted}, Failed: ${metrics.db_failed}`;
console.log(report);
logger.info(report, metrics);
try {
await redisIntegration.info('Minute Metrics', metrics);
} catch (err) {
logger.error('Failed to report metrics to Redis', { error: err?.message });
}
});
const errorQueueKey = buildErrorQueueKey(config.redis.projectName);
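// Per-message pipeline: decode buffers, log receipt, hand the payload to the
// processor, and count the outcome. Errors are re-thrown so the consumer's
// onError handler can dead-letter the message.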
const handleMessage = async (message) => {
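// Payloads replayed from the error queue carry no topic (see the retry worker
// below), so only genuine Kafka deliveries count as pulled.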
if (message.topic) {
metricCollector.increment('kafka_pulled');
}
try {
const messageValue = Buffer.isBuffer(message.value)
? message.value.toString('utf8')
: message.value;
const messageKey = Buffer.isBuffer(message.key)
? message.key.toString('utf8')
: message.key;
if (config.kafka.logMessages) {
logger.info('Kafka message received', {
topic: message.topic,
partition: message.partition,
offset: message.offset,
key: messageKey,
value: messageValue
});
} else {
logger.info('Kafka message received', {
topic: message.topic,
partition: message.partition,
offset: message.offset,
key: messageKey,
valueLength: typeof messageValue === 'string' ? messageValue.length : null
});
}
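// processKafkaMessage parses and validates the payload, applies the
// action_type classification and udp_raw normalization described in the
// commit message, and inserts the result into the partitioned table.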
const inserted = await processKafkaMessage({ message, dbManager, config });
metricCollector.increment('db_inserted');
logger.info('Kafka message processed', { inserted });
} catch (error) {
if (error.type === 'PARSE_ERROR') {
metricCollector.increment('parse_error');
} else {
metricCollector.increment('db_failed');
}
logger.error('Message processing failed', {
error: error?.message,
type: error?.type,
stack: error?.stack,
rawPayload: error?.rawPayload,
validationIssues: error?.validationIssues,
dbContext: error?.dbContext
});
throw error; // Re-throw to trigger onError
}
};
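// Failure path: log locally, report through the Redis integration, then
// enqueue the raw payload for the retry worker.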
const handleError = async (error, message) => {
logger.error('Kafka processing error', {
error: error?.message,
type: error?.type,
stack: error?.stack
});
try {
await redisIntegration.error('Kafka processing error', {
module: 'kafka',
stack: error?.stack || error?.message
});
} catch (redisError) {
logger.error('Redis error log failed', { error: redisError?.message });
}
if (message) {
const messageValue = Buffer.isBuffer(message.value)
? message.value.toString('utf8')
: message.value;
try {
await enqueueError(redisClient, errorQueueKey, {
attempts: 0,
value: messageValue,
meta: {
topic: message.topic,
partition: message.partition,
offset: message.offset,
key: message.key
},
timestamp: Date.now()
});
} catch (enqueueErr) { // renamed so it does not shadow the imported enqueueError
logger.error('Enqueue error payload failed', { error: enqueueErr?.message });
}
}
};
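// One consumer per configured instance; multiple instances raise concurrency
// (the multi-instance support added by this commit).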
const consumers = createKafkaConsumers({
kafkaConfig: config.kafka,
onMessage: handleMessage,
onError: handleError
});
// Start retry worker (non-blocking)
startErrorRetryWorker({
client: redisClient,
queueKey: errorQueueKey,
redisIntegration,
handler: async (item) => {
if (!item?.value) {
throw new Error('Missing value in retry payload');
}
await handleMessage({ value: item.value });
}
}).catch(err => {
logger.error('Retry worker failed', { error: err?.message });
});
// Graceful Shutdown Logic
const shutdown = async (signal) => {
logger.info(`Received ${signal}, shutting down...`);
try {
// 1. Close Kafka Consumer
if (consumers && consumers.length > 0) {
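// close(true, cb) is assumed to commit current offsets before closing
// (kafka-node-style signature).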
await Promise.all(consumers.map(c => new Promise((resolve) => c.close(true, resolve))));
logger.info('Kafka consumer closed', { count: consumers.length });
}
// 2. Stop the Redis heartbeat. No stopHeartbeat method is assumed here;
// closing the client below ends the heartbeat.
// redisIntegration.stopHeartbeat();
// 3. Close Redis Client
await redisClient.quit();
logger.info('Redis client closed');
// 4. Close Database Pool
await dbManager.close();
logger.info('Database connection closed');
process.exit(0);
} catch (err) {
logger.error('Error during shutdown', { error: err?.message });
process.exit(1);
}
};
process.on('SIGTERM', () => shutdown('SIGTERM'));
process.on('SIGINT', () => shutdown('SIGINT'));
};
bootstrap().catch((error) => {
logger.error('Service bootstrap failed', { error: error?.message });
process.exit(1);
});