feat: initialize the backend service skeleton and core components
- Add the project skeleton: .gitignore, package.json, Docker configuration, and an example environment file
- Implement the core modules: Kafka consumer, PostgreSQL database manager, Redis client, and error queue
- Add utilities: logger, metric collector, and UUID generator
- Implement the data processor, with parsing and storage for 0x36 reports and 0x0F commands
- Add the database initialization script and partition management with time-range partitioning
- Introduce Zod for data validation and Vitest for unit testing (a validation sketch follows below)
- Provide full project documentation: database design, Kafka message format spec, and the Redis integration protocol
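The processor's Zod validation itself is not part of this diff; as a rough illustration of the shape it might take (every field name below is an assumption, only the 0x36/0x0F command codes and the use of Zod come from this commit):

import { z } from 'zod';

// Hypothetical schema for a decoded RCU frame; field names are illustrative.
const rcuEventSchema = z.object({
  cmd: z.union([z.literal(0x36), z.literal(0x0f)]), // 0x36 report / 0x0F command
  deviceId: z.string().min(1),
  reportedAt: z.coerce.date(),
  payload: z.record(z.string(), z.unknown())
});

// parse() throws on invalid input, which the processor can surface as a
// PARSE_ERROR so the minute metrics can separate parse failures from DB failures.
export const parseRcuEvent = (rawValue) => rcuEventSchema.parse(JSON.parse(rawValue));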
bls-rcu-action-backend/src/index.js (new normal file, 203 lines)
@@ -0,0 +1,203 @@
import cron from 'node-cron';
import { config } from './config/config.js';
import dbManager from './db/databaseManager.js';
import dbInitializer from './db/initializer.js';
import partitionManager from './db/partitionManager.js';
import { createKafkaConsumer } from './kafka/consumer.js';
import { processKafkaMessage } from './processor/index.js';
import { createRedisClient } from './redis/redisClient.js';
import { RedisIntegration } from './redis/redisIntegration.js';
import { buildErrorQueueKey, enqueueError, startErrorRetryWorker } from './redis/errorQueue.js';
import { MetricCollector } from './utils/metricCollector.js';
import { logger } from './utils/logger.js';

const bootstrap = async () => {
  // 0. Initialize Database (Create DB, Schema, Table, Partitions)
  await dbInitializer.initialize();

  // Metric Collector
  const metricCollector = new MetricCollector();

  // 1. Setup Partition Maintenance Cron Job (Every day at 00:00)
  cron.schedule('0 0 * * *', async () => {
    logger.info('Running scheduled partition maintenance...');
    try {
      await partitionManager.ensurePartitions(30);
    } catch (err) {
      logger.error('Scheduled partition maintenance failed', err);
    }
  });

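  // Sketch (assumption, not in this diff): ensurePartitions(days) is presumed
  // to pre-create daily range partitions through dbManager.pool, e.g. for each
  // of the next `days` days issuing SQL along the lines of:
  //   CREATE TABLE IF NOT EXISTS <table>_p2024_01_01 PARTITION OF <table>
  //     FOR VALUES FROM ('2024-01-01') TO ('2024-01-02');
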
  // 1.1 The metric reporting cron job is registered below, after
  // redisIntegration has been initialized.

  // databaseManager.js exports a ready-made singleton instance (`dbManager`),
  // which partitionManager already relies on via `dbManager.pool`, so it is
  // used directly here rather than instantiated.

  const redisClient = await createRedisClient(config.redis);
  const redisIntegration = new RedisIntegration(
    redisClient,
    config.redis.projectName,
    config.redis.apiBaseUrl
  );
  redisIntegration.startHeartbeat();

  // 1.1 Setup Metric Reporting Cron Job (Every minute)
  cron.schedule('* * * * *', async () => {
    const metrics = metricCollector.getAndReset();
    const report = `[Minute Metrics] Pulled: ${metrics.kafka_pulled}, Parse Error: ${metrics.parse_error}, Inserted: ${metrics.db_inserted}, Failed: ${metrics.db_failed}`;
    console.log(report);
    logger.info(report, metrics);

    try {
      await redisIntegration.info('Minute Metrics', metrics);
    } catch (err) {
      logger.error('Failed to report metrics to Redis', { error: err?.message });
    }
  });

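  // Sketch (assumption, not in this diff): MetricCollector is presumed to be a
  // plain counter map, roughly:
  //   class MetricCollector {
  //     constructor() { this.counts = { kafka_pulled: 0, parse_error: 0, db_inserted: 0, db_failed: 0 }; }
  //     increment(key) { this.counts[key] = (this.counts[key] ?? 0) + 1; }
  //     getAndReset() { const out = { ...this.counts }; for (const k of Object.keys(out)) this.counts[k] = 0; return out; }
  //   }
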
  const errorQueueKey = buildErrorQueueKey(config.redis.projectName);

  const handleMessage = async (message) => {
    // Only count messages pulled straight from Kafka; retried items have no topic
    if (message.topic) {
      metricCollector.increment('kafka_pulled');
    }
    try {
      const messageValue = Buffer.isBuffer(message.value)
        ? message.value.toString('utf8')
        : message.value;
      const messageKey = Buffer.isBuffer(message.key)
        ? message.key.toString('utf8')
        : message.key;
      logger.info('Kafka message received', {
        topic: message.topic,
        partition: message.partition,
        offset: message.offset,
        key: messageKey,
        value: messageValue
      });
      const inserted = await processKafkaMessage({ message, dbManager, config });
      metricCollector.increment('db_inserted');
      logger.info('Kafka message processed', { inserted });
    } catch (error) {
      if (error.type === 'PARSE_ERROR') {
        metricCollector.increment('parse_error');
      } else {
        metricCollector.increment('db_failed');
      }
      logger.error('Message processing failed', {
        error: error?.message,
        type: error?.type,
        stack: error?.stack,
        rawPayload: error?.rawPayload,
        validationIssues: error?.validationIssues,
        dbContext: error?.dbContext
      });
      throw error; // Re-throw to trigger onError
    }
  };

  const handleError = async (error, message) => {
    logger.error('Kafka processing error', {
      error: error?.message,
      type: error?.type,
      stack: error?.stack
    });
    try {
      await redisIntegration.error('Kafka processing error', {
        module: 'kafka',
        stack: error?.stack || error?.message
      });
    } catch (redisError) {
      logger.error('Redis error log failed', { error: redisError?.message });
    }
    if (message) {
      const messageValue = Buffer.isBuffer(message.value)
        ? message.value.toString('utf8')
        : message.value;
      try {
        await enqueueError(redisClient, errorQueueKey, {
          attempts: 0,
          value: messageValue,
          meta: {
            topic: message.topic,
            partition: message.partition,
            offset: message.offset,
            key: message.key
          },
          timestamp: Date.now()
        });
      } catch (queueErr) {
        logger.error('Enqueue error payload failed', { error: queueErr?.message });
      }
    }
  };

  const consumer = createKafkaConsumer({
    kafkaConfig: config.kafka,
    onMessage: handleMessage,
    onError: handleError
  });
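
  // Note (assumption about the wrapper's contract): createKafkaConsumer is
  // expected to call onMessage once per record and, when onMessage throws, to
  // call onError(error, message) so the raw payload can be parked in the Redis
  // error queue below.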

  // Start retry worker (non-blocking)
  startErrorRetryWorker({
    client: redisClient,
    queueKey: errorQueueKey,
    redisIntegration,
    handler: async (item) => {
      if (!item?.value) {
        throw new Error('Missing value in retry payload');
      }
      // Replay through the normal pipeline; the absence of `topic` means the
      // retry is not double-counted as a fresh Kafka pull.
      await handleMessage({ value: item.value });
    }
  }).catch(err => {
    logger.error('Retry worker failed', { error: err?.message });
  });

  // Graceful Shutdown Logic
  const shutdown = async (signal) => {
    logger.info(`Received ${signal}, shutting down...`);

    try {
      // 1. Close Kafka Consumer
      if (consumer) {
        await new Promise((resolve) => consumer.close(true, resolve));
        logger.info('Kafka consumer closed');
      }

      // 2. Stop the Redis heartbeat if RedisIntegration exposes such a method;
      //    otherwise closing the client below is sufficient.
      // redisIntegration.stopHeartbeat();

      // 3. Close Redis Client
      await redisClient.quit();
      logger.info('Redis client closed');

      // 4. Close Database Pool
      await dbManager.close();
      logger.info('Database connection closed');

      process.exit(0);
    } catch (err) {
      logger.error('Error during shutdown', { error: err?.message });
      process.exit(1);
    }
  };

  process.on('SIGTERM', () => shutdown('SIGTERM'));
  process.on('SIGINT', () => shutdown('SIGINT'));
};

bootstrap().catch((error) => {
  logger.error('Service bootstrap failed', { error: error?.message });
  process.exit(1);
});
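
The Redis error queue referenced above lives in ./redis/errorQueue.js, which is not included in this diff. A minimal sketch of the assumed contract (assumptions: node-redis v4 list commands, JSON-serialized items, a capped retry count; only the three exported names come from the imports above):

// errorQueue.js — hedged sketch, not the committed implementation
const MAX_ATTEMPTS = 5;
const POLL_INTERVAL_MS = 30_000;

export const buildErrorQueueKey = (projectName) => `${projectName}:error-queue`;

export const enqueueError = async (client, queueKey, item) => {
  await client.lPush(queueKey, JSON.stringify(item));
};

export const startErrorRetryWorker = async ({ client, queueKey, redisIntegration, handler }) => {
  for (;;) {
    const raw = await client.rPop(queueKey);
    if (!raw) {
      // Queue drained; wait before polling again
      await new Promise((resolve) => setTimeout(resolve, POLL_INTERVAL_MS));
      continue;
    }
    const item = JSON.parse(raw);
    try {
      await handler(item);
    } catch (err) {
      item.attempts = (item.attempts ?? 0) + 1;
      if (item.attempts < MAX_ATTEMPTS) {
        await client.lPush(queueKey, JSON.stringify(item)); // requeue for another try
      } else {
        await redisIntegration.error('Error queue item dropped after max retries', {
          module: 'errorQueue',
          stack: err?.stack || err?.message
        });
      }
    }
  }
};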