import cron from 'node-cron';

import { config } from './config/config.js';
import dbManager from './db/databaseManager.js';
import dbInitializer from './db/initializer.js';
import partitionManager from './db/partitionManager.js';
import { createKafkaConsumers } from './kafka/consumer.js';
import { processKafkaMessage } from './processor/index.js';
import { createRedisClient } from './redis/redisClient.js';
import { RedisIntegration } from './redis/redisIntegration.js';
import { buildErrorQueueKey, enqueueError, startErrorRetryWorker } from './redis/errorQueue.js';
import { MetricCollector } from './utils/metricCollector.js';
import { logger } from './utils/logger.js';
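
// Service entry point: initialize the database, schedule partition-maintenance
// and metric-reporting cron jobs, wire Kafka consumers to the message
// processor, and replay failed payloads from a Redis-backed error queue.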
const bootstrap = async () => {
  // 0. Initialize Database (Create DB, Schema, Table, Partitions)
  await dbInitializer.initialize();

  // Metric Collector
  const metricCollector = new MetricCollector();
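
  // MetricCollector is assumed (from its usage below) to expose increment(name)
  // and getAndReset(); a minimal sketch of that contract:
  //
  //   class MetricCollector {
  //     #counts = { kafka_pulled: 0, parse_error: 0, db_inserted: 0, db_failed: 0 };
  //     increment(key) { this.#counts[key] = (this.#counts[key] ?? 0) + 1; }
  //     getAndReset() {
  //       const snapshot = { ...this.#counts };
  //       for (const key of Object.keys(snapshot)) this.#counts[key] = 0;
  //       return snapshot;
  //     }
  //   }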

  // 1. Setup Partition Maintenance Cron Job (Every day at 00:00)
  cron.schedule('0 0 * * *', async () => {
    logger.info('Running scheduled partition maintenance...');
    try {
      await partitionManager.ensurePartitions(30);
    } catch (err) {
      logger.error('Scheduled partition maintenance failed', err);
    }
  });
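  // Note: ensurePartitions(30) presumably keeps ~30 days of partitions created
  // ahead of time; the exact window semantics belong to partitionManager.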

  // dbManager is the singleton instance exported as the default of
  // ./db/databaseManager.js; partitionManager relies on its `pool` property.
  const redisClient = await createRedisClient(config.redis);
  const redisIntegration = new RedisIntegration(
    redisClient,
    config.redis.projectName,
    config.redis.apiBaseUrl
  );
  redisIntegration.startHeartbeat();
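
  // RedisIntegration (judging by its usage here) reports heartbeats plus
  // info()/error() events for this project to the configured API endpoint.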

  // 1.1 Setup Metric Reporting Cron Job (Every minute)
  cron.schedule('* * * * *', async () => {
    const metrics = metricCollector.getAndReset();
    const report = `[Minute Metrics] Pulled: ${metrics.kafka_pulled}, Parse Error: ${metrics.parse_error}, Inserted: ${metrics.db_inserted}, Failed: ${metrics.db_failed}`;
    console.log(report);
    logger.info(report, metrics);

    try {
      await redisIntegration.info('Minute Metrics', metrics);
    } catch (err) {
      logger.error('Failed to report metrics to Redis', { error: err?.message });
    }
  });

  const errorQueueKey = buildErrorQueueKey(config.redis.projectName);
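  // Failed payloads are parked under this per-project Redis key and replayed
  // later by the retry worker started further down.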

  const handleMessage = async (message) => {
    // Replayed payloads from the error queue arrive without a topic, so only
    // genuine Kafka deliveries count toward `kafka_pulled`.
    if (message.topic) {
      metricCollector.increment('kafka_pulled');
    }
    try {
      const messageValue = Buffer.isBuffer(message.value)
        ? message.value.toString('utf8')
        : message.value;
      const messageKey = Buffer.isBuffer(message.key)
        ? message.key.toString('utf8')
        : message.key;

      // Log the full payload only when explicitly enabled; otherwise log just
      // its length to keep log volume down.
      logger.info('Kafka message received', {
        topic: message.topic,
        partition: message.partition,
        offset: message.offset,
        key: messageKey,
        ...(config.kafka.logMessages
          ? { value: messageValue }
          : { valueLength: typeof messageValue === 'string' ? messageValue.length : null })
      });

      const inserted = await processKafkaMessage({ message, dbManager, config });
      metricCollector.increment('db_inserted');
      logger.info('Kafka message processed', { inserted });
    } catch (error) {
      if (error.type === 'PARSE_ERROR') {
        metricCollector.increment('parse_error');
      } else {
        metricCollector.increment('db_failed');
      }
      logger.error('Message processing failed', {
        error: error?.message,
        type: error?.type,
        stack: error?.stack,
        rawPayload: error?.rawPayload,
        validationIssues: error?.validationIssues,
        dbContext: error?.dbContext
      });
      throw error; // Re-throw to trigger onError
    }
  };
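
  // processKafkaMessage is expected (from the handling above) to throw errors
  // tagged with `type` ('PARSE_ERROR' for unparseable payloads) and optional
  // rawPayload / validationIssues / dbContext fields for diagnostics.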

  const handleError = async (error, message) => {
    logger.error('Kafka processing error', {
      error: error?.message,
      type: error?.type,
      stack: error?.stack
    });
    try {
      await redisIntegration.error('Kafka processing error', {
        module: 'kafka',
        stack: error?.stack || error?.message
      });
    } catch (redisError) {
      logger.error('Redis error log failed', { error: redisError?.message });
    }
    if (message) {
      const messageValue = Buffer.isBuffer(message.value)
        ? message.value.toString('utf8')
        : message.value;
      try {
        await enqueueError(redisClient, errorQueueKey, {
          attempts: 0,
          value: messageValue,
          meta: {
            topic: message.topic,
            partition: message.partition,
            offset: message.offset,
            key: message.key
          },
          timestamp: Date.now()
        });
      } catch (queueErr) {
        // `queueErr` avoids shadowing the imported enqueueError() helper.
        logger.error('Enqueue error payload failed', { error: queueErr?.message });
      }
    }
  };
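
  // A poison message that keeps failing will keep cycling through the queue
  // unless the retry worker enforces an attempt cap; `attempts: 0` is
  // initialized above so startErrorRetryWorker can presumably count retries.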

  const consumers = createKafkaConsumers({
    kafkaConfig: config.kafka,
    onMessage: handleMessage,
    onError: handleError
  });
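
  // createKafkaConsumers (by its use in shutdown below) returns an array of
  // consumers exposing a kafka-node-style close(force, callback) method.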

  // Start retry worker (non-blocking)
  startErrorRetryWorker({
    client: redisClient,
    queueKey: errorQueueKey,
    redisIntegration,
    handler: async (item) => {
      if (!item?.value) {
        throw new Error('Missing value in retry payload');
      }
      await handleMessage({ value: item.value });
    }
  }).catch(err => {
    logger.error('Retry worker failed', { error: err?.message });
  });
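
  // Replayed items re-enter handleMessage with only a `value`, so they skip
  // the kafka_pulled counter and the topic/partition/offset log fields.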

  // Graceful Shutdown Logic
  const shutdown = async (signal) => {
    logger.info(`Received ${signal}, shutting down...`);

    try {
      // 1. Close Kafka Consumers
      if (consumers && consumers.length > 0) {
        await Promise.all(consumers.map(c => new Promise((resolve) => c.close(true, resolve))));
        logger.info('Kafka consumers closed', { count: consumers.length });
      }

      // 2. Stop Redis Heartbeat (if method exists, otherwise just close client)
      // redisIntegration.stopHeartbeat(); // Assuming implementation, or just rely on client close

      // 3. Close Redis Client
      await redisClient.quit();
      logger.info('Redis client closed');

      // 4. Close Database Pool
      await dbManager.close();
      logger.info('Database connection closed');

      process.exit(0);
    } catch (err) {
      logger.error('Error during shutdown', { error: err?.message });
      process.exit(1);
    }
  };

  process.on('SIGTERM', () => shutdown('SIGTERM'));
  process.on('SIGINT', () => shutdown('SIGINT'));
};
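
// Signal handlers are registered inside bootstrap() so that `consumers` and
// `redisClient` are still in scope when shutdown() runs.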

bootstrap().catch((error) => {
  logger.error('Service bootstrap failed', { error: error?.message });
  process.exit(1);
});