feat: initialize the bls-onoffline-backend project skeleton

Add the core modules — Kafka consumer, database writes, Redis integration — to handle device online/offline events
- Create the base project directory structure and configuration files
- Implement the Kafka consumption logic with manual offset commits
- Add the PostgreSQL connection and partitioned-table management
- Integrate Redis for the error queue and the project heartbeat
- Include data-processing logic that distinguishes reboot from non-reboot records
- Provide database initialization scripts and a partition-creation utility
- Add unit tests and lint scripts
2026-02-04 17:51:50 +08:00
commit a8c7cf74e6
41 changed files with 6760 additions and 0 deletions

bls-onoffline-backend/dist/index.js (vendored, new file, 857 lines)

@@ -0,0 +1,857 @@
import cron from "node-cron";
import dotenv from "dotenv";
import pg from "pg";
import fs from "fs";
import path from "path";
import { fileURLToPath } from "url";
import kafka from "kafka-node";
import { randomUUID } from "crypto";
import { z } from "zod";
import { createClient } from "redis";
dotenv.config();
const parseNumber = (value, defaultValue) => {
const parsed = Number(value);
return Number.isFinite(parsed) ? parsed : defaultValue;
};
const parseList = (value) => (value || "").split(",").map((item) => item.trim()).filter(Boolean);
const config = {
env: process.env.NODE_ENV || "development",
port: parseNumber(process.env.PORT, 3001),
kafka: {
brokers: parseList(process.env.KAFKA_BROKERS),
topic: process.env.KAFKA_TOPIC || process.env.KAFKA_TOPICS || "blwlog4Nodejs-rcu-onoffline-topic",
groupId: process.env.KAFKA_GROUP_ID || "bls-onoffline-group",
clientId: process.env.KAFKA_CLIENT_ID || "bls-onoffline-client",
consumerInstances: parseNumber(process.env.KAFKA_CONSUMER_INSTANCES, 1),
maxInFlight: parseNumber(process.env.KAFKA_MAX_IN_FLIGHT, 50),
fetchMaxBytes: parseNumber(process.env.KAFKA_FETCH_MAX_BYTES, 10 * 1024 * 1024),
fetchMinBytes: parseNumber(process.env.KAFKA_FETCH_MIN_BYTES, 1),
fetchMaxWaitMs: parseNumber(process.env.KAFKA_FETCH_MAX_WAIT_MS, 100),
autoCommitIntervalMs: parseNumber(process.env.KAFKA_AUTO_COMMIT_INTERVAL_MS, 5e3),
logMessages: process.env.KAFKA_LOG_MESSAGES === "true",
sasl: process.env.KAFKA_SASL_USERNAME && process.env.KAFKA_SASL_PASSWORD ? {
mechanism: process.env.KAFKA_SASL_MECHANISM || "plain",
username: process.env.KAFKA_SASL_USERNAME,
password: process.env.KAFKA_SASL_PASSWORD
} : void 0
},
db: {
host: process.env.DB_HOST || process.env.POSTGRES_HOST || "localhost",
port: parseNumber(process.env.DB_PORT || process.env.POSTGRES_PORT, 5432),
user: process.env.DB_USER || process.env.POSTGRES_USER || "postgres",
password: process.env.DB_PASSWORD || process.env.POSTGRES_PASSWORD || "",
database: process.env.DB_DATABASE || process.env.POSTGRES_DATABASE || "log_platform",
max: parseNumber(process.env.DB_MAX_CONNECTIONS || process.env.POSTGRES_MAX_CONNECTIONS, 10),
ssl: process.env.DB_SSL === "true" ? { rejectUnauthorized: false } : void 0,
schema: process.env.DB_SCHEMA || "onoffline",
table: process.env.DB_TABLE || "onoffline_record"
},
redis: {
host: process.env.REDIS_HOST || "localhost",
port: parseNumber(process.env.REDIS_PORT, 6379),
password: process.env.REDIS_PASSWORD || void 0,
db: parseNumber(process.env.REDIS_DB, 0),
projectName: process.env.REDIS_PROJECT_NAME || "bls-onoffline",
apiBaseUrl: process.env.REDIS_API_BASE_URL || `http://localhost:${parseNumber(process.env.PORT, 3001)}`
}
};
const format = (level, message, context) => {
const payload = {
level,
message,
timestamp: Date.now(),
...context ? { context } : {}
};
return JSON.stringify(payload);
};
const logger$1 = {
info(message, context) {
process.stdout.write(`${format("info", message, context)}\n`);
},
warn(message, context) {
process.stdout.write(`${format("warn", message, context)}\n`);
},
error(message, context) {
process.stderr.write(`${format("error", message, context)}\n`);
}
};
const { Pool } = pg;
const columns = [
"guid",
"ts_ms",
"write_ts_ms",
"hotel_id",
"mac",
"device_id",
"room_id",
"ip",
"current_status",
"launcher_version",
"reboot_reason"
];
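// Batched insert helper: insertRows() flattens all rows into one parameterized
// multi-VALUES INSERT with ON CONFLICT DO NOTHING, so rows that hit a unique
// constraint (e.g. re-delivered messages) are skipped instead of failing the batch.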
class DatabaseManager {
constructor(dbConfig) {
this.pool = new Pool({
host: dbConfig.host,
port: dbConfig.port,
user: dbConfig.user,
password: dbConfig.password,
database: dbConfig.database,
max: dbConfig.max,
ssl: dbConfig.ssl
});
}
async insertRows({ schema, table, rows }) {
if (!rows || rows.length === 0) {
return;
}
const values = [];
const placeholders = rows.map((row, rowIndex) => {
const offset = rowIndex * columns.length;
columns.forEach((column) => {
values.push(row[column] ?? null);
});
const params = columns.map((_, columnIndex) => `$${offset + columnIndex + 1}`);
return `(${params.join(", ")})`;
});
const statement = `
INSERT INTO ${schema}.${table} (${columns.join(", ")})
VALUES ${placeholders.join(", ")}
ON CONFLICT DO NOTHING
`;
try {
await this.pool.query(statement, values);
} catch (error) {
logger$1.error("Database insert failed", {
error: error?.message,
schema,
table,
rowsLength: rows.length
});
throw error;
}
}
async checkConnection() {
let client;
try {
const connectPromise = this.pool.connect();
const timeoutPromise = new Promise((_, reject) => {
setTimeout(() => reject(new Error("Connection timeout")), 5e3);
});
try {
client = await Promise.race([connectPromise, timeoutPromise]);
} catch (raceError) {
connectPromise.then((c) => c.release()).catch(() => {
});
throw raceError;
}
await client.query("SELECT 1");
return true;
} catch (err) {
logger$1.error("Database check connection failed", { error: err.message });
return false;
} finally {
if (client) {
client.release();
}
}
}
async close() {
await this.pool.end();
}
}
const dbManager = new DatabaseManager(config.db);
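// Partition maintenance: the parent table (created by scripts/init_db.sql) is
// range-partitioned by an epoch-millisecond timestamp, presumably ts_ms.
// ensurePartitions() pre-creates one child table per day, named
// <schema>.<table>_YYYYMMDD, covering [local midnight, next local midnight).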
class PartitionManager {
/**
* Calculate the start and end timestamps (milliseconds) for a given date.
* @param {Date} date - The date to calculate for.
* @returns {Object} { startMs, endMs, partitionSuffix }
*/
getPartitionInfo(date) {
const yyyy = date.getFullYear();
const mm = String(date.getMonth() + 1).padStart(2, "0");
const dd = String(date.getDate()).padStart(2, "0");
const partitionSuffix = `${yyyy}${mm}${dd}`;
const start = new Date(date);
start.setHours(0, 0, 0, 0);
const startMs = start.getTime();
const end = new Date(date);
end.setDate(end.getDate() + 1);
end.setHours(0, 0, 0, 0);
const endMs = end.getTime();
return { startMs, endMs, partitionSuffix };
}
/**
* Ensure partitions exist for the past M days and next N days.
* @param {number} daysAhead - Number of days to pre-create.
* @param {number} daysBack - Number of days to look back.
*/
async ensurePartitions(daysAhead = 30, daysBack = 15) {
const client = await dbManager.pool.connect();
try {
logger$1.info(`Starting partition check for the past ${daysBack} days and next ${daysAhead} days...`);
console.log(`Starting partition check for the past ${daysBack} days and next ${daysAhead} days...`);
const now = /* @__PURE__ */ new Date();
for (let i = -daysBack; i < daysAhead; i++) {
const targetDate = new Date(now);
targetDate.setDate(now.getDate() + i);
const { startMs, endMs, partitionSuffix } = this.getPartitionInfo(targetDate);
const schema = config.db.schema;
const table = config.db.table;
const partitionName = `${schema}.${table}_${partitionSuffix}`;
const checkSql = `
SELECT to_regclass($1) as exists;
`;
const checkRes = await client.query(checkSql, [partitionName]);
if (!checkRes.rows[0].exists) {
logger$1.info(`Creating partition ${partitionName} for range [${startMs}, ${endMs})`);
console.log(`Creating partition ${partitionName} for range [${startMs}, ${endMs})`);
const createSql = `
CREATE TABLE IF NOT EXISTS ${partitionName}
PARTITION OF ${schema}.${table}
FOR VALUES FROM (${startMs}) TO (${endMs});
`;
await client.query(createSql);
}
}
logger$1.info("Partition check completed.");
} catch (err) {
logger$1.error("Error ensuring partitions:", err);
throw err;
} finally {
client.release();
}
}
}
const partitionManager = new PartitionManager();
const __filename$1 = fileURLToPath(import.meta.url);
const __dirname$1 = path.dirname(__filename$1);
class DatabaseInitializer {
async initialize() {
logger$1.info("Starting database initialization check...");
await this.ensureDatabaseExists();
await this.ensureSchemaAndTable();
await partitionManager.ensurePartitions(30);
console.log("Database initialization completed successfully.");
logger$1.info("Database initialization completed successfully.");
}
async ensureDatabaseExists() {
const { host, port, user, password, database, ssl } = config.db;
console.log(`Checking if database '${database}' exists at ${host}:${port}...`);
const client = new pg.Client({
host,
port,
user,
password,
database: "postgres",
ssl: ssl ? { rejectUnauthorized: false } : false
});
try {
await client.connect();
const checkRes = await client.query(
`SELECT 1 FROM pg_database WHERE datname = $1`,
[database]
);
if (checkRes.rowCount === 0) {
logger$1.info(`Database '${database}' does not exist. Creating...`);
await client.query(`CREATE DATABASE "${database}"`);
console.log(`Database '${database}' created.`);
logger$1.info(`Database '${database}' created.`);
} else {
console.log(`Database '${database}' already exists.`);
logger$1.info(`Database '${database}' already exists.`);
}
} catch (err) {
logger$1.error("Error ensuring database exists:", err);
throw err;
} finally {
await client.end();
}
}
async ensureSchemaAndTable() {
const client = await dbManager.pool.connect();
try {
const sqlPathCandidates = [
path.resolve(process.cwd(), "scripts/init_db.sql"),
path.resolve(__dirname$1, "../scripts/init_db.sql"),
path.resolve(__dirname$1, "../../scripts/init_db.sql")
];
const sqlPath = sqlPathCandidates.find((candidate) => fs.existsSync(candidate));
if (!sqlPath) {
throw new Error(`init_db.sql not found. Candidates: ${sqlPathCandidates.join(" | ")}`);
}
const sql = fs.readFileSync(sqlPath, "utf8");
console.log(`Executing init_db.sql from ${sqlPath}...`);
logger$1.info("Executing init_db.sql...");
await client.query(sql);
console.log("Schema and parent table initialized.");
logger$1.info("Schema and parent table initialized.");
} catch (err) {
logger$1.error("Error initializing schema and table:", err);
throw err;
} finally {
client.release();
}
}
}
const dbInitializer = new DatabaseInitializer();
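// Manual-commit bookkeeping: offsets are queued per topic-partition in arrival order.
// markDone() flags an offset as processed and, once a contiguous prefix of the queue
// is done, returns lastDoneOffset + 1 as the next safe commit position, so
// out-of-order completions never commit past an unprocessed message (at-least-once).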
class OffsetTracker {
constructor() {
this.partitions = /* @__PURE__ */ new Map();
}
// Called when a message is received (before processing)
add(topic, partition, offset) {
const key = `${topic}-${partition}`;
if (!this.partitions.has(key)) {
this.partitions.set(key, []);
}
this.partitions.get(key).push({ offset, done: false });
}
// Called when a message is successfully processed
// Returns the next offset to commit (if any advancement is possible), or null
markDone(topic, partition, offset) {
const key = `${topic}-${partition}`;
const list = this.partitions.get(key);
if (!list) return null;
const item = list.find((i) => i.offset === offset);
if (item) {
item.done = true;
}
let lastDoneOffset = null;
let itemsRemoved = false;
while (list.length > 0 && list[0].done) {
lastDoneOffset = list[0].offset;
list.shift();
itemsRemoved = true;
}
if (itemsRemoved && lastDoneOffset !== null) {
return lastDoneOffset + 1;
}
return null;
}
}
const { ConsumerGroup } = kafka;
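// One ConsumerGroup per instance with autoCommit disabled. Backpressure: pause the
// consumer once inFlight reaches maxInFlight and resume when it drops back below;
// offsets are committed via sendOffsetCommitRequest only when the OffsetTracker
// reports a contiguous run of processed messages.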
const createOneConsumer = ({ kafkaConfig, onMessage, onError, instanceIndex }) => {
const kafkaHost = kafkaConfig.brokers.join(",");
const clientId = instanceIndex === 0 ? kafkaConfig.clientId : `${kafkaConfig.clientId}-${instanceIndex}`;
const id = `${clientId}-${process.pid}-${Date.now()}`;
const maxInFlight = Number.isFinite(kafkaConfig.maxInFlight) ? kafkaConfig.maxInFlight : 50;
let inFlight = 0;
const tracker = new OffsetTracker();
const consumer = new ConsumerGroup(
{
kafkaHost,
groupId: kafkaConfig.groupId,
clientId,
id,
fromOffset: "earliest",
protocol: ["roundrobin"],
outOfRangeOffset: "latest",
autoCommit: false,
autoCommitIntervalMs: kafkaConfig.autoCommitIntervalMs,
fetchMaxBytes: kafkaConfig.fetchMaxBytes,
fetchMinBytes: kafkaConfig.fetchMinBytes,
fetchMaxWaitMs: kafkaConfig.fetchMaxWaitMs,
sasl: kafkaConfig.sasl
},
kafkaConfig.topic
);
const tryResume = () => {
if (inFlight < maxInFlight && consumer.paused) {
consumer.resume();
}
};
consumer.on("message", (message) => {
inFlight += 1;
tracker.add(message.topic, message.partition, message.offset);
if (inFlight >= maxInFlight) {
consumer.pause();
}
Promise.resolve(onMessage(message)).then(() => {
const commitOffset = tracker.markDone(message.topic, message.partition, message.offset);
if (commitOffset !== null) {
consumer.sendOffsetCommitRequest([{
topic: message.topic,
partition: message.partition,
offset: commitOffset,
metadata: "m"
}], (err) => {
if (err) {
logger$1.error("Kafka commit failed", { error: err?.message, topic: message.topic, partition: message.partition, offset: commitOffset });
}
});
}
}).catch((error) => {
logger$1.error("Kafka message handling failed", { error: error?.message });
if (onError) {
onError(error, message);
}
}).finally(() => {
inFlight -= 1;
tryResume();
});
});
consumer.on("error", (error) => {
logger$1.error("Kafka consumer error", { error: error?.message });
if (onError) {
onError(error);
}
});
consumer.on("connect", () => {
logger$1.info(`Kafka Consumer connected`, {
groupId: kafkaConfig.groupId,
clientId
});
});
consumer.on("rebalancing", () => {
logger$1.info(`Kafka Consumer rebalancing`, {
groupId: kafkaConfig.groupId,
clientId
});
});
consumer.on("rebalanced", () => {
logger$1.info("Kafka Consumer rebalanced", { clientId, groupId: kafkaConfig.groupId });
});
consumer.on("error", (err) => {
logger$1.error("Kafka Consumer Error", { error: err.message });
});
consumer.on("offsetOutOfRange", (err) => {
logger$1.warn("Offset out of range", { error: err.message, topic: err.topic, partition: err.partition });
});
consumer.on("offsetOutOfRange", (error) => {
logger$1.warn(`Kafka Consumer offset out of range`, {
error: error?.message,
groupId: kafkaConfig.groupId,
clientId
});
});
consumer.on("close", () => {
logger$1.warn(`Kafka Consumer closed`, {
groupId: kafkaConfig.groupId,
clientId
});
});
return consumer;
};
const createKafkaConsumers = ({ kafkaConfig, onMessage, onError }) => {
const instances = Number.isFinite(kafkaConfig.consumerInstances) ? kafkaConfig.consumerInstances : 1;
const count = Math.max(1, instances);
return Array.from(
{ length: count },
(_, idx) => createOneConsumer({ kafkaConfig, onMessage, onError, instanceIndex: idx })
);
};
const createGuid = () => randomUUID().replace(/-/g, "");
const toNumber = (value) => {
if (value === void 0 || value === null || value === "") {
return value;
}
if (typeof value === "number") {
return value;
}
const parsed = Number(value);
return Number.isFinite(parsed) ? parsed : value;
};
const toStringAllowEmpty = (value) => {
if (value === void 0 || value === null) {
return value;
}
return String(value);
};
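// Incoming payload validation: numeric-looking fields are coerced to numbers and the
// rest to strings; every field except HotelCode is optional/nullable.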
const kafkaPayloadSchema = z.object({
HotelCode: z.preprocess(toNumber, z.number()),
MAC: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable(),
HostNumber: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable(),
RoomNumber: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable(),
EndPoint: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable(),
CurrentStatus: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable(),
CurrentTime: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable(),
UnixTime: z.preprocess(toNumber, z.number().nullable()).optional().nullable(),
LauncherVersion: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable(),
RebootReason: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable()
});
const normalizeText = (value, maxLength) => {
if (value === void 0 || value === null) {
return null;
}
const str = String(value);
if (maxLength && str.length > maxLength) {
return str.substring(0, maxLength);
}
return str;
};
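// Reboot vs. non-reboot handling: a non-empty RebootReason forces current_status to
// "on" regardless of CurrentStatus; otherwise CurrentStatus is stored as-is.
// UnixTime values below 1e11 are treated as seconds and scaled to milliseconds,
// falling back to CurrentTime and finally to Date.now().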
const buildRowsFromPayload = (rawPayload) => {
const payload = kafkaPayloadSchema.parse(rawPayload);
const rebootReason = normalizeText(payload.RebootReason, 255);
const currentStatusRaw = normalizeText(payload.CurrentStatus, 255);
const hasRebootReason = rebootReason !== null && rebootReason !== "";
const currentStatus = hasRebootReason ? "on" : currentStatusRaw;
let tsMs = payload.UnixTime;
if (typeof tsMs === "number" && tsMs < 1e11) {
tsMs = tsMs * 1e3;
}
if (!tsMs && payload.CurrentTime) {
const parsed = Date.parse(payload.CurrentTime);
if (!isNaN(parsed)) {
tsMs = parsed;
}
}
if (!tsMs) {
tsMs = Date.now();
}
const mac = normalizeText(payload.MAC) || "";
const deviceId = normalizeText(payload.HostNumber) || "";
const roomId = normalizeText(payload.RoomNumber) || "";
const row = {
guid: createGuid(),
ts_ms: tsMs,
write_ts_ms: Date.now(),
hotel_id: payload.HotelCode,
mac,
device_id: deviceId,
room_id: roomId,
ip: normalizeText(payload.EndPoint),
current_status: currentStatus,
launcher_version: normalizeText(payload.LauncherVersion, 255),
reboot_reason: rebootReason
};
return [row];
};
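// processKafkaMessage tags failures with error.type (PARSE_ERROR, VALIDATION_ERROR,
// DB_ERROR) so the caller can pick the right metric and decide whether to push the
// raw message onto the Redis error queue.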
const processKafkaMessage = async ({ message, dbManager: dbManager2, config: config2 }) => {
let rows;
try {
const rawValue = message.value.toString();
let payload;
try {
payload = JSON.parse(rawValue);
} catch (e) {
logger.error("JSON Parse Error", { error: e.message, rawValue });
const error = new Error(`JSON Parse Error: ${e.message}`);
error.type = "PARSE_ERROR";
throw error;
}
const validationResult = kafkaPayloadSchema.safeParse(payload);
if (!validationResult.success) {
logger.error("Schema Validation Failed", {
errors: validationResult.error.errors,
payload
});
const error = new Error(`Schema Validation Failed: ${JSON.stringify(validationResult.error.errors)}`);
error.type = "VALIDATION_ERROR";
throw error;
}
rows = buildRowsFromPayload(payload);
} catch (error) {
throw error;
}
try {
await dbManager2.insertRows({ schema: config2.db.schema, table: config2.db.table, rows });
} catch (error) {
error.type = "DB_ERROR";
const sample = rows?.[0];
error.dbContext = {
rowsLength: rows?.length || 0,
sampleRow: sample ? {
guid: sample.guid,
ts_ms: sample.ts_ms,
mac: sample.mac,
device_id: sample.device_id,
room_id: sample.room_id,
current_status: sample.current_status
} : null
};
throw error;
}
return rows.length;
};
const createRedisClient = async (config2) => {
const client = createClient({
socket: {
host: config2.host,
port: config2.port
},
password: config2.password,
database: config2.db
});
await client.connect();
return client;
};
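// RedisIntegration mirrors structured logs into the `<projectName>_项目控制台` list
// and pushes a heartbeat entry onto the shared "项目心跳" list every 3 seconds.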
class RedisIntegration {
constructor(client, projectName, apiBaseUrl) {
this.client = client;
this.projectName = projectName;
this.apiBaseUrl = apiBaseUrl;
this.heartbeatKey = "项目心跳";
this.logKey = `${projectName}_项目控制台`;
}
async info(message, context) {
const payload = {
timestamp: (/* @__PURE__ */ new Date()).toISOString(),
level: "info",
message,
metadata: context || void 0
};
await this.client.rPush(this.logKey, JSON.stringify(payload));
}
async error(message, context) {
const payload = {
timestamp: (/* @__PURE__ */ new Date()).toISOString(),
level: "error",
message,
metadata: context || void 0
};
await this.client.rPush(this.logKey, JSON.stringify(payload));
}
startHeartbeat() {
setInterval(() => {
const payload = {
projectName: this.projectName,
apiBaseUrl: this.apiBaseUrl,
lastActiveAt: Date.now()
};
this.client.rPush(this.heartbeatKey, JSON.stringify(payload)).catch((err) => {
logger$1.error("Heartbeat push failed", { error: err?.message });
});
}, 3e3);
}
}
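// Error queue: failed messages are pushed to `<projectName>_error_queue`. The retry
// worker BLPOPs items, re-runs the handler, and on failure re-enqueues with
// attempts + 1; once attempts reaches maxAttempts (default 5) the payload is only
// reported via redisIntegration.error and dropped from the queue.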
const buildErrorQueueKey = (projectName) => `${projectName}_error_queue`;
const enqueueError = async (client, queueKey, payload) => {
try {
await client.rPush(queueKey, JSON.stringify(payload));
} catch (error) {
logger$1.error("Redis enqueue error failed", { error: error?.message });
throw error;
}
};
const startErrorRetryWorker = async ({
client,
queueKey,
handler,
redisIntegration,
maxAttempts = 5
}) => {
while (true) {
const result = await client.blPop(queueKey, 0);
const raw = result?.element;
if (!raw) {
continue;
}
let item;
try {
item = JSON.parse(raw);
} catch (error) {
logger$1.error("Invalid error payload", { error: error?.message });
await redisIntegration.error("Invalid error payload", { module: "redis", stack: error?.message });
continue;
}
const attempts = item.attempts || 0;
try {
await handler(item);
} catch (error) {
logger$1.error("Retry handler failed", { error: error?.message, stack: error?.stack });
const nextPayload = {
...item,
attempts: attempts + 1,
lastError: error?.message,
lastAttemptAt: Date.now()
};
if (nextPayload.attempts >= maxAttempts) {
await redisIntegration.error("Retry attempts exceeded", { module: "retry", stack: JSON.stringify(nextPayload) });
} else {
await enqueueError(client, queueKey, nextPayload);
}
}
}
};
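// Per-minute counters (kafka_pulled, parse_error, db_inserted, db_failed);
// getAndReset() snapshots and zeroes them so the "* * * * *" cron job reports deltas.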
class MetricCollector {
constructor() {
this.reset();
}
reset() {
this.metrics = {
kafka_pulled: 0,
parse_error: 0,
db_inserted: 0,
db_failed: 0
};
}
increment(metric, count = 1) {
if (this.metrics.hasOwnProperty(metric)) {
this.metrics[metric] += count;
}
}
getAndReset() {
const current = { ...this.metrics };
this.reset();
return current;
}
}
const bootstrap = async () => {
logger$1.info("Starting application with config", {
env: process.env.NODE_ENV,
db: {
host: config.db.host,
port: config.db.port,
user: config.db.user,
database: config.db.database,
schema: config.db.schema
},
kafka: {
brokers: config.kafka.brokers,
topic: config.kafka.topic,
groupId: config.kafka.groupId
},
redis: {
host: config.redis.host,
port: config.redis.port
}
});
await dbInitializer.initialize();
const metricCollector = new MetricCollector();
cron.schedule("0 0 * * *", async () => {
logger$1.info("Running scheduled partition maintenance...");
try {
await partitionManager.ensurePartitions(30);
} catch (err) {
logger$1.error("Scheduled partition maintenance failed", err);
}
});
const redisClient = await createRedisClient(config.redis);
const redisIntegration = new RedisIntegration(
redisClient,
config.redis.projectName,
config.redis.apiBaseUrl
);
redisIntegration.startHeartbeat();
cron.schedule("* * * * *", async () => {
const metrics = metricCollector.getAndReset();
const report = `[Minute Metrics] Pulled: ${metrics.kafka_pulled}, Parse Error: ${metrics.parse_error}, Inserted: ${metrics.db_inserted}, Failed: ${metrics.db_failed}`;
console.log(report);
logger$1.info(report, metrics);
try {
await redisIntegration.info("Minute Metrics", metrics);
} catch (err) {
logger$1.error("Failed to report metrics to Redis", { error: err?.message });
}
});
const errorQueueKey = buildErrorQueueKey(config.redis.projectName);
const handleError = async (error, message) => {
logger$1.error("Kafka processing error", {
error: error?.message,
type: error?.type,
stack: error?.stack
});
try {
await redisIntegration.error("Kafka processing error", {
module: "kafka",
stack: error?.stack || error?.message
});
} catch (redisError) {
logger$1.error("Redis error log failed", { error: redisError?.message });
}
if (message) {
const messageValue = Buffer.isBuffer(message.value) ? message.value.toString("utf8") : message.value;
try {
await enqueueError(redisClient, errorQueueKey, {
attempts: 0,
value: messageValue,
meta: {
topic: message.topic,
partition: message.partition,
offset: message.offset,
key: message.key
},
timestamp: Date.now()
});
} catch (enqueueError2) {
logger$1.error("Enqueue error payload failed", { error: enqueueError2?.message });
}
}
};
const handleMessage = async (message) => {
if (message.topic) {
metricCollector.increment("kafka_pulled");
}
if (config.kafka.logMessages) {
// Optional debug logging of raw message metadata (KAFKA_LOG_MESSAGES=true)
const messageValue = Buffer.isBuffer(message.value) ? message.value.toString("utf8") : message.value;
logger$1.info("Kafka message received", {
topic: message.topic,
partition: message.partition,
offset: message.offset,
valueLength: typeof messageValue === "string" ? messageValue.length : null
});
}
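// Processing loop: connection-class DB errors block here (the offset is never
// committed) and the consumer waits in 1-minute intervals until checkConnection()
// succeeds, then retries the same message; data/logic errors are counted, handed to
// the error queue via handleError, and the message is skipped.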
while (true) {
try {
const inserted = await processKafkaMessage({ message, dbManager, config });
metricCollector.increment("db_inserted");
return;
} catch (error) {
const isDbConnectionError = error.code && ["ECONNREFUSED", "57P03", "08006", "08001", "EADDRINUSE", "ETIMEDOUT"].includes(error.code) || error.message && (error.message.includes("ECONNREFUSED") || error.message.includes("connection") || error.message.includes("terminated") || error.message.includes("EADDRINUSE") || error.message.includes("ETIMEDOUT") || error.message.includes("The server does not support SSL connections"));
if (isDbConnectionError) {
logger$1.error("Database offline. Pausing consumption for 1 minute...", { error: error.message });
await new Promise((resolve) => setTimeout(resolve, 6e4));
while (true) {
const isConnected = await dbManager.checkConnection();
if (isConnected) {
logger$1.info("Database connection restored. Resuming processing...");
break;
}
logger$1.warn("Database still offline. Waiting 1 minute...");
await new Promise((resolve) => setTimeout(resolve, 6e4));
}
} else {
if (error.type === "PARSE_ERROR") {
metricCollector.increment("parse_error");
} else {
metricCollector.increment("db_failed");
}
logger$1.error("Message processing failed (Data/Logic Error), skipping message", {
error: error?.message,
type: error?.type
});
await handleError(error, message);
return;
}
}
}
};
const consumers = createKafkaConsumers({
kafkaConfig: config.kafka,
onMessage: handleMessage,
onError: handleError
});
startErrorRetryWorker({
client: redisClient,
queueKey: errorQueueKey,
redisIntegration,
handler: async (item) => {
if (!item?.value) {
throw new Error("Missing value in retry payload");
}
await handleMessage({ value: item.value });
}
}).catch((err) => {
logger$1.error("Retry worker failed", { error: err?.message });
});
const shutdown = async (signal) => {
logger$1.info(`Received ${signal}, shutting down...`);
try {
if (consumers && consumers.length > 0) {
await Promise.all(consumers.map((c) => new Promise((resolve) => c.close(true, resolve))));
logger$1.info("Kafka consumer closed", { count: consumers.length });
}
await redisClient.quit();
logger$1.info("Redis client closed");
await dbManager.close();
logger$1.info("Database connection closed");
process.exit(0);
} catch (err) {
logger$1.error("Error during shutdown", { error: err?.message });
process.exit(1);
}
};
process.on("SIGTERM", () => shutdown("SIGTERM"));
process.on("SIGINT", () => shutdown("SIGINT"));
};
bootstrap().catch((error) => {
logger$1.error("Service bootstrap failed", { error: error?.message });
process.exit(1);
});