feat: 初始化 bls-onoffline-backend 项目基础结构
添加 Kafka 消费者、数据库写入、Redis 集成等核心模块,实现设备上下线事件处理 - 创建项目基础目录结构与配置文件 - 实现 Kafka 消费逻辑与手动提交偏移量 - 添加 PostgreSQL 数据库连接与分区表管理 - 集成 Redis 用于错误队列和项目心跳 - 包含数据处理逻辑,区分重启与非重启数据 - 提供数据库初始化脚本与分区创建工具 - 添加单元测试与代码校验脚本
This commit is contained in:
56
bls-onoffline-backend/src/config/config.js
Normal file
56
bls-onoffline-backend/src/config/config.js
Normal file
@@ -0,0 +1,56 @@
|
||||
import dotenv from 'dotenv';
|
||||
|
||||
dotenv.config();
|
||||
|
||||
/**
 * Parse an environment-variable value as a finite number.
 *
 * Falls back to `defaultValue` when the value is unset, blank, or not a
 * finite number. The blank check is required because `Number('')` and
 * `Number('   ')` evaluate to 0, which would silently turn an empty env
 * var (e.g. `PORT=`) into port 0 instead of the intended default.
 *
 * @param {string|number|undefined|null} value - Raw env value.
 * @param {number} defaultValue - Fallback when value is missing/invalid.
 * @returns {number}
 */
const parseNumber = (value, defaultValue) => {
  if (value === undefined || value === null || String(value).trim() === '') {
    return defaultValue;
  }
  const parsed = Number(value);
  return Number.isFinite(parsed) ? parsed : defaultValue;
};
|
||||
|
||||
/**
 * Split a comma-separated env value into an array of trimmed, non-empty items.
 * An unset or empty value yields an empty array.
 *
 * @param {string|undefined|null} value - Raw comma-separated string.
 * @returns {string[]}
 */
const parseList = (value) => {
  const source = value || '';
  const items = [];
  for (const piece of source.split(',')) {
    const trimmed = piece.trim();
    if (trimmed) {
      items.push(trimmed);
    }
  }
  return items;
};
|
||||
|
||||
/**
 * Central runtime configuration, resolved once at module load from
 * process.env (after dotenv.config()). All numeric values go through
 * parseNumber so malformed env vars fall back to sane defaults.
 */
export const config = {
  env: process.env.NODE_ENV || 'development',
  port: parseNumber(process.env.PORT, 3001),
  kafka: {
    // Comma-separated broker list, e.g. "host1:9092,host2:9092".
    brokers: parseList(process.env.KAFKA_BROKERS),
    // NOTE(review): KAFKA_TOPICS (plural) is accepted as an alias but is
    // still treated as a single topic name — confirm multi-topic
    // consumption is not expected.
    topic: process.env.KAFKA_TOPIC || process.env.KAFKA_TOPICS || 'blwlog4Nodejs-rcu-onoffline-topic',
    groupId: process.env.KAFKA_GROUP_ID || 'bls-onoffline-group',
    clientId: process.env.KAFKA_CLIENT_ID || 'bls-onoffline-client',
    // Number of ConsumerGroup instances created by createKafkaConsumers.
    consumerInstances: parseNumber(process.env.KAFKA_CONSUMER_INSTANCES, 1),
    // Max unacknowledged messages per consumer before it pauses fetching.
    maxInFlight: parseNumber(process.env.KAFKA_MAX_IN_FLIGHT, 50),
    fetchMaxBytes: parseNumber(process.env.KAFKA_FETCH_MAX_BYTES, 10 * 1024 * 1024),
    fetchMinBytes: parseNumber(process.env.KAFKA_FETCH_MIN_BYTES, 1),
    fetchMaxWaitMs: parseNumber(process.env.KAFKA_FETCH_MAX_WAIT_MS, 100),
    // Present for completeness; the consumer runs with autoCommit disabled.
    autoCommitIntervalMs: parseNumber(process.env.KAFKA_AUTO_COMMIT_INTERVAL_MS, 5000),
    // When true, full message payloads are included in per-message logs.
    logMessages: process.env.KAFKA_LOG_MESSAGES === 'true',
    // SASL auth is enabled only when both username and password are set.
    sasl: process.env.KAFKA_SASL_USERNAME && process.env.KAFKA_SASL_PASSWORD ? {
      mechanism: process.env.KAFKA_SASL_MECHANISM || 'plain',
      username: process.env.KAFKA_SASL_USERNAME,
      password: process.env.KAFKA_SASL_PASSWORD
    } : undefined
  },
  db: {
    // DB_* variables take precedence over the POSTGRES_* aliases.
    host: process.env.DB_HOST || process.env.POSTGRES_HOST || 'localhost',
    port: parseNumber(process.env.DB_PORT || process.env.POSTGRES_PORT, 5432),
    user: process.env.DB_USER || process.env.POSTGRES_USER || 'postgres',
    password: process.env.DB_PASSWORD || process.env.POSTGRES_PASSWORD || '',
    database: process.env.DB_DATABASE || process.env.POSTGRES_DATABASE || 'log_platform',
    // Connection-pool size for the pg Pool.
    max: parseNumber(process.env.DB_MAX_CONNECTIONS || process.env.POSTGRES_MAX_CONNECTIONS, 10),
    ssl: process.env.DB_SSL === 'true' ? { rejectUnauthorized: false } : undefined,
    // Target schema/table for on/offline records (partitioned parent table).
    schema: process.env.DB_SCHEMA || 'onoffline',
    table: process.env.DB_TABLE || 'onoffline_record'
  },
  redis: {
    host: process.env.REDIS_HOST || 'localhost',
    port: parseNumber(process.env.REDIS_PORT, 6379),
    password: process.env.REDIS_PASSWORD || undefined,
    db: parseNumber(process.env.REDIS_DB, 0),
    // Used as the key prefix for heartbeat and error-queue entries.
    projectName: process.env.REDIS_PROJECT_NAME || 'bls-onoffline',
    apiBaseUrl: process.env.REDIS_API_BASE_URL || `http://localhost:${parseNumber(process.env.PORT, 3001)}`
  }
};
|
||||
103
bls-onoffline-backend/src/db/databaseManager.js
Normal file
103
bls-onoffline-backend/src/db/databaseManager.js
Normal file
@@ -0,0 +1,103 @@
|
||||
import pg from 'pg';
|
||||
import { config } from '../config/config.js';
|
||||
import { logger } from '../utils/logger.js';
|
||||
|
||||
const { Pool } = pg;
|
||||
|
||||
// Column order for the multi-row INSERT built by insertRows; each row object
// is read by these exact keys, and the $n placeholder numbering depends on
// this order. Must stay in sync with the table definition in init_db.sql.
const columns = [
  'guid',
  'ts_ms',
  'write_ts_ms',
  'hotel_id',
  'mac',
  'device_id',
  'room_id',
  'ip',
  'current_status',
  'launcher_version',
  'reboot_reason'
];
|
||||
|
||||
/**
 * Thin wrapper around a pg connection pool for the on/offline record table.
 * Exposes bulk insert, a timeout-guarded connectivity probe, and shutdown.
 */
export class DatabaseManager {
  /**
   * @param {Object} dbConfig - host/port/user/password/database/max/ssl
   *   (see config.db). The Pool connects lazily on first query.
   */
  constructor(dbConfig) {
    this.pool = new Pool({
      host: dbConfig.host,
      port: dbConfig.port,
      user: dbConfig.user,
      password: dbConfig.password,
      database: dbConfig.database,
      max: dbConfig.max,
      ssl: dbConfig.ssl
    });
  }

  /**
   * Bulk-insert rows into schema.table using a single multi-row INSERT.
   * ON CONFLICT DO NOTHING makes replayed Kafka messages idempotent.
   * No-op for an empty/missing rows array.
   *
   * @param {{schema: string, table: string, rows: Object[]}} args -
   *   rows are read by the module-level `columns` keys; missing keys
   *   become NULL.
   * @throws rethrows any pg error after logging it.
   */
  async insertRows({ schema, table, rows }) {
    if (!rows || rows.length === 0) {
      return;
    }
    const values = [];
    const placeholders = rows.map((row, rowIndex) => {
      // Placeholder numbering is row-major: row i uses $[i*cols+1 .. (i+1)*cols].
      const offset = rowIndex * columns.length;
      columns.forEach((column) => {
        values.push(row[column] ?? null);
      });
      const params = columns.map((_, columnIndex) => `$${offset + columnIndex + 1}`);
      return `(${params.join(', ')})`;
    });
    // schema/table are interpolated (identifiers cannot be parameterized);
    // they come from config, not from untrusted input.
    const statement = `
      INSERT INTO ${schema}.${table} (${columns.join(', ')})
      VALUES ${placeholders.join(', ')}
      ON CONFLICT DO NOTHING
    `;
    try {
      await this.pool.query(statement, values);
    } catch (error) {
      logger.error('Database insert failed', {
        error: error?.message,
        schema,
        table,
        rowsLength: rows.length
      });
      throw error;
    }
  }

  /**
   * Probe connectivity with a 5 s timeout.
   * @returns {Promise<boolean>} true when `SELECT 1` succeeds in time.
   */
  async checkConnection() {
    let client;
    let timeoutHandle;
    try {
      const connectPromise = this.pool.connect();

      // Reject after 5000ms. The handle is kept so the timer is ALWAYS
      // cleared in `finally`: otherwise, when the connect wins the race,
      // the timer still fires 5s later, producing an unhandled promise
      // rejection and keeping the event loop alive.
      const timeoutPromise = new Promise((_, reject) => {
        timeoutHandle = setTimeout(() => reject(new Error('Connection timeout')), 5000);
      });

      try {
        // Race the connection attempt against the timeout
        client = await Promise.race([connectPromise, timeoutPromise]);
      } catch (raceError) {
        // If we timed out, the connectPromise might still resolve later.
        // Ensure that late client is released back to the pool immediately.
        connectPromise.then(c => c.release()).catch(() => {});
        throw raceError;
      }

      await client.query('SELECT 1');
      return true;
    } catch (err) {
      logger.error('Database check connection failed', { error: err.message });
      return false;
    } finally {
      clearTimeout(timeoutHandle);
      if (client) {
        client.release();
      }
    }
  }

  /** Drain and close the underlying pool. */
  async close() {
    await this.pool.end();
  }
}
|
||||
|
||||
// Shared singleton pool instance; imported by partitionManager, initializer,
// and index.js so the whole service reuses one connection pool.
const dbManager = new DatabaseManager(config.db);

export default dbManager;
|
||||
100
bls-onoffline-backend/src/db/initializer.js
Normal file
100
bls-onoffline-backend/src/db/initializer.js
Normal file
@@ -0,0 +1,100 @@
|
||||
import pg from 'pg';
|
||||
import fs from 'fs';
|
||||
import path from 'path';
|
||||
import { fileURLToPath } from 'url';
|
||||
import { logger } from '../utils/logger.js';
|
||||
import partitionManager from './partitionManager.js';
|
||||
import dbManager from './databaseManager.js';
|
||||
import { config } from '../config/config.js';
|
||||
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
const __dirname = path.dirname(__filename);
|
||||
|
||||
/**
 * One-shot startup routine: ensures the target database, the schema and
 * partitioned parent table, and the near-term daily partitions all exist
 * before Kafka consumption begins.
 */
class DatabaseInitializer {
  /**
   * Run the full initialization sequence. Throws (and thus aborts startup)
   * if any step fails.
   */
  async initialize() {
    logger.info('Starting database initialization check...');

    // 1. Check if database exists, create if not
    await this.ensureDatabaseExists();

    // 2. Initialize Schema and Parent Table (if not exists)
    // Note: We need to use dbManager because it connects to the target database
    await this.ensureSchemaAndTable();

    // 3. Ensure Partitions for the next month
    await partitionManager.ensurePartitions(30);

    console.log('Database initialization completed successfully.');
    logger.info('Database initialization completed successfully.');
  }

  /**
   * Connect to the maintenance 'postgres' database and create the target
   * database if it is missing. CREATE DATABASE cannot take bind parameters,
   * so the name is interpolated — it comes from config, not untrusted input.
   */
  async ensureDatabaseExists() {
    const { host, port, user, password, database, ssl } = config.db;
    console.log(`Checking if database '${database}' exists at ${host}:${port}...`);

    // Connect to 'postgres' database to check/create target database
    const client = new pg.Client({
      host,
      port,
      user,
      password,
      database: 'postgres',
      ssl: ssl ? { rejectUnauthorized: false } : false
    });

    try {
      await client.connect();

      const checkRes = await client.query(
        `SELECT 1 FROM pg_database WHERE datname = $1`,
        [database]
      );

      if (checkRes.rowCount === 0) {
        logger.info(`Database '${database}' does not exist. Creating...`);
        // CREATE DATABASE cannot run inside a transaction block
        await client.query(`CREATE DATABASE "${database}"`);
        console.log(`Database '${database}' created.`);
        logger.info(`Database '${database}' created.`);
      } else {
        console.log(`Database '${database}' already exists.`);
        logger.info(`Database '${database}' already exists.`);
      }
    } catch (err) {
      logger.error('Error ensuring database exists:', err);
      throw err;
    } finally {
      // Always close the one-off maintenance connection.
      await client.end();
    }
  }

  /**
   * Execute scripts/init_db.sql against the target database to create the
   * schema and partitioned parent table. The script is searched in a few
   * candidate locations so both `node src/index.js` from the project root
   * and bundled layouts work.
   * @throws when init_db.sql cannot be located or the SQL fails.
   */
  async ensureSchemaAndTable() {
    // dbManager connects to the target database
    const client = await dbManager.pool.connect();
    try {
      const sqlPathCandidates = [
        path.resolve(process.cwd(), 'scripts/init_db.sql'),
        path.resolve(__dirname, '../scripts/init_db.sql'),
        path.resolve(__dirname, '../../scripts/init_db.sql')
      ];
      const sqlPath = sqlPathCandidates.find((candidate) => fs.existsSync(candidate));
      if (!sqlPath) {
        throw new Error(`init_db.sql not found. Candidates: ${sqlPathCandidates.join(' | ')}`);
      }
      const sql = fs.readFileSync(sqlPath, 'utf8');

      console.log(`Executing init_db.sql from ${sqlPath}...`);
      logger.info('Executing init_db.sql...');
      await client.query(sql);
      console.log('Schema and parent table initialized.');
      logger.info('Schema and parent table initialized.');
    } catch (err) {
      logger.error('Error initializing schema and table:', err);
      throw err;
    } finally {
      client.release();
    }
  }
}

export default new DatabaseInitializer();
|
||||
77
bls-onoffline-backend/src/db/partitionManager.js
Normal file
77
bls-onoffline-backend/src/db/partitionManager.js
Normal file
@@ -0,0 +1,77 @@
|
||||
import { logger } from '../utils/logger.js';
|
||||
import { config } from '../config/config.js';
|
||||
import dbManager from './databaseManager.js';
|
||||
|
||||
/**
 * Maintains the daily range partitions of the on/offline record table.
 * Partition names follow `<schema>.<table>_YYYYMMDD` and cover one local
 * calendar day of epoch-millisecond timestamps.
 */
class PartitionManager {
  /**
   * Compute partition metadata for the calendar day containing `date`
   * (local time).
   * @param {Date} date - Any moment within the target day.
   * @returns {Object} { startMs, endMs, partitionSuffix } where
   *   [startMs, endMs) are the midnight-to-midnight epoch-ms bounds and
   *   partitionSuffix is the YYYYMMDD table-name suffix.
   */
  getPartitionInfo(date) {
    const year = date.getFullYear();
    const monthIndex = date.getMonth();
    const dayOfMonth = date.getDate();

    const month = String(monthIndex + 1).padStart(2, '0');
    const day = String(dayOfMonth).padStart(2, '0');
    const partitionSuffix = `${year}${month}${day}`;

    // Local midnight of the target day and of the following day.
    const startMs = new Date(year, monthIndex, dayOfMonth).getTime();
    const endMs = new Date(year, monthIndex, dayOfMonth + 1).getTime();

    return { startMs, endMs, partitionSuffix };
  }

  /**
   * Ensure daily partitions exist for the window
   * [today - daysBack, today + daysAhead).
   * @param {number} daysAhead - Number of future days to pre-create.
   * @param {number} daysBack - Number of past days to cover.
   */
  async ensurePartitions(daysAhead = 30, daysBack = 15) {
    const client = await dbManager.pool.connect();
    try {
      logger.info(`Starting partition check for the past ${daysBack} days and next ${daysAhead} days...`);
      console.log(`Starting partition check for the past ${daysBack} days and next ${daysAhead} days...`);
      const today = new Date();
      const schema = config.db.schema;
      const table = config.db.table;

      for (let dayOffset = -daysBack; dayOffset < daysAhead; dayOffset += 1) {
        const target = new Date(today);
        target.setDate(today.getDate() + dayOffset);

        const { startMs, endMs, partitionSuffix } = this.getPartitionInfo(target);
        const partitionName = `${schema}.${table}_${partitionSuffix}`;

        // to_regclass returns NULL when the relation does not exist.
        const existsSql = `
        SELECT to_regclass($1) as exists;
      `;
        const existsRes = await client.query(existsSql, [partitionName]);

        if (!existsRes.rows[0].exists) {
          logger.info(`Creating partition ${partitionName} for range [${startMs}, ${endMs})`);
          console.log(`Creating partition ${partitionName} for range [${startMs}, ${endMs})`);
          // startMs/endMs are numbers we computed, safe to inline.
          const createSql = `
          CREATE TABLE IF NOT EXISTS ${partitionName}
          PARTITION OF ${schema}.${table}
          FOR VALUES FROM (${startMs}) TO (${endMs});
        `;
          await client.query(createSql);
        }
      }
      logger.info('Partition check completed.');
    } catch (err) {
      logger.error('Error ensuring partitions:', err);
      throw err;
    } finally {
      client.release();
    }
  }
}

export default new PartitionManager();
|
||||
268
bls-onoffline-backend/src/index.js
Normal file
268
bls-onoffline-backend/src/index.js
Normal file
@@ -0,0 +1,268 @@
|
||||
import cron from 'node-cron';
|
||||
import { config } from './config/config.js';
|
||||
import dbManager from './db/databaseManager.js';
|
||||
import dbInitializer from './db/initializer.js';
|
||||
import partitionManager from './db/partitionManager.js';
|
||||
import { createKafkaConsumers } from './kafka/consumer.js';
|
||||
import { processKafkaMessage } from './processor/index.js';
|
||||
import { createRedisClient } from './redis/redisClient.js';
|
||||
import { RedisIntegration } from './redis/redisIntegration.js';
|
||||
import { buildErrorQueueKey, enqueueError, startErrorRetryWorker } from './redis/errorQueue.js';
|
||||
import { MetricCollector } from './utils/metricCollector.js';
|
||||
import { logger } from './utils/logger.js';
|
||||
|
||||
const bootstrap = async () => {
|
||||
// Log startup config (masked)
|
||||
logger.info('Starting application with config', {
|
||||
env: process.env.NODE_ENV,
|
||||
db: {
|
||||
host: config.db.host,
|
||||
port: config.db.port,
|
||||
user: config.db.user,
|
||||
database: config.db.database,
|
||||
schema: config.db.schema
|
||||
},
|
||||
kafka: {
|
||||
brokers: config.kafka.brokers,
|
||||
topic: config.kafka.topic,
|
||||
groupId: config.kafka.groupId
|
||||
},
|
||||
redis: {
|
||||
host: config.redis.host,
|
||||
port: config.redis.port
|
||||
}
|
||||
});
|
||||
|
||||
// 0. Initialize Database (Create DB, Schema, Table, Partitions)
|
||||
await dbInitializer.initialize();
|
||||
|
||||
// Metric Collector
|
||||
const metricCollector = new MetricCollector();
|
||||
|
||||
// 1. Setup Partition Maintenance Cron Job (Every day at 00:00)
|
||||
cron.schedule('0 0 * * *', async () => {
|
||||
logger.info('Running scheduled partition maintenance...');
|
||||
try {
|
||||
await partitionManager.ensurePartitions(30);
|
||||
} catch (err) {
|
||||
logger.error('Scheduled partition maintenance failed', err);
|
||||
}
|
||||
});
|
||||
|
||||
// 1.1 Setup Metric Reporting Cron Job (Every minute)
|
||||
// Moved after redisIntegration initialization
|
||||
|
||||
|
||||
// dbManager (imported above) is the shared singleton pool instance exported
// by ./db/databaseManager.js; partitionManager reuses the same pool.
|
||||
|
||||
const redisClient = await createRedisClient(config.redis);
|
||||
const redisIntegration = new RedisIntegration(
|
||||
redisClient,
|
||||
config.redis.projectName,
|
||||
config.redis.apiBaseUrl
|
||||
);
|
||||
redisIntegration.startHeartbeat();
|
||||
|
||||
// 1.1 Setup Metric Reporting Cron Job (Every minute)
|
||||
cron.schedule('* * * * *', async () => {
|
||||
const metrics = metricCollector.getAndReset();
|
||||
const report = `[Minute Metrics] Pulled: ${metrics.kafka_pulled}, Parse Error: ${metrics.parse_error}, Inserted: ${metrics.db_inserted}, Failed: ${metrics.db_failed}`;
|
||||
console.log(report);
|
||||
logger.info(report, metrics);
|
||||
|
||||
try {
|
||||
await redisIntegration.info('Minute Metrics', metrics);
|
||||
} catch (err) {
|
||||
logger.error('Failed to report metrics to Redis', { error: err?.message });
|
||||
}
|
||||
});
|
||||
|
||||
const errorQueueKey = buildErrorQueueKey(config.redis.projectName);
|
||||
|
||||
/**
 * Report a processing failure: log it, mirror it to the Redis error log
 * (best effort), and — when the failing Kafka message is available —
 * enqueue its payload on the Redis error queue for the retry worker.
 * Never throws; secondary failures are logged and swallowed.
 */
const handleError = async (error, message) => {
  logger.error('Kafka processing error', {
    error: error?.message,
    type: error?.type,
    stack: error?.stack
  });
  try {
    await redisIntegration.error('Kafka processing error', {
      module: 'kafka',
      stack: error?.stack || error?.message
    });
  } catch (redisError) {
    logger.error('Redis error log failed', { error: redisError?.message });
  }
  if (message) {
    const messageValue = Buffer.isBuffer(message.value)
      ? message.value.toString('utf8')
      : message.value;
    try {
      await enqueueError(redisClient, errorQueueKey, {
        attempts: 0,
        value: messageValue,
        meta: {
          topic: message.topic,
          partition: message.partition,
          offset: message.offset,
          key: message.key
        },
        timestamp: Date.now()
      });
    } catch (queueError) {
      // Renamed from `enqueueError`: the old catch parameter shadowed the
      // imported enqueueError() function inside this block.
      logger.error('Enqueue error payload failed', { error: queueError?.message });
    }
  }
};
|
||||
|
||||
/**
 * Process one Kafka message end to end.
 *
 * Behavior: insert the decoded record into PostgreSQL. On a DB-connection
 * failure, block (and thereby pause offset commits) until the database is
 * reachable again, then retry the same message. On a data/logic error,
 * push the message to the Redis error queue and return, letting the
 * consumer commit the offset so the pipeline does not wedge on one bad
 * message. Returning normally is the success signal that allows commit.
 */
const handleMessage = async (message) => {
  // Retry-worker invocations pass only { value } — count pulls solely for
  // real Kafka deliveries, which carry a topic.
  if (message.topic) {
    metricCollector.increment('kafka_pulled');
  }

  const messageValue = Buffer.isBuffer(message.value)
    ? message.value.toString('utf8')
    : message.value;
  const messageKey = Buffer.isBuffer(message.key)
    ? message.key.toString('utf8')
    : message.key;

  // Kept for the disabled per-message debug log below; currently unused
  // otherwise. `value` is only included when KAFKA_LOG_MESSAGES=true.
  const logDetails = {
    topic: message.topic,
    partition: message.partition,
    offset: message.offset,
    key: messageKey,
    value: config.kafka.logMessages ? messageValue : undefined,
    valueLength: !config.kafka.logMessages && typeof messageValue === 'string' ? messageValue.length : null
  };

  // logger.info('Kafka message received', logDetails);

  // Outer loop: re-attempt the same message after a DB outage clears.
  while (true) {
    try {
      // NOTE(review): the return value is captured but unused, and this
      // counts messages rather than inserted rows.
      const inserted = await processKafkaMessage({ message, dbManager, config });
      metricCollector.increment('db_inserted');
      return; // Success, allowing commit
    } catch (error) {
      // Identify DB connection errors (pg error codes plus common
      // message substrings, since drivers are inconsistent here).
      const isDbConnectionError =
        (error.code && ['ECONNREFUSED', '57P03', '08006', '08001', 'EADDRINUSE', 'ETIMEDOUT'].includes(error.code)) ||
        (error.message && (
          error.message.includes('ECONNREFUSED') ||
          error.message.includes('connection') ||
          error.message.includes('terminated') ||
          error.message.includes('EADDRINUSE') ||
          error.message.includes('ETIMEDOUT') ||
          error.message.includes('The server does not support SSL connections') // Possible if DB restarts without SSL
        ));

      if (isDbConnectionError) {
        logger.error('Database offline. Pausing consumption for 1 minute...', { error: error.message });
        // Deliberately NOT counted as db_failed: the message will be retried.

        // Wait 1 minute before checking
        await new Promise(resolve => setTimeout(resolve, 60000));

        // Poll until the database is reachable again.
        while (true) {
          const isConnected = await dbManager.checkConnection();
          if (isConnected) {
            logger.info('Database connection restored. Resuming processing...');
            break; // Break check loop to retry processing
          }
          logger.warn('Database still offline. Waiting 1 minute...');
          await new Promise(resolve => setTimeout(resolve, 60000));
        }
      } else {
        // Non-connection error (Data error, Parse error, etc.)
        if (error.type === 'PARSE_ERROR') {
          metricCollector.increment('parse_error');
        } else {
          metricCollector.increment('db_failed');
        }

        logger.error('Message processing failed (Data/Logic Error), skipping message', {
          error: error?.message,
          type: error?.type
        });

        // Enqueue to error queue
        await handleError(error, message);

        // For non-connection errors, we must skip this message and commit the offset
        // so we don't get stuck in an infinite retry loop.
        return;
      }
    }
  }
};
|
||||
|
||||
const consumers = createKafkaConsumers({
|
||||
kafkaConfig: config.kafka,
|
||||
onMessage: handleMessage,
|
||||
onError: handleError
|
||||
});
|
||||
|
||||
// Start retry worker (non-blocking)
|
||||
startErrorRetryWorker({
|
||||
client: redisClient,
|
||||
queueKey: errorQueueKey,
|
||||
redisIntegration,
|
||||
handler: async (item) => {
|
||||
if (!item?.value) {
|
||||
throw new Error('Missing value in retry payload');
|
||||
}
|
||||
await handleMessage({ value: item.value });
|
||||
}
|
||||
}).catch(err => {
|
||||
logger.error('Retry worker failed', { error: err?.message });
|
||||
});
|
||||
|
||||
// Graceful Shutdown Logic
|
||||
/**
 * Graceful shutdown: close the Kafka consumers first (stop intake), then
 * the Redis client, then the PostgreSQL pool, and exit 0. Any failure
 * during teardown is logged and the process exits 1.
 * @param {string} signal - The OS signal name that triggered shutdown.
 */
const shutdown = async (signal) => {
  logger.info(`Received ${signal}, shutting down...`);

  try {
    // 1. Close every Kafka consumer before touching downstream resources.
    if (consumers && consumers.length > 0) {
      const closeOne = (consumer) =>
        new Promise((resolve) => consumer.close(true, resolve));
      await Promise.all(consumers.map(closeOne));
      logger.info('Kafka consumer closed', { count: consumers.length });
    }

    // 2. Close the Redis client; the heartbeat dies with the connection.
    await redisClient.quit();
    logger.info('Redis client closed');

    // 3. Drain the PostgreSQL pool.
    await dbManager.close();
    logger.info('Database connection closed');

    process.exit(0);
  } catch (err) {
    logger.error('Error during shutdown', { error: err?.message });
    process.exit(1);
  }
};
|
||||
|
||||
process.on('SIGTERM', () => shutdown('SIGTERM'));
|
||||
process.on('SIGINT', () => shutdown('SIGINT'));
|
||||
};
|
||||
|
||||
// Entry point: if startup (DB init, Kafka/Redis wiring) fails, log and exit
// non-zero so a process supervisor can restart the service.
bootstrap().catch((error) => {
  logger.error('Service bootstrap failed', { error: error?.message });
  process.exit(1);
});
|
||||
140
bls-onoffline-backend/src/kafka/consumer.js
Normal file
140
bls-onoffline-backend/src/kafka/consumer.js
Normal file
@@ -0,0 +1,140 @@
|
||||
import kafka from 'kafka-node';
|
||||
import { logger } from '../utils/logger.js';
|
||||
|
||||
const { ConsumerGroup } = kafka;
|
||||
|
||||
import { OffsetTracker } from './offsetTracker.js';
|
||||
|
||||
/**
 * Create one kafka-node ConsumerGroup with manual offset commits and simple
 * in-flight backpressure: the consumer pauses once `maxInFlight` messages
 * are being processed and resumes as they complete. Offsets are committed
 * only up to the highest contiguous processed offset (via OffsetTracker).
 *
 * Fix: the original registered the 'error' handler twice and the
 * 'offsetOutOfRange' handler twice, double-logging every such event; each
 * is now registered exactly once.
 *
 * @param {Object} args
 * @param {Object} args.kafkaConfig - config.kafka (brokers, groupId, ...).
 * @param {Function} args.onMessage - async handler; resolving allows commit.
 * @param {Function} [args.onError] - invoked on handler/consumer errors.
 * @param {number} args.instanceIndex - 0-based index for clientId suffixing.
 * @returns {ConsumerGroup}
 */
const createOneConsumer = ({ kafkaConfig, onMessage, onError, instanceIndex }) => {
  const kafkaHost = kafkaConfig.brokers.join(',');
  // First instance keeps the bare clientId so existing identifiers match.
  const clientId = instanceIndex === 0 ? kafkaConfig.clientId : `${kafkaConfig.clientId}-${instanceIndex}`;
  const id = `${clientId}-${process.pid}-${Date.now()}`;
  const maxInFlight = Number.isFinite(kafkaConfig.maxInFlight) ? kafkaConfig.maxInFlight : 50;
  let inFlight = 0;

  const tracker = new OffsetTracker();

  const consumer = new ConsumerGroup(
    {
      kafkaHost,
      groupId: kafkaConfig.groupId,
      clientId,
      id,
      fromOffset: 'earliest',
      protocol: ['roundrobin'],
      outOfRangeOffset: 'latest',
      // Offsets are committed manually after successful processing, so
      // autoCommit stays off (autoCommitIntervalMs is then unused).
      autoCommit: false,
      autoCommitIntervalMs: kafkaConfig.autoCommitIntervalMs,
      fetchMaxBytes: kafkaConfig.fetchMaxBytes,
      fetchMinBytes: kafkaConfig.fetchMinBytes,
      fetchMaxWaitMs: kafkaConfig.fetchMaxWaitMs,
      sasl: kafkaConfig.sasl
    },
    kafkaConfig.topic
  );

  // Resume fetching once in-flight work drops below the cap.
  const tryResume = () => {
    if (inFlight < maxInFlight && consumer.paused) {
      consumer.resume();
    }
  };

  consumer.on('message', (message) => {
    inFlight += 1;
    tracker.add(message.topic, message.partition, message.offset);

    if (inFlight >= maxInFlight) {
      consumer.pause();
    }
    Promise.resolve(onMessage(message))
      .then(() => {
        // Mark message as done and check if we can commit
        const commitOffset = tracker.markDone(message.topic, message.partition, message.offset);

        if (commitOffset !== null) {
          consumer.sendOffsetCommitRequest([{
            topic: message.topic,
            partition: message.partition,
            offset: commitOffset,
            metadata: 'm'
          }], (err) => {
            if (err) {
              logger.error('Kafka commit failed', { error: err?.message, topic: message.topic, partition: message.partition, offset: commitOffset });
            }
          });
        }
      })
      .catch((error) => {
        logger.error('Kafka message handling failed', { error: error?.message });
        if (onError) {
          onError(error, message);
        }
      })
      .finally(() => {
        inFlight -= 1;
        tryResume();
      });
  });

  // Single consolidated 'error' handler (was registered twice, which
  // double-logged every consumer error).
  consumer.on('error', (error) => {
    logger.error('Kafka consumer error', { error: error?.message });
    if (onError) {
      onError(error);
    }
  });

  consumer.on('connect', () => {
    logger.info(`Kafka Consumer connected`, {
      groupId: kafkaConfig.groupId,
      clientId: clientId
    });
  });

  consumer.on('rebalancing', () => {
    logger.info(`Kafka Consumer rebalancing`, {
      groupId: kafkaConfig.groupId,
      clientId: clientId
    });
  });

  consumer.on('rebalanced', () => {
    logger.info('Kafka Consumer rebalanced', { clientId, groupId: kafkaConfig.groupId });
  });

  // Single consolidated 'offsetOutOfRange' handler (was registered twice).
  consumer.on('offsetOutOfRange', (error) => {
    logger.warn(`Kafka Consumer offset out of range`, {
      error: error?.message,
      topic: error?.topic,
      partition: error?.partition,
      groupId: kafkaConfig.groupId,
      clientId: clientId
    });
  });

  consumer.on('close', () => {
    logger.warn(`Kafka Consumer closed`, {
      groupId: kafkaConfig.groupId,
      clientId: clientId
    });
  });

  return consumer;
};
|
||||
|
||||
/**
 * Create `kafkaConfig.consumerInstances` consumers (minimum 1, default 1
 * when the setting is not a finite number).
 * @returns {ConsumerGroup[]}
 */
export const createKafkaConsumers = ({ kafkaConfig, onMessage, onError }) => {
  const configured = kafkaConfig.consumerInstances;
  const requested = Number.isFinite(configured) ? configured : 1;
  const total = Math.max(1, requested);

  const consumers = [];
  for (let index = 0; index < total; index += 1) {
    consumers.push(createOneConsumer({ kafkaConfig, onMessage, onError, instanceIndex: index }));
  }
  return consumers;
};

/**
 * Backward-compatible single-consumer helper: returns the first consumer
 * created by createKafkaConsumers.
 */
export const createKafkaConsumer = ({ kafkaConfig, onMessage, onError }) => {
  const [first] = createKafkaConsumers({ kafkaConfig, onMessage, onError });
  return first;
};
|
||||
45
bls-onoffline-backend/src/kafka/offsetTracker.js
Normal file
45
bls-onoffline-backend/src/kafka/offsetTracker.js
Normal file
@@ -0,0 +1,45 @@
|
||||
/**
 * Tracks in-flight Kafka offsets per topic-partition so that offsets are
 * committed only up to the highest *contiguous* processed message, even
 * when messages complete out of order.
 */
export class OffsetTracker {
  constructor() {
    // Map<"topic-partition", Array<{ offset, done }>> in arrival order.
    this.partitions = new Map();
  }

  /**
   * Register a newly received (not yet processed) message.
   */
  add(topic, partition, offset) {
    const key = `${topic}-${partition}`;
    const pending = this.partitions.get(key);
    if (pending) {
      pending.push({ offset, done: false });
    } else {
      this.partitions.set(key, [{ offset, done: false }]);
    }
  }

  /**
   * Mark a message as processed.
   * @returns {number|null} The next offset to commit (Kafka convention:
   *   one past the last processed offset) when the contiguous front of the
   *   queue advanced, otherwise null.
   */
  markDone(topic, partition, offset) {
    const key = `${topic}-${partition}`;
    const pending = this.partitions.get(key);
    if (!pending) {
      return null;
    }

    for (const entry of pending) {
      if (entry.offset === offset) {
        entry.done = true;
        break;
      }
    }

    // Pop every completed entry off the front; the last one popped is the
    // highest contiguous processed offset.
    let highestContiguous = null;
    while (pending.length > 0 && pending[0].done) {
      highestContiguous = pending.shift().offset;
    }

    if (highestContiguous === null) {
      return null;
    }
    return highestContiguous + 1;
  }
}
|
||||
137
bls-onoffline-backend/src/processor/index.js
Normal file
137
bls-onoffline-backend/src/processor/index.js
Normal file
@@ -0,0 +1,137 @@
|
||||
import { createGuid } from '../utils/uuid.js';
|
||||
import { kafkaPayloadSchema } from '../schema/kafkaPayload.js';
|
||||
|
||||
/**
 * Decode a Kafka message value (Buffer or string) into a JSON object.
 * @param {Buffer|string} value - Raw message value.
 * @returns {*} The parsed JSON payload.
 * @throws {Error} When the value is neither a Buffer nor a string.
 * @throws {SyntaxError} When the text is not valid JSON.
 */
const parseKafkaPayload = (value) => {
  const text = Buffer.isBuffer(value) ? value.toString('utf8') : value;
  if (typeof text !== 'string') {
    throw new Error('Invalid kafka message value');
  }
  return JSON.parse(text);
};
|
||||
|
||||
/**
 * Coerce a value to a string, optionally truncated to `maxLength`
 * characters. null/undefined pass through as null (not the string "null").
 * A falsy maxLength (undefined/0) disables truncation.
 * @param {*} value
 * @param {number} [maxLength]
 * @returns {string|null}
 */
const normalizeText = (value, maxLength) => {
  if (value == null) {
    return null;
  }
  const text = String(value);
  const shouldTruncate = Boolean(maxLength) && text.length > maxLength;
  return shouldTruncate ? text.slice(0, maxLength) : text;
};
|
||||
|
||||
/**
 * Convenience wrapper: decode a raw Kafka message value and build the
 * database rows from the resulting payload.
 * @param {Buffer|string} value - Raw Kafka message value.
 * @returns {Object[]} Rows ready for DatabaseManager.insertRows.
 */
export const buildRowsFromMessageValue = (value) =>
  buildRowsFromPayload(parseKafkaPayload(value));
|
||||
|
||||
/**
 * Validate a decoded Kafka payload and map it to one database row.
 *
 * Mapping notes:
 * - A non-empty RebootReason forces current_status to 'on' (a reboot
 *   report implies the device came back up).
 * - Timestamp precedence: UnixTime, then CurrentTime, then Date.now().
 * - Returns a single-element array so callers can treat the result as a
 *   batch for insertRows.
 *
 * @param {Object} rawPayload - Parsed JSON from the Kafka message.
 * @returns {Object[]} One row keyed by the databaseManager column names.
 * @throws When kafkaPayloadSchema.parse rejects the payload.
 */
export const buildRowsFromPayload = (rawPayload) => {
  // NOTE(review): exact field types/optionality come from kafkaPayloadSchema
  // (not visible here) — confirm against src/schema/kafkaPayload.js.
  const payload = kafkaPayloadSchema.parse(rawPayload);

  // Database limit is VARCHAR(255)
  const rebootReason = normalizeText(payload.RebootReason, 255);
  const currentStatusRaw = normalizeText(payload.CurrentStatus, 255);
  const hasRebootReason = rebootReason !== null && rebootReason !== '';
  const currentStatus = hasRebootReason ? 'on' : currentStatusRaw;

  // Derive timestamp: UnixTime -> CurrentTime -> Date.now()
  let tsMs = payload.UnixTime;

  // Heuristic: If timestamp is small (e.g., < 100000000000), assume it's seconds and convert to ms
  // (100000000000 ms is ~1973, so any real ms timestamp exceeds it).
  if (typeof tsMs === 'number' && tsMs < 100000000000) {
    tsMs = tsMs * 1000;
  }

  // A UnixTime of 0 is falsy, so it also falls through to these fallbacks.
  if (!tsMs && payload.CurrentTime) {
    const parsed = Date.parse(payload.CurrentTime);
    if (!isNaN(parsed)) {
      tsMs = parsed;
    }
  }
  if (!tsMs) {
    tsMs = Date.now();
  }

  // Ensure PK fields are not null
  const mac = normalizeText(payload.MAC) || '';
  const deviceId = normalizeText(payload.HostNumber) || '';
  const roomId = normalizeText(payload.RoomNumber) || '';

  const row = {
    guid: createGuid(),
    ts_ms: tsMs,
    // Ingestion time, distinct from the device-reported event time above.
    write_ts_ms: Date.now(),
    hotel_id: payload.HotelCode,
    mac: mac,
    device_id: deviceId,
    room_id: roomId,
    ip: normalizeText(payload.EndPoint),
    current_status: currentStatus,
    launcher_version: normalizeText(payload.LauncherVersion, 255),
    reboot_reason: rebootReason
  };

  return [row];
};
|
||||
|
||||
/**
 * Process one Kafka message end-to-end: parse JSON, validate against the
 * payload schema, build DB rows, and insert them.
 *
 * @param {object} params
 * @param {object} params.message - Kafka message; `value` must be a Buffer/string.
 * @param {object} params.dbManager - Exposes insertRows({ schema, table, rows }).
 * @param {object} params.config - Provides config.db.schema and config.db.table.
 * @returns {Promise<number>} Number of rows inserted.
 * @throws {Error} Tagged with `type` ('PARSE_ERROR' | 'VALIDATION_ERROR' |
 *   'DB_ERROR') so callers can route failures (e.g. to the error queue).
 */
export const processKafkaMessage = async ({ message, dbManager, config }) => {
  const rawValue = message.value.toString();

  let payload;
  try {
    payload = JSON.parse(rawValue);
  } catch (e) {
    logger.error('JSON Parse Error', { error: e.message, rawValue });
    const error = new Error(`JSON Parse Error: ${e.message}`);
    error.type = 'PARSE_ERROR';
    throw error;
  }

  const validationResult = kafkaPayloadSchema.safeParse(payload);
  if (!validationResult.success) {
    logger.error('Schema Validation Failed', {
      errors: validationResult.error.errors,
      payload
    });
    const error = new Error(`Schema Validation Failed: ${JSON.stringify(validationResult.error.errors)}`);
    error.type = 'VALIDATION_ERROR';
    throw error;
  }

  // Note: buildRowsFromPayload re-validates via kafkaPayloadSchema.parse;
  // the safeParse above exists to produce the structured VALIDATION_ERROR.
  const rows = buildRowsFromPayload(payload);

  try {
    await dbManager.insertRows({ schema: config.db.schema, table: config.db.table, rows });
  } catch (error) {
    // Tag DB failures and attach a compact sample row for diagnostics/retry.
    error.type = 'DB_ERROR';
    const sample = rows?.[0];
    error.dbContext = {
      rowsLength: rows?.length || 0,
      sampleRow: sample
        ? {
            guid: sample.guid,
            ts_ms: sample.ts_ms,
            mac: sample.mac,
            device_id: sample.device_id,
            room_id: sample.room_id,
            current_status: sample.current_status
          }
        : null
    };
    throw error;
  }

  return rows.length;
};
|
||||
83
bls-onoffline-backend/src/processor/udpParser.js
Normal file
83
bls-onoffline-backend/src/processor/udpParser.js
Normal file
@@ -0,0 +1,83 @@
|
||||
// Strip an optional 0x prefix and all whitespace from a hex string and
// left-pad with '0' to an even digit count. Non-strings yield ''.
const normalizeHex = (hex) => {
  if (typeof hex !== 'string') {
    return '';
  }
  const stripped = hex.trim().replace(/^0x/i, '').replace(/\s+/g, '');
  return stripped.length % 2 === 0 ? stripped : `0${stripped}`;
};
|
||||
|
||||
const toHex = (value) => `0x${value.toString(16).padStart(2, '0')}`;
|
||||
|
||||
const readUInt16 = (buffer, offset) => buffer.readUInt16BE(offset);
|
||||
|
||||
// Parse an RCU 0x36 status-report frame from a raw hex string.
// Layout as implemented here (multi-byte fields big-endian):
//   byte 0        : system lock status
//   bytes 1-6     : skipped by this parser (meaning not visible here)
//   byte 7        : device report count
//   next N*6 bytes: device entries {type, addr, loop:u16, data:u16}
//   next byte     : fault count
//   next M*6 bytes: fault entries {type, addr, loop:u16, errType, errData}
// Short or empty buffers produce nulls and as many complete entries as fit.
export const parse0x36 = (udpRaw) => {
  const cleaned = normalizeHex(udpRaw);
  const buffer = cleaned ? Buffer.from(cleaned, 'hex') : Buffer.alloc(0);
  const sysLockStatus = buffer.length > 0 ? buffer[0] : null;
  const reportCount = buffer.length > 7 ? buffer[7] : null;
  let offset = 8;
  const devices = [];
  // `offset + 5 < buffer.length` guarantees all 6 bytes of an entry exist.
  for (let i = 0; i < (reportCount || 0) && offset + 5 < buffer.length; i += 1) {
    devices.push({
      dev_type: buffer[offset],
      dev_addr: buffer[offset + 1],
      dev_loop: readUInt16(buffer, offset + 2),
      dev_data: readUInt16(buffer, offset + 4)
    });
    offset += 6;
  }
  // Fault count immediately follows the last complete device entry.
  const faultCount = offset < buffer.length ? buffer[offset] : null;
  offset += 1;
  const faults = [];
  for (let i = 0; i < (faultCount || 0) && offset + 5 < buffer.length; i += 1) {
    faults.push({
      fault_dev_type: buffer[offset],
      fault_dev_addr: buffer[offset + 1],
      fault_dev_loop: readUInt16(buffer, offset + 2),
      error_type: buffer[offset + 4],
      error_data: buffer[offset + 5]
    });
    offset += 6;
  }
  return {
    sysLockStatus,
    reportCount,
    faultCount,
    devices,
    faults
  };
};
|
||||
|
||||
// Parse a 0x0f downlink control frame from a raw hex string.
// byte 0: control-parameter count; then N*6-byte entries:
//   {dev_type, dev_addr, loop:u16 BE, type:u16 BE, type_l, type_h}
// NOTE(review): `type` is read big-endian while the names `type_l`/`type_h`
// label bytes [4]/[5] as low/high, which would imply a little-endian wire
// field — confirm which interpretation consumers rely on.
export const parse0x0fDownlink = (udpRaw) => {
  const cleaned = normalizeHex(udpRaw);
  const buffer = cleaned ? Buffer.from(cleaned, 'hex') : Buffer.alloc(0);
  const controlCount = buffer.length > 0 ? buffer[0] : null;
  let offset = 1;
  const controlParams = [];
  // `offset + 5 < buffer.length` guarantees all 6 bytes of an entry exist.
  for (let i = 0; i < (controlCount || 0) && offset + 5 < buffer.length; i += 1) {
    const typeValue = readUInt16(buffer, offset + 4);
    controlParams.push({
      dev_type: buffer[offset],
      dev_addr: buffer[offset + 1],
      loop: readUInt16(buffer, offset + 2),
      type: typeValue,
      type_l: buffer[offset + 4],
      type_h: buffer[offset + 5]
    });
    offset += 6;
  }
  return {
    controlCount,
    controlParams
  };
};
|
||||
|
||||
// Parse a 0x0f acknowledgement frame: byte 1 carries the ack code,
// returned as a 0x-prefixed hex string (null if the frame is too short).
export const parse0x0fAck = (udpRaw) => {
  const bytes = normalizeHex(udpRaw);
  const buffer = bytes ? Buffer.from(bytes, 'hex') : Buffer.alloc(0);
  if (buffer.length > 1) {
    return { ackCode: toHex(buffer[1]) };
  }
  return { ackCode: null };
};
|
||||
53
bls-onoffline-backend/src/redis/errorQueue.js
Normal file
53
bls-onoffline-backend/src/redis/errorQueue.js
Normal file
@@ -0,0 +1,53 @@
|
||||
import { logger } from '../utils/logger.js';
|
||||
|
||||
export const buildErrorQueueKey = (projectName) => `${projectName}_error_queue`;
|
||||
|
||||
// Serialize `payload` and append it to the tail of the Redis error queue.
// Redis failures are logged and rethrown so callers can react.
export const enqueueError = async (client, queueKey, payload) => {
  const serialized = JSON.stringify(payload);
  try {
    await client.rPush(queueKey, serialized);
  } catch (error) {
    logger.error('Redis enqueue error failed', { error: error?.message });
    throw error;
  }
};
|
||||
|
||||
/**
 * Blocking retry loop: pops failed items from the Redis error queue and
 * re-runs `handler` on each. Items that fail again are re-queued with an
 * incremented `attempts` counter; once `attempts` reaches `maxAttempts`
 * the item is reported via redisIntegration.error and dropped.
 *
 * Runs forever (blPop with timeout 0 blocks until data is available);
 * intended to be launched once per process and never awaited to completion.
 */
export const startErrorRetryWorker = async ({
  client,
  queueKey,
  handler,
  redisIntegration,
  maxAttempts = 5
}) => {
  while (true) {
    // Timeout 0 = block indefinitely until an element arrives.
    const result = await client.blPop(queueKey, 0);
    const raw = result?.element;
    if (!raw) {
      continue;
    }
    let item;
    try {
      item = JSON.parse(raw);
    } catch (error) {
      // Malformed payloads cannot be retried; report and skip.
      logger.error('Invalid error payload', { error: error?.message });
      await redisIntegration.error('Invalid error payload', { module: 'redis', stack: error?.message });
      continue;
    }
    const attempts = item.attempts || 0;
    try {
      await handler(item);
    } catch (error) {
      logger.error('Retry handler failed', { error: error?.message, stack: error?.stack });
      const nextPayload = {
        ...item,
        attempts: attempts + 1,
        lastError: error?.message,
        lastAttemptAt: Date.now()
      };
      if (nextPayload.attempts >= maxAttempts) {
        // Give up: surface the dead-lettered payload to the monitoring log.
        await redisIntegration.error('Retry attempts exceeded', { module: 'retry', stack: JSON.stringify(nextPayload) });
      } else {
        await enqueueError(client, queueKey, nextPayload);
      }
    }
  }
};
|
||||
14
bls-onoffline-backend/src/redis/redisClient.js
Normal file
14
bls-onoffline-backend/src/redis/redisClient.js
Normal file
@@ -0,0 +1,14 @@
|
||||
import { createClient } from 'redis';
|
||||
|
||||
// Create and connect a redis client from a {host, port, password, db} config.
export const createRedisClient = async (config) => {
  const { host, port, password, db } = config;
  const client = createClient({
    socket: { host, port },
    password,
    database: db
  });
  await client.connect();
  return client;
};
|
||||
40
bls-onoffline-backend/src/redis/redisIntegration.js
Normal file
40
bls-onoffline-backend/src/redis/redisIntegration.js
Normal file
@@ -0,0 +1,40 @@
|
||||
/**
 * Pushes structured log entries and periodic heartbeats into Redis lists
 * consumed by an external monitoring dashboard.
 */
export class RedisIntegration {
  /**
   * @param {object} client - Connected redis client exposing rPush().
   * @param {string} projectName - Used to derive the per-project log key.
   * @param {string} apiBaseUrl - Reported in each heartbeat entry.
   */
  constructor(client, projectName, apiBaseUrl) {
    this.client = client;
    this.projectName = projectName;
    this.apiBaseUrl = apiBaseUrl;
    this.heartbeatKey = '项目心跳';
    this.logKey = `${projectName}_项目控制台`;
  }

  // Shared writer for info/error entries (single code path for both levels).
  async #writeLog(level, message, context) {
    const payload = {
      timestamp: new Date().toISOString(),
      level,
      message,
      metadata: context || undefined
    };
    await this.client.rPush(this.logKey, JSON.stringify(payload));
  }

  async info(message, context) {
    await this.#writeLog('info', message, context);
  }

  async error(message, context) {
    await this.#writeLog('error', message, context);
  }

  /**
   * Start pushing a heartbeat entry every 3 seconds.
   * Returns the interval handle so callers can stop it; Redis failures are
   * reported to stderr instead of becoming unhandled promise rejections
   * (the original rPush promise was left floating).
   */
  startHeartbeat() {
    return setInterval(() => {
      const payload = {
        projectName: this.projectName,
        apiBaseUrl: this.apiBaseUrl,
        lastActiveAt: Date.now()
      };
      this.client
        .rPush(this.heartbeatKey, JSON.stringify(payload))
        .catch((err) => {
          process.stderr.write(`heartbeat push failed: ${err?.message}\n`);
        });
    }, 3000);
  }
}
|
||||
32
bls-onoffline-backend/src/schema/kafkaPayload.js
Normal file
32
bls-onoffline-backend/src/schema/kafkaPayload.js
Normal file
@@ -0,0 +1,32 @@
|
||||
import { z } from 'zod';
|
||||
|
||||
// Best-effort numeric coercion for zod preprocessing: nullish/empty values
// and numbers pass through, numeric strings are converted, and anything
// unconvertible is returned unchanged so zod can report the type error.
const toNumber = (value) => {
  if (value === undefined || value === null || value === '') {
    return value;
  }
  if (typeof value === 'number') {
    return value;
  }
  const converted = Number(value);
  if (Number.isFinite(converted)) {
    return converted;
  }
  return value;
};
|
||||
|
||||
// Coerce defined values to strings for zod preprocessing; null/undefined
// pass through untouched so .nullable()/.optional() can handle them.
const toStringAllowEmpty = (value) =>
  value === undefined || value === null ? value : String(value);
|
||||
|
||||
// Zod schema for the device on/offline payload consumed from Kafka.
// Only HotelCode is required; every other field is optional and nullable.
// Preprocessors coerce loosely-typed producer values (numbers sent as
// strings and vice versa) before validation.
export const kafkaPayloadSchema = z.object({
  HotelCode: z.preprocess(toNumber, z.number()), // required numeric hotel id
  MAC: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable(),
  HostNumber: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable(),
  RoomNumber: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable(),
  EndPoint: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable(),
  CurrentStatus: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable(),
  CurrentTime: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable(),
  UnixTime: z.preprocess(toNumber, z.number().nullable()).optional().nullable(),
  LauncherVersion: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable(),
  RebootReason: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable()
});
|
||||
18
bls-onoffline-backend/src/utils/logger.js
Normal file
18
bls-onoffline-backend/src/utils/logger.js
Normal file
@@ -0,0 +1,18 @@
|
||||
// Build a single-line JSON log record: level, message, epoch-ms timestamp,
// plus the optional context object when provided.
const format = (level, message, context) => {
  const record = { level, message, timestamp: Date.now() };
  if (context) {
    record.context = context;
  }
  return JSON.stringify(record);
};
|
||||
|
||||
// Minimal structured logger: info -> stdout, error -> stderr,
// one JSON record per line.
export const logger = {
  info(message, context) {
    const line = format('info', message, context);
    process.stdout.write(`${line}\n`);
  },
  error(message, context) {
    const line = format('error', message, context);
    process.stderr.write(`${line}\n`);
  }
};
|
||||
26
bls-onoffline-backend/src/utils/metricCollector.js
Normal file
26
bls-onoffline-backend/src/utils/metricCollector.js
Normal file
@@ -0,0 +1,26 @@
|
||||
/**
 * In-memory counters for pipeline throughput metrics, reported and reset
 * periodically via getAndReset().
 */
export class MetricCollector {
  constructor() {
    this.reset();
  }

  // Zero all known counters; increment() ignores any other metric name.
  reset() {
    this.metrics = {
      kafka_pulled: 0,
      parse_error: 0,
      db_inserted: 0,
      db_failed: 0
    };
  }

  /**
   * Add `count` to a counter. Names not declared in reset() are silently
   * ignored so typos cannot grow the metrics object unboundedly.
   * @param {string} metric
   * @param {number} [count=1]
   */
  increment(metric, count = 1) {
    // Object.hasOwn avoids calling hasOwnProperty through the prototype.
    if (Object.hasOwn(this.metrics, metric)) {
      this.metrics[metric] += count;
    }
  }

  // Snapshot the current counters and reset them in one step.
  getAndReset() {
    const snapshot = { ...this.metrics };
    this.reset();
    return snapshot;
  }
}
|
||||
3
bls-onoffline-backend/src/utils/uuid.js
Normal file
3
bls-onoffline-backend/src/utils/uuid.js
Normal file
@@ -0,0 +1,3 @@
|
||||
import { randomUUID } from 'crypto';
|
||||
|
||||
export const createGuid = () => randomUUID().replace(/-/g, '');
|
||||
Reference in New Issue
Block a user