feat: initialize backend service skeleton and core components

- Add project scaffolding: .gitignore, package.json, Docker config, and example environment variables
- Implement core modules: Kafka consumer, PostgreSQL database manager, Redis client, and error queue
- Add utilities: logger, metric collector, UUID generator
- Implement the data processor with parsing and storage for 0x36 reports and 0x0F commands
- Add the database initialization script and partition management with time-range partitioning
- Introduce Zod data validation and the Vitest unit-testing framework
- Provide full project documentation: database design, Kafka format spec, and Redis integration protocol
2026-01-30 11:05:00 +08:00
parent ec2b44b165
commit 86a1e79153
51 changed files with 5921 additions and 0 deletions

.gitignore vendored Normal file

@@ -0,0 +1,3 @@
/.github
bls-rcu-action-backend/node_modules
bls-rcu-action-backend/skill


@@ -0,0 +1,29 @@
KAFKA_BROKERS=kafka.blv-oa.com:9092
KAFKA_CLIENT_ID=bls-action-producer
KAFKA_GROUP_ID=bls-action-consumer
KAFKA_TOPICS=blwlog4Nodejs-rcu-action-topic
KAFKA_AUTO_COMMIT=true
KAFKA_AUTO_COMMIT_INTERVAL_MS=5000
KAFKA_SASL_ENABLED=true
KAFKA_SASL_MECHANISM=plain
KAFKA_SASL_USERNAME=blwmomo
KAFKA_SASL_PASSWORD=blwmomo
KAFKA_SSL_ENABLED=false
POSTGRES_HOST=10.8.8.109
POSTGRES_PORT=5433
POSTGRES_DATABASE=log_platform
POSTGRES_USER=log_admin
POSTGRES_PASSWORD=YourActualStrongPasswordForPostgres!
POSTGRES_MAX_CONNECTIONS=6
POSTGRES_IDLE_TIMEOUT_MS=30000
PORT=3001
LOG_LEVEL=info
# Redis connection
REDIS_HOST=10.8.8.109
REDIS_PORT=6379
REDIS_PASSWORD=
REDIS_DB=15
REDIS_CONNECT_TIMEOUT_MS=5000


@@ -0,0 +1,29 @@
# Server Configuration
PORT=3000
NODE_ENV=development
# Kafka Configuration
KAFKA_BROKERS=localhost:9092
KAFKA_TOPIC=my-topic-name
KAFKA_GROUP_ID=my-group-id
KAFKA_CLIENT_ID=my-client-id
KAFKA_CONSUMER_INSTANCES=1
# KAFKA_SASL_USERNAME=
# KAFKA_SASL_PASSWORD=
# KAFKA_SASL_MECHANISM=plain
# Database Configuration (PostgreSQL)
DB_HOST=localhost
DB_PORT=5432
DB_USER=postgres
DB_PASSWORD=password
DB_DATABASE=my_database
DB_MAX_CONNECTIONS=10
# Redis Configuration
REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_PASSWORD=
REDIS_DB=0
REDIS_PROJECT_NAME=my-project
REDIS_API_BASE_URL=http://localhost:3000


@@ -0,0 +1,19 @@
FROM node:18-alpine
WORKDIR /app
# Install dependencies
COPY package.json package-lock.json ./
RUN npm ci
# Copy source code
COPY . .
# Build
RUN npm run build
# Expose port
EXPOSE 3000
# Start command
CMD ["npm", "run", "start"]

bls-rcu-action-backend/dist/.gitkeep vendored Normal file

@@ -0,0 +1 @@


@@ -0,0 +1,55 @@
version: '3.8'
services:
app:
build: .
restart: always
ports:
- "3000:3000"
env_file:
- .env
depends_on:
- postgres
- redis
- kafka
postgres:
image: postgres:15-alpine
restart: always
environment:
POSTGRES_USER: postgres
POSTGRES_PASSWORD: password
POSTGRES_DB: my_database
ports:
- "5432:5432"
volumes:
- postgres_data:/var/lib/postgresql/data
redis:
image: redis:alpine
restart: always
ports:
- "6379:6379"
volumes:
- redis_data:/data
zookeeper:
image: wurstmeister/zookeeper
ports:
- "2181:2181"
kafka:
image: wurstmeister/kafka
ports:
- "9092:9092"
environment:
KAFKA_ADVERTISED_HOST_NAME: localhost
KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
volumes:
- /var/run/docker.sock:/var/run/docker.sock
depends_on:
- zookeeper
volumes:
postgres_data:
redis_data:


@@ -0,0 +1,24 @@
module.exports = {
apps: [{
name: 'bls-rcu-action-backend',
script: 'dist/index.js',
instances: 1,
exec_mode: 'fork',
autorestart: true,
watch: false,
max_memory_restart: '1G',
env: {
NODE_ENV: 'production',
PORT: 3000
},
env_development: {
NODE_ENV: 'development',
PORT: 3000
},
error_file: './logs/error.log',
out_file: './logs/out.log',
log_date_format: 'YYYY-MM-DD HH:mm:ss Z',
merge_logs: true,
time: true
}]
};


@@ -0,0 +1 @@
{"level":"error","message":"Kafka message handling failed","timestamp":1769689985427,"context":{"error":"[\n {\n \"expected\": \"number\",\n \"code\": \"invalid_type\",\n \"path\": [\n \"hotel_id\"\n ],\n \"message\": \"Invalid input: expected number, received string\"\n },\n {\n \"expected\": \"array\",\n \"code\": \"invalid_type\",\n \"path\": [\n \"control_list\"\n ],\n \"message\": \"Invalid input: expected array, received null\"\n }\n]"}}


@@ -0,0 +1 @@
{"level":"error","message":"Kafka message handling failed","timestamp":1769689777074,"context":{"error":"[\n {\n \"expected\": \"number\",\n \"code\": \"invalid_type\",\n \"path\": [\n \"hotel_id\"\n ],\n \"message\": \"Invalid input: expected number, received string\"\n },\n {\n \"expected\": \"array\",\n \"code\": \"invalid_type\",\n \"path\": [\n \"control_list\"\n ],\n \"message\": \"Invalid input: expected array, received null\"\n }\n]"}}


@@ -0,0 +1 @@
{"level":"info","message":"[Minute Metrics] Pulled: 0, Parse Error: 0, Inserted: 0, Failed: 0","timestamp":1769688900027,"context":{"kafka_pulled":0,"parse_error":0,"db_inserted":0,"db_failed":0}}


@@ -0,0 +1 @@
{"level":"info","message":"[Minute Metrics] Pulled: 0, Parse Error: 0, Inserted: 0, Failed: 0","timestamp":1769689140027,"context":{"kafka_pulled":0,"parse_error":0,"db_inserted":0,"db_failed":0}}


@@ -0,0 +1 @@
{"level":"info","message":"[Minute Metrics] Pulled: 0, Parse Error: 0, Inserted: 0, Failed: 0","timestamp":1769689260031,"context":{"kafka_pulled":0,"parse_error":0,"db_inserted":0,"db_failed":0}}

bls-rcu-action-backend/package-lock.json generated Normal file
File diff suppressed because it is too large


@@ -0,0 +1,24 @@
{
"name": "bls-rcu-action-backend",
"version": "1.0.0",
"type": "module",
"private": true,
"scripts": {
"dev": "node src/index.js",
"build": "vite build --ssr src/index.js --outDir dist",
"test": "vitest run",
"start": "node dist/index.js"
},
"dependencies": {
"dotenv": "^16.4.5",
"kafka-node": "^5.0.0",
"node-cron": "^4.2.1",
"pg": "^8.11.5",
"redis": "^4.6.13",
"zod": "^4.3.6"
},
"devDependencies": {
"vite": "^5.4.0",
"vitest": "^4.0.18"
}
}


@@ -0,0 +1,45 @@
-- Database Initialization Script for BLS RCU Action Server
CREATE SCHEMA IF NOT EXISTS rcu_action;
CREATE TABLE IF NOT EXISTS rcu_action.rcu_action_events (
guid VARCHAR(32) NOT NULL,
ts_ms BIGINT NOT NULL,
write_ts_ms BIGINT NOT NULL,
hotel_id INTEGER NOT NULL,
room_id VARCHAR(32) NOT NULL,
device_id VARCHAR(32) NOT NULL,
direction VARCHAR(10) NOT NULL,
cmd_word VARCHAR(10) NOT NULL,
frame_id INTEGER NOT NULL,
udp_raw TEXT NOT NULL,
action_type VARCHAR(20) NOT NULL,
sys_lock_status SMALLINT,
report_count SMALLINT,
dev_type SMALLINT,
dev_addr SMALLINT,
dev_loop INTEGER,
dev_data INTEGER,
fault_count SMALLINT,
error_type SMALLINT,
error_data SMALLINT,
type_l SMALLINT,
type_h SMALLINT,
details JSONB,
extra JSONB,
PRIMARY KEY (ts_ms, guid)
) PARTITION BY RANGE (ts_ms);
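-- Migration helper: older deployments may have created the table without device_id; the ALTER below is a no-op on fresh installs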
ALTER TABLE rcu_action.rcu_action_events
ADD COLUMN IF NOT EXISTS device_id VARCHAR(32) NOT NULL DEFAULT '';
-- Indexes for performance
CREATE INDEX IF NOT EXISTS idx_rcu_action_hotel_id ON rcu_action.rcu_action_events (hotel_id);
CREATE INDEX IF NOT EXISTS idx_rcu_action_room_id ON rcu_action.rcu_action_events (room_id);
CREATE INDEX IF NOT EXISTS idx_rcu_action_device_id ON rcu_action.rcu_action_events (device_id);
CREATE INDEX IF NOT EXISTS idx_rcu_action_direction ON rcu_action.rcu_action_events (direction);
CREATE INDEX IF NOT EXISTS idx_rcu_action_cmd_word ON rcu_action.rcu_action_events (cmd_word);
CREATE INDEX IF NOT EXISTS idx_rcu_action_action_type ON rcu_action.rcu_action_events (action_type);
-- Composite Index for typical query pattern (Hotel + Room + Time)
CREATE INDEX IF NOT EXISTS idx_rcu_action_query_main ON rcu_action.rcu_action_events (hotel_id, room_id, ts_ms DESC);


@@ -0,0 +1,50 @@
import dotenv from 'dotenv';
dotenv.config();
const parseNumber = (value, defaultValue) => {
const parsed = Number(value);
return Number.isFinite(parsed) ? parsed : defaultValue;
};
const parseList = (value) =>
(value || '')
.split(',')
.map((item) => item.trim())
.filter(Boolean);
export const config = {
env: process.env.NODE_ENV || 'development',
port: parseNumber(process.env.PORT, 3000),
kafka: {
brokers: parseList(process.env.KAFKA_BROKERS),
topic: process.env.KAFKA_TOPIC || process.env.KAFKA_TOPICS || 'blwlog4Nodejs-rcu-action-topic',
groupId: process.env.KAFKA_GROUP_ID || 'bls-rcu-action-group',
clientId: process.env.KAFKA_CLIENT_ID || 'bls-rcu-action-client',
consumerInstances: parseNumber(process.env.KAFKA_CONSUMER_INSTANCES, 1),
sasl: process.env.KAFKA_SASL_USERNAME && process.env.KAFKA_SASL_PASSWORD ? {
mechanism: process.env.KAFKA_SASL_MECHANISM || 'plain',
username: process.env.KAFKA_SASL_USERNAME,
password: process.env.KAFKA_SASL_PASSWORD
} : undefined
},
db: {
host: process.env.DB_HOST || process.env.POSTGRES_HOST || 'localhost',
port: parseNumber(process.env.DB_PORT || process.env.POSTGRES_PORT, 5432),
user: process.env.DB_USER || process.env.POSTGRES_USER || 'postgres',
password: process.env.DB_PASSWORD || process.env.POSTGRES_PASSWORD || '',
database: process.env.DB_DATABASE || process.env.POSTGRES_DATABASE || 'bls_rcu_action',
max: parseNumber(process.env.DB_MAX_CONNECTIONS || process.env.POSTGRES_MAX_CONNECTIONS, 10),
ssl: process.env.DB_SSL === 'true' ? { rejectUnauthorized: false } : undefined,
schema: process.env.DB_SCHEMA || 'rcu_action',
table: process.env.DB_TABLE || 'rcu_action_events'
},
redis: {
host: process.env.REDIS_HOST || 'localhost',
port: parseNumber(process.env.REDIS_PORT, 6379),
password: process.env.REDIS_PASSWORD || undefined,
db: parseNumber(process.env.REDIS_DB, 0),
projectName: process.env.REDIS_PROJECT_NAME || 'bls-rcu-action',
apiBaseUrl: process.env.REDIS_API_BASE_URL || `http://localhost:${parseNumber(process.env.PORT, 3000)}`
}
};


@@ -0,0 +1,80 @@
import pg from 'pg';
import { config } from '../config/config.js';
import { logger } from '../utils/logger.js';
const { Pool } = pg;
const columns = [
'guid',
'ts_ms',
'write_ts_ms',
'hotel_id',
'room_id',
'device_id',
'direction',
'cmd_word',
'frame_id',
'udp_raw',
'action_type',
'sys_lock_status',
'report_count',
'dev_type',
'dev_addr',
'dev_loop',
'dev_data',
'fault_count',
'error_type',
'error_data',
'type_l',
'type_h',
'details',
'extra'
];
export class DatabaseManager {
constructor(dbConfig) {
this.pool = new Pool({
host: dbConfig.host,
port: dbConfig.port,
user: dbConfig.user,
password: dbConfig.password,
database: dbConfig.database,
max: dbConfig.max,
ssl: dbConfig.ssl
});
}
async insertRows({ schema, table, rows }) {
if (!rows || rows.length === 0) {
return;
}
const values = [];
const placeholders = rows.map((row, rowIndex) => {
const offset = rowIndex * columns.length;
columns.forEach((column) => {
values.push(row[column] ?? null);
});
const params = columns.map((_, columnIndex) => `$${offset + columnIndex + 1}`);
return `(${params.join(', ')})`;
});
const statement = `INSERT INTO ${schema}.${table} (${columns.join(', ')}) VALUES ${placeholders.join(', ')}`;
try {
await this.pool.query(statement, values);
} catch (error) {
logger.error('Database insert failed', {
error: error?.message,
schema,
table,
rowsLength: rows.length
});
throw error;
}
}
async close() {
await this.pool.end();
}
}
const dbManager = new DatabaseManager(config.db);
export default dbManager;


@@ -0,0 +1,86 @@
import pg from 'pg';
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
import { logger } from '../utils/logger.js';
import partitionManager from './partitionManager.js';
import dbManager from './databaseManager.js';
import { config } from '../config/config.js';
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
class DatabaseInitializer {
async initialize() {
logger.info('Starting database initialization check...');
// 1. Check if database exists, create if not
await this.ensureDatabaseExists();
// 2. Initialize Schema and Parent Table (if not exists)
// Note: We need to use dbManager because it connects to the target database
await this.ensureSchemaAndTable();
// 3. Ensure Partitions for the next month
await partitionManager.ensurePartitions(30);
logger.info('Database initialization completed successfully.');
}
async ensureDatabaseExists() {
const { host, port, user, password, database, ssl } = config.db;
// Connect to 'postgres' database to check/create target database
const client = new pg.Client({
host,
port,
user,
password,
database: 'postgres',
ssl: ssl ? { rejectUnauthorized: false } : false
});
try {
await client.connect();
const checkRes = await client.query(
`SELECT 1 FROM pg_database WHERE datname = $1`,
[database]
);
if (checkRes.rowCount === 0) {
logger.info(`Database '${database}' does not exist. Creating...`);
// CREATE DATABASE cannot run inside a transaction block
await client.query(`CREATE DATABASE "${database}"`);
logger.info(`Database '${database}' created.`);
} else {
logger.info(`Database '${database}' already exists.`);
}
} catch (err) {
logger.error('Error ensuring database exists:', err);
throw err;
} finally {
await client.end();
}
}
async ensureSchemaAndTable() {
// dbManager connects to the target database
const client = await dbManager.pool.connect();
try {
const sqlPath = path.resolve(__dirname, '../../scripts/init_db.sql');
const sql = fs.readFileSync(sqlPath, 'utf8');
logger.info('Executing init_db.sql...');
await client.query(sql);
logger.info('Schema and parent table initialized.');
} catch (err) {
logger.error('Error initializing schema and table:', err);
throw err;
} finally {
client.release();
}
}
}
export default new DatabaseInitializer();


@@ -0,0 +1,71 @@
import { logger } from '../utils/logger.js';
import dbManager from './databaseManager.js';
class PartitionManager {
/**
* Calculate the start and end timestamps (milliseconds) for a given date.
* @param {Date} date - The date to calculate for.
* @returns {Object} { startMs, endMs, partitionSuffix }
*/
getPartitionInfo(date) {
const yyyy = date.getFullYear();
const mm = String(date.getMonth() + 1).padStart(2, '0');
const dd = String(date.getDate()).padStart(2, '0');
const partitionSuffix = `${yyyy}${mm}${dd}`;
const start = new Date(date);
start.setHours(0, 0, 0, 0);
const startMs = start.getTime();
const end = new Date(date);
end.setDate(end.getDate() + 1);
end.setHours(0, 0, 0, 0);
const endMs = end.getTime();
return { startMs, endMs, partitionSuffix };
}
/**
* Ensure partitions exist for the next N days.
* @param {number} daysAhead - Number of days to pre-create.
*/
async ensurePartitions(daysAhead = 30) {
const client = await dbManager.pool.connect();
try {
logger.info(`Starting partition check for the next ${daysAhead} days...`);
const now = new Date();
for (let i = 0; i < daysAhead; i++) {
const targetDate = new Date(now);
targetDate.setDate(now.getDate() + i);
const { startMs, endMs, partitionSuffix } = this.getPartitionInfo(targetDate);
const partitionName = `rcu_action.rcu_action_events_${partitionSuffix}`;
// Check if partition exists
const checkSql = `
SELECT to_regclass($1) as exists;
`;
const checkRes = await client.query(checkSql, [partitionName]);
if (!checkRes.rows[0].exists) {
logger.info(`Creating partition ${partitionName} for range [${startMs}, ${endMs})`);
const createSql = `
CREATE TABLE IF NOT EXISTS ${partitionName}
PARTITION OF rcu_action.rcu_action_events
FOR VALUES FROM (${startMs}) TO (${endMs});
`;
await client.query(createSql);
}
}
logger.info('Partition check completed.');
} catch (err) {
logger.error('Error ensuring partitions:', err);
throw err;
} finally {
client.release();
}
}
}
export default new PartitionManager();


@@ -0,0 +1,203 @@
import cron from 'node-cron';
import { config } from './config/config.js';
import dbManager from './db/databaseManager.js';
import dbInitializer from './db/initializer.js';
import partitionManager from './db/partitionManager.js';
import { createKafkaConsumer } from './kafka/consumer.js';
import { processKafkaMessage } from './processor/index.js';
import { createRedisClient } from './redis/redisClient.js';
import { RedisIntegration } from './redis/redisIntegration.js';
import { buildErrorQueueKey, enqueueError, startErrorRetryWorker } from './redis/errorQueue.js';
import { MetricCollector } from './utils/metricCollector.js';
import { logger } from './utils/logger.js';
const bootstrap = async () => {
// 0. Initialize Database (Create DB, Schema, Table, Partitions)
await dbInitializer.initialize();
// Metric Collector
const metricCollector = new MetricCollector();
// 1. Setup Partition Maintenance Cron Job (Every day at 00:00)
cron.schedule('0 0 * * *', async () => {
logger.info('Running scheduled partition maintenance...');
try {
await partitionManager.ensurePartitions(30);
} catch (err) {
logger.error('Scheduled partition maintenance failed', err);
}
});
// 1.1 Setup Metric Reporting Cron Job (Every minute)
// Moved after redisIntegration initialization
// databaseManager.js exports a shared singleton instance (single connection pool), so no local instantiation is needed here.
const redisClient = await createRedisClient(config.redis);
const redisIntegration = new RedisIntegration(
redisClient,
config.redis.projectName,
config.redis.apiBaseUrl
);
redisIntegration.startHeartbeat();
// 1.1 Setup Metric Reporting Cron Job (Every minute)
cron.schedule('* * * * *', async () => {
const metrics = metricCollector.getAndReset();
const report = `[Minute Metrics] Pulled: ${metrics.kafka_pulled}, Parse Error: ${metrics.parse_error}, Inserted: ${metrics.db_inserted}, Failed: ${metrics.db_failed}`;
console.log(report);
logger.info(report, metrics);
try {
await redisIntegration.info('Minute Metrics', metrics);
} catch (err) {
logger.error('Failed to report metrics to Redis', { error: err?.message });
}
});
const errorQueueKey = buildErrorQueueKey(config.redis.projectName);
const handleMessage = async (message) => {
if (message.topic) {
metricCollector.increment('kafka_pulled');
}
try {
const messageValue = Buffer.isBuffer(message.value)
? message.value.toString('utf8')
: message.value;
const messageKey = Buffer.isBuffer(message.key)
? message.key.toString('utf8')
: message.key;
logger.info('Kafka message received', {
topic: message.topic,
partition: message.partition,
offset: message.offset,
key: messageKey,
value: messageValue
});
const inserted = await processKafkaMessage({ message, dbManager, config });
metricCollector.increment('db_inserted');
logger.info('Kafka message processed', { inserted });
} catch (error) {
if (error.type === 'PARSE_ERROR') {
metricCollector.increment('parse_error');
} else {
metricCollector.increment('db_failed');
}
logger.error('Message processing failed', {
error: error?.message,
type: error?.type,
stack: error?.stack,
rawPayload: error?.rawPayload,
validationIssues: error?.validationIssues,
dbContext: error?.dbContext
});
throw error; // Re-throw to trigger onError
}
};
const handleError = async (error, message) => {
logger.error('Kafka processing error', {
error: error?.message,
type: error?.type,
stack: error?.stack
});
try {
await redisIntegration.error('Kafka processing error', {
module: 'kafka',
stack: error?.stack || error?.message
});
} catch (redisError) {
logger.error('Redis error log failed', { error: redisError?.message });
}
if (message) {
const messageValue = Buffer.isBuffer(message.value)
? message.value.toString('utf8')
: message.value;
try {
await enqueueError(redisClient, errorQueueKey, {
attempts: 0,
value: messageValue,
meta: {
topic: message.topic,
partition: message.partition,
offset: message.offset,
key: message.key
},
timestamp: Date.now()
});
} catch (enqueueError) {
logger.error('Enqueue error payload failed', { error: enqueueError?.message });
}
}
};
const consumer = createKafkaConsumer({
kafkaConfig: config.kafka,
onMessage: handleMessage,
onError: handleError
});
// Start retry worker (non-blocking)
startErrorRetryWorker({
client: redisClient,
queueKey: errorQueueKey,
redisIntegration,
handler: async (item) => {
if (!item?.value) {
throw new Error('Missing value in retry payload');
}
await handleMessage({ value: item.value });
}
}).catch(err => {
logger.error('Retry worker failed', { error: err?.message });
});
// Graceful Shutdown Logic
const shutdown = async (signal) => {
logger.info(`Received ${signal}, shutting down...`);
try {
// 1. Close Kafka Consumer
if (consumer) {
await new Promise((resolve) => consumer.close(true, resolve));
logger.info('Kafka consumer closed');
}
// 2. Stop Redis Heartbeat (if method exists, otherwise just close client)
// redisIntegration.stopHeartbeat(); // Assuming implementation or just rely on client close
// 3. Close Redis Client
await redisClient.quit();
logger.info('Redis client closed');
// 4. Close Database Pool
await dbManager.close();
logger.info('Database connection closed');
process.exit(0);
} catch (err) {
logger.error('Error during shutdown', { error: err?.message });
process.exit(1);
}
};
process.on('SIGTERM', () => shutdown('SIGTERM'));
process.on('SIGINT', () => shutdown('SIGINT'));
};
bootstrap().catch((error) => {
logger.error('Service bootstrap failed', { error: error?.message });
process.exit(1);
});


@@ -0,0 +1,39 @@
import kafka from 'kafka-node';
import { logger } from '../utils/logger.js';
const { ConsumerGroup } = kafka;
export const createKafkaConsumer = ({ kafkaConfig, onMessage, onError }) => {
const kafkaHost = kafkaConfig.brokers.join(',');
const consumer = new ConsumerGroup(
{
kafkaHost,
groupId: kafkaConfig.groupId,
clientId: kafkaConfig.clientId,
fromOffset: 'earliest',
protocol: ['roundrobin'],
outOfRangeOffset: 'latest',
autoCommit: true,
sasl: kafkaConfig.sasl
},
kafkaConfig.topic
);
consumer.on('message', (message) => {
onMessage(message).catch((error) => {
logger.error('Kafka message handling failed', { error: error?.message });
if (onError) {
onError(error, message);
}
});
});
consumer.on('error', (error) => {
logger.error('Kafka consumer error', { error: error?.message });
if (onError) {
onError(error);
}
});
return consumer;
};


@@ -0,0 +1,247 @@
import { createGuid } from '../utils/uuid.js';
import { kafkaPayloadSchema } from '../schema/kafkaPayload.js';
const normalizeDirection = (value) => {
if (!value) return null;
if (value === '上报' || value === '上传') return '上报';
if (value === '下发') return '下发';
return value;
};
const normalizeCmdWord = (value) => {
if (typeof value === 'string') {
const trimmed = value.trim();
if (trimmed.startsWith('0x') || trimmed.startsWith('0X')) {
return `0x${trimmed.slice(2).toLowerCase()}`;
}
if (/^[0-9a-fA-F]{2}$/.test(trimmed)) {
return `0x${trimmed.toLowerCase()}`;
}
const parsed = Number(trimmed);
if (Number.isFinite(parsed)) {
return `0x${parsed.toString(16).toLowerCase()}`;
}
return trimmed;
}
// The Zod schema might have already converted numbers to strings, but let's be safe
if (typeof value === 'number' && Number.isFinite(value)) {
return `0x${value.toString(16).toLowerCase()}`;
}
return null;
};
const resolveActionType = (direction, cmdWord) => {
if (cmdWord === '0x36') {
return '36上报';
}
if (cmdWord === '0x0f' && direction === '下发') {
return '0F下发';
}
if (cmdWord === '0x0f' && direction === '上报') {
return '0FACK';
}
return null;
};
const parseKafkaPayload = (value) => {
const raw = Buffer.isBuffer(value) ? value.toString('utf8') : value;
if (typeof raw !== 'string') {
throw new Error('Invalid kafka message value');
}
return JSON.parse(raw);
};
export const buildRowsFromMessageValue = (value) => {
const payload = parseKafkaPayload(value);
return buildRowsFromPayload(payload);
};
export const buildRowsFromPayload = (rawPayload) => {
// 1. Validate and transform payload using Zod schema
const payload = kafkaPayloadSchema.parse(rawPayload);
const {
ts_ms: tsMs,
hotel_id: hotelId,
room_id: roomId,
device_id: deviceId,
direction,
cmd_word: cmdWord,
frame_id: frameId,
udp_raw: udpRaw,
sys_lock_status: sysLockStatus,
report_count: reportCount,
fault_count: faultCount,
device_list: deviceList, // Zod provides default []
fault_list: faultList, // Zod provides default []
control_list: controlList // Zod provides default []
} = payload;
const normalizedDirection = normalizeDirection(direction);
const normalizedCmdWord = normalizeCmdWord(cmdWord);
const actionType = resolveActionType(normalizedDirection, normalizedCmdWord);
const writeTsMs = Date.now();
// Base fields common to all rows (excluding unique ID)
const commonFields = {
ts_ms: tsMs,
write_ts_ms: writeTsMs,
hotel_id: hotelId,
room_id: roomId,
device_id: deviceId, // Pass through normalized/validated device_id
direction: normalizedDirection,
cmd_word: normalizedCmdWord,
frame_id: frameId,
udp_raw: udpRaw,
action_type: actionType,
sys_lock_status: sysLockStatus ?? null,
report_count: reportCount ?? null,
fault_count: faultCount ?? null,
// Initialize nullable fields
dev_type: null,
dev_addr: null,
dev_loop: null,
dev_data: null,
error_type: null,
error_data: null,
type_l: null,
type_h: null,
details: null,
extra: { raw_hex: udpRaw }
};
const rows = [];
// Logic 1: 0x36 Status/Fault Report
if (actionType === '36上报') {
const details = {
device_list: deviceList,
fault_list: faultList
};
// Process device status list
if (deviceList.length > 0) {
deviceList.forEach(device => {
rows.push({
...commonFields,
guid: createGuid(),
dev_type: device.dev_type ?? null,
dev_addr: device.dev_addr ?? null,
dev_loop: device.dev_loop ?? null,
dev_data: device.dev_data ?? null,
details
});
});
}
// Process fault list
if (faultList.length > 0) {
faultList.forEach(fault => {
rows.push({
...commonFields,
guid: createGuid(),
// Use common dev_ fields for fault device identification
dev_type: fault.dev_type ?? null,
dev_addr: fault.dev_addr ?? null,
dev_loop: fault.dev_loop ?? null,
error_type: fault.error_type ?? null,
error_data: fault.error_data ?? null,
details
});
});
}
// Fallback: if no lists, insert one record to preserve the event
if (rows.length === 0) {
rows.push({
...commonFields,
guid: createGuid(),
details
});
}
return rows;
}
// Logic 2: 0x0F Control Command
if (actionType === '0F下发') {
const details = {
control_list: controlList
};
if (controlList.length > 0) {
controlList.forEach(control => {
rows.push({
...commonFields,
guid: createGuid(),
dev_type: control.dev_type ?? null,
dev_addr: control.dev_addr ?? null,
dev_loop: control.dev_loop ?? null,
type_l: control.type_l ?? null,
type_h: control.type_h ?? null,
details
});
});
}
// Fallback
if (rows.length === 0) {
rows.push({
...commonFields,
guid: createGuid(),
details
});
}
return rows;
}
// Logic 3: 0x0F ACK or others
// Default behavior: single row
return [{
...commonFields,
guid: createGuid(),
details: {}
}];
};
export const processKafkaMessage = async ({ message, dbManager, config }) => {
let rows;
try {
const payload = parseKafkaPayload(message.value);
rows = buildRowsFromPayload(payload);
} catch (error) {
error.type = 'PARSE_ERROR';
const rawValue = Buffer.isBuffer(message.value)
? message.value.toString('utf8')
: String(message.value ?? '');
error.rawPayload = rawValue.length > 1000 ? `${rawValue.slice(0, 1000)}...` : rawValue;
if (error?.issues) {
error.validationIssues = error.issues;
}
throw error;
}
try {
await dbManager.insertRows({ schema: config.db.schema, table: config.db.table, rows });
} catch (error) {
error.type = 'DB_ERROR';
const sample = rows?.[0];
error.dbContext = {
rowsLength: rows?.length || 0,
sampleRow: sample
? {
guid: sample.guid,
ts_ms: sample.ts_ms,
action_type: sample.action_type,
cmd_word: sample.cmd_word,
direction: sample.direction,
device_id: sample.device_id
}
: null
};
throw error;
}
return rows.length;
};


@@ -0,0 +1,83 @@
const normalizeHex = (hex) => {
if (typeof hex !== 'string') {
return '';
}
let cleaned = hex.trim().replace(/^0x/i, '').replace(/\s+/g, '');
if (cleaned.length % 2 === 1) {
cleaned = `0${cleaned}`;
}
return cleaned;
};
const toHex = (value) => `0x${value.toString(16).padStart(2, '0')}`;
const readUInt16 = (buffer, offset) => buffer.readUInt16BE(offset);
export const parse0x36 = (udpRaw) => {
const cleaned = normalizeHex(udpRaw);
const buffer = cleaned ? Buffer.from(cleaned, 'hex') : Buffer.alloc(0);
const sysLockStatus = buffer.length > 0 ? buffer[0] : null;
const reportCount = buffer.length > 7 ? buffer[7] : null;
let offset = 8;
const devices = [];
for (let i = 0; i < (reportCount || 0) && offset + 5 < buffer.length; i += 1) {
devices.push({
dev_type: buffer[offset],
dev_addr: buffer[offset + 1],
dev_loop: readUInt16(buffer, offset + 2),
dev_data: readUInt16(buffer, offset + 4)
});
offset += 6;
}
const faultCount = offset < buffer.length ? buffer[offset] : null;
offset += 1;
const faults = [];
for (let i = 0; i < (faultCount || 0) && offset + 5 < buffer.length; i += 1) {
faults.push({
fault_dev_type: buffer[offset],
fault_dev_addr: buffer[offset + 1],
fault_dev_loop: readUInt16(buffer, offset + 2),
error_type: buffer[offset + 4],
error_data: buffer[offset + 5]
});
offset += 6;
}
return {
sysLockStatus,
reportCount,
faultCount,
devices,
faults
};
};
export const parse0x0fDownlink = (udpRaw) => {
const cleaned = normalizeHex(udpRaw);
const buffer = cleaned ? Buffer.from(cleaned, 'hex') : Buffer.alloc(0);
const controlCount = buffer.length > 0 ? buffer[0] : null;
let offset = 1;
const controlParams = [];
for (let i = 0; i < (controlCount || 0) && offset + 5 < buffer.length; i += 1) {
const typeValue = readUInt16(buffer, offset + 4);
controlParams.push({
dev_type: buffer[offset],
dev_addr: buffer[offset + 1],
loop: readUInt16(buffer, offset + 2),
type: typeValue,
type_l: buffer[offset + 4],
type_h: buffer[offset + 5]
});
offset += 6;
}
return {
controlCount,
controlParams
};
};
export const parse0x0fAck = (udpRaw) => {
const cleaned = normalizeHex(udpRaw);
const buffer = cleaned ? Buffer.from(cleaned, 'hex') : Buffer.alloc(0);
const ackCode = buffer.length > 1 ? toHex(buffer[1]) : null;
return { ackCode };
};


@@ -0,0 +1,53 @@
import { logger } from '../utils/logger.js';
export const buildErrorQueueKey = (projectName) => `${projectName}_error_queue`;
export const enqueueError = async (client, queueKey, payload) => {
try {
await client.rPush(queueKey, JSON.stringify(payload));
} catch (error) {
logger.error('Redis enqueue error failed', { error: error?.message });
throw error;
}
};
export const startErrorRetryWorker = async ({
client,
queueKey,
handler,
redisIntegration,
maxAttempts = 5
}) => {
while (true) {
const result = await client.blPop(queueKey, 0);
const raw = result?.element;
if (!raw) {
continue;
}
let item;
try {
item = JSON.parse(raw);
} catch (error) {
logger.error('Invalid error payload', { error: error?.message });
await redisIntegration.error('Invalid error payload', { module: 'redis', stack: error?.message });
continue;
}
const attempts = item.attempts || 0;
try {
await handler(item);
} catch (error) {
logger.error('Retry handler failed', { error: error?.message, stack: error?.stack });
const nextPayload = {
...item,
attempts: attempts + 1,
lastError: error?.message,
lastAttemptAt: Date.now()
};
if (nextPayload.attempts >= maxAttempts) {
await redisIntegration.error('Retry attempts exceeded', { module: 'retry', stack: JSON.stringify(nextPayload) });
} else {
await enqueueError(client, queueKey, nextPayload);
}
}
}
};


@@ -0,0 +1,14 @@
import { createClient } from 'redis';
export const createRedisClient = async (config) => {
const client = createClient({
socket: {
host: config.host,
port: config.port
},
password: config.password,
database: config.db
});
await client.connect();
return client;
};


@@ -0,0 +1,40 @@
export class RedisIntegration {
constructor(client, projectName, apiBaseUrl) {
this.client = client;
this.projectName = projectName;
this.apiBaseUrl = apiBaseUrl;
this.heartbeatKey = '项目心跳';
this.logKey = `${projectName}_项目控制台`;
}
async info(message, context) {
const payload = {
timestamp: new Date().toISOString(),
level: 'info',
message,
metadata: context || undefined
};
await this.client.rPush(this.logKey, JSON.stringify(payload));
}
async error(message, context) {
const payload = {
timestamp: new Date().toISOString(),
level: 'error',
message,
metadata: context || undefined
};
await this.client.rPush(this.logKey, JSON.stringify(payload));
}
startHeartbeat() {
setInterval(() => {
const payload = {
projectName: this.projectName,
apiBaseUrl: this.apiBaseUrl,
lastActiveAt: Date.now()
};
this.client.rPush(this.heartbeatKey, JSON.stringify(payload));
}, 3000);
}
}


@@ -0,0 +1,59 @@
import { z } from 'zod';
// Device Status Schema (for device_list)
const deviceItemSchema = z.object({
dev_type: z.number().int().optional(),
dev_addr: z.number().int().optional(),
dev_loop: z.number().int().optional(),
dev_data: z.number().int().optional()
});
// Fault Item Schema (for fault_list)
const faultItemSchema = z.object({
dev_type: z.number().int().optional(),
dev_addr: z.number().int().optional(),
dev_loop: z.number().int().optional(),
error_type: z.number().int().optional(),
error_data: z.number().int().optional()
});
// Control Item Schema (for control_list)
const controlItemSchema = z.object({
dev_type: z.number().int().optional(),
dev_addr: z.number().int().optional(),
dev_loop: z.number().int().optional(),
type_l: z.number().int().optional(),
type_h: z.number().int().optional()
});
const listSchema = (schema) =>
z.preprocess(
(value) => (value === null ? [] : value),
z.array(schema).optional().default([])
);
// Main Kafka Payload Schema
export const kafkaPayloadSchema = z.object({
// Required Header Fields
ts_ms: z.number(),
hotel_id: z.preprocess(
(value) => (typeof value === 'string' ? Number(value) : value),
z.number()
),
room_id: z.union([z.string(), z.number()]).transform(val => String(val)),
device_id: z.union([z.string(), z.number()]).transform(val => String(val)),
direction: z.string(),
cmd_word: z.union([z.string(), z.number()]).transform(val => String(val)),
frame_id: z.number(),
udp_raw: z.string(),
// Optional Statistical/Status Fields
sys_lock_status: z.number().optional().nullable(),
report_count: z.number().optional().nullable(),
fault_count: z.number().optional().nullable(),
// Lists
device_list: listSchema(deviceItemSchema),
fault_list: listSchema(faultItemSchema),
control_list: listSchema(controlItemSchema)
});


@@ -0,0 +1,18 @@
const format = (level, message, context) => {
const payload = {
level,
message,
timestamp: Date.now(),
...(context ? { context } : {})
};
return JSON.stringify(payload);
};
export const logger = {
info(message, context) {
process.stdout.write(`${format('info', message, context)}\n`);
},
error(message, context) {
process.stderr.write(`${format('error', message, context)}\n`);
}
};


@@ -0,0 +1,26 @@
export class MetricCollector {
constructor() {
this.reset();
}
reset() {
this.metrics = {
kafka_pulled: 0,
parse_error: 0,
db_inserted: 0,
db_failed: 0
};
}
increment(metric, count = 1) {
if (this.metrics.hasOwnProperty(metric)) {
this.metrics[metric] += count;
}
}
getAndReset() {
const current = { ...this.metrics };
this.reset();
return current;
}
}


@@ -0,0 +1,3 @@
import { randomUUID } from 'crypto';
export const createGuid = () => randomUUID().replace(/-/g, '');


@@ -0,0 +1,120 @@
import { describe, it, expect } from 'vitest';
import { buildRowsFromPayload } from '../src/processor/index.js';
describe('Processor Logic', () => {
const basePayload = {
ts_ms: 1700000000000,
hotel_id: 1001,
room_id: '8001',
device_id: 'dev_001',
direction: '上报',
cmd_word: '0x36',
frame_id: 1,
udp_raw: '3601...',
sys_lock_status: 0,
report_count: 0,
fault_count: 0
};
it('should validate required fields', () => {
expect(() => buildRowsFromPayload({})).toThrow();
expect(() => buildRowsFromPayload({ ...basePayload, ts_ms: undefined })).toThrow();
});
it('should handle 0x36 Status Report with device list', () => {
const payload = {
...basePayload,
direction: '上报',
cmd_word: '0x36',
report_count: 2,
device_list: [
{ dev_type: 1, dev_addr: 10, dev_loop: 1, dev_data: 100 },
{ dev_type: 1, dev_addr: 11, dev_loop: 2, dev_data: 0 }
]
};
const rows = buildRowsFromPayload(payload);
expect(rows).toHaveLength(2);
expect(rows[0].action_type).toBe('36上报');
expect(rows[0].dev_addr).toBe(10);
expect(rows[1].dev_addr).toBe(11);
expect(rows[0].details.device_list).toHaveLength(2);
});
it('should handle 0x36 Fault Report', () => {
const payload = {
...basePayload,
direction: '上报',
cmd_word: '0x36',
fault_count: 1,
fault_list: [
{ dev_type: 1, dev_addr: 10, dev_loop: 1, error_type: 2, error_data: 5 }
]
};
const rows = buildRowsFromPayload(payload);
expect(rows).toHaveLength(1);
expect(rows[0].action_type).toBe('36上报');
expect(rows[0].error_type).toBe(2);
});
it('should handle 0x36 Mixed Report (Status + Fault)', () => {
const payload = {
...basePayload,
direction: '上报',
cmd_word: '0x36',
report_count: 1,
fault_count: 1,
device_list: [{ dev_type: 1, dev_addr: 10, dev_loop: 1, dev_data: 100 }],
fault_list: [{ dev_type: 1, dev_addr: 10, dev_loop: 1, error_type: 2, error_data: 5 }]
};
const rows = buildRowsFromPayload(payload);
expect(rows).toHaveLength(2); // 1 status + 1 fault
});
it('should handle 0x0F Control Command', () => {
const payload = {
...basePayload,
direction: '下发',
cmd_word: '0x0F',
control_list: [
{ dev_type: 1, dev_addr: 10, dev_loop: 1, type_l: 1, type_h: 2 }
]
};
const rows = buildRowsFromPayload(payload);
expect(rows).toHaveLength(1);
expect(rows[0].action_type).toBe('0F下发');
expect(rows[0].type_l).toBe(1);
expect(rows[0].type_h).toBe(2);
expect(rows[0].dev_loop).toBe(1);
});
it('should handle 0x0F ACK', () => {
const payload = {
...basePayload,
direction: '上报',
cmd_word: '0x0F'
};
const rows = buildRowsFromPayload(payload);
expect(rows).toHaveLength(1);
expect(rows[0].action_type).toBe('0FACK');
});
it('should fallback when lists are empty for 0x36', () => {
const payload = {
...basePayload,
direction: '上报',
cmd_word: '0x36',
device_list: [],
fault_list: []
};
const rows = buildRowsFromPayload(payload);
expect(rows).toHaveLength(1);
expect(rows[0].action_type).toBe('36上报');
expect(rows[0].dev_type).toBeNull();
});
});

docs/kafka_format.md Normal file

@@ -0,0 +1,153 @@
# Kafka 推送格式与数据拆分规范
本文档定义了上游服务向 Kafka 推送消息的标准 JSON 格式。
**核心变更**:上游服务负责将原始报文解析为结构化的 JSON 对象。对于包含多个设备状态或故障信息的命令(如 `0x36`),上游必须将其转换为 **JSON 数组**,后端服务直接遍历这些数组进行入库,不再依赖对 `udp_raw` 的二次解析。
## 1. Kafka 基础信息
* **Topic**: `blwlog4Nodejs-rcu-action-topic`
* **分区数**: 6
* **消息格式**: JSON String
## 2. 消息结构定义 (Schema)
JSON 消息由 **Header 信息** 与 **业务列表数据** 组成。
### 2.1 顶层字段 (Header & 统计)
| 字段名 | 类型 | 必填 | 说明 |
| :--- | :--- | :--- | :--- |
| **ts_ms** | Number | **是** | 日志产生时间戳 (Key1) |
| **hotel_id** | Number | **是** | 酒店 ID |
| **room_id** | String | **是** | 房间 ID |
| **device_id** | String | **是** | 设备 ID |
| **direction** | String | **是** | "上报" 或 "下发" |
| **cmd_word** | String | **是** | 命令字 (如 "0x36", "0x0F") |
| **frame_id** | Number | **是** | 通讯帧号 |
| **udp_raw** | String | **是** | UDP 原始报文 (作为备份/校验) |
| **sys_lock_status** | Number | 否 | 系统锁状态 (0:未锁, 1:锁定) |
| **report_count** | Number | 否 | 上报设备数量 (对应 device_list 长度) |
| **fault_count** | Number | 否 | 故障设备数量 (对应 fault_list 长度) |
| **action_type** | String | 否 | 行为类型 (建议上游预填,或后端默认处理) |
| **device_list** | Array | 否 | **设备状态列表** (结构见 2.2) |
| **fault_list** | Array | 否 | **设备故障列表** (结构见 2.3) |
| **control_list** | Array | 否 | **控制参数列表** (用于 0x0F) |
### 2.2 设备状态对象 (Item in `device_list`)
对应 `0x36` 命令中的 P8~P13。
| JSON 字段名 | DB 映射字段 | 类型 | 说明 |
| :--- | :--- | :--- | :--- |
| **dev_type** | `dev_type` | Number | 设备类型 |
| **dev_addr** | `dev_addr` | Number | 设备地址 |
| **dev_loop** | `dev_loop` | Number | 设备回路 |
| **dev_data** | `dev_data` | Number | 设备状态 |
### 2.3 设备故障对象 (Item in `fault_list`)
对应 `0x36` 命令中的 P15~P20。
| JSON 字段名 | DB 映射字段 | 类型 | 说明 |
| :--- | :--- | :--- | :--- |
| **dev_type** | `dev_type` | Number | 故障设备类型 (复用 dev_type) |
| **dev_addr** | `dev_addr` | Number | 故障设备地址 (复用 dev_addr) |
| **dev_loop** | `dev_loop` | Number | 故障设备回路 (复用 dev_loop) |
| **error_type** | `error_type` | Number | 故障类型 |
| **error_data** | `error_data` | Number | 故障内容 |
### 2.4 控制参数对象 (Item in `control_list`)
对应 `0x0F` 下发命令。
| JSON 字段名 | DB 映射字段 | 类型 | 说明 |
| :--- | :--- | :--- | :--- |
| **dev_type** | `dev_type` | Number | 控制设备类型 |
| **dev_addr** | `dev_addr` | Number | 控制设备地址 |
| **loop** | `dev_loop` | Number | 控制设备的回路地址 (复用 dev_loop) |
| **type_l** | `type_l` | Number | 执行方式 |
| **type_h** | `type_h` | Number | 执行内容 |
---
## 3. 后端入库逻辑
后端服务接收到 JSON 后,逻辑简化为:
1. **遍历 `device_list`**: 为数组中每个对象生成一条 DB 记录。
* 映射:`dev_type` -> `dev_type`, `dev_addr` -> `dev_addr`, `dev_loop` -> `dev_loop`, `dev_data` -> `dev_data`
* `action_type`: "36上报"。
2. **遍历 `fault_list`**: 为数组中每个对象生成一条 DB 记录。
* 映射:`dev_type` -> `dev_type`, `dev_addr` -> `dev_addr`, `dev_loop` -> `dev_loop`, `error_type` -> `error_type`, `error_data` -> `error_data`
* `action_type`: "36上报"。
3. **遍历 `control_list`**: 为数组中每个对象生成一条 DB 记录。
* 映射:`dev_type` -> `dev_type`, `dev_addr` -> `dev_addr`, `loop` -> `dev_loop`, `type_l` -> `type_l`, `type_h` -> `type_h`
* `action_type`: "0F下发"。
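下面给出按上述规则展开 `device_list` 的最小示意(非后端源码,仅演示字段映射;函数名为示例,未列出的可空字段省略),`fault_list` 与 `control_list` 的展开方式相同,仅映射字段不同:
```js
// 最小示意:把一条 0x36 消息的 device_list 逐项展开为多条入库记录
const expandDeviceList = (payload) => {
  const common = {
    ts_ms: payload.ts_ms,
    hotel_id: payload.hotel_id,
    room_id: payload.room_id,
    device_id: payload.device_id,
    direction: payload.direction,
    cmd_word: payload.cmd_word,
    frame_id: payload.frame_id,
    udp_raw: payload.udp_raw,
    action_type: '36上报'
  };
  return (payload.device_list ?? []).map((item) => ({
    ...common,
    dev_type: item.dev_type ?? null,
    dev_addr: item.dev_addr ?? null,
    dev_loop: item.dev_loop ?? null,
    dev_data: item.dev_data ?? null
  }));
};
```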
---
## 4. 参考 JSON 示例
### 4.1 0x36 混合上报 (2个设备状态 + 1个故障)
```json
{
"ts_ms": 1706428800123,
"hotel_id": 1001,
"room_id": "8001",
"device_id": "dev_001",
"direction": "上报",
"cmd_word": "0x36",
"frame_id": 1001,
"udp_raw": "3601...",
"sys_lock_status": 1,
"report_count": 2,
"fault_count": 1,
"device_list": [
{
"dev_type": 1,
"dev_addr": 10,
"dev_loop": 1,
"dev_data": 100
},
{
"dev_type": 1,
"dev_addr": 11,
"dev_loop": 2,
"dev_data": 0
}
],
"fault_list": [
{
"dev_type": 1,
"dev_addr": 10,
"dev_loop": 1,
"error_type": 1,
"error_data": 1
}
]
}
```
### 4.2 0x0F 下发控制 (包含多个控制指令)
```json
{
"ts_ms": 1706428805000,
"hotel_id": 1001,
"room_id": "8001",
"direction": "下发",
"cmd_word": "0x0F",
"frame_id": 1002,
"udp_raw": "0F...",
"action_type": 2,
"control_list": [
{
"dev_type": 1,
"dev_addr": 10,
"loop": 1,
"type_l": 0,
"type_h": 1
}
],
"details": {
"full_control_data": "..."
}
}
```

docs/project.md Normal file

@@ -0,0 +1,78 @@
1. 0x36 上报数据格式
-------------------------------------------------------------------
P0 系统锁状态
0x00未锁定
0x01锁定
P1~P6 保留
P7 上报设备数量
P8~P13 设备参数描述(一个设备参数固定为6Byte),具体格式如下:
DevType设备类型 1Byte
DevAddr设备地址 1Byte
DevLoop设备回路 2Byte
DevData设备状态 2Byte
P14 上报设备故障数量
P15~P20 设备故障参数描述(一个设备故障参数固定为6Byte),具体格式如下:
DevType设备类型 1Byte
DevAddr设备地址 1Byte
DevLoop设备回路 2Byte
ErrorType故障类型 1Byte
ErrorData故障内容 1Byte
-------------------------------------------------------------------
上报设备的状态具体参数需要查看具体设备,不同类型的设备上报状态的描述是不同的。
具体有多少设备状态需要上报,设备参数所占用的字节=设备数*6Byte;如果故障设备数为0,则没有设备故障参数。
同样,设备故障参数所占用的字节=故障设备数*6Byte。
故障参数解析:
故障类型 故障内容
0x01 0:在线 1:离线
0x02 0~100电量
0x03 电流(10mA)
0x04 1901故障检测次数
0x05 设备回路故障
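长度计算示例(数值仅为假设):若 P7(上报设备数量)=2 且故障数量=1,则设备参数占 2*6=12 Byte(P8~P19),P20 为故障数量,故障参数占 1*6=6 Byte(P21~P26)。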
2. 0x0F 下发数据格式
-------------------------------------------------------------------
P0控制设备总数
P1~P495 设备控制参数描述(一个设备控制参数固定为6Byte),具体格式如下:
DevType控制设备类型 - 1Byte
DevAddr控制设备地址 - 1Byte
Loop控制设备的回路地址 - 2Byte
Type控制设备的输出类型 - 2Byte
Type_L执行方式
Type_H执行内容
-------------------------------------------------------------------
该命令一般用于服务下发控制数据
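长度计算示例(数值仅为假设):若 P0=3,则控制参数共占 3*6=18 Byte,位于 P1~P18。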
3. 0x0F 上报数据格式
ACK (待补充)
4. 数据表结构
不可为空字段:
日志产生时间(ts_ms)
入库时间(write_ts_ms)
酒店(index)
房间(index)
方向(上传/下发)(index)
命令字(index)
通讯帧号
UDP消息原文
记录行为类型(ACK、下发控制、主动控制、设备回路状态)(index,通过设备类型区分)
可为空字段:
系统锁状态
本次上报数量
DevType设备类型 1Byte
DevAddr设备地址 1Byte
DevLoop设备回路 2Byte
DevData设备状态 2Byte
本次故障数量
DevType设备类型 1Byte
DevAddr设备地址 1Byte
DevLoop设备回路 2Byte
ErrorType故障类型 1Byte
ErrorData故障内容 1Byte
一条命令可能会有多条状态,每个状态生成一条记录,通过命令字和帧号来做串联。
一条UDP通讯可能对应多条数据库记录。
5. 队列结构
队列分区数:6
Topic:blwlog4Nodejs-rcu-action-topic

docs/readme.md Normal file

@@ -0,0 +1,92 @@
# BLS RCU Action Server 开发文档
## 1. 项目概述
本项目旨在构建一个后端服务,负责从 Kafka 接收 RCU(客房控制单元)的通讯日志,解析数据结构,并将其持久化存储到 PostgreSQL 数据库中。
核心目标是将不同类型的通讯协议数据(0x36 上报、0x0F 下发、ACK)统一存储,并针对不定长数据结构采用 JSON 格式进行灵活保存。
## 2. 系统架构
**数据流向**: `MCU/Server` (产生数据) -> `Kafka` (消息队列) -> `Action Server` (消费 & 解析) -> `PostgreSQL` (存储)
- **Kafka Topic**: `blwlog4Nodejs-rcu-action-topic` (分区数: 6)
- **数据库**: PostgreSQL
## 3. 数据库设计
数据库名:`bls_rcu_action`
模式名: `rcu_action`
### 3.1 表结构设计
表名: `rcu_action_events`
| 字段名 | 类型 | 说明 | 备注 |
| :--- | :--- | :--- | :--- |
| **guid** | VARCHAR(32) | 主键 (Key2) | 32位无连字符UUID |
| **ts_ms** | BIGINT | 日志产生时间 (Key1) | **必填** (L49) |
| **write_ts_ms** | BIGINT | 入库时间 | **必填** (L50) |
| **hotel_id** | INTEGER | 酒店ID | **必填** (L51) Index |
| **room_id** | VARCHAR(32) | 房间ID | **必填** (L52) Index |
| **device_id** | VARCHAR(32) | 设备ID | **必填** (新增) Index |
| **direction** | VARCHAR(10) | 数据方向 | **必填** (L53) "上报"/"下发" Index |
| **cmd_word** | VARCHAR(10) | 命令字 | **必填** (L54) 如 "0x36", "0x0F" Index |
| **frame_id** | INTEGER | 通讯帧号 | **必填** (L55) 用于串联命令与状态 |
| **udp_raw** | TEXT | UDP消息原文 | **必填** (L56) Hex字符串 |
| **action_type** | VARCHAR(20) | 记录行为类型 | **必填** (L57) Index |
| **sys_lock_status** | SMALLINT | 系统锁状态 | (L59) 可空 |
| **report_count** | SMALLINT | 本次上报数量 | (L60) 可空 |
| **dev_type** | SMALLINT | 设备类型 | (L61) 可空 (统一字段) |
| **dev_addr** | SMALLINT | 设备地址 | (L62) 可空 (统一字段) |
| **dev_loop** | INTEGER | 设备回路 | (L63) 可空 (统一字段) |
| **dev_data** | INTEGER | 设备状态 | (L64) 可空 (0x36状态) |
| **fault_count** | SMALLINT | 本次故障数量 | (L65) 可空 |
| **error_type** | SMALLINT | 故障类型 | (L69) 可空 (0x36故障) |
| **error_data** | SMALLINT | 故障内容 | (L70) 可空 (0x36故障) |
| **type_l** | SMALLINT | 执行方式 | 可空 (0x0F下发) |
| **type_h** | SMALLINT | 执行内容 | 可空 (0x0F下发) |
| **details** | JSONB | 业务详情数据 | 存储不定长设备列表、故障信息等 |
| **extra** | JSONB | 扩展信息 | 存储通讯原文等扩展数据 |
**主键定义**: `(ts_ms, guid)`
**索引定义**: 备注带 index 的字段为需要索引的字段,用于提高查询效率。
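以下为利用该复合索引的最小查询示意(连接方式、环境变量与函数名均为示例假设,非项目源码):
```js
import pg from 'pg';

// 连接串取自环境变量,仅为示例假设
const pool = new pg.Pool({ connectionString: process.env.DATABASE_URL });

// 典型查询:酒店 + 房间 + 时间范围,命中 idx_rcu_action_query_main
export const queryRoomEvents = async (hotelId, roomId, fromMs, toMs) => {
  const sql = `
    SELECT ts_ms, cmd_word, action_type, dev_type, dev_addr, dev_loop, dev_data
    FROM rcu_action.rcu_action_events
    WHERE hotel_id = $1 AND room_id = $2 AND ts_ms >= $3 AND ts_ms < $4
    ORDER BY ts_ms DESC
    LIMIT 100`;
  const res = await pool.query(sql, [hotelId, roomId, fromMs, toMs]);
  return res.rows;
};
```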
### 3.2 字典定义
**Action Type (记录行为类型)**:
- `"0FACK"`: ACK (应答)
- `"0F下发"`: 下发控制 (0x0F 下发)
- `"36上报"`: 设备回路状态 (0x36 上报)
**Direction (方向)**:
- `"上报"`: Upload
- `"下发"`: Download
## 4. 数据解析与存储映射
### 4.1 0x36 上报数据 (设备状态/故障)
* **命令字**: "0x36"
* **拆分逻辑**: 根据 `project.md` 说明,一条 UDP 可能包含多个设备状态,需拆分为多条记录入库,每条记录填充 `dev_type` 等字段。同时将完整的不定长列表存入 `details` 以便追溯。
* **Action Type**: "36上报" (设备回路状态)
**Mapping**:
- `sys_lock_status` -> P0
- `report_count` -> P7
- `dev_type`, `dev_addr`... -> 从 P8~P13 循环解析,每组生成一条 DB 记录
- `details`: `{ "all_devices": [...], "all_faults": [...] }`
- `extra`: `{ "raw_hex": "..." }`
### 4.2 0x0F 下发数据 (控制指令)
* **命令字**: "0x0F"
* **Action Type**: "0F下发" (下发控制)
* **存储逻辑**: 主要是控制指令,通常作为单条记录存储。若包含多个设备控制,可选择存第一条到字段,或仅存入 JSON。根据 "0x0F不定长存为JSON" 的需求,主要依赖 `details` 字段。
**Mapping**:
- `details`: `{ "control_params": [ ... ] }`
- `extra`: `{ "raw_hex": "..." }`
### 4.3 0x0F 上报数据 (ACK)
* **命令字**: "0x0F"
* **Action Type**: "0FACK" (ACK)
**Mapping**:
- `details`: `{ "ack_code": "0x00" }`
- `extra`: `{ "raw_hex": "..." }`


@@ -0,0 +1,273 @@
# Redis 对接协议(供外部项目 AI 生成代码使用)
本文档定义"外部项目 ↔ BLS Project Console"之间通过 Redis 交互的 **Key 命名、数据类型、写入方式、读取方式与数据格式**
注:本仓库对外暴露的 Redis 连接信息如下(供对方直接连接以写入心跳/日志):
- 地址:`10.8.8.109`
- 端口:默认 `6379`
- 密码:无(空)
- 数据库:固定 `15`
示例(环境变量):
```
REDIS_HOST=10.8.8.109
REDIS_PORT=6379
REDIS_PASSWORD=
REDIS_DB=15
```
示例(redis-cli):
```
redis-cli -h 10.8.8.109 -p 6379 -n 15
```
> 约束:每个需要关联本控制台的外部项目,必须在同一个 Redis(DB 15)中:
> - 更新 `项目心跳`(项目列表 + 心跳信息)
> - 追加 `${projectName}_项目控制台`(日志队列)
> - 命令下发为 HTTP API 调用(不通过 Redis 下发命令)
## 1. 命名约定
令:
- `projectName`:外部项目名称(建议只用字母数字下划线 `A-Za-z0-9_`;如使用中文也可,但需保证统一且 UTF-8)
固定后缀:
- 控制台:`${projectName}_项目控制台`
示例(projectName = `订单系统`):
- `订单系统_项目控制台`
## 2. 外部项目需要写入的 2 个 Key
说明:当前控制台左侧“项目选择列表”只读取 `项目心跳`(LIST)。因此外部项目必须维护该 Key,否则项目不会出现在列表中。
### 2.1 `项目心跳`
- Redis 数据类型:**LIST**
- 写入方式(推荐 FIFO):`RPUSH 项目心跳 <json>`
- value:每个列表元素为“项目心跳记录”的 JSON 字符串
示例(与当前代码读取一致;下面示例表示“逻辑结构”):
```json
[
{
"projectName": "BLS主机心跳日志",
"apiBaseUrl": "http://127.0.0.1:3000",
"lastActiveAt": 1768566165572
}
]
```
示例(Redis 写入命令):
```
RPUSH 项目心跳 "{\"projectName\":\"BLS主机心跳日志\",\"apiBaseUrl\":\"http://127.0.0.1:3000\",\"lastActiveAt\":1768566165572}"
```
字段说明(每条心跳记录):
- `projectName`:项目名称(用于拼接日志 Key:`${projectName}_项目控制台`)
- `apiBaseUrl`:目标项目对外提供的 API 地址(基地址,后端将基于它拼接 `apiName`)
- `lastActiveAt`:活跃时间戳(毫秒)。建议每 **3 秒**刷新一次。
在线/离线判定(BLS Project Console 使用):
- 若 `now - lastActiveAt > 10_000ms`,则认为该应用 **离线**
- 否则认为 **在线**
建议:
- `lastActiveAt` 使用 `Date.now()` 生成(毫秒)
- 建议对 `项目心跳` 做长度控制(可选):例如每次写入后执行 `LTRIM 项目心跳 -2000 -1` 保留最近 2000 条
去重提示:
- `项目心跳` 为 LIST 时,外部项目周期性 `RPUSH` 会产生多条重复记录
- BLS Project Console 后端会按 `projectName` 去重,保留 `lastActiveAt` 最新的一条作为项目状态
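写入心跳的最小示意(基于 node-redis v4;项目名、API 地址与保留条数均为示例值):
```js
import { createClient } from 'redis';

const client = createClient({ socket: { host: '10.8.8.109', port: 6379 }, database: 15 });
await client.connect();

setInterval(async () => {
  const record = JSON.stringify({
    projectName: '订单系统',             // 示例项目名
    apiBaseUrl: 'http://127.0.0.1:4001', // 示例地址
    lastActiveAt: Date.now()
  });
  await client.rPush('项目心跳', record);
  await client.lTrim('项目心跳', -2000, -1); // 可选:仅保留最近 2000 条
}, 3000);
```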
### 2.2 `${projectName}_项目控制台`
- Redis 数据类型:**LIST**(作为项目向控制台追加的“消息队列/日志队列”)
- 写入方式(推荐 FIFO):`RPUSH ${projectName}_项目控制台 <json>`
value(推荐格式):一条 JSON 字符串,表示“错误/调试信息”或日志记录。
推荐 JSON Schema字段尽量保持稳定便于控制台解析
```json
{
"timestamp": "2026-01-12T12:34:56.789Z",
"level": "info",
"message": "连接成功",
"metadata": {
"module": "redis",
"host": "127.0.0.1"
}
}
```
字段说明:
- `timestamp`ISO-8601 时间字符串
- `level`:建议取值 `info|warn|error|debug`(小写)
- `message`:日志文本
- `metadata`:可选对象(附加信息)
## 3. 项目列表管理(重要)
### 3.1 迁移机制(仅用于旧数据导入)
BLS Project Console 支持从旧格式自动迁移到新格式:
- **旧格式**:每个项目独立的心跳键 `${projectName}_项目心跳`
- **新格式**:统一的项目列表键 `项目心跳`(LIST 类型,每个元素为 JSON 字符串)
迁移过程:
1. 扫描所有 `${projectName}_项目心跳`
2. 提取 `apiBaseUrl` 与 `lastActiveAt` 字段
3. 写入到 `项目心跳`(LIST)
4. 可选:删除旧键
重要说明(与当前代码实现一致):
- 迁移不会自动后台执行,需要通过接口触发:`POST /api/projects/migrate`
- 迁移的目的只是“从历史 `${projectName}_项目心跳` 导入一次,生成 `项目心跳` 列表”
- 迁移完成后,如果外部项目仍然只更新旧 Key`项目心跳` 不会自动跟随更新;要想实时更新,外部项目必须直接更新 `项目心跳`
### 3.2 新格式项目列表结构
`项目心跳` 为 LIST列表元素为 JSON 字符串;其“逻辑结构”如下:
```json
[
{
"projectName": "订单系统",
"apiBaseUrl": "http://127.0.0.1:4001",
"lastActiveAt": 1760000000000
},
{
"projectName": "用户服务",
"apiBaseUrl": "http://127.0.0.1:4002",
"lastActiveAt": 1760000000001
}
]
```
### 3.3 外部项目对接建议
外部项目应当:
1. 定期写入 `项目心跳`(RPUSH 自己的心跳记录;允许产生多条记录,由控制台按 projectName 去重)
2. 追加 `${projectName}_项目控制台` 日志
## 4. 命令下发方式HTTP API 控制)
控制台不再通过 Redis 写入控制指令队列;改为由 BLS Project Console 后端根据目标项目心跳里的 `apiBaseUrl` 直接调用目标项目 HTTP API。
### 4.1 控制台输入格式
一行文本按空格拆分:
- 第一个 token`apiName`(接口名/路径片段)
- 剩余 token参数列表字符串数组
示例:
- `reload`
- `reload force`
- `user/refreshCache tenantA`
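按该规则拆分输入的最小示意(函数名为示例):
```js
// 将一行控制台输入拆分为 apiName 与参数列表
const parseCommandLine = (line) => {
  const tokens = line.trim().split(/\s+/).filter(Boolean);
  const [apiName, ...args] = tokens;
  return { apiName, args, argsText: args.join(' ') };
};

console.log(parseCommandLine('user/refreshCache tenantA'));
// => { apiName: 'user/refreshCache', args: ['tenantA'], argsText: 'tenantA' }
```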
### 4.2 目标项目需要提供的 API
后端默认使用 `POST` 调用:
- `POST {apiBaseUrl}/{apiName}`
请求体(JSON)示例:
```json
{
"commandId": "cmd-1700000000000-abc123",
"timestamp": "2026-01-13T00:00:00.000Z",
"source": "BLS Project Console",
"apiName": "reload",
"args": ["force"],
"argsText": "force"
}
```
字段说明:
- `commandId`:唯一命令标识符
- `timestamp`:命令发送时间(ISO-8601 格式)
- `source`:命令来源标识
- `apiName`API 接口名
- `args`:参数数组
- `argsText`:参数文本(空格连接)
返回建议:
- 2xx 表示成功
- 非 2xx 表示失败(控制台会展示 upstreamStatus 与部分返回内容)
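外部项目接收命令端点的最小示意(使用 Node 原生 `http` 模块;端口与返回内容均为示例假设,实际接口由目标项目自行定义):
```js
import http from 'node:http';

const server = http.createServer((req, res) => {
  if (req.method !== 'POST') {
    res.writeHead(405);
    res.end();
    return;
  }
  let body = '';
  req.on('data', (chunk) => { body += chunk; });
  req.on('end', () => {
    let command;
    try {
      // 请求体即上文的 JSON:{ commandId, apiName, args, argsText, ... }
      command = JSON.parse(body || '{}');
    } catch {
      res.writeHead(400);
      res.end('invalid json');
      return;
    }
    // req.url 即 /{apiName},可据此分发到具体处理逻辑;此处仅回显
    res.writeHead(200, { 'Content-Type': 'application/json' });
    res.end(JSON.stringify({ ok: true, commandId: command.commandId, apiName: req.url }));
  });
});

server.listen(4001); // 端口为示例假设
```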
### 4.3 在线/离线判定
发送命令前,系统会检查项目在线状态:
-`项目心跳` 列表读取 `lastActiveAt`
-`now - lastActiveAt > 10_000ms`,则认为该应用 **离线**,拒绝发送命令
- 否则认为 **在线**,允许发送命令
## 5. 与本项目代码的对应关系
- **后端 `/api/projects`**:只从 `项目心跳`(LIST)读取项目列表,返回所有项目及其在线状态
- **后端 `/api/commands`**:从 `项目心跳` 中查找目标项目的 `apiBaseUrl/lastActiveAt`,在线时调用目标项目 API
- **后端 `/api/logs`**:读取 `${projectName}_项目控制台`(LIST),并基于 `项目心跳` 中该项目的 `lastActiveAt` 计算在线/离线与 API 地址信息
## 6. 兼容与错误处理建议
- JSON 解析失败:外部项目应记录错误,并丢弃该条消息(避免死循环阻塞消费)。
- 消息过长:建议控制单条消息大小(例如 < 64KB)
- 字符编码:统一 UTF-8。
- 心跳超时:建议外部项目每 3 秒更新一次心跳,避免被误判为离线。
## 7. 数据迁移工具(旧数据导入)
如果需要从旧格式迁移到新格式,可使用以下 API
```bash
POST /api/projects/migrate
Content-Type: application/json
{
"deleteOldKeys": false,
"dryRun": false
}
```
参数说明:
- `deleteOldKeys`:是否删除旧格式键(默认 false)
- `dryRun`:是否仅模拟运行(默认 false)
返回示例:
```json
{
"success": true,
"message": "数据迁移完成",
"migrated": 2,
"projects": [...],
"listKey": "项目心跳",
"deleteOldKeys": false
}
```

docs/测试报告.md Normal file

@@ -0,0 +1,45 @@
# 测试报告
## 基本信息
- 运行时间: 2026-01-29
- 运行方式: 控制台启动 `npm run dev`,运行约 60 秒后 Ctrl + C 终止
- 测试目标: 验证 Kafka 消费与入库链路,定位无入库原因
## 控制台关键日志
```
{"level":"error","message":"Message processing failed","timestamp":1769734880590,"context":{"error":"[\n {\n \"expected\": \"number\",\n \"code\": \"invalid_type\",\n \"path\": [\n \"hotel_id\"\n ],\n \"message\": \"Invalid input: expected number, received string\"\n }\n]","type":"PARSE_ERROR","stack":"ZodError: ...","rawPayload":"{\"ts_ms\":1769692878011,\"hotel_id\":\"2147\",\"room_id\":\"8209\",\"device_id\":\"099008129081\",\"direction\":\"上报\",\"cmd_word\":\"36\",\"frame_id\":52496,...}","validationIssues":[{"expected":"number","code":"invalid_type","path":["hotel_id"],"message":"Invalid input: expected number, received string"}]}}
```
## 结论
- 数据未入库的直接原因: Kafka 消息在解析阶段触发 Zod 校验失败,`hotel_id` 为字符串类型而非文档要求的 Number导致 `PARSE_ERROR`,数据库插入流程未执行。
## 与文档格式的一致性检查
对照 [kafka_format.md](file:///e:/Project_Class/BLS/Web_BLS_RCUAction_Server/docs/kafka_format.md):
- `hotel_id`: 文档要求 Number但实测为字符串 (示例: `"2147"`),不一致。
- `cmd_word`: 文档要求 `"0x36"`/`"0x0F"`,实测为 `"36"`,不一致。
- `control_list`: 文档要求 Array/可选,但实测为 `null`,不一致。
- 其余关键字段如 `ts_ms`, `room_id`, `device_id`, `direction`, `udp_raw` 均存在。
## 已增强的控制台错误输出
为了便于定位异常,以下模块已经增加详细错误输出到 PowerShell 控制台:
- Kafka 处理异常: 输出 `type`, `stack`, `rawPayload`, `validationIssues`, `dbContext`
- 数据库插入异常: 输出 `schema`, `table`, `rowsLength`
- Redis 入队与重试异常: 输出详细错误信息
相关改动文件:
- [index.js](file:///e:/Project_Class/BLS/Web_BLS_RCUAction_Server/bls-rcu-action-backend/src/index.js)
- [databaseManager.js](file:///e:/Project_Class/BLS/Web_BLS_RCUAction_Server/bls-rcu-action-backend/src/db/databaseManager.js)
- [errorQueue.js](file:///e:/Project_Class/BLS/Web_BLS_RCUAction_Server/bls-rcu-action-backend/src/redis/errorQueue.js)
## 建议修改方向
以下为解决无入库问题的可选方案,由你决定是否执行:
1. 上游严格按文档输出:
- `hotel_id` 改为 Number
- `cmd_word` 改为 `"0x36"` / `"0x0F"`
- `control_list` 改为 `[]` 或省略字段,避免 `null`
2. 下游放宽校验并做类型转换:
- 将 `hotel_id` 放宽为支持字符串并转换为 Number
- 继续兼容 `cmd_word = "36"` 的写法
- `control_list/device_list/fault_list` 接受 `null` 并转为空数组
当前代码已兼容 `cmd_word="36"` 与 `control_list=null`,但 `hotel_id` 仍按文档严格要求 Number。
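若采用方案 2,可参考下面放宽校验的最小示意(仅演示类型转换思路,字段与取值为示例,并非当前仓库代码):
```js
import { z } from 'zod';

const toNumber = (v) => (typeof v === 'string' && v.trim() !== '' ? Number(v) : v);
const nullToEmptyArray = (v) => (v === null || v === undefined ? [] : v);

const relaxedSchema = z.object({
  hotel_id: z.preprocess(toNumber, z.number()),                   // 兼容 "2147" 这类字符串
  cmd_word: z.union([z.string(), z.number()]).transform(String),  // 兼容 "36" 等写法
  control_list: z.preprocess(nullToEmptyArray, z.array(z.unknown()))
});

console.log(relaxedSchema.parse({ hotel_id: '2147', cmd_word: '36', control_list: null }));
// => { hotel_id: 2147, cmd_word: '36', control_list: [] }
```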


@@ -0,0 +1,32 @@
# Add Validation and Unit Tests
## Summary
Introduced robust data validation using `zod` and unit testing using `vitest` to ensure system stability and correctness of data processing logic.
## Changes
### 1. Data Validation
- **Library**: `zod`
- **File**: `src/schema/kafkaPayload.js`
- **Logic**:
- Defined strict schema for Kafka payload including required headers (`ts_ms`, `hotel_id`, etc.) and optional arrays (`device_list`, `fault_list`).
- Implemented automatic type transformation (e.g., `room_id` to string, `cmd_word` normalization).
- Integrated into `src/processor/index.js` to validate incoming messages before processing.
### 2. Unit Testing
- **Framework**: `vitest`
- **File**: `tests/processor.test.js`
- **Coverage**:
- Required field validation (throws error on missing fields).
- `0x36` Status Report processing (device list expansion).
- `0x36` Fault Report processing (fault list expansion).
- `0x36` Mixed Report processing.
- `0x0F` Control Command processing (control list expansion).
- `0x0F` ACK processing.
- Fallback logic for empty lists.
### 3. Scripts
- Added `test` script to `package.json`: `vitest run`.
## Verification
- Ran `npm test` and all 7 tests passed successfully.
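A minimal usage sketch of the schema (sample values and the relative import path are illustrative; it mirrors how the processor consumes validated payloads):
```js
import { kafkaPayloadSchema } from '../src/schema/kafkaPayload.js';

const sample = {
  ts_ms: 1700000000000,
  hotel_id: 1001,
  room_id: '8001',
  device_id: 'dev_001',
  direction: '上报',
  cmd_word: '0x36',
  frame_id: 1,
  udp_raw: '3601...',
  device_list: null // coerced to [] by the list preprocessor
};

const result = kafkaPayloadSchema.safeParse(sample);
if (result.success) {
  console.log(result.data.device_list); // []
} else {
  // each issue includes the failing path, e.g. ["hotel_id"]
  console.error(result.error.issues);
}
```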


@@ -0,0 +1,9 @@
# Operations Log
## [2026-01-28] Correction: Kafka JSON Structure
- **Action**: Corrected `docs/kafka_format.md` based on user feedback.
- **Details**:
- **Shifted Parsing Responsibility**: Clarified that the upstream service performs the raw parsing.
- **Structured Arrays**: Introduced `device_list`, `fault_list`, and `control_list` arrays in the JSON schema.
- **Flattened Logic**: Backend no longer parses `udp_raw` for list items but iterates over the provided JSON arrays.
- **Updated Examples**: Provided clear examples of nested JSON objects for devices and faults.


@@ -0,0 +1,9 @@
# Operations Log
## [2026-01-28] Documentation Update: Kafka Format & Splitting Logic
- **Action**: Updated `docs/kafka_format.md`.
- **Details**:
- Defined strict splitting logic for `0x36` commands: One Kafka message -> Multiple DB records (based on `report_count` and `fault_count`).
- Updated Kafka JSON Schema to include all database fields (Header + Logic/Parsing fields).
- Clarified `action_type` mapping and `sys_lock_status` propagation.
- Added table-based parsing example for visual clarity.


@@ -0,0 +1,33 @@
# Database Partitioning and Initialization Strategy
## Summary
Implemented automatic database initialization and time-based table partitioning to ensure system scalability and ease of deployment.
## Changes
### 1. Database Initialization (`src/db/initializer.js`)
- **Logic**:
1. **Database Check**: Connects to the default `postgres` database to check if the target database (defined in `.env`) exists. Creates it if missing.
2. **Schema & Table Check**: Connects to the target database and executes `scripts/init_db.sql`.
3. **Partition Check**: Calls `PartitionManager` to ensure partition tables exist for the next 30 days.
- **Integration**: Called during application bootstrap in `src/index.js`.
### 2. Table Partitioning (`src/db/partitionManager.js`)
- **Strategy**: Partition by Range on `ts_ms` (milliseconds).
- **Partition Size**: Daily partitions (e.g., `rcu_action.rcu_action_events_20260129`).
- **Automation**:
- `ensurePartitions(daysAhead)`: Calculates daily ranges and creates partitions if they don't exist.
- **Scheduled Task**: `node-cron` job runs daily at 00:00 to pre-create partitions for the next 30 days.
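A minimal sketch of the partition routine and the `node-cron` schedule, assuming the shared `pg` pool is passed in; boundaries are computed in UTC here, which may differ from the real implementation:

```js
const cron = require('node-cron');

// Create daily range partitions on ts_ms (epoch milliseconds).
async function ensurePartitions(pool, daysAhead = 30) {
  const dayMs = 24 * 60 * 60 * 1000;
  const start = new Date();
  start.setUTCHours(0, 0, 0, 0);

  for (let i = 0; i <= daysAhead; i++) {
    const from = start.getTime() + i * dayMs;
    const to = from + dayMs;
    const suffix = new Date(from).toISOString().slice(0, 10).replace(/-/g, ''); // e.g. 20260129
    await pool.query(
      `CREATE TABLE IF NOT EXISTS rcu_action.rcu_action_events_${suffix}
         PARTITION OF rcu_action.rcu_action_events
         FOR VALUES FROM (${from}) TO (${to})`
    );
  }
}

// Pre-create partitions every day at 00:00.
function schedulePartitionJob(pool) {
  cron.schedule('0 0 * * *', () => {
    ensurePartitions(pool, 30).catch((err) => console.error('partition job failed', err));
  });
}

module.exports = { ensurePartitions, schedulePartitionJob };
```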
### 3. Schema Updates (`scripts/init_db.sql`)
- Modified `rcu_action_events` table definition to use `PARTITION BY RANGE (ts_ms)`.
- **Note**: PostgreSQL requires the partition key to be included in any primary key on a partitioned table; the composite primary key `(ts_ms, guid)` satisfies this requirement.
### 4. Code Structure Updates
- **Dependency**: Added `node-cron`.
- **Singleton**: Updated `src/db/databaseManager.js` to export a singleton instance for shared connection pooling.
- **Bootstrap**: Updated `src/index.js` to include initialization and cron scheduling.
## Verification
- **Startup**: Application will auto-initialize DB and partitions on first run.
- **Maintenance**: Cron job ensures future partitions are always ready.

View File

@@ -0,0 +1,19 @@
# System Robustness Improvements
## Context
A review of the project revealed missing initialization scripts and lack of graceful shutdown handling, which are critical for production stability and deployment.
## Changes
1. **Database Initialization (`scripts/init_db.sql`)**:
* Created a SQL script to initialize the `rcu_action` schema and `rcu_action_events` table.
* Added indexes for performance optimization (`ts_ms`, `hotel_id`, `room_id`, `direction`, `cmd_word`, `action_type`).
* Added a composite index (`hotel_id`, `room_id`, `ts_ms DESC`) for common query patterns.
2. **Graceful Shutdown (`src/index.js`)**:
* Implemented `SIGTERM` and `SIGINT` signal handlers.
* Ensures resources are closed in order: Kafka Consumer -> Redis Client -> Database Pool.
    * Prevents data corruption or zombie connections during container restart/stop (see the sketch after this list).
3. **Error Handling Enhancements**:
* Decoupled `startErrorRetryWorker` from the main bootstrap chain to prevent blocking/crashing on startup.
* Added `try/catch` block in `handleMessage` to ensure errors are logged and bubbled up to the retry mechanism properly.
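A sketch of the shutdown order described in item 2, assuming `consumer`, `redisClient`, and `dbManager` stand in for the real singletons wired up in `src/index.js`:

```js
async function shutdown(signal) {
  console.log(`Received ${signal}, shutting down...`);
  try {
    await new Promise((resolve) => consumer.close(true, resolve)); // stop consuming first
    await redisClient.quit();                                      // flush pending Redis commands
    await dbManager.pool.end();                                    // drain the pg pool last
  } catch (err) {
    console.error('Error during shutdown', err);
  } finally {
    process.exit(0);
  }
}

process.on('SIGTERM', () => shutdown('SIGTERM'));
process.on('SIGINT', () => shutdown('SIGINT'));
```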

View File

@@ -0,0 +1,27 @@
# Kafka Partition and Database Connection Strategy
## Context
User highlighted two specific constraints for the production environment:
1. The Kafka topic `blwlog4Nodejs-rcu-action-topic` has 6 partitions, but the application runs as a single PM2 instance.
2. Database connections must be minimized, using a single connection pool.
## Analysis & Implementation
### 1. Kafka Partition Handling
- **Constraint**: Single instance must consume from all 6 partitions.
- **Solution**: We are using `kafka-node`'s `ConsumerGroup`.
- **Mechanism**:
- `ConsumerGroup` automatically manages partition assignment.
- When a single consumer joins the group, the group coordinator assigns all available partitions (0-5) to that single member.
  - The assignment protocol is set to `['roundrobin']`, which distributes partitions evenly if more consumers are added later, while a single consumer still receives all six partitions.
- **Verification**: Checked `src/kafka/consumer.js`. No code changes needed.
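A hedged sketch of such a `ConsumerGroup` using kafka-node option names; the env wiring (e.g. `KAFKA_TOPIC` vs `KAFKA_TOPICS`) and the handlers are assumptions:

```js
const { ConsumerGroup } = require('kafka-node');

// A single group member is assigned all six partitions by the group coordinator.
const consumer = new ConsumerGroup(
  {
    kafkaHost: process.env.KAFKA_BROKERS,   // e.g. "host1:9092,host2:9092"
    groupId: process.env.KAFKA_GROUP_ID,
    protocol: ['roundrobin'],
    autoCommit: true,
    autoCommitIntervalMs: 5000,
    sasl: process.env.KAFKA_SASL_ENABLED === 'true'
      ? {
          mechanism: 'plain',
          username: process.env.KAFKA_SASL_USERNAME,
          password: process.env.KAFKA_SASL_PASSWORD,
        }
      : undefined,
  },
  [process.env.KAFKA_TOPIC]
);

consumer.on('message', (message) => handleMessage(message)); // handleMessage defined elsewhere
consumer.on('error', (err) => console.error('Kafka consumer error', err));
```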
### 2. Database Connection Pool
- **Constraint**: Single pool, limited connections.
- **Solution**:
- **Singleton Pattern**: `src/db/databaseManager.js` exports a pre-instantiated `dbManager` object. All modules import this single instance.
- **Connection Limit**: `src/config/config.js` sets `max` connections to `process.env.DB_MAX_CONNECTIONS` with a default of **10**.
- **Verification**: Checked `src/db/databaseManager.js` and `src/config/config.js`. Implementation complies with constraints.
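A minimal sketch of the singleton pattern with a capped `pg` pool; the config module shape is an assumption:

```js
const { Pool } = require('pg');
const config = require('../config/config'); // assumed shape: { db: { host, port, ... } }

class DatabaseManager {
  constructor() {
    this.pool = new Pool({
      host: config.db.host,
      port: config.db.port,
      user: config.db.user,
      password: config.db.password,
      database: config.db.database,
      max: config.db.maxConnections || 10,   // DB_MAX_CONNECTIONS, default 10
      idleTimeoutMillis: 30000,
    });
  }

  query(text, params) {
    return this.pool.query(text, params);
  }
}

// Export a single shared instance so every module reuses the same pool.
module.exports = new DatabaseManager();
```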
## Conclusion
The current architecture inherently satisfies these requirements without further modification.

View File

@@ -0,0 +1,23 @@
# Processor Refactoring for Unified Kafka Topic
## Context
The system receives all messages (Status, Fault, Control, ACK) from a single Kafka topic. The message key is unreliable (random). Therefore, the backend must rely on `cmd_word` and `direction` within the message payload to determine the processing logic and storage structure.
## Changes
1. **Refactored `src/processor/index.js`**:
* Removed `udpParser.js` dependency (parsing is now upstream responsibility).
* Implemented `resolveActionType` to categorize messages into:
* `"36上报"` (Status/Fault)
* `"0F下发"` (Control)
* `"0FACK"` (ACK)
* Implemented `buildRowsFromPayload` with specific logic for each action type:
* **36上报**: Iterates over `device_list` and `fault_list` (provided by upstream JSON) to create multiple DB rows. Maps `dev_type`, `dev_addr`, `dev_loop`, `dev_data`, `error_type`, `error_data`.
* **0F下发**: Iterates over `control_list`. Maps `dev_type`, `dev_addr`, `loop`->`dev_loop`, `type_l`, `type_h`.
* **0FACK**: Creates a single DB row (fallback).
* Ensured `details` column stores the full lists (`device_list`, `fault_list`, `control_list`) for traceability.
* Ensured `extra` column stores the `raw_hex` (udp_raw).
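The dispatch and expansion logic above, condensed into a sketch; column handling is abbreviated (guid generation omitted) and the helper signatures are assumptions rather than the exact code in `src/processor/index.js`:

```js
function resolveActionType(payload) {
  const cmd = String(payload.cmd_word).replace(/^0x/i, '').toUpperCase();
  if (cmd === '36') return '36上报';
  if (cmd === '0F') return payload.direction === '下发' ? '0F下发' : '0FACK';
  return null;
}

function buildRowsFromPayload(payload) {
  const actionType = resolveActionType(payload);
  const base = {
    hotel_id: payload.hotel_id,
    room_id: payload.room_id,
    device_id: payload.device_id,
    ts_ms: payload.ts_ms,
    direction: payload.direction,
    cmd_word: payload.cmd_word,
    action_type: actionType,
    details: {
      device_list: payload.device_list || [],
      fault_list: payload.fault_list || [],
      control_list: payload.control_list || [],
    },
    extra: { raw_hex: payload.udp_raw },
  };

  if (actionType === '36上报') {
    const devices = (payload.device_list || []).map((d) => ({ ...base, ...d }));
    const faults = (payload.fault_list || []).map((f) => ({ ...base, ...f }));
    const rows = [...devices, ...faults];
    return rows.length ? rows : [base];          // fallback: keep at least one row
  }
  if (actionType === '0F下发') {
    const controls = (payload.control_list || []).map((c) => ({ ...base, ...c, dev_loop: c.loop }));
    return controls.length ? controls : [base];
  }
  return [base];                                 // 0FACK: single row
}
```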
## Impact
* Backend is now fully compliant with the "Upstream Parsing" requirement.
* Correctly handles one-to-many storage for Status, Fault, and Control messages.
* No longer relies on Kafka Keys for dispatching.

View File

@@ -0,0 +1,33 @@
# Initial Setup & Backend Implementation
## Operations Log
### Configuration & Code Update
- **Action**: Updated codebase to support advanced environment configurations.
- **Details**:
- Updated `src/config/config.js` to read SASL and SSL configurations from `.env`.
- Updated `src/kafka/consumer.js` to support SASL authentication.
- Updated `src/db/databaseManager.js` to support PostgreSQL SSL connections.
- Verified `.env` integration with `dotenv`.
### Backend Component Construction
- **Action**: Initialized backend scaffolding and core implementation.
- **Details**:
- Created project structure under `bls-rcu-action-backend`.
- Implemented `DatabaseManager` with connection pooling.
- Implemented `KafkaConsumer` with error handling.
- Implemented `RedisIntegration` for heartbeats and error logging.
- Added Docker and PM2 configuration files.
### Documentation Updates
- **Action**: Refined data schema and documentation.
- **Details**:
- **Schema Changes**:
- `room_id`: Changed to `VARCHAR(32)` (String).
- `direction`: Changed to `VARCHAR(10)` (String: "上传"/"下发").
- `cmd_word`: Changed to `VARCHAR(10)` (String: "0x36").
- Added `extra` field (JSONB) for raw communication data.
- Defined Primary Key: 32-bit Unsigned UUID (`guid`).
- Defined Composite Key logic: `ts_ms` (Key1) + `guid` (Key2).
- **Indices**: Marked `hotel_id`, `room_id`, `direction`, `cmd_word` for indexing.
- **Kafka Format**: Created `docs/kafka_format.md` with updated JSON reference.

View File

@@ -0,0 +1,19 @@
# Database Schema Unification
## Context
Refined the database schema and Kafka JSON format to eliminate redundant fields and better support the `0x36` (Report) and `0x0F` (Control) commands.
## Changes
1. **Unified Device Identification**:
* Removed `fault_dev_type`, `fault_dev_addr`, `fault_dev_loop`.
* Consolidated all device identification into `dev_type`, `dev_addr`, and `dev_loop`. These fields are now shared across status reports, fault reports, and control commands.
2. **Field Additions for Control Commands**:
* Added `type_l` (SMALLINT) and `type_h` (SMALLINT) to support `0x0F` control command parameters.
3. **Document Updates**:
* Updated `docs/readme.md`: Reflected the schema changes in the table definition.
* Updated `docs/kafka_format.md`: Updated the JSON schema and mapping rules to align with the unified database fields.
4. **Code Updates**:
* Updated `bls-rcu-action-backend/src/db/databaseManager.js`: Modified the `columns` array to match the new schema.

View File

@@ -0,0 +1,22 @@
# Action Type Field Update
## Context
The `action_type` field was previously defined as a `SMALLINT` with integer mappings (1, 2, 3, 4). The requirement has changed to store explicit string values for better readability and direct mapping.
## Changes
1. **Field Definition**:
* Changed `action_type` from `SMALLINT` to `VARCHAR(20)`.
2. **Enum Values**:
* Old: `1` (ACK), `2` (Control), `4` (Status)
* New:
* `"0FACK"`: ACK (0x0F 上报)
* `"0F下发"`: Control (0x0F 下发)
* `"36上报"`: Status (0x36)
3. **Document Updates**:
* `docs/readme.md`: Updated table definition and dictionary.
* `docs/kafka_format.md`: Updated JSON schema and backend logic examples.
4. **Code Updates**:
* `src/processor/index.js`: Updated `resolveActionType` to return the new string enums.

View File

@@ -0,0 +1,19 @@
# Update Database Primary Key Definition
## Summary
Updated the database schema initialization script to define a composite primary key `(ts_ms, guid)` instead of a single primary key on `guid`, aligning with the project documentation requirements.
## Changes
### 1. Database Schema (`scripts/init_db.sql`)
- **Primary Key**: Changed from `guid` to composite `(ts_ms, guid)`.
- **Reasoning**: `ts_ms` is the partition key or main ordering field (Key1), and `guid` provides uniqueness (Key2). This structure optimizes time-series based queries and data distribution.
- **Indexes**: Removed `idx_rcu_action_ts_ms` as it is now redundant (covered by the primary key index).
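For illustration, an abbreviated version of the DDL the initializer would execute; column types beyond those documented in this commit are assumptions, and the authoritative definition is `scripts/init_db.sql`:

```js
// Illustrative DDL; column list is trimmed for brevity.
const createEventsTable = `
  CREATE TABLE IF NOT EXISTS rcu_action.rcu_action_events (
    guid        VARCHAR(64)  NOT NULL,
    ts_ms       BIGINT       NOT NULL,
    hotel_id    INTEGER,
    room_id     VARCHAR(32),
    device_id   VARCHAR(64),
    direction   VARCHAR(10),
    cmd_word    VARCHAR(10),
    action_type VARCHAR(20),
    details     JSONB,
    extra       JSONB,
    PRIMARY KEY (ts_ms, guid)          -- partition key first, guid for uniqueness
  ) PARTITION BY RANGE (ts_ms);
`;

async function applySchema(pool) {
  await pool.query(createEventsTable);
}

module.exports = { applySchema };
```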
## Impact
- **Table Structure**: `rcu_action.rcu_action_events` now enforces uniqueness on the combination of timestamp and GUID.
- **Performance**: Queries filtering by `ts_ms` will utilize the primary key index.
## Verification
- Reviewed SQL syntax in `scripts/init_db.sql`.
- Confirmed alignment with `docs/readme.md` requirements.

View File

@@ -0,0 +1,22 @@
# Add device_id Field
## Context
User requested to add a `device_id` field (string) to the Kafka payload and database table.
## Changes
1. **Documentation**:
- Updated `docs/readme.md` to include `device_id` in the table structure.
- Updated `docs/kafka_format.md` to include `device_id` in the JSON schema and examples.
2. **Database**:
- Updated `bls-rcu-action-backend/scripts/init_db.sql` to add `device_id` column and a corresponding index.
3. **Backend Code**:
- `src/schema/kafkaPayload.js`: Added `device_id` to the Zod validation schema (string, required).
- `src/db/databaseManager.js`: Added `device_id` to the list of columns for insertion.
- `src/processor/index.js`: Updated logic to extract `device_id` from the payload and pass it to the row object.
- `tests/processor.test.js`: Updated test cases to include `device_id` in the mock payload.
## Verification
- Unit tests updated and should pass.
- Schema changes aligned across documentation and code.

View File

@@ -0,0 +1,18 @@
# Remove action_type from Kafka Payload
## Context
User requested to ensure `action_type` is determined solely by the backend logic and is NOT accepted from the Kafka payload.
## Changes
1. **Documentation**:
- Updated `docs/kafka_format.md` to remove `action_type` from the input schema and examples.
2. **Schema Validation**:
- Updated `src/schema/kafkaPayload.js` to remove `action_type` from the Zod schema.
3. **Processor Logic**:
- Confirmed `src/processor/index.js` already independently calculates `action_type` using `resolveActionType` (based on `direction` and `cmd_word`).
- The code does not use any `action_type` from the payload, ensuring strict adherence to the new rule.
## Verification
- Unit tests (`tests/processor.test.js`) pass successfully.
- Zod's default `parse` strips keys that are not declared in the object schema, so an `action_type` field sent in the payload is silently removed during validation (`.passthrough()` would keep unknown keys and `.strict()` would reject them; neither is used here).
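A tiny demonstration of that stripping behaviour, with the schema reduced to two fields for brevity:

```js
const { z } = require('zod');

const schema = z.object({ ts_ms: z.number(), cmd_word: z.string() });

const parsed = schema.parse({ ts_ms: 1769689985427, cmd_word: '36', action_type: '36上报' });
console.log(parsed); // { ts_ms: 1769689985427, cmd_word: '36' } — action_type is stripped

// schema.strict().parse(...) would throw on the unknown key instead,
// and schema.passthrough().parse(...) would keep it.
```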

20
openspec/config.yaml Normal file
View File

@@ -0,0 +1,20 @@
schema: spec-driven
# Project context (optional)
# This is shown to AI when creating artifacts.
# Add your tech stack, conventions, style guides, domain knowledge, etc.
# Example:
# context: |
# Tech stack: TypeScript, React, Node.js
# We use conventional commits
# Domain: e-commerce platform
# Per-artifact rules (optional)
# Add custom rules for specific artifacts.
# Example:
# rules:
# proposal:
# - Keep proposals under 500 words
# - Always include a "Non-goals" section
# tasks:
# - Break tasks into chunks of max 2 hours

23
openspec/project.md Normal file
View File

@@ -0,0 +1,23 @@
# BLS RCU Action Server
## Overview
Backend service for processing RCU action events from Kafka, parsing them, and storing them in PostgreSQL. Includes error handling via Redis and heartbeat monitoring.
## Architecture
- **Input**: Kafka Topic (`blwlog4Nodejs-rcu-action-topic` or configured via env)
- **Processing**: Node.js Service
- **Consumer**: `kafka-node` consumer group
- **Parser**: Parses JSON messages, handles UDP raw data decoding
- **Database**: PostgreSQL (Batch insert)
- **Error Handling**: Redis List (`error_queue`) for failed messages + Retry mechanism
- **Output**: PostgreSQL Table (`rcu_action_events`)
## Configuration (Environment Variables)
The project is configured via `.env`. Key variables:
- **Kafka**: `KAFKA_BROKERS`, `KAFKA_TOPIC`, `KAFKA_SASL_USERNAME`, `KAFKA_SASL_PASSWORD`
- **Database**: `DB_HOST`, `DB_PORT`, `DB_USER`, `DB_PASSWORD`, `DB_DATABASE`, `DB_SSL`
- **Redis**: `REDIS_HOST`, `REDIS_PORT`, `REDIS_PASSWORD`
## Development Constraints
- **Schema**: Must strictly follow `docs/readme.md`.
- **Database**: Do not alter Schema Name (`rcu_action`) or Table Name (`rcu_action_events`) unless explicitly requested.