feat: 实现RCU升级后端服务初始版本

- 添加Kafka消费者组件用于消费升级事件数据
- 实现数据处理器进行数据验证和转换
- 添加数据库写入组件支持批量写入G5数据库
- 配置环境变量管理连接参数
- 添加日志记录和错误处理机制
- 实现优雅关闭和流控功能
This commit is contained in:
2026-03-17 19:23:51 +08:00
parent 23ebc6f059
commit a954db752f
21 changed files with 5919 additions and 0 deletions

55
bls-upgrade-backend/.env Normal file
View File

@@ -0,0 +1,55 @@
KAFKA_BROKERS=kafka.blv-oa.com:9092
KAFKA_CLIENT_ID=bls-upgrade-producer
KAFKA_GROUP_ID=bls-upgrade-consumer
KAFKA_TOPICS=blwlog4Nodejs-rcu-upgrade-topic
KAFKA_AUTO_COMMIT=false
KAFKA_AUTO_COMMIT_INTERVAL_MS=5000
KAFKA_SASL_ENABLED=true
KAFKA_SASL_MECHANISM=plain
KAFKA_SASL_USERNAME=blwmomo
KAFKA_SASL_PASSWORD=blwmomo
KAFKA_SSL_ENABLED=false
KAFKA_CONSUMER_INSTANCES=1
KAFKA_MAX_IN_FLIGHT=5000
KAFKA_BATCH_SIZE=1000
KAFKA_BATCH_TIMEOUT_MS=20
KAFKA_COMMIT_INTERVAL_MS=200
KAFKA_COMMIT_ON_ATTEMPT=true
KAFKA_FETCH_MAX_BYTES=10485760
KAFKA_FETCH_MAX_WAIT_MS=100
KAFKA_FETCH_MIN_BYTES=1
#POSTGRES_HOST=10.8.8.109
#POSTGRES_PORT=5433
#POSTGRES_DATABASE=log_platform
#POSTGRES_USER=log_admin
#POSTGRES_PASSWORD=YourActualStrongPasswordForPostgres!
#POSTGRES_MAX_CONNECTIONS=6
#POSTGRES_IDLE_TIMEOUT_MS=30000
#DB_SCHEMA=onoffline
#DB_TABLE=onoffline_record
# =========================
# PostgreSQL 配置 G5库专用
# =========================
POSTGRES_HOST_G5=10.8.8.80
POSTGRES_PORT_G5=5434
POSTGRES_DATABASE_G5=log_platform
POSTGRES_USER_G5=log_admin
POSTGRES_PASSWORD_G5=H3IkLUt8K!x
POSTGRES_IDLE_TIMEOUT_MS_G5=30000
POSTGRES_MAX_CONNECTIONS_G5=2
DB_SCHEMA_G5=rcu_upgrade
DB_TABLE_G5=rcu_upgrade_events_g5
PORT=3001
LOG_LEVEL=info
# Redis connection
REDIS_HOST=10.8.8.109
REDIS_PORT=6379
REDIS_PASSWORD=
REDIS_DB=15
REDIS_CONNECT_TIMEOUT_MS=5000
REDIS_PROJECT_NAME=bls-onoffline

560
bls-upgrade-backend/dist/index.js vendored Normal file
View File

@@ -0,0 +1,560 @@
import dotenv from "dotenv";
import { Pool } from "pg";
import kafka from "kafka-node";
dotenv.config();
/**
 * Runtime configuration assembled from environment variables (loaded by the
 * dotenv.config() call above), with development defaults as fallbacks.
 *
 * Fix: every parseInt now passes an explicit radix of 10 so values are never
 * accidentally parsed as hex/octal.
 * NOTE(review): real-looking credentials are hard-coded as defaults here and
 * in .env — consider removing them and failing fast when the vars are absent.
 */
const config = {
  port: process.env.PORT || 3001,
  logLevel: process.env.LOG_LEVEL || "info",
  kafka: {
    brokers: process.env.KAFKA_BROKERS || "kafka.blv-oa.com:9092",
    clientId: process.env.KAFKA_CLIENT_ID || "bls-upgrade-producer",
    groupId: process.env.KAFKA_GROUP_ID || "bls-upgrade-consumer",
    testGroupId: process.env.KAFKA_TEST_GROUP_ID || "",
    // Comma-separated list of topics to subscribe to.
    topics: process.env.KAFKA_TOPICS || "blwlog4Nodejs-rcu-upgrade-topic",
    fromOffset: process.env.KAFKA_FROM_OFFSET || "latest",
    autoCommit: process.env.KAFKA_AUTO_COMMIT === "true",
    autoCommitIntervalMs: parseInt(process.env.KAFKA_AUTO_COMMIT_INTERVAL_MS || "5000", 10),
    saslEnabled: process.env.KAFKA_SASL_ENABLED === "true",
    saslMechanism: process.env.KAFKA_SASL_MECHANISM || "plain",
    saslUsername: process.env.KAFKA_SASL_USERNAME || "blwmomo",
    saslPassword: process.env.KAFKA_SASL_PASSWORD || "blwmomo",
    sslEnabled: process.env.KAFKA_SSL_ENABLED === "true",
    consumerInstances: parseInt(process.env.KAFKA_CONSUMER_INSTANCES || "1", 10),
    // Flow control: maximum number of unprocessed messages before pausing.
    maxInFlight: parseInt(process.env.KAFKA_MAX_IN_FLIGHT || "5000", 10),
    batchSize: parseInt(process.env.KAFKA_BATCH_SIZE || "1000", 10),
    batchTimeoutMs: parseInt(process.env.KAFKA_BATCH_TIMEOUT_MS || "20", 10),
    commitIntervalMs: parseInt(process.env.KAFKA_COMMIT_INTERVAL_MS || "200", 10),
    commitOnAttempt: process.env.KAFKA_COMMIT_ON_ATTEMPT === "true",
    fetchMaxBytes: parseInt(process.env.KAFKA_FETCH_MAX_BYTES || "10485760", 10),
    fetchMaxWaitMs: parseInt(process.env.KAFKA_FETCH_MAX_WAIT_MS || "100", 10),
    fetchMinBytes: parseInt(process.env.KAFKA_FETCH_MIN_BYTES || "1", 10)
  },
  database: {
    // G5 PostgreSQL target for rcu_upgrade event rows.
    g5: {
      host: process.env.POSTGRES_HOST_G5 || "10.8.8.80",
      port: parseInt(process.env.POSTGRES_PORT_G5 || "5434", 10),
      database: process.env.POSTGRES_DATABASE_G5 || "log_platform",
      user: process.env.POSTGRES_USER_G5 || "log_admin",
      password: process.env.POSTGRES_PASSWORD_G5 || "H3IkLUt8K!x",
      idleTimeoutMs: parseInt(process.env.POSTGRES_IDLE_TIMEOUT_MS_G5 || "30000", 10),
      maxConnections: parseInt(process.env.POSTGRES_MAX_CONNECTIONS_G5 || "2", 10),
      schema: process.env.DB_SCHEMA_G5 || "rcu_upgrade",
      table: process.env.DB_TABLE_G5 || "rcu_upgrade_events_g5"
    }
  },
  redis: {
    host: process.env.REDIS_HOST || "10.8.8.109",
    port: parseInt(process.env.REDIS_PORT || "6379", 10),
    password: process.env.REDIS_PASSWORD || "",
    db: parseInt(process.env.REDIS_DB || "15", 10),
    connectTimeoutMs: parseInt(process.env.REDIS_CONNECT_TIMEOUT_MS || "5000", 10),
    projectName: process.env.REDIS_PROJECT_NAME || "bls-onoffline"
  },
  performance: {
    dbWriteIntervalMs: 1e3,
    // Throttle database writes to at most once per second.
    batchSize: 1e3
    // Batch size for multi-row inserts.
  }
};
/**
 * Minimal structured logger: writes one JSON object per line to stdout and
 * suppresses records below the configured severity (config.logLevel).
 */
class Logger {
  constructor() {
    // Severity threshold: "debug" | "info" | "warn" | "error".
    this.logLevel = config.logLevel;
  }
  /**
   * Emits a log record when `level` ranks at or above the threshold.
   * @param {string} level - severity name
   * @param {string} message - human-readable message
   * @param {object} [data] - structured context attached to the record
   */
  log(level, message, data = {}) {
    const severities = ["debug", "info", "warn", "error"];
    const recordRank = severities.indexOf(level);
    const thresholdRank = severities.indexOf(this.logLevel);
    if (recordRank < thresholdRank) return;
    const record = {
      timestamp: new Date().toISOString(),
      level,
      message,
      data
    };
    console.log(JSON.stringify(record));
  }
  // Convenience wrappers, one per severity.
  debug(message, data = {}) {
    this.log("debug", message, data);
  }
  info(message, data = {}) {
    this.log("info", message, data);
  }
  warn(message, data = {}) {
    this.log("warn", message, data);
  }
  error(message, data = {}) {
    this.log("error", message, data);
  }
}
// Shared singleton logger used by every component in this file.
const logger = new Logger();
/**
 * Owns named PostgreSQL connection pools (currently only "g5") and provides
 * a query helper with automatic client acquire/release.
 */
class DatabaseManager {
  constructor() {
    // name -> pg.Pool
    this.pools = {};
  }
  /**
   * Creates the G5 pool, smoke-tests one connection at startup (failure is
   * only logged) and surfaces background pool errors.
   */
  init() {
    const g5 = config.database.g5;
    const pool = new Pool({
      host: g5.host,
      port: g5.port,
      database: g5.database,
      user: g5.user,
      password: g5.password,
      max: g5.maxConnections,
      idleTimeoutMillis: g5.idleTimeoutMs
    });
    this.pools.g5 = pool;
    pool.connect((err, client, release) => {
      if (err) {
        logger.error("Error connecting to G5 database:", { error: err.message });
        return;
      }
      logger.info("Successfully connected to G5 database");
      release();
    });
    pool.on("error", (err) => {
      logger.error("Unexpected error on G5 database connection pool:", { error: err.message });
    });
  }
  // Returns the pool registered under `dbName`, or undefined.
  getPool(dbName) {
    return this.pools[dbName];
  }
  /**
   * Runs one query on the named pool; the client is always released, even
   * when the query throws.
   */
  async query(dbName, text, params) {
    const pool = this.getPool(dbName);
    if (!pool) {
      throw new Error(`Database pool ${dbName} not initialized`);
    }
    const client = await pool.connect();
    try {
      return await client.query(text, params);
    } finally {
      client.release();
    }
  }
  // Drains and closes every pool (used during graceful shutdown).
  async close() {
    const closing = Object.values(this.pools).map((pool) => pool.end());
    await Promise.all(closing);
  }
}
// Shared singleton owning all PostgreSQL connection pools.
const databaseManager = new DatabaseManager();
/**
 * Tracks per-partition message completion so only a contiguous prefix of
 * processed offsets is ever committed (required for manual commits with
 * out-of-order async processing).
 */
class OffsetTracker {
  constructor() {
    // "topic-partition" -> { nextCommitOffset, done: Set<number> }
    this.partitionState = new Map();
  }
  /**
   * Registers an offset as "seen" for a partition; nextCommitOffset is the
   * smallest offset observed so far (the commit frontier).
   */
  add(topic, partition, offset) {
    const key = `${topic}-${partition}`;
    let state = this.partitionState.get(key);
    if (state === undefined) {
      state = { nextCommitOffset: null, done: new Set() };
      this.partitionState.set(key, state);
    }
    const parsed = Number(offset);
    if (!Number.isFinite(parsed)) return;
    if (state.nextCommitOffset === null || parsed < state.nextCommitOffset) {
      state.nextCommitOffset = parsed;
    }
  }
  /**
   * Marks an offset as fully processed. Returns the new commit frontier when
   * it advanced past one or more completed offsets, otherwise null.
   */
  markDone(topic, partition, offset) {
    const state = this.partitionState.get(`${topic}-${partition}`);
    if (!state) return null;
    const parsed = Number(offset);
    if (!Number.isFinite(parsed)) return null;
    state.done.add(parsed);
    if (state.nextCommitOffset === null) {
      state.nextCommitOffset = parsed;
    }
    let moved = false;
    // Slide the frontier over every contiguous completed offset.
    for (;;) {
      if (state.nextCommitOffset === null || !state.done.has(state.nextCommitOffset)) {
        break;
      }
      state.done.delete(state.nextCommitOffset);
      state.nextCommitOffset += 1;
      moved = true;
    }
    return moved ? state.nextCommitOffset : null;
  }
  // Drops all tracked state (used on consumer rebalance).
  clear() {
    this.partitionState.clear();
  }
}
const { ConsumerGroup } = kafka;
/**
 * Kafka ConsumerGroup wrapper that:
 *  - consumes the configured comma-separated topic list,
 *  - applies in-flight flow control via pause()/resume(),
 *  - when autoCommit is off, commits only contiguous fully-processed offsets
 *    (computed by OffsetTracker) on a debounced timer.
 */
class KafkaConsumer {
  constructor() {
    this.consumer = null;
    this.tracker = new OffsetTracker();
    // Latest commit request per "topic-partition" key, flushed in batches.
    this.pendingCommits = /* @__PURE__ */ new Map();
    this.commitTimer = null;
    this.inFlight = 0;
    // Fall back to hard defaults when the parsed env values are not finite.
    this.maxInFlight = Number.isFinite(config.kafka.maxInFlight) ? config.kafka.maxInFlight : 5e3;
    this.commitIntervalMs = Number.isFinite(config.kafka.commitIntervalMs) ? config.kafka.commitIntervalMs : 200;
  }
  /**
   * Builds the ConsumerGroup, subscribes to the topics and wires all event
   * handlers. Must be called once before messages can flow.
   */
  init() {
    const kafkaConfig = {
      kafkaHost: config.kafka.brokers,
      clientId: config.kafka.clientId,
      groupId: config.kafka.groupId,
      fromOffset: config.kafka.fromOffset,
      protocol: ["roundrobin"],
      outOfRangeOffset: "latest",
      autoCommit: config.kafka.autoCommit,
      autoCommitIntervalMs: config.kafka.autoCommitIntervalMs,
      fetchMaxBytes: config.kafka.fetchMaxBytes,
      fetchMaxWaitMs: config.kafka.fetchMaxWaitMs,
      fetchMinBytes: config.kafka.fetchMinBytes,
      sasl: config.kafka.saslEnabled ? {
        mechanism: config.kafka.saslMechanism,
        username: config.kafka.saslUsername,
        password: config.kafka.saslPassword
      } : void 0,
      ssl: config.kafka.sslEnabled,
      connectTimeout: 1e4,
      requestTimeout: 1e4
    };
    logger.info("Initializing Kafka consumer with config:", {
      kafkaHost: config.kafka.brokers,
      clientId: config.kafka.clientId,
      groupId: config.kafka.groupId,
      topics: config.kafka.topics,
      fromOffset: config.kafka.fromOffset,
      saslEnabled: config.kafka.saslEnabled
    });
    // KAFKA_TOPICS is a comma-separated list; drop empty entries.
    const topics = config.kafka.topics.split(",").map((topic) => topic.trim()).filter(Boolean);
    this.consumer = new ConsumerGroup(kafkaConfig, topics);
    this.consumer.on("connect", () => {
      logger.info("Kafka consumer connected", {
        groupId: config.kafka.groupId,
        topics
      });
    });
    // On rebalance partition ownership may change: drop local offset state
    // and pending commits so stale offsets are never committed.
    // NOTE(review): inFlight is not reset here — presumably in-flight handlers
    // still complete and decrement it; verify the counter cannot drift.
    this.consumer.on("rebalancing", () => {
      logger.info("Kafka consumer rebalancing");
      this.tracker.clear();
      this.pendingCommits.clear();
      if (this.commitTimer) {
        clearTimeout(this.commitTimer);
        this.commitTimer = null;
      }
    });
    this.consumer.on("rebalanced", () => {
      logger.info("Kafka consumer rebalanced");
    });
    this.consumer.on("message", (message) => {
      logger.debug("Received Kafka message:", { messageId: message.offset });
      this.inFlight += 1;
      this.tracker.add(message.topic, message.partition, message.offset);
      // Flow control: stop fetching while too many messages are unprocessed.
      if (this.inFlight >= this.maxInFlight && this.consumer.pause) {
        this.consumer.pause();
      }
      Promise.resolve(this.onMessage(message)).then(() => {
        if (!config.kafka.autoCommit) {
          // Commit only once a contiguous prefix of offsets has completed.
          const commitOffset = this.tracker.markDone(message.topic, message.partition, message.offset);
          if (commitOffset !== null) {
            const key = `${message.topic}-${message.partition}`;
            this.pendingCommits.set(key, {
              topic: message.topic,
              partition: message.partition,
              offset: commitOffset,
              metadata: "m"
            });
            this.scheduleCommitFlush();
          }
        }
      }).catch((err) => {
        // Processing failed: skip the commit so the message is re-delivered
        // after a restart/rebalance (at-least-once semantics).
        logger.error("Kafka message handling failed, skip commit", {
          error: err.message,
          topic: message.topic,
          partition: message.partition,
          offset: message.offset
        });
      }).finally(() => {
        this.inFlight -= 1;
        if (this.inFlight < this.maxInFlight && this.consumer.resume) {
          this.consumer.resume();
        }
      });
    });
    this.consumer.on("error", (err) => {
      logger.error("Kafka consumer error:", { error: err.message, stack: err.stack });
    });
    this.consumer.on("offsetOutOfRange", (topic) => {
      logger.warn("Kafka offset out of range:", { topic: topic.topic, partition: topic.partition });
    });
    logger.info("Kafka consumer initialized");
    this.consumer.on("close", () => {
      logger.info("Kafka consumer closed");
    });
  }
  // Message handler hook; replaced by the application (see App.init).
  onMessage(message) {
  }
  // Debounced trigger: flush pending commits at most once per commitIntervalMs.
  scheduleCommitFlush() {
    if (this.commitTimer) return;
    this.commitTimer = setTimeout(() => {
      this.commitTimer = null;
      this.flushCommits();
    }, this.commitIntervalMs);
  }
  /**
   * Sends all pending offset commits in one request; on failure the batch is
   * merged back into pendingCommits for the next flush attempt.
   * NOTE(review): the failure path can overwrite a newer pending offset for
   * the same partition with the older failed one — verify this ordering is
   * acceptable (it is eventually corrected by the next markDone).
   */
  flushCommits() {
    if (!this.consumer || this.pendingCommits.size === 0) return;
    const batch = this.pendingCommits;
    this.pendingCommits = /* @__PURE__ */ new Map();
    this.consumer.sendOffsetCommitRequest(Array.from(batch.values()), (err) => {
      if (err) {
        for (const [k, v] of batch.entries()) {
          this.pendingCommits.set(k, v);
        }
        logger.error("Failed to commit Kafka offsets", {
          error: err.message,
          groupId: config.kafka.groupId,
          count: batch.size
        });
        return;
      }
      logger.info("Kafka offsets committed", {
        groupId: config.kafka.groupId,
        count: batch.size,
        commits: Array.from(batch.values())
      });
    });
  }
  /**
   * Flushes outstanding commits and force-closes the consumer.
   * NOTE(review): flushCommits is fire-and-forget, so a commit request may
   * still be in flight when close() proceeds — confirm this is acceptable
   * for shutdown semantics.
   */
  close() {
    return new Promise((resolve) => {
      if (this.commitTimer) {
        clearTimeout(this.commitTimer);
        this.commitTimer = null;
      }
      this.flushCommits();
      if (!this.consumer) {
        resolve();
        return;
      }
      this.consumer.close(true, () => {
        logger.info("Kafka consumer closed");
        resolve();
      });
    });
  }
}
/**
 * Validates incoming upgrade-event messages and writes them to the G5
 * database in rate-limited batches.
 *
 * Each accepted message gets a per-message promise ("write ack") that settles
 * when the batch containing it has been written (or has failed), so the Kafka
 * consumer only commits offsets for rows that actually reached the database.
 */
class DataProcessor {
  constructor() {
    this.batch = [];              // queued items: { data, meta, resolve, reject }
    this.lastWriteTime = 0;       // epoch ms of the last batch write attempt
    this.flushTimer = null;       // pending deferred-flush timer handle
    this.dbRetryDelayMs = 1e3;    // delay between retries of transient DB errors
    this.dbRetryMaxAttempts = 3;  // retry budget per batch write
  }
  /**
   * Parses one Kafka message, validates/transforms its payload, enqueues it,
   * and resolves once the containing batch has been written to the database.
   * Rejects (after logging) on parse/validation/write failure so the caller
   * can skip the offset commit.
   * @param {{value: Buffer|string, topic: string, partition: number, offset: number}} message
   */
  async processMessage(message) {
    try {
      const rawValue = Buffer.isBuffer(message.value) ? message.value.toString("utf8") : String(message.value);
      const payload = JSON.parse(rawValue);
      const processedData = this.validateAndTransform(payload);
      const writeAck = new Promise((resolve, reject) => {
        this.batch.push({
          data: processedData,
          meta: {
            topic: message.topic,
            partition: message.partition,
            offset: message.offset
          },
          resolve,
          reject
        });
      });
      logger.debug("Message accepted into batch", {
        topic: message.topic,
        partition: message.partition,
        offset: message.offset,
        currentBatchSize: this.batch.length
      });
      this.scheduleFlush();
      try {
        await this.checkAndWriteBatch();
      } catch {
        // Bug fix: previously a failed flush threw here BEFORE writeAck was
        // awaited, leaving writeAck as an unhandled promise rejection. The
        // same error is delivered through writeAck below, so it is safe to
        // swallow it at this point.
      }
      await writeAck;
    } catch (error) {
      logger.error("Error processing message:", { error: error.message });
      throw error;
    }
  }
  /**
   * Maps a raw payload onto the rcu_upgrade_events_g5 row shape, stripping
   * NUL bytes from strings (PostgreSQL text cannot store \0) and defaulting
   * missing fields.
   * @param {object} data - parsed JSON payload from Kafka
   * @returns {object} row object matching the insert column order
   */
  validateAndTransform(data) {
    const cleanString = (str) => {
      if (str === null || str === void 0) return null;
      return String(str).replace(/\0/g, "");
    };
    // Bug fix: the previous `parseInt(x) || null` collapsed a legitimate 0
    // (e.g. partition 0, upgrade_state 0) to null. Only NaN maps to null now,
    // and parseInt always uses radix 10.
    const toIntOrNull = (value) => {
      const parsed = parseInt(value, 10);
      return Number.isNaN(parsed) ? null : parsed;
    };
    return {
      ts_ms: typeof data.ts_ms === "number" ? data.ts_ms : (toIntOrNull(data.ts_ms) ?? 0),
      hotel_id: this.validateHotelId(data.hotel_id),
      room_id: cleanString(data.room_id) || "",
      device_id: cleanString(data.device_id) || "",
      is_send: toIntOrNull(data.is_send) ?? 0,
      udp_raw: data.udp_raw ? cleanString(Buffer.from(data.udp_raw).toString()) : null,
      extra: data.extra ? JSON.stringify(data.extra) : null,
      // Source field is remote_endpoint; stored as "ip".
      ip: cleanString(data.remote_endpoint) || "",
      md5: cleanString(data.md5) || "",
      partition: toIntOrNull(data.partition),
      file_type: toIntOrNull(data.file_type),
      file_path: cleanString(data.file_path) || "",
      upgrade_state: toIntOrNull(data.upgrade_state),
      app_version: cleanString(data.app_version) || ""
    };
  }
  /**
   * Clamps hotel_id to the PostgreSQL int2 (smallint) domain; out-of-range
   * or non-numeric values become 0, as required by the spec.
   */
  validateHotelId(hotelId) {
    const id = parseInt(hotelId, 10);
    if (Number.isNaN(id) || id < -32768 || id > 32767) {
      return 0;
    }
    return id;
  }
  /**
   * Writes the batch now if it is full or if the write-rate window
   * (dbWriteIntervalMs) has elapsed since the last write.
   */
  async checkAndWriteBatch() {
    const now = Date.now();
    const timeSinceLastWrite = now - this.lastWriteTime;
    if (this.batch.length >= config.performance.batchSize || timeSinceLastWrite >= config.performance.dbWriteIntervalMs) {
      await this.writeBatch();
    }
  }
  // Arms a one-shot timer so a partially-filled batch is still flushed within
  // dbWriteIntervalMs even if no further messages arrive.
  scheduleFlush() {
    if (this.flushTimer) return;
    this.flushTimer = setTimeout(() => {
      this.flushTimer = null;
      this.writeBatch().catch((error) => {
        logger.error("Error in scheduled batch flush:", { error: error.message });
      });
    }, config.performance.dbWriteIntervalMs);
  }
  /**
   * Heuristically classifies a database error as transient (worth retrying):
   * network errno codes, PostgreSQL connection-class SQLSTATEs (08xxx,
   * 57P03) or well-known connection-failure message fragments.
   */
  isRetryableDbError(err) {
    const code = err?.code;
    if (typeof code === "string") {
      const retryableCodes = new Set([
        "ECONNREFUSED",
        "ECONNRESET",
        "EPIPE",
        "ETIMEDOUT",
        "ENOTFOUND",
        "EHOSTUNREACH",
        "ENETUNREACH",
        "57P03",
        "08006",
        "08001",
        "08000",
        "08003"
      ]);
      if (retryableCodes.has(code)) return true;
    }
    const message = typeof err?.message === "string" ? err.message.toLowerCase() : "";
    return message.includes("connection timeout") || message.includes("connection terminated") || message.includes("connection refused") || message.includes("econnrefused") || message.includes("econnreset") || message.includes("etimedout") || message.includes("could not connect") || message.includes("the database system is starting up");
  }
  /**
   * Runs a query against the G5 pool, retrying transient connection errors
   * up to dbRetryMaxAttempts times with a fixed delay; rethrows anything
   * non-transient immediately.
   */
  async executeQueryWithRetry(query, params) {
    let attempt = 0;
    while (true) {
      try {
        return await databaseManager.query("g5", query, params);
      } catch (error) {
        attempt += 1;
        if (!this.isRetryableDbError(error) || attempt > this.dbRetryMaxAttempts) {
          throw error;
        }
        logger.warn("Retrying G5 batch write after transient DB error", {
          attempt,
          maxAttempts: this.dbRetryMaxAttempts,
          error: error.message
        });
        await new Promise((resolve) => setTimeout(resolve, this.dbRetryDelayMs));
      }
    }
  }
  /**
   * Flushes the current batch with one multi-row INSERT. On success each
   * item's write ack resolves; on failure every ack rejects and the error is
   * rethrown for callers that await the flush directly.
   */
  async writeBatch() {
    if (this.batch.length === 0) return;
    if (this.flushTimer) {
      clearTimeout(this.flushTimer);
      this.flushTimer = null;
    }
    // Detach the batch first so new messages accumulate in a fresh array.
    const batch = [...this.batch];
    this.batch = [];
    this.lastWriteTime = Date.now();
    try {
      logger.info("Flushing batch to G5 database", {
        batchSize: batch.length,
        first: batch[0]?.meta,
        last: batch[batch.length - 1]?.meta
      });
      const values = batch.map((item) => [
        item.data.ts_ms,
        item.data.hotel_id,
        item.data.room_id,
        item.data.device_id,
        item.data.is_send,
        item.data.udp_raw,
        item.data.extra,
        item.data.ip,
        item.data.md5,
        item.data.partition,
        item.data.file_type,
        item.data.file_path,
        item.data.upgrade_state,
        item.data.app_version
      ]);
      // One placeholder group of 14 columns per row: ($1..$14), ($15..$28), ...
      const query = `
INSERT INTO ${config.database.g5.schema}.${config.database.g5.table}
(ts_ms, hotel_id, room_id, device_id, is_send, udp_raw, extra, ip, md5, partition, file_type, file_path, upgrade_state, app_version)
VALUES ${values.map((_, i) => `($${i * 14 + 1}, $${i * 14 + 2}, $${i * 14 + 3}, $${i * 14 + 4}, $${i * 14 + 5}, $${i * 14 + 6}, $${i * 14 + 7}, $${i * 14 + 8}, $${i * 14 + 9}, $${i * 14 + 10}, $${i * 14 + 11}, $${i * 14 + 12}, $${i * 14 + 13}, $${i * 14 + 14})`).join(", ")}
`;
      const params = values.flat();
      await this.executeQueryWithRetry(query, params);
      logger.info("Batch write success", {
        batchSize: batch.length,
        first: batch[0]?.meta,
        last: batch[batch.length - 1]?.meta
      });
      batch.forEach((item) => item.resolve());
    } catch (error) {
      logger.error("Error writing batch to database:", {
        error: error.message,
        batchSize: batch.length,
        first: batch[0]?.meta,
        last: batch[batch.length - 1]?.meta
      });
      batch.forEach((item) => item.reject(error));
      throw error;
    }
  }
  // Final flush of any buffered rows; used during graceful shutdown.
  async flush() {
    if (this.batch.length > 0) {
      await this.writeBatch();
    }
  }
}
// Shared singleton batching processor wired to the Kafka consumer in App.init.
const dataProcessor = new DataProcessor();
/**
 * Application wiring: initializes the database pools, routes Kafka messages
 * into the data processor, and performs graceful shutdown on SIGINT/SIGTERM.
 */
class App {
  constructor() {
    this.consumer = null;        // KafkaConsumer instance, created in init()
    this.isShuttingDown = false; // guards against double shutdown
  }
  /**
   * Starts all components and installs signal handlers.
   */
  async init() {
    databaseManager.init();
    this.consumer = new KafkaConsumer();
    // Route every consumed message through the batching data processor.
    this.consumer.onMessage = (message) => dataProcessor.processMessage(message);
    this.consumer.init();
    const handleSignal = async () => {
      await this.shutdown();
    };
    process.on("SIGINT", handleSignal);
    process.on("SIGTERM", handleSignal);
    logger.info(`BLS Upgrade Backend service started on port ${config.port}`);
  }
  /**
   * Graceful shutdown: flush buffered rows, close the consumer, drain the
   * pools, then exit. Idempotent via the isShuttingDown flag.
   */
  async shutdown() {
    if (this.isShuttingDown) return;
    this.isShuttingDown = true;
    logger.info("Shutting down BLS Upgrade Backend service...");
    await dataProcessor.flush();
    if (this.consumer !== null) {
      await this.consumer.close();
    }
    await databaseManager.close();
    logger.info("Service shutdown completed");
    process.exit(0);
  }
}
// Application entry point.
// NOTE(review): app.init() is a floating promise — a rejection here would be
// unhandled; consider `app.init().catch(...)` with a fatal log + exit.
const app = new App();
app.init();

View File

@@ -0,0 +1,22 @@
// PM2 process definition for the bls-upgrade-backend service.
module.exports = {
  apps: [{
    name: 'bls-upgrade-backend',
    script: 'dist/index.js',        // run the bundled build output
    instances: 1,                    // single instance (one Kafka consumer per process)
    exec_mode: 'fork',
    autorestart: true,
    watch: false,
    max_memory_restart: '1G',        // restart when RSS exceeds 1 GB
    env_file: '.env',                // connection settings come from .env
    env: {
      NODE_ENV: 'production',
      PORT: 3001
    },
    error_file: './logs/error.log',
    out_file: './logs/out.log',
    log_date_format: 'YYYY-MM-DD HH:mm:ss Z',
    merge_logs: true,
    kill_timeout: 5000,              // ms grace period before SIGKILL, allows graceful shutdown
    time: true
  }]
};

View File

@@ -0,0 +1,42 @@
# Agents
## Overview
This document lists the agents involved in the RCU Upgrade Backend project.
## Agents
### 1. System Administrator
- **Responsibilities**: Server setup, network configuration, security
- **Contact**: admin@example.com
### 2. Database Administrator
- **Responsibilities**: Database setup, schema management, performance tuning
- **Contact**: dba@example.com
### 3. Kafka Administrator
- **Responsibilities**: Kafka cluster management, topic configuration
- **Contact**: kafka-admin@example.com
### 4. Developer
- **Responsibilities**: Code implementation, testing, deployment
- **Contact**: developer@example.com
### 5. DevOps Engineer
- **Responsibilities**: CI/CD pipeline, monitoring, deployment automation
- **Contact**: devops@example.com
## Agent Responsibilities Matrix
| Task | System Admin | DBA | Kafka Admin | Developer | DevOps |
|------|-------------|-----|-------------|-----------|--------|
| Server setup | ✅ | | | | |
| Network configuration | ✅ | | | | |
| Database setup | | ✅ | | | |
| Schema management | | ✅ | | | |
| Kafka cluster setup | | | ✅ | | |
| Topic configuration | | | ✅ | | |
| Code implementation | | | | ✅ | |
| Testing | | | | ✅ | |
| CI/CD pipeline | | | | | ✅ |
| Monitoring | | | | | ✅ |
| Deployment automation | | | | | ✅ |

View File

@@ -0,0 +1,50 @@
# Initial Implementation Proposal
## Overview
This proposal outlines the initial implementation of the RCU Upgrade Backend service, which will consume data from Kafka, process it, and write it to the G5 database.
## Background
The service is needed to handle the processing and storage of RCU upgrade events data coming from Kafka, ensuring data integrity and performance.
## Proposed Changes
1. **Project Structure**: Create a complete Node.js project structure with the following components:
- Kafka consumer
- Data processor
- Database writer
- Flow control mechanism
2. **Configuration**: Set up environment variables for:
- Kafka connection
- Database connection
- Performance settings
3. **Data Processing**: Implement:
- Data validation
- hotel_id value range check
- Batch processing
- Flow control
4. **Error Handling**: Implement comprehensive error handling and logging
5. **Testing**: Prepare for unit and integration testing
## Benefits
- Efficient processing of high-volume data
- Data integrity through validation
- Controlled database write frequency
- Comprehensive logging and error handling
## Risks
- Potential performance issues with large batch sizes
- Kafka connection reliability
- Database connection limits
## Mitigation Strategies
- Configurable batch size and write frequency
- Robust error handling and retry mechanisms
- Monitoring and alerting
## Timeline
- Initial implementation: 1 day
- Testing: 1 day
- Deployment: 1 day

View File

@@ -0,0 +1,73 @@
# Initial Implementation Tasks
## Overview
This document outlines the specific tasks required for the initial implementation of the RCU Upgrade Backend service.
## Tasks
### 1. Project Setup
- [x] Create project directory structure
- [x] Set up package.json with dependencies
- [x] Configure environment variables
### 2. Core Components
- [x] Implement Kafka consumer
- [x] Implement data processor
- [x] Implement database writer
- [x] Implement flow control mechanism
### 3. Data Processing
- [x] Implement data validation
- [x] Implement hotel_id value range check
- [x] Implement batch processing
- [x] Implement flow control
### 4. Error Handling and Logging
- [x] Implement error handling
- [x] Implement logging
### 5. Testing
- [ ] Write unit tests
- [ ] Write integration tests
### 6. Deployment
- [ ] Set up build process
- [ ] Create deployment script
## Task Details
### Task 1: Project Setup
- Create directory structure including src, tests, scripts, and openspec
- Set up package.json with required dependencies (kafka-node, pg, dotenv, etc.)
- Configure .env file with connection details
### Task 2: Core Components
- Kafka consumer: Set up connection to Kafka broker and consume messages from blwlog4Nodejs-rcu-upgrade-topic
- Data processor: Validate and transform data according to database schema
- Database writer: Write processed data to G5 database
- Flow control: Limit database write frequency to max 1 time per second
### Task 3: Data Processing
- Data validation: Ensure all fields are present and valid
- hotel_id value range check: Ensure hotel_id is within int2 range (-32768 to 32767), otherwise set to 0
- Batch processing: Process data in batches of 1000 records
- Flow control: Ensure database writes occur at most once per second
### Task 4: Error Handling and Logging
- Error handling: Handle and log errors gracefully
- Logging: Implement structured logging for all operations
### Task 5: Testing
- Unit tests: Test individual components
- Integration tests: Test the entire flow
### Task 6: Deployment
- Build process: Set up Vite build
- Deployment script: Create script for deployment
## Completion Criteria
- All core components are implemented
- Data is correctly processed and written to database
- Flow control is working as expected
- Error handling and logging are in place
- Service can be started and run without errors

View File

@@ -0,0 +1,42 @@
# RCU Upgrade Backend Spec
## Overview
This service is responsible for consuming data from Kafka, processing it, and writing it to the G5 database.
## Architecture
- **Kafka Consumer**: Consumes data from the blwlog4Nodejs-rcu-upgrade-topic topic
- **Data Processor**: Validates and transforms data
- **Database Writer**: Writes processed data to the G5 database
- **Flow Control**: Limits database write frequency to max 1 time per second
## Data Flow
1. Kafka consumer receives messages
2. Messages are parsed and validated
3. Data is transformed to match database schema
4. Data is batched and written to database
## Configuration
All configuration is managed through environment variables in .env file:
- Kafka connection settings
- Database connection settings
- Performance settings
## Data Validation
- hotel_id: Must be within int2 range (-32768 to 32767), otherwise set to 0
- All other fields are validated and default values are provided if missing
## Performance Requirements
- Batch processing: 1000 records per batch
- Database write frequency: max 1 time per second
## Error Handling
- All errors are logged
- Failed batches can be retried
## Monitoring
- Logs are generated for all operations
- Performance metrics can be collected
## Deployment
- Service is deployed as a Node.js application
- Can be run with npm start or as a system service

View File

@@ -0,0 +1,36 @@
# RCU Upgrade Backend Status
## Status: In Progress
## Implementation Progress
### Core Components
- [x] Kafka Consumer: Implemented
- [x] Data Processor: Implemented
- [x] Database Writer: Implemented
- [x] Flow Control: Implemented
### Features
- [x] Data validation
- [x] Batch processing
- [x] Error handling
- [x] Logging
### Configuration
- [x] Environment variables
- [x] Database connection
- [x] Kafka connection
### Testing
- [ ] Unit tests
- [ ] Integration tests
### Deployment
- [ ] Build process
- [ ] Deployment script
## Next Steps
1. Complete unit tests
2. Complete integration tests
3. Finalize deployment process
4. Perform performance testing

4159
bls-upgrade-backend/package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,28 @@
{
"name": "bls-upgrade-backend",
"version": "1.0.0",
"type": "module",
"private": true,
"scripts": {
"dev": "node src/index.js",
"dev:test-consumer": "node src/test-consumer.js",
"build": "vite build --ssr src/index.js --outDir dist",
"test": "vitest run",
"lint": "node scripts/lint.js",
"spec:lint": "openspec validate --specs --strict --no-interactive",
"spec:validate": "openspec validate --specs --no-interactive",
"start": "node dist/index.js"
},
"dependencies": {
"dotenv": "^16.4.5",
"kafka-node": "^5.0.0",
"node-cron": "^4.2.1",
"pg": "^8.11.5",
"redis": "^4.6.13",
"zod": "^4.3.6"
},
"devDependencies": {
"vite": "^5.4.0",
"vitest": "^4.0.18"
}
}

View File

@@ -0,0 +1,59 @@
import dotenv from 'dotenv';
dotenv.config();
/**
 * Runtime configuration assembled from environment variables (loaded by the
 * dotenv.config() call above), with development defaults as fallbacks.
 *
 * Fix: every parseInt now passes an explicit radix of 10 so values are never
 * accidentally parsed as hex/octal.
 * NOTE(review): real-looking credentials are hard-coded as defaults here and
 * in .env — consider removing them and failing fast when the vars are absent.
 */
export const config = {
  port: process.env.PORT || 3001,
  logLevel: process.env.LOG_LEVEL || 'info',
  kafka: {
    brokers: process.env.KAFKA_BROKERS || 'kafka.blv-oa.com:9092',
    clientId: process.env.KAFKA_CLIENT_ID || 'bls-upgrade-producer',
    groupId: process.env.KAFKA_GROUP_ID || 'bls-upgrade-consumer',
    testGroupId: process.env.KAFKA_TEST_GROUP_ID || '',
    // Comma-separated list of topics to subscribe to.
    topics: process.env.KAFKA_TOPICS || 'blwlog4Nodejs-rcu-upgrade-topic',
    fromOffset: process.env.KAFKA_FROM_OFFSET || 'latest',
    autoCommit: process.env.KAFKA_AUTO_COMMIT === 'true',
    autoCommitIntervalMs: parseInt(process.env.KAFKA_AUTO_COMMIT_INTERVAL_MS || '5000', 10),
    saslEnabled: process.env.KAFKA_SASL_ENABLED === 'true',
    saslMechanism: process.env.KAFKA_SASL_MECHANISM || 'plain',
    saslUsername: process.env.KAFKA_SASL_USERNAME || 'blwmomo',
    saslPassword: process.env.KAFKA_SASL_PASSWORD || 'blwmomo',
    sslEnabled: process.env.KAFKA_SSL_ENABLED === 'true',
    consumerInstances: parseInt(process.env.KAFKA_CONSUMER_INSTANCES || '1', 10),
    // Flow control: maximum number of unprocessed messages before pausing.
    maxInFlight: parseInt(process.env.KAFKA_MAX_IN_FLIGHT || '5000', 10),
    batchSize: parseInt(process.env.KAFKA_BATCH_SIZE || '1000', 10),
    batchTimeoutMs: parseInt(process.env.KAFKA_BATCH_TIMEOUT_MS || '20', 10),
    commitIntervalMs: parseInt(process.env.KAFKA_COMMIT_INTERVAL_MS || '200', 10),
    commitOnAttempt: process.env.KAFKA_COMMIT_ON_ATTEMPT === 'true',
    fetchMaxBytes: parseInt(process.env.KAFKA_FETCH_MAX_BYTES || '10485760', 10),
    fetchMaxWaitMs: parseInt(process.env.KAFKA_FETCH_MAX_WAIT_MS || '100', 10),
    fetchMinBytes: parseInt(process.env.KAFKA_FETCH_MIN_BYTES || '1', 10)
  },
  database: {
    // G5 PostgreSQL target for rcu_upgrade event rows.
    g5: {
      host: process.env.POSTGRES_HOST_G5 || '10.8.8.80',
      port: parseInt(process.env.POSTGRES_PORT_G5 || '5434', 10),
      database: process.env.POSTGRES_DATABASE_G5 || 'log_platform',
      user: process.env.POSTGRES_USER_G5 || 'log_admin',
      password: process.env.POSTGRES_PASSWORD_G5 || 'H3IkLUt8K!x',
      idleTimeoutMs: parseInt(process.env.POSTGRES_IDLE_TIMEOUT_MS_G5 || '30000', 10),
      maxConnections: parseInt(process.env.POSTGRES_MAX_CONNECTIONS_G5 || '2', 10),
      schema: process.env.DB_SCHEMA_G5 || 'rcu_upgrade',
      table: process.env.DB_TABLE_G5 || 'rcu_upgrade_events_g5'
    }
  },
  redis: {
    host: process.env.REDIS_HOST || '10.8.8.109',
    port: parseInt(process.env.REDIS_PORT || '6379', 10),
    password: process.env.REDIS_PASSWORD || '',
    db: parseInt(process.env.REDIS_DB || '15', 10),
    connectTimeoutMs: parseInt(process.env.REDIS_CONNECT_TIMEOUT_MS || '5000', 10),
    projectName: process.env.REDIS_PROJECT_NAME || 'bls-onoffline'
  },
  performance: {
    dbWriteIntervalMs: 1000, // throttle DB writes to at most once per second
    batchSize: 1000 // batch size for multi-row inserts
  }
};
export default config;

View File

@@ -0,0 +1,64 @@
import { Pool } from 'pg';
import config from '../config/config.js';
import logger from '../utils/logger.js';
/**
 * Owns named PostgreSQL connection pools (currently only "g5") and provides
 * a query helper with automatic client acquire/release.
 */
class DatabaseManager {
  constructor() {
    // name -> pg.Pool
    this.pools = {};
  }
  init() {
    // Initialize the G5 database connection pool.
    this.pools.g5 = new Pool({
      host: config.database.g5.host,
      port: config.database.g5.port,
      database: config.database.g5.database,
      user: config.database.g5.user,
      password: config.database.g5.password,
      max: config.database.g5.maxConnections,
      idleTimeoutMillis: config.database.g5.idleTimeoutMs
    });
    // Smoke-test one connection at startup (failure is only logged; the
    // service still starts and relies on later retries).
    this.pools.g5.connect((err, client, release) => {
      if (err) {
        logger.error('Error connecting to G5 database:', { error: err.message });
      } else {
        logger.info('Successfully connected to G5 database');
        release();
      }
    });
    // Surface background pool errors (e.g. an idle client losing its
    // connection) instead of letting them crash the process.
    this.pools.g5.on('error', (err) => {
      logger.error('Unexpected error on G5 database connection pool:', { error: err.message });
    });
  }
  // Returns the pool registered under `dbName`, or undefined.
  getPool(dbName) {
    return this.pools[dbName];
  }
  /**
   * Runs one query on the named pool; the client is always released, even
   * when the query throws.
   */
  async query(dbName, text, params) {
    const pool = this.getPool(dbName);
    if (!pool) {
      throw new Error(`Database pool ${dbName} not initialized`);
    }
    const client = await pool.connect();
    try {
      return await client.query(text, params);
    } finally {
      client.release();
    }
  }
  // Drains and closes every pool (used during graceful shutdown).
  async close() {
    await Promise.all(
      Object.values(this.pools).map(pool => pool.end())
    );
  }
}
// Shared singleton owning all PostgreSQL connection pools.
const databaseManager = new DatabaseManager();
export default databaseManager;

View File

@@ -0,0 +1,58 @@
import config from './config/config.js';
import databaseManager from './db/databaseManager.js';
import KafkaConsumer from './kafka/consumer.js';
import dataProcessor from './processor/index.js';
import logger from './utils/logger.js';
/**
 * Application wiring: initializes the database pools, routes Kafka messages
 * into the data processor, and performs graceful shutdown on SIGINT/SIGTERM.
 */
class App {
  constructor() {
    this.consumer = null;        // KafkaConsumer instance, created in init()
    this.isShuttingDown = false; // guards against double shutdown
  }
  async init() {
    // Initialize database connection pools.
    databaseManager.init();
    // Initialize the Kafka consumer and route every message through the
    // batching data processor.
    this.consumer = new KafkaConsumer();
    this.consumer.onMessage = (message) => dataProcessor.processMessage(message);
    this.consumer.init();
    // Listen for process termination signals to shut down gracefully.
    process.on('SIGINT', async () => {
      await this.shutdown();
    });
    process.on('SIGTERM', async () => {
      await this.shutdown();
    });
    logger.info(`BLS Upgrade Backend service started on port ${config.port}`);
  }
  async shutdown() {
    if (this.isShuttingDown) return;
    this.isShuttingDown = true;
    logger.info('Shutting down BLS Upgrade Backend service...');
    // Make sure all buffered rows are written out.
    await dataProcessor.flush();
    // Close the Kafka consumer.
    if (this.consumer) {
      await this.consumer.close();
    }
    // Close database connections.
    await databaseManager.close();
    logger.info('Service shutdown completed');
    process.exit(0);
  }
}
// Start the application. init() is async — without a rejection handler a
// startup failure (e.g. Kafka broker unreachable) would surface as an
// unhandled promise rejection instead of a clean, logged error exit.
const app = new App();
app.init().catch((err) => {
  logger.error('Failed to start BLS Upgrade Backend service:', { error: err.message });
  process.exit(1);
});

View File

@@ -0,0 +1,191 @@
import kafka from 'kafka-node';
import config from '../config/config.js';
import logger from '../utils/logger.js';
import { OffsetTracker } from './offsetTracker.js';
const { ConsumerGroup } = kafka;
class KafkaConsumer {
  /**
   * Kafka consumer with manual offset management and in-flight flow control.
   *
   * Messages are handed to `onMessage`; an offset is committed only after its
   * handler resolves, and only when it extends the contiguous prefix of
   * finished offsets (via OffsetTracker) — so a crash never skips an
   * unprocessed message (at-least-once delivery).
   */
  constructor() {
    this.consumer = null;
    // Tracks which offsets finished processing, per topic-partition.
    this.tracker = new OffsetTracker();
    // Latest commit-ready offset per topic-partition, awaiting a batched commit.
    this.pendingCommits = new Map();
    // Timer that batches commits every `commitIntervalMs`.
    this.commitTimer = null;
    // Number of messages currently being processed; drives pause/resume.
    this.inFlight = 0;
    this.maxInFlight = Number.isFinite(config.kafka.maxInFlight) ? config.kafka.maxInFlight : 5000;
    this.commitIntervalMs = Number.isFinite(config.kafka.commitIntervalMs) ? config.kafka.commitIntervalMs : 200;
  }

  /**
   * Creates the ConsumerGroup and wires all event handlers.
   */
  init() {
    const kafkaConfig = {
      kafkaHost: config.kafka.brokers,
      clientId: config.kafka.clientId,
      groupId: config.kafka.groupId,
      fromOffset: config.kafka.fromOffset,
      protocol: ['roundrobin'],
      outOfRangeOffset: 'latest',
      autoCommit: config.kafka.autoCommit,
      autoCommitIntervalMs: config.kafka.autoCommitIntervalMs,
      fetchMaxBytes: config.kafka.fetchMaxBytes,
      fetchMaxWaitMs: config.kafka.fetchMaxWaitMs,
      fetchMinBytes: config.kafka.fetchMinBytes,
      sasl: config.kafka.saslEnabled ? {
        mechanism: config.kafka.saslMechanism,
        username: config.kafka.saslUsername,
        password: config.kafka.saslPassword
      } : undefined,
      ssl: config.kafka.sslEnabled,
      connectTimeout: 10000,
      requestTimeout: 10000
    };
    logger.info('Initializing Kafka consumer with config:', {
      kafkaHost: config.kafka.brokers,
      clientId: config.kafka.clientId,
      groupId: config.kafka.groupId,
      topics: config.kafka.topics,
      fromOffset: config.kafka.fromOffset,
      saslEnabled: config.kafka.saslEnabled
    });
    // KAFKA_TOPICS is a comma-separated list; tolerate whitespace and empties.
    const topics = config.kafka.topics.split(',').map(topic => topic.trim()).filter(Boolean);
    this.consumer = new ConsumerGroup(kafkaConfig, topics);
    this.consumer.on('connect', () => {
      logger.info('Kafka consumer connected', {
        groupId: config.kafka.groupId,
        topics
      });
    });
    this.consumer.on('rebalancing', () => {
      logger.info('Kafka consumer rebalancing');
      // Partition ownership is about to change; drop all local commit state.
      // NOTE(review): messages still in flight during a rebalance will call
      // markDone on the cleared tracker — confirm this cannot commit a stale
      // offset on a partition reassigned to this instance.
      this.tracker.clear();
      this.pendingCommits.clear();
      if (this.commitTimer) {
        clearTimeout(this.commitTimer);
        this.commitTimer = null;
      }
    });
    this.consumer.on('rebalanced', () => {
      logger.info('Kafka consumer rebalanced');
    });
    this.consumer.on('message', (message) => {
      logger.debug('Received Kafka message:', { messageId: message.offset });
      this.inFlight += 1;
      this.tracker.add(message.topic, message.partition, message.offset);
      // Backpressure: stop fetching while too many messages are unacknowledged.
      if (this.inFlight >= this.maxInFlight && this.consumer.pause) {
        this.consumer.pause();
      }
      Promise.resolve(this.onMessage(message))
        .then(() => {
          if (!config.kafka.autoCommit) {
            // Only commit once the contiguous prefix of offsets has advanced.
            const commitOffset = this.tracker.markDone(message.topic, message.partition, message.offset);
            if (commitOffset !== null) {
              const key = `${message.topic}-${message.partition}`;
              this.pendingCommits.set(key, {
                topic: message.topic,
                partition: message.partition,
                offset: commitOffset,
                metadata: 'm'
              });
              this.scheduleCommitFlush();
            }
          }
        })
        .catch((err) => {
          // Processing failed: leave the offset uncommitted so the message is
          // redelivered after a restart/rebalance.
          logger.error('Kafka message handling failed, skip commit', {
            error: err.message,
            topic: message.topic,
            partition: message.partition,
            offset: message.offset
          });
        })
        .finally(() => {
          this.inFlight -= 1;
          if (this.inFlight < this.maxInFlight && this.consumer.resume) {
            this.consumer.resume();
          }
        });
    });
    this.consumer.on('error', (err) => {
      logger.error('Kafka consumer error:', { error: err.message, stack: err.stack });
    });
    this.consumer.on('offsetOutOfRange', (topic) => {
      logger.warn('Kafka offset out of range:', { topic: topic.topic, partition: topic.partition });
    });
    logger.info('Kafka consumer initialized');
    this.consumer.on('close', () => {
      logger.info('Kafka consumer closed');
    });
  }

  /**
   * Message handler hook; assigned by the application (see App.init) or
   * overridden by a subclass. The default is a no-op.
   */
  onMessage(message) {
    // Implemented by the owner of this consumer.
  }

  /**
   * Arms the commit timer if it is not already running; commits are batched
   * every `commitIntervalMs` to avoid one request per message.
   */
  scheduleCommitFlush() {
    if (this.commitTimer) return;
    this.commitTimer = setTimeout(() => {
      this.commitTimer = null;
      this.flushCommits();
    }, this.commitIntervalMs);
  }

  /**
   * Sends all pending offset commits in a single request. On failure the
   * offsets are re-queued (without clobbering newer ones) and a retry is
   * scheduled.
   */
  flushCommits() {
    if (!this.consumer || this.pendingCommits.size === 0) return;
    const batch = this.pendingCommits;
    this.pendingCommits = new Map();
    this.consumer.sendOffsetCommitRequest(Array.from(batch.values()), (err) => {
      if (err) {
        // Re-queue the failed commits, but never overwrite a newer offset
        // that was queued for the same partition while this request was in
        // flight — an unconditional set() here could roll a partition's
        // pending commit backwards.
        for (const [k, v] of batch.entries()) {
          if (!this.pendingCommits.has(k)) {
            this.pendingCommits.set(k, v);
          }
        }
        logger.error('Failed to commit Kafka offsets', {
          error: err.message,
          groupId: config.kafka.groupId,
          count: batch.size
        });
        // Ensure the re-queued commits are retried even if no further
        // messages arrive to trigger a new flush.
        this.scheduleCommitFlush();
        return;
      }
      logger.info('Kafka offsets committed', {
        groupId: config.kafka.groupId,
        count: batch.size,
        commits: Array.from(batch.values())
      });
    });
  }

  /**
   * Flushes any pending commits and force-closes the consumer. Resolves once
   * kafka-node acknowledges the close.
   */
  close() {
    return new Promise((resolve) => {
      if (this.commitTimer) {
        clearTimeout(this.commitTimer);
        this.commitTimer = null;
      }
      // Best-effort final commit; the close below does not wait for its ack.
      this.flushCommits();
      if (!this.consumer) {
        resolve();
        return;
      }
      this.consumer.close(true, () => {
        logger.info('Kafka consumer closed');
        resolve();
      });
    });
  }
}
export default KafkaConsumer;

View File

@@ -0,0 +1,53 @@
class OffsetTracker {
  /**
   * Tracks per-partition processing progress so that only a contiguous
   * prefix of finished offsets is ever committed.
   */
  constructor() {
    this.partitions = new Map();
  }

  /**
   * Registers an offset that was handed to the processor. The lowest offset
   * seen becomes the partition's initial commit watermark.
   */
  add(topic, partition, offset) {
    const key = `${topic}-${partition}`;
    let state = this.partitions.get(key);
    if (state === undefined) {
      state = { nextCommitOffset: null, done: new Set() };
      this.partitions.set(key, state);
    }
    const value = Number(offset);
    if (!Number.isFinite(value)) return;
    if (state.nextCommitOffset === null || value < state.nextCommitOffset) {
      state.nextCommitOffset = value;
    }
  }

  /**
   * Marks an offset as fully processed. Returns the new commit watermark
   * (one past the highest contiguous finished offset) when the watermark
   * advanced, otherwise null.
   */
  markDone(topic, partition, offset) {
    const state = this.partitions.get(`${topic}-${partition}`);
    if (!state) return null;
    const value = Number(offset);
    if (!Number.isFinite(value)) return null;
    state.done.add(value);
    if (state.nextCommitOffset === null) {
      state.nextCommitOffset = value;
    }
    let moved = false;
    // Consume the contiguous run of finished offsets starting at the watermark.
    while (state.nextCommitOffset !== null && state.done.delete(state.nextCommitOffset)) {
      state.nextCommitOffset += 1;
      moved = true;
    }
    return moved ? state.nextCommitOffset : null;
  }

  /**
   * Drops all tracked state (used when the consumer group rebalances).
   */
  clear() {
    this.partitions.clear();
  }
}
export { OffsetTracker };

View File

@@ -0,0 +1,234 @@
import config from '../config/config.js';
import databaseManager from '../db/databaseManager.js';
import logger from '../utils/logger.js';
class DataProcessor {
  /**
   * Buffers validated Kafka messages and writes them to the G5 PostgreSQL
   * database in batches, retrying transient connection errors.
   */
  constructor() {
    // Pending rows: { data, meta, resolve, reject } entries awaiting a batch write.
    this.batch = [];
    // Timestamp (ms) of the last batch write; drives the time-based flush trigger.
    this.lastWriteTime = 0;
    // Timer handle for the delayed flush of a partially-filled batch.
    this.flushTimer = null;
    // Delay between retries of a failed (but retryable) batch write.
    this.dbRetryDelayMs = 1000;
    // Maximum retry attempts for a transient DB error before giving up.
    this.dbRetryMaxAttempts = 3;
  }

  /**
   * Parses, validates and enqueues one Kafka message, resolving only after
   * the row is actually written to the database — or rejecting if the batch
   * write ultimately fails, so the caller can skip the offset commit.
   * @param {object} message kafka-node message ({ topic, partition, offset, value }).
   */
  async processMessage(message) {
    try {
      const rawValue = Buffer.isBuffer(message.value)
        ? message.value.toString('utf8')
        : String(message.value);
      const payload = JSON.parse(rawValue);
      const processedData = this.validateAndTransform(payload);
      const writeAck = new Promise((resolve, reject) => {
        this.batch.push({
          data: processedData,
          meta: {
            topic: message.topic,
            partition: message.partition,
            offset: message.offset
          },
          resolve,
          reject
        });
      });
      logger.debug('Message accepted into batch', {
        topic: message.topic,
        partition: message.partition,
        offset: message.offset,
        currentBatchSize: this.batch.length
      });
      this.scheduleFlush();
      // A synchronous flush may run here. If it fails, the same error also
      // rejects writeAck (this row is part of the failed batch), so swallow
      // it here and let `await writeAck` surface it exactly once — the
      // previous code rethrew before awaiting writeAck, leaving writeAck's
      // rejection unhandled (fatal in modern Node).
      try {
        await this.checkAndWriteBatch();
      } catch {
        // Surfaced through writeAck below.
      }
      await writeAck;
    } catch (error) {
      logger.error('Error processing message:', { error: error.message });
      throw error;
    }
  }

  /**
   * Normalizes one raw payload into the G5 row shape. Strings are stripped of
   * NUL bytes (PostgreSQL text columns reject \0); numeric fields are coerced
   * with explicit radix-10 parsing.
   */
  validateAndTransform(data) {
    const cleanString = (str) => {
      if (str === null || str === undefined) return null;
      return String(str).replace(/\0/g, '');
    };
    // NOTE(review): `|| null` maps a parsed 0 to null for partition /
    // file_type / upgrade_state — confirm upstream never sends 0 as a
    // meaningful value for these fields.
    const processed = {
      ts_ms: typeof data.ts_ms === 'number' ? data.ts_ms : (parseInt(data.ts_ms, 10) || 0),
      hotel_id: this.validateHotelId(data.hotel_id),
      room_id: cleanString(data.room_id) || '',
      device_id: cleanString(data.device_id) || '',
      is_send: parseInt(data.is_send, 10) || 0,
      udp_raw: data.udp_raw ? cleanString(Buffer.from(data.udp_raw).toString()) : null,
      extra: data.extra ? JSON.stringify(data.extra) : null,
      ip: cleanString(data.remote_endpoint) || '',
      md5: cleanString(data.md5) || '',
      partition: parseInt(data.partition, 10) || null,
      file_type: parseInt(data.file_type, 10) || null,
      file_path: cleanString(data.file_path) || '',
      upgrade_state: parseInt(data.upgrade_state, 10) || null,
      app_version: cleanString(data.app_version) || ''
    };
    return processed;
  }

  /**
   * Coerces hotel_id to an int and clamps invalid/out-of-range values to 0.
   * The column is int2, so the valid range is -32768..32767.
   */
  validateHotelId(hotelId) {
    const id = parseInt(hotelId, 10);
    if (isNaN(id) || id < -32768 || id > 32767) {
      return 0;
    }
    return id;
  }

  /**
   * Flushes the batch when it reached the configured size, or when the
   * configured write interval has elapsed since the last write.
   */
  async checkAndWriteBatch() {
    const now = Date.now();
    const timeSinceLastWrite = now - this.lastWriteTime;
    if (this.batch.length >= config.performance.batchSize || timeSinceLastWrite >= config.performance.dbWriteIntervalMs) {
      await this.writeBatch();
    }
  }

  /**
   * Arms a one-shot timer so a partially-filled batch is still written after
   * the configured interval even if no further messages arrive.
   */
  scheduleFlush() {
    if (this.flushTimer) return;
    this.flushTimer = setTimeout(() => {
      this.flushTimer = null;
      this.writeBatch().catch((error) => {
        logger.error('Error in scheduled batch flush:', { error: error.message });
      });
    }, config.performance.dbWriteIntervalMs);
  }

  /**
   * Classifies a DB error as transient (connection-level) and worth retrying,
   * by pg/libc error code or by message heuristics.
   */
  isRetryableDbError(err) {
    const code = err?.code;
    if (typeof code === 'string') {
      const retryableCodes = new Set([
        'ECONNREFUSED',
        'ECONNRESET',
        'EPIPE',
        'ETIMEDOUT',
        'ENOTFOUND',
        'EHOSTUNREACH',
        'ENETUNREACH',
        '57P03',
        '08006',
        '08001',
        '08000',
        '08003'
      ]);
      if (retryableCodes.has(code)) return true;
    }
    const message = typeof err?.message === 'string' ? err.message.toLowerCase() : '';
    return (
      message.includes('connection timeout') ||
      message.includes('connection terminated') ||
      message.includes('connection refused') ||
      message.includes('econnrefused') ||
      message.includes('econnreset') ||
      message.includes('etimedout') ||
      message.includes('could not connect') ||
      message.includes('the database system is starting up')
    );
  }

  /**
   * Executes a query against the G5 pool, retrying transient errors up to
   * `dbRetryMaxAttempts` times with a fixed delay. Non-retryable errors and
   * exhausted retries are rethrown.
   */
  async executeQueryWithRetry(query, params) {
    let attempt = 0;
    while (true) {
      try {
        return await databaseManager.query('g5', query, params);
      } catch (error) {
        attempt += 1;
        if (!this.isRetryableDbError(error) || attempt > this.dbRetryMaxAttempts) {
          throw error;
        }
        logger.warn('Retrying G5 batch write after transient DB error', {
          attempt,
          maxAttempts: this.dbRetryMaxAttempts,
          error: error.message
        });
        await new Promise(resolve => setTimeout(resolve, this.dbRetryDelayMs));
      }
    }
  }

  /**
   * Writes all currently-buffered rows in one multi-row INSERT, then settles
   * every row's writeAck promise (resolve on success, reject on failure).
   * Rethrows on failure so synchronous callers see the error too.
   */
  async writeBatch() {
    if (this.batch.length === 0) return;
    if (this.flushTimer) {
      clearTimeout(this.flushTimer);
      this.flushTimer = null;
    }
    // Take ownership of the current batch so new messages accumulate separately.
    const batch = [...this.batch];
    this.batch = [];
    this.lastWriteTime = Date.now();
    try {
      logger.info('Flushing batch to G5 database', {
        batchSize: batch.length,
        first: batch[0]?.meta,
        last: batch[batch.length - 1]?.meta
      });
      // Build the row tuples in column order.
      const values = batch.map(item => [
        item.data.ts_ms,
        item.data.hotel_id,
        item.data.room_id,
        item.data.device_id,
        item.data.is_send,
        item.data.udp_raw,
        item.data.extra,
        item.data.ip,
        item.data.md5,
        item.data.partition,
        item.data.file_type,
        item.data.file_path,
        item.data.upgrade_state,
        item.data.app_version
      ]);
      // One parameterized multi-row INSERT: ($1..$14), ($15..$28), ...
      const query = `
        INSERT INTO ${config.database.g5.schema}.${config.database.g5.table}
        (ts_ms, hotel_id, room_id, device_id, is_send, udp_raw, extra, ip, md5, partition, file_type, file_path, upgrade_state, app_version)
        VALUES ${values.map((_, i) => `($${i * 14 + 1}, $${i * 14 + 2}, $${i * 14 + 3}, $${i * 14 + 4}, $${i * 14 + 5}, $${i * 14 + 6}, $${i * 14 + 7}, $${i * 14 + 8}, $${i * 14 + 9}, $${i * 14 + 10}, $${i * 14 + 11}, $${i * 14 + 12}, $${i * 14 + 13}, $${i * 14 + 14})`).join(', ')}
      `;
      // Flatten the tuples into the positional parameter list.
      const params = values.flat();
      await this.executeQueryWithRetry(query, params);
      logger.info('Batch write success', {
        batchSize: batch.length,
        first: batch[0]?.meta,
        last: batch[batch.length - 1]?.meta
      });
      batch.forEach(item => item.resolve());
    } catch (error) {
      logger.error('Error writing batch to database:', {
        error: error.message,
        batchSize: batch.length,
        first: batch[0]?.meta,
        last: batch[batch.length - 1]?.meta
      });
      batch.forEach(item => item.reject(error));
      throw error;
    }
  }

  /**
   * Forces a final write of any buffered rows (used during shutdown).
   */
  async flush() {
    if (this.batch.length > 0) {
      await this.writeBatch();
    }
  }
}
const dataProcessor = new DataProcessor();
export default dataProcessor;

View File

@@ -0,0 +1,13 @@
import dotenv from 'dotenv';

dotenv.config();

// Run the service under a disposable consumer group so test runs never
// disturb the production group's committed offsets; default to reading the
// topic from the beginning.
const fallbackGroupId = process.env.KAFKA_GROUP_ID || 'bls-upgrade-consumer';
process.env.KAFKA_GROUP_ID =
  process.env.KAFKA_TEST_GROUP_ID || `${fallbackGroupId}-test-${Date.now()}`;
process.env.KAFKA_FROM_OFFSET = process.env.KAFKA_FROM_OFFSET || 'earliest';
console.log(`[test-consumer] groupId=${process.env.KAFKA_GROUP_ID}, fromOffset=${process.env.KAFKA_FROM_OFFSET}`);
await import('./index.js');

View File

@@ -0,0 +1,44 @@
import config from '../config/config.js';
class Logger {
  /**
   * Minimal JSON-lines logger: one JSON object per line on stdout, filtered
   * by the level configured in config.logLevel.
   */
  constructor() {
    this.logLevel = config.logLevel;
  }

  /**
   * Emits one record when `level` is at or above the configured threshold.
   * Record shape: { timestamp, level, message, data }.
   */
  log(level, message, data = {}) {
    const severity = ['debug', 'info', 'warn', 'error'];
    if (severity.indexOf(level) < severity.indexOf(this.logLevel)) {
      return;
    }
    const record = {
      timestamp: new Date().toISOString(),
      level,
      message,
      data
    };
    console.log(JSON.stringify(record));
  }

  debug(message, data = {}) {
    this.log('debug', message, data);
  }

  info(message, data = {}) {
    this.log('info', message, data);
  }

  warn(message, data = {}) {
    this.log('warn', message, data);
  }

  error(message, data = {}) {
    this.log('error', message, data);
  }
}
// Module-level singleton; the level is fixed at construction from config.logLevel.
const logger = new Logger();
export default logger;