feat: 添加 Kafka 消费者和消息处理功能
- 新增 Kafka 消费者实现,支持消息处理和错误处理。 - 实现 OffsetTracker 类,用于跟踪消息偏移量。 - 新增消息解析和数据库插入逻辑,支持从 Kafka 消息构建数据库行。 - 实现 UDP 数据包解析功能,支持不同类型的 UDP 消息。 - 新增 Redis 错误队列处理,支持错误重试机制。 - 实现 Redis 客户端和集成类,支持日志记录和心跳机制。 - 添加 Zod 验证模式,确保 Kafka 消息有效性。 - 新增日志记录和指标收集工具,支持系统监控。 - 添加 UUID 生成工具,支持唯一标识符生成。 - 编写处理器逻辑的单元测试,确保功能正确性。 - 配置 Vite 构建工具,支持 Node.js 环境下的构建。
This commit is contained in:
1
.gitignore
vendored
Normal file
1
.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
|||||||
|
/bls-register-backend/node_modules
|
||||||
51
bls-register-backend/.env
Normal file
51
bls-register-backend/.env
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
KAFKA_BROKERS=kafka.blv-oa.com:9092
|
||||||
|
KAFKA_CLIENT_ID=bls-register-producer
|
||||||
|
KAFKA_GROUP_ID=bls-register-consumer
|
||||||
|
KAFKA_TOPICS=blwlog4Nodejs-rcu-register-topic
|
||||||
|
KAFKA_AUTO_COMMIT=false
|
||||||
|
KAFKA_AUTO_COMMIT_INTERVAL_MS=5000
|
||||||
|
KAFKA_SASL_ENABLED=true
|
||||||
|
KAFKA_SASL_MECHANISM=plain
|
||||||
|
KAFKA_SASL_USERNAME=blwmomo
|
||||||
|
KAFKA_SASL_PASSWORD=blwmomo
|
||||||
|
KAFKA_SSL_ENABLED=false
|
||||||
|
KAFKA_CONSUMER_INSTANCES=3
|
||||||
|
KAFKA_MAX_IN_FLIGHT=5000
|
||||||
|
KAFKA_BATCH_SIZE=1000
|
||||||
|
KAFKA_BATCH_TIMEOUT_MS=20
|
||||||
|
KAFKA_COMMIT_INTERVAL_MS=200
|
||||||
|
KAFKA_COMMIT_ON_ATTEMPT=true
|
||||||
|
KAFKA_FETCH_MAX_BYTES=10485760
|
||||||
|
KAFKA_FETCH_MAX_WAIT_MS=100
|
||||||
|
KAFKA_FETCH_MIN_BYTES=1
|
||||||
|
|
||||||
|
POSTGRES_HOST=10.8.8.109
|
||||||
|
POSTGRES_PORT=5433
|
||||||
|
POSTGRES_DATABASE=log_platform
|
||||||
|
POSTGRES_USER=log_admin
|
||||||
|
POSTGRES_PASSWORD=YourActualStrongPasswordForPostgres!
|
||||||
|
POSTGRES_MAX_CONNECTIONS=6
|
||||||
|
POSTGRES_IDLE_TIMEOUT_MS=30000
|
||||||
|
DB_SCHEMA=rcu_info
|
||||||
|
DB_TABLE=rcu_info_events_g5
|
||||||
|
|
||||||
|
# =========================
|
||||||
|
# PostgreSQL 配置 G5库专用
|
||||||
|
# =========================
|
||||||
|
POSTGRES_HOST_G5=10.8.8.80
|
||||||
|
POSTGRES_PORT_G5=5434
|
||||||
|
POSTGRES_DATABASE_G5=log_platform
|
||||||
|
POSTGRES_USER_G5=log_admin
|
||||||
|
POSTGRES_PASSWORD_G5=H3IkLUt8K!x
|
||||||
|
POSTGRES_IDLE_TIMEOUT_MS_G5=30000
|
||||||
|
|
||||||
|
PORT=3001
|
||||||
|
LOG_LEVEL=info
|
||||||
|
|
||||||
|
# Redis connection
|
||||||
|
REDIS_HOST=10.8.8.109
|
||||||
|
REDIS_PORT=6379
|
||||||
|
REDIS_PASSWORD=
|
||||||
|
REDIS_DB=15
|
||||||
|
REDIS_CONNECT_TIMEOUT_MS=5000
|
||||||
|
REDIS_PROJECT_NAME=bls-onoffline
|
||||||
31
bls-register-backend/.env.example
Normal file
31
bls-register-backend/.env.example
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
# Server Configuration
|
||||||
|
PORT=3001
|
||||||
|
NODE_ENV=development
|
||||||
|
|
||||||
|
# Kafka Configuration
|
||||||
|
KAFKA_BROKERS=localhost:9092
|
||||||
|
KAFKA_TOPIC=blwlog4Nodejs-rcu-onoffline-topic
|
||||||
|
KAFKA_GROUP_ID=bls-onoffline-group
|
||||||
|
KAFKA_CLIENT_ID=bls-onoffline-client
|
||||||
|
KAFKA_CONSUMER_INSTANCES=1
|
||||||
|
# KAFKA_SASL_USERNAME=
|
||||||
|
# KAFKA_SASL_PASSWORD=
|
||||||
|
# KAFKA_SASL_MECHANISM=plain
|
||||||
|
|
||||||
|
# Database Configuration (PostgreSQL)
|
||||||
|
DB_HOST=localhost
|
||||||
|
DB_PORT=5432
|
||||||
|
DB_USER=postgres
|
||||||
|
DB_PASSWORD=password
|
||||||
|
DB_DATABASE=log_platform
|
||||||
|
DB_SCHEMA=public
|
||||||
|
DB_TABLE=onoffline_record
|
||||||
|
DB_MAX_CONNECTIONS=10
|
||||||
|
|
||||||
|
# Redis Configuration
|
||||||
|
REDIS_HOST=localhost
|
||||||
|
REDIS_PORT=6379
|
||||||
|
REDIS_PASSWORD=
|
||||||
|
REDIS_DB=0
|
||||||
|
REDIS_PROJECT_NAME=bls-onoffline
|
||||||
|
REDIS_API_BASE_URL=http://localhost:3001
|
||||||
18
bls-register-backend/AGENTS.md
Normal file
18
bls-register-backend/AGENTS.md
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
<!-- OPENSPEC:START -->
|
||||||
|
# OpenSpec Instructions
|
||||||
|
|
||||||
|
These instructions are for AI assistants working in this project.
|
||||||
|
|
||||||
|
Always open `@/openspec/AGENTS.md` when the request:
|
||||||
|
- Mentions planning or proposals (words like proposal, spec, change, plan)
|
||||||
|
- Introduces new capabilities, breaking changes, architecture shifts, or big performance/security work
|
||||||
|
- Sounds ambiguous and you need the authoritative spec before coding
|
||||||
|
|
||||||
|
Use `@/openspec/AGENTS.md` to learn:
|
||||||
|
- How to create and apply change proposals
|
||||||
|
- Spec format and conventions
|
||||||
|
- Project structure and guidelines
|
||||||
|
|
||||||
|
Keep this managed block so 'openspec update' can refresh the instructions.
|
||||||
|
|
||||||
|
<!-- OPENSPEC:END -->
|
||||||
26
bls-register-backend/README.md
Normal file
26
bls-register-backend/README.md
Normal file
@@ -0,0 +1,26 @@
|
|||||||
|
bls-register-backend
|
||||||
|
|
||||||
|
项目功能
|
||||||
|
- 从 Kafka 主题 blwlog4Nodejs-rcu-register-topic 消费 Register 数据。
|
||||||
|
- 对字段做类型转换和值域保护(hotel_id 超出 int2 时写 0)。
|
||||||
|
- 每 3 秒执行一次批量写库。
|
||||||
|
- 双写 G5 库:
|
||||||
|
- rcu_info.rcu_info_events_g5 全量事件入库。
|
||||||
|
- room_status.room_status_moment_g5 仅更新 app_version、launcher_version、config_version、upgrade_ts_ms、register_ts_ms。
|
||||||
|
|
||||||
|
安装与运行
|
||||||
|
- Node.js 22+
|
||||||
|
- npm install
|
||||||
|
- npm run dev
|
||||||
|
|
||||||
|
构建与测试
|
||||||
|
- npm run build
|
||||||
|
- npm run test
|
||||||
|
- npm run lint
|
||||||
|
|
||||||
|
OpenSpec
|
||||||
|
- npm run spec:lint
|
||||||
|
- npm run spec:validate
|
||||||
|
|
||||||
|
环境变量
|
||||||
|
- 使用现有 .env(当前仓库已配置可用)。
|
||||||
1017
bls-register-backend/dist/index.js
vendored
Normal file
1017
bls-register-backend/dist/index.js
vendored
Normal file
File diff suppressed because it is too large
Load Diff
22
bls-register-backend/ecosystem.config.cjs
Normal file
22
bls-register-backend/ecosystem.config.cjs
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
module.exports = {
|
||||||
|
apps: [{
|
||||||
|
name: 'bls-register',
|
||||||
|
script: 'dist/index.js',
|
||||||
|
instances: 1,
|
||||||
|
exec_mode: 'fork',
|
||||||
|
autorestart: true,
|
||||||
|
watch: false,
|
||||||
|
max_memory_restart: '1G',
|
||||||
|
env_file: '.env',
|
||||||
|
env: {
|
||||||
|
NODE_ENV: 'production',
|
||||||
|
PORT: 3001
|
||||||
|
},
|
||||||
|
error_file: './logs/error.log',
|
||||||
|
out_file: './logs/out.log',
|
||||||
|
log_date_format: 'YYYY-MM-DD HH:mm:ss Z',
|
||||||
|
merge_logs: true,
|
||||||
|
kill_timeout: 5000,
|
||||||
|
time: true
|
||||||
|
}]
|
||||||
|
};
|
||||||
456
bls-register-backend/openspec/AGENTS.md
Normal file
456
bls-register-backend/openspec/AGENTS.md
Normal file
@@ -0,0 +1,456 @@
|
|||||||
|
# OpenSpec Instructions
|
||||||
|
|
||||||
|
Instructions for AI coding assistants using OpenSpec for spec-driven development.
|
||||||
|
|
||||||
|
## TL;DR Quick Checklist
|
||||||
|
|
||||||
|
- Search existing work: `openspec spec list --long`, `openspec list` (use `rg` only for full-text search)
|
||||||
|
- Decide scope: new capability vs modify existing capability
|
||||||
|
- Pick a unique `change-id`: kebab-case, verb-led (`add-`, `update-`, `remove-`, `refactor-`)
|
||||||
|
- Scaffold: `proposal.md`, `tasks.md`, `design.md` (only if needed), and delta specs per affected capability
|
||||||
|
- Write deltas: use `## ADDED|MODIFIED|REMOVED|RENAMED Requirements`; include at least one `#### Scenario:` per requirement
|
||||||
|
- Validate: `openspec validate [change-id] --strict` and fix issues
|
||||||
|
- Request approval: Do not start implementation until proposal is approved
|
||||||
|
|
||||||
|
## Three-Stage Workflow
|
||||||
|
|
||||||
|
### Stage 1: Creating Changes
|
||||||
|
Create proposal when you need to:
|
||||||
|
- Add features or functionality
|
||||||
|
- Make breaking changes (API, schema)
|
||||||
|
- Change architecture or patterns
|
||||||
|
- Optimize performance (changes behavior)
|
||||||
|
- Update security patterns
|
||||||
|
|
||||||
|
Triggers (examples):
|
||||||
|
- "Help me create a change proposal"
|
||||||
|
- "Help me plan a change"
|
||||||
|
- "Help me create a proposal"
|
||||||
|
- "I want to create a spec proposal"
|
||||||
|
- "I want to create a spec"
|
||||||
|
|
||||||
|
Loose matching guidance:
|
||||||
|
- Contains one of: `proposal`, `change`, `spec`
|
||||||
|
- With one of: `create`, `plan`, `make`, `start`, `help`
|
||||||
|
|
||||||
|
Skip proposal for:
|
||||||
|
- Bug fixes (restore intended behavior)
|
||||||
|
- Typos, formatting, comments
|
||||||
|
- Dependency updates (non-breaking)
|
||||||
|
- Configuration changes
|
||||||
|
- Tests for existing behavior
|
||||||
|
|
||||||
|
**Workflow**
|
||||||
|
1. Review `openspec/project.md`, `openspec list`, and `openspec list --specs` to understand current context.
|
||||||
|
2. Choose a unique verb-led `change-id` and scaffold `proposal.md`, `tasks.md`, optional `design.md`, and spec deltas under `openspec/changes/<id>/`.
|
||||||
|
3. Draft spec deltas using `## ADDED|MODIFIED|REMOVED Requirements` with at least one `#### Scenario:` per requirement.
|
||||||
|
4. Run `openspec validate <id> --strict` and resolve any issues before sharing the proposal.
|
||||||
|
|
||||||
|
### Stage 2: Implementing Changes
|
||||||
|
Track these steps as TODOs and complete them one by one.
|
||||||
|
1. **Read proposal.md** - Understand what's being built
|
||||||
|
2. **Read design.md** (if exists) - Review technical decisions
|
||||||
|
3. **Read tasks.md** - Get implementation checklist
|
||||||
|
4. **Implement tasks sequentially** - Complete in order
|
||||||
|
5. **Confirm completion** - Ensure every item in `tasks.md` is finished before updating statuses
|
||||||
|
6. **Update checklist** - After all work is done, set every task to `- [x]` so the list reflects reality
|
||||||
|
7. **Approval gate** - Do not start implementation until the proposal is reviewed and approved
|
||||||
|
|
||||||
|
### Stage 3: Archiving Changes
|
||||||
|
After deployment, create separate PR to:
|
||||||
|
- Move `changes/[name]/` → `changes/archive/YYYY-MM-DD-[name]/`
|
||||||
|
- Update `specs/` if capabilities changed
|
||||||
|
- Use `openspec archive <change-id> --skip-specs --yes` for tooling-only changes (always pass the change ID explicitly)
|
||||||
|
- Run `openspec validate --strict` to confirm the archived change passes checks
|
||||||
|
|
||||||
|
## Before Any Task
|
||||||
|
|
||||||
|
**Context Checklist:**
|
||||||
|
- [ ] Read relevant specs in `specs/[capability]/spec.md`
|
||||||
|
- [ ] Check pending changes in `changes/` for conflicts
|
||||||
|
- [ ] Read `openspec/project.md` for conventions
|
||||||
|
- [ ] Run `openspec list` to see active changes
|
||||||
|
- [ ] Run `openspec list --specs` to see existing capabilities
|
||||||
|
|
||||||
|
**Before Creating Specs:**
|
||||||
|
- Always check if capability already exists
|
||||||
|
- Prefer modifying existing specs over creating duplicates
|
||||||
|
- Use `openspec show [spec]` to review current state
|
||||||
|
- If request is ambiguous, ask 1–2 clarifying questions before scaffolding
|
||||||
|
|
||||||
|
### Search Guidance
|
||||||
|
- Enumerate specs: `openspec spec list --long` (or `--json` for scripts)
|
||||||
|
- Enumerate changes: `openspec list` (or `openspec change list --json` - deprecated but available)
|
||||||
|
- Show details:
|
||||||
|
- Spec: `openspec show <spec-id> --type spec` (use `--json` for filters)
|
||||||
|
- Change: `openspec show <change-id> --json --deltas-only`
|
||||||
|
- Full-text search (use ripgrep): `rg -n "Requirement:|Scenario:" openspec/specs`
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
### CLI Commands
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Essential commands
|
||||||
|
openspec list # List active changes
|
||||||
|
openspec list --specs # List specifications
|
||||||
|
openspec show [item] # Display change or spec
|
||||||
|
openspec validate [item] # Validate changes or specs
|
||||||
|
openspec archive <change-id> [--yes|-y] # Archive after deployment (add --yes for non-interactive runs)
|
||||||
|
|
||||||
|
# Project management
|
||||||
|
openspec init [path] # Initialize OpenSpec
|
||||||
|
openspec update [path] # Update instruction files
|
||||||
|
|
||||||
|
# Interactive mode
|
||||||
|
openspec show # Prompts for selection
|
||||||
|
openspec validate # Bulk validation mode
|
||||||
|
|
||||||
|
# Debugging
|
||||||
|
openspec show [change] --json --deltas-only
|
||||||
|
openspec validate [change] --strict
|
||||||
|
```
|
||||||
|
|
||||||
|
### Command Flags
|
||||||
|
|
||||||
|
- `--json` - Machine-readable output
|
||||||
|
- `--type change|spec` - Disambiguate items
|
||||||
|
- `--strict` - Comprehensive validation
|
||||||
|
- `--no-interactive` - Disable prompts
|
||||||
|
- `--skip-specs` - Archive without spec updates
|
||||||
|
- `--yes`/`-y` - Skip confirmation prompts (non-interactive archive)
|
||||||
|
|
||||||
|
## Directory Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
openspec/
|
||||||
|
├── project.md # Project conventions
|
||||||
|
├── specs/ # Current truth - what IS built
|
||||||
|
│ └── [capability]/ # Single focused capability
|
||||||
|
│ ├── spec.md # Requirements and scenarios
|
||||||
|
│ └── design.md # Technical patterns
|
||||||
|
├── changes/ # Proposals - what SHOULD change
|
||||||
|
│ ├── [change-name]/
|
||||||
|
│ │ ├── proposal.md # Why, what, impact
|
||||||
|
│ │ ├── tasks.md # Implementation checklist
|
||||||
|
│ │ ├── design.md # Technical decisions (optional; see criteria)
|
||||||
|
│ │ └── specs/ # Delta changes
|
||||||
|
│ │ └── [capability]/
|
||||||
|
│ │ └── spec.md # ADDED/MODIFIED/REMOVED
|
||||||
|
│ └── archive/ # Completed changes
|
||||||
|
```
|
||||||
|
|
||||||
|
## Creating Change Proposals
|
||||||
|
|
||||||
|
### Decision Tree
|
||||||
|
|
||||||
|
```
|
||||||
|
New request?
|
||||||
|
├─ Bug fix restoring spec behavior? → Fix directly
|
||||||
|
├─ Typo/format/comment? → Fix directly
|
||||||
|
├─ New feature/capability? → Create proposal
|
||||||
|
├─ Breaking change? → Create proposal
|
||||||
|
├─ Architecture change? → Create proposal
|
||||||
|
└─ Unclear? → Create proposal (safer)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Proposal Structure
|
||||||
|
|
||||||
|
1. **Create directory:** `changes/[change-id]/` (kebab-case, verb-led, unique)
|
||||||
|
|
||||||
|
2. **Write proposal.md:**
|
||||||
|
```markdown
|
||||||
|
# Change: [Brief description of change]
|
||||||
|
|
||||||
|
## Why
|
||||||
|
[1-2 sentences on problem/opportunity]
|
||||||
|
|
||||||
|
## What Changes
|
||||||
|
- [Bullet list of changes]
|
||||||
|
- [Mark breaking changes with **BREAKING**]
|
||||||
|
|
||||||
|
## Impact
|
||||||
|
- Affected specs: [list capabilities]
|
||||||
|
- Affected code: [key files/systems]
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Create spec deltas:** `specs/[capability]/spec.md`
|
||||||
|
```markdown
|
||||||
|
## ADDED Requirements
|
||||||
|
### Requirement: New Feature
|
||||||
|
The system SHALL provide...
|
||||||
|
|
||||||
|
#### Scenario: Success case
|
||||||
|
- **WHEN** user performs action
|
||||||
|
- **THEN** expected result
|
||||||
|
|
||||||
|
## MODIFIED Requirements
|
||||||
|
### Requirement: Existing Feature
|
||||||
|
[Complete modified requirement]
|
||||||
|
|
||||||
|
## REMOVED Requirements
|
||||||
|
### Requirement: Old Feature
|
||||||
|
**Reason**: [Why removing]
|
||||||
|
**Migration**: [How to handle]
|
||||||
|
```
|
||||||
|
If multiple capabilities are affected, create multiple delta files under `changes/[change-id]/specs/<capability>/spec.md`—one per capability.
|
||||||
|
|
||||||
|
4. **Create tasks.md:**
|
||||||
|
```markdown
|
||||||
|
## 1. Implementation
|
||||||
|
- [ ] 1.1 Create database schema
|
||||||
|
- [ ] 1.2 Implement API endpoint
|
||||||
|
- [ ] 1.3 Add frontend component
|
||||||
|
- [ ] 1.4 Write tests
|
||||||
|
```
|
||||||
|
|
||||||
|
5. **Create design.md when needed:**
|
||||||
|
Create `design.md` if any of the following apply; otherwise omit it:
|
||||||
|
- Cross-cutting change (multiple services/modules) or a new architectural pattern
|
||||||
|
- New external dependency or significant data model changes
|
||||||
|
- Security, performance, or migration complexity
|
||||||
|
- Ambiguity that benefits from technical decisions before coding
|
||||||
|
|
||||||
|
Minimal `design.md` skeleton:
|
||||||
|
```markdown
|
||||||
|
## Context
|
||||||
|
[Background, constraints, stakeholders]
|
||||||
|
|
||||||
|
## Goals / Non-Goals
|
||||||
|
- Goals: [...]
|
||||||
|
- Non-Goals: [...]
|
||||||
|
|
||||||
|
## Decisions
|
||||||
|
- Decision: [What and why]
|
||||||
|
- Alternatives considered: [Options + rationale]
|
||||||
|
|
||||||
|
## Risks / Trade-offs
|
||||||
|
- [Risk] → Mitigation
|
||||||
|
|
||||||
|
## Migration Plan
|
||||||
|
[Steps, rollback]
|
||||||
|
|
||||||
|
## Open Questions
|
||||||
|
- [...]
|
||||||
|
```
|
||||||
|
|
||||||
|
## Spec File Format
|
||||||
|
|
||||||
|
### Critical: Scenario Formatting
|
||||||
|
|
||||||
|
**CORRECT** (use #### headers):
|
||||||
|
```markdown
|
||||||
|
#### Scenario: User login success
|
||||||
|
- **WHEN** valid credentials provided
|
||||||
|
- **THEN** return JWT token
|
||||||
|
```
|
||||||
|
|
||||||
|
**WRONG** (don't use bullets or bold):
|
||||||
|
```markdown
|
||||||
|
- **Scenario: User login** ❌
|
||||||
|
**Scenario**: User login ❌
|
||||||
|
### Scenario: User login ❌
|
||||||
|
```
|
||||||
|
|
||||||
|
Every requirement MUST have at least one scenario.
|
||||||
|
|
||||||
|
### Requirement Wording
|
||||||
|
- Use SHALL/MUST for normative requirements (avoid should/may unless intentionally non-normative)
|
||||||
|
|
||||||
|
### Delta Operations
|
||||||
|
|
||||||
|
- `## ADDED Requirements` - New capabilities
|
||||||
|
- `## MODIFIED Requirements` - Changed behavior
|
||||||
|
- `## REMOVED Requirements` - Deprecated features
|
||||||
|
- `## RENAMED Requirements` - Name changes
|
||||||
|
|
||||||
|
Headers matched with `trim(header)` - whitespace ignored.
|
||||||
|
|
||||||
|
#### When to use ADDED vs MODIFIED
|
||||||
|
- ADDED: Introduces a new capability or sub-capability that can stand alone as a requirement. Prefer ADDED when the change is orthogonal (e.g., adding "Slash Command Configuration") rather than altering the semantics of an existing requirement.
|
||||||
|
- MODIFIED: Changes the behavior, scope, or acceptance criteria of an existing requirement. Always paste the full, updated requirement content (header + all scenarios). The archiver will replace the entire requirement with what you provide here; partial deltas will drop previous details.
|
||||||
|
- RENAMED: Use when only the name changes. If you also change behavior, use RENAMED (name) plus MODIFIED (content) referencing the new name.
|
||||||
|
|
||||||
|
Common pitfall: Using MODIFIED to add a new concern without including the previous text. This causes loss of detail at archive time. If you aren’t explicitly changing the existing requirement, add a new requirement under ADDED instead.
|
||||||
|
|
||||||
|
Authoring a MODIFIED requirement correctly:
|
||||||
|
1) Locate the existing requirement in `openspec/specs/<capability>/spec.md`.
|
||||||
|
2) Copy the entire requirement block (from `### Requirement: ...` through its scenarios).
|
||||||
|
3) Paste it under `## MODIFIED Requirements` and edit to reflect the new behavior.
|
||||||
|
4) Ensure the header text matches exactly (whitespace-insensitive) and keep at least one `#### Scenario:`.
|
||||||
|
|
||||||
|
Example for RENAMED:
|
||||||
|
```markdown
|
||||||
|
## RENAMED Requirements
|
||||||
|
- FROM: `### Requirement: Login`
|
||||||
|
- TO: `### Requirement: User Authentication`
|
||||||
|
```
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Common Errors
|
||||||
|
|
||||||
|
**"Change must have at least one delta"**
|
||||||
|
- Check `changes/[name]/specs/` exists with .md files
|
||||||
|
- Verify files have operation prefixes (## ADDED Requirements)
|
||||||
|
|
||||||
|
**"Requirement must have at least one scenario"**
|
||||||
|
- Check scenarios use `#### Scenario:` format (4 hashtags)
|
||||||
|
- Don't use bullet points or bold for scenario headers
|
||||||
|
|
||||||
|
**Silent scenario parsing failures**
|
||||||
|
- Exact format required: `#### Scenario: Name`
|
||||||
|
- Debug with: `openspec show [change] --json --deltas-only`
|
||||||
|
|
||||||
|
### Validation Tips
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Always use strict mode for comprehensive checks
|
||||||
|
openspec validate [change] --strict
|
||||||
|
|
||||||
|
# Debug delta parsing
|
||||||
|
openspec show [change] --json | jq '.deltas'
|
||||||
|
|
||||||
|
# Check specific requirement
|
||||||
|
openspec show [spec] --json -r 1
|
||||||
|
```
|
||||||
|
|
||||||
|
## Happy Path Script
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1) Explore current state
|
||||||
|
openspec spec list --long
|
||||||
|
openspec list
|
||||||
|
# Optional full-text search:
|
||||||
|
# rg -n "Requirement:|Scenario:" openspec/specs
|
||||||
|
# rg -n "^#|Requirement:" openspec/changes
|
||||||
|
|
||||||
|
# 2) Choose change id and scaffold
|
||||||
|
CHANGE=add-two-factor-auth
|
||||||
|
mkdir -p openspec/changes/$CHANGE/{specs/auth}
|
||||||
|
printf "## Why\n...\n\n## What Changes\n- ...\n\n## Impact\n- ...\n" > openspec/changes/$CHANGE/proposal.md
|
||||||
|
printf "## 1. Implementation\n- [ ] 1.1 ...\n" > openspec/changes/$CHANGE/tasks.md
|
||||||
|
|
||||||
|
# 3) Add deltas (example)
|
||||||
|
cat > openspec/changes/$CHANGE/specs/auth/spec.md << 'EOF'
|
||||||
|
## ADDED Requirements
|
||||||
|
### Requirement: Two-Factor Authentication
|
||||||
|
Users MUST provide a second factor during login.
|
||||||
|
|
||||||
|
#### Scenario: OTP required
|
||||||
|
- **WHEN** valid credentials are provided
|
||||||
|
- **THEN** an OTP challenge is required
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# 4) Validate
|
||||||
|
openspec validate $CHANGE --strict
|
||||||
|
```
|
||||||
|
|
||||||
|
## Multi-Capability Example
|
||||||
|
|
||||||
|
```
|
||||||
|
openspec/changes/add-2fa-notify/
|
||||||
|
├── proposal.md
|
||||||
|
├── tasks.md
|
||||||
|
└── specs/
|
||||||
|
├── auth/
|
||||||
|
│ └── spec.md # ADDED: Two-Factor Authentication
|
||||||
|
└── notifications/
|
||||||
|
└── spec.md # ADDED: OTP email notification
|
||||||
|
```
|
||||||
|
|
||||||
|
auth/spec.md
|
||||||
|
```markdown
|
||||||
|
## ADDED Requirements
|
||||||
|
### Requirement: Two-Factor Authentication
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
notifications/spec.md
|
||||||
|
```markdown
|
||||||
|
## ADDED Requirements
|
||||||
|
### Requirement: OTP Email Notification
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
## Best Practices
|
||||||
|
|
||||||
|
### Simplicity First
|
||||||
|
- Default to <100 lines of new code
|
||||||
|
- Single-file implementations until proven insufficient
|
||||||
|
- Avoid frameworks without clear justification
|
||||||
|
- Choose boring, proven patterns
|
||||||
|
|
||||||
|
### Complexity Triggers
|
||||||
|
Only add complexity with:
|
||||||
|
- Performance data showing current solution too slow
|
||||||
|
- Concrete scale requirements (>1000 users, >100MB data)
|
||||||
|
- Multiple proven use cases requiring abstraction
|
||||||
|
|
||||||
|
### Clear References
|
||||||
|
- Use `file.ts:42` format for code locations
|
||||||
|
- Reference specs as `specs/auth/spec.md`
|
||||||
|
- Link related changes and PRs
|
||||||
|
|
||||||
|
### Capability Naming
|
||||||
|
- Use verb-noun: `user-auth`, `payment-capture`
|
||||||
|
- Single purpose per capability
|
||||||
|
- 10-minute understandability rule
|
||||||
|
- Split if description needs "AND"
|
||||||
|
|
||||||
|
### Change ID Naming
|
||||||
|
- Use kebab-case, short and descriptive: `add-two-factor-auth`
|
||||||
|
- Prefer verb-led prefixes: `add-`, `update-`, `remove-`, `refactor-`
|
||||||
|
- Ensure uniqueness; if taken, append `-2`, `-3`, etc.
|
||||||
|
|
||||||
|
## Tool Selection Guide
|
||||||
|
|
||||||
|
| Task | Tool | Why |
|
||||||
|
|------|------|-----|
|
||||||
|
| Find files by pattern | Glob | Fast pattern matching |
|
||||||
|
| Search code content | Grep | Optimized regex search |
|
||||||
|
| Read specific files | Read | Direct file access |
|
||||||
|
| Explore unknown scope | Task | Multi-step investigation |
|
||||||
|
|
||||||
|
## Error Recovery
|
||||||
|
|
||||||
|
### Change Conflicts
|
||||||
|
1. Run `openspec list` to see active changes
|
||||||
|
2. Check for overlapping specs
|
||||||
|
3. Coordinate with change owners
|
||||||
|
4. Consider combining proposals
|
||||||
|
|
||||||
|
### Validation Failures
|
||||||
|
1. Run with `--strict` flag
|
||||||
|
2. Check JSON output for details
|
||||||
|
3. Verify spec file format
|
||||||
|
4. Ensure scenarios properly formatted
|
||||||
|
|
||||||
|
### Missing Context
|
||||||
|
1. Read project.md first
|
||||||
|
2. Check related specs
|
||||||
|
3. Review recent archives
|
||||||
|
4. Ask for clarification
|
||||||
|
|
||||||
|
## Quick Reference
|
||||||
|
|
||||||
|
### Stage Indicators
|
||||||
|
- `changes/` - Proposed, not yet built
|
||||||
|
- `specs/` - Built and deployed
|
||||||
|
- `archive/` - Completed changes
|
||||||
|
|
||||||
|
### File Purposes
|
||||||
|
- `proposal.md` - Why and what
|
||||||
|
- `tasks.md` - Implementation steps
|
||||||
|
- `design.md` - Technical decisions
|
||||||
|
- `spec.md` - Requirements and behavior
|
||||||
|
|
||||||
|
### CLI Essentials
|
||||||
|
```bash
|
||||||
|
openspec list # What's in progress?
|
||||||
|
openspec show [item] # View details
|
||||||
|
openspec validate --strict # Is it correct?
|
||||||
|
openspec archive <change-id> [--yes|-y] # Mark complete (add --yes for automation)
|
||||||
|
```
|
||||||
|
|
||||||
|
Remember: Specs are truth. Changes are proposals. Keep them in sync.
|
||||||
@@ -0,0 +1,25 @@
|
|||||||
|
# Proposal: Build RCU Register Kafka Consumer (G5)
|
||||||
|
|
||||||
|
## Why
|
||||||
|
当前 `bls-register-backend` 需要从 Kafka 主题 `blwlog4Nodejs-rcu-register-topic` 消费高吞吐注册数据,并写入 G5 库。现有模板项目的字段模型与落库目标不匹配,且缺少 3 秒固定频率落库和双写更新策略。
|
||||||
|
|
||||||
|
## What Changes
|
||||||
|
1. 新增 Register 数据模型解析与字段类型校验,兼容 C# RegisterInfo 字段命名。
|
||||||
|
2. 新增值域保护策略:`hotel_id` 超出 PostgreSQL `int2` 值域时强制写 `0`。
|
||||||
|
3. 落库改为固定 3 秒全局批量 flush 一次。
|
||||||
|
4. 双写策略:
|
||||||
|
- 完整写入 `rcu_info.rcu_info_events_g5`。
|
||||||
|
- 更新 `room_status.room_status_moment_g5` 的 `app_version`、`launcher_version`、`config_version`、`upgrade_ts_ms`、`register_ts_ms`,按 `(hotel_id, room_id)` 定位,缺失记录忽略。
|
||||||
|
5. 保持 Kafka 至少一次语义:消息处理 Promise 在 flush 完成后才 resolve。
|
||||||
|
|
||||||
|
## npm Package Strategy
|
||||||
|
- 继续复用成熟依赖,不重复造轮子:
|
||||||
|
- `kafka-node@^5.0.0` 负责 Kafka ConsumerGroup。
|
||||||
|
- `pg@^8.11.5` 负责 PostgreSQL 批量写入。
|
||||||
|
- `zod@^4.3.6` 负责结构与类型预处理校验。
|
||||||
|
- 本次不新增第三方依赖,降低变更风险。
|
||||||
|
|
||||||
|
## Impact
|
||||||
|
- 提升入库一致性(按库字段约束进行落库转换)。
|
||||||
|
- 控制数据库写入频率,满足生产限制。
|
||||||
|
- 降低失效数据对消费位点推进的阻塞风险。
|
||||||
@@ -0,0 +1,46 @@
|
|||||||
|
# Spec: rcu-register-consumer
|
||||||
|
|
||||||
|
## Requirement: Kafka Register 消费
|
||||||
|
系统 SHALL 从 `blwlog4Nodejs-rcu-register-topic` 消费 Register 数据并执行结构化校验。
|
||||||
|
|
||||||
|
### Scenario: 解析 C# RegisterInfo 字段
|
||||||
|
- **GIVEN** Kafka 消息为 RegisterInfo JSON
|
||||||
|
- **WHEN** 消息被解析
|
||||||
|
- **THEN** 字段转换为数据库字段类型后参与落库
|
||||||
|
|
||||||
|
## Requirement: 值域与类型保护
|
||||||
|
系统 SHALL 对写库字段执行类型转换和值域保护。
|
||||||
|
|
||||||
|
### Scenario: hotel_id 超范围保护
|
||||||
|
- **GIVEN** `hotel_id` 超出 `int2` 范围
|
||||||
|
- **WHEN** 数据准备写入数据库
|
||||||
|
- **THEN** `hotel_id` 被写为 `0`
|
||||||
|
|
||||||
|
## Requirement: 固定 3 秒写库
|
||||||
|
系统 SHALL 采用全局缓冲并每 3 秒执行一次批量写库。
|
||||||
|
|
||||||
|
### Scenario: 高频消息输入
|
||||||
|
- **GIVEN** 3 秒内收到多条消息
|
||||||
|
- **WHEN** 到达 flush 时刻
|
||||||
|
- **THEN** 系统执行一次批量写入并统一确认消费
|
||||||
|
|
||||||
|
## Requirement: 双写目标库
|
||||||
|
系统 SHALL 双写 G5 库中的两个目标表。
|
||||||
|
|
||||||
|
### Scenario: 完整事件入库
|
||||||
|
- **GIVEN** 合法 Register 数据
|
||||||
|
- **WHEN** 执行写库
|
||||||
|
- **THEN** 完整写入 `rcu_info.rcu_info_events_g5`
|
||||||
|
|
||||||
|
### Scenario: 房态瞬时表追加更新
|
||||||
|
- **GIVEN** 合法 Register 数据
|
||||||
|
- **WHEN** 执行写库
|
||||||
|
- **THEN** 更新 `room_status.room_status_moment_g5` 的 `app_version`、`launcher_version`、`config_version`、`upgrade_ts_ms`、`register_ts_ms`
|
||||||
|
|
||||||
|
## Requirement: room_status 不新增行
|
||||||
|
系统 SHALL 仅更新已存在的 `(hotel_id, room_id)` 行,不允许新增记录。
|
||||||
|
|
||||||
|
### Scenario: 主键不存在
|
||||||
|
- **GIVEN** `room_status_moment_g5` 中不存在对应 `(hotel_id, room_id)`
|
||||||
|
- **WHEN** 执行更新
|
||||||
|
- **THEN** 忽略该条 room_status 更新且继续推进 Kafka 消费位点
|
||||||
@@ -0,0 +1,16 @@
|
|||||||
|
## 1. Specification
|
||||||
|
- [x] 1.1 定义 Register 消费与双写需求规格
|
||||||
|
- [x] 1.2 定义 3 秒固定写库频率需求
|
||||||
|
- [x] 1.3 定义 room_status 仅更新不新增行需求
|
||||||
|
|
||||||
|
## 2. Implementation
|
||||||
|
- [x] 2.1 改造配置默认值为 register 主题与 G5 目标表
|
||||||
|
- [x] 2.2 实现 Register payload 类型转换与值域保护
|
||||||
|
- [x] 2.3 实现 rcu_info_events_g5 批量插入
|
||||||
|
- [x] 2.4 实现 room_status_moment_g5 批量更新(仅已存在记录)
|
||||||
|
- [x] 2.5 实现全局 3 秒 flush 与 Kafka 回调对齐
|
||||||
|
|
||||||
|
## 3. Verification
|
||||||
|
- [x] 3.1 更新处理器单测(hotel_id、字段映射、类型转换)
|
||||||
|
- [x] 3.2 运行 `npm run test`
|
||||||
|
- [x] 3.3 运行 `npm run spec:validate`
|
||||||
@@ -0,0 +1,17 @@
|
|||||||
|
# Change: Fix Kafka Partitioning and Schema Issues
|
||||||
|
|
||||||
|
## Why
|
||||||
|
Production deployment revealed issues with data ingestion:
|
||||||
|
1. Kafka Topic name changed to include partition suffix.
|
||||||
|
2. Legacy data contains second-level timestamps (1970s) causing partition lookup failures in PostgreSQL (which expects ms).
|
||||||
|
3. Variable-length fields (reboot reason, status) exceeded VARCHAR(10) limits, causing crashes.
|
||||||
|
|
||||||
|
## What Changes
|
||||||
|
- **Modified Requirement**: Update Kafka Topic to `blwlog4Nodejs-rcu-onoffline-topic-0`.
|
||||||
|
- **New Requirement**: Implement heuristic timestamp conversion (Sec -> MS) for values < 100B.
|
||||||
|
- **New Requirement**: Truncate specific fields to VARCHAR(255) to prevent DB rejection.
|
||||||
|
- **Modified Requirement**: Update DB Schema to VARCHAR(255) for robustness.
|
||||||
|
|
||||||
|
## Impact
|
||||||
|
- Affected specs: `onoffline`
|
||||||
|
- Affected code: `src/processor/index.js`, `scripts/init_db.sql`
|
||||||
@@ -0,0 +1,25 @@
|
|||||||
|
## MODIFIED Requirements
|
||||||
|
### Requirement: 消费并落库
|
||||||
|
系统 SHALL 从 blwlog4Nodejs-rcu-onoffline-topic-0 消费消息,并写入 log_platform.onoffline.onoffline_record。
|
||||||
|
|
||||||
|
#### Scenario: 非重启数据写入
|
||||||
|
- **GIVEN** RebootReason 为空或不存在
|
||||||
|
- **WHEN** 消息被处理
|
||||||
|
- **THEN** current_status 等于 CurrentStatus (截断至 255 字符)
|
||||||
|
|
||||||
|
## ADDED Requirements
|
||||||
|
### Requirement: 字段长度限制与截断
|
||||||
|
系统 SHALL 将部分变长字段截断至数据库允许的最大长度 (VARCHAR(255)),防止写入失败。
|
||||||
|
|
||||||
|
#### Scenario: 超长字段处理
|
||||||
|
- **GIVEN** LauncherVersion, CurrentStatus 或 RebootReason 超过 255 字符
|
||||||
|
- **WHEN** 消息被处理
|
||||||
|
- **THEN** 字段被截断为前 255 个字符并入库
|
||||||
|
|
||||||
|
### Requirement: 时间戳单位自动识别
|
||||||
|
系统 SHALL 自动识别 UnixTime 字段是秒还是毫秒,并统一转换为毫秒。
|
||||||
|
|
||||||
|
#### Scenario: 秒级时间戳转换
|
||||||
|
- **GIVEN** UnixTime < 100000000000 (约 1973 年前)
|
||||||
|
- **WHEN** 解析时间戳
|
||||||
|
- **THEN** 自动乘以 1000 转换为毫秒
|
||||||
@@ -0,0 +1,6 @@
|
|||||||
|
## 1. Implementation
|
||||||
|
- [x] Update Kafka Topic in .env and config
|
||||||
|
- [x] Implement timestamp unit detection and conversion in processor
|
||||||
|
- [x] Implement field truncation logic in processor
|
||||||
|
- [x] Update database schema definition (init_db.sql) to VARCHAR(255)
|
||||||
|
- [x] Verify data ingestion with production stream
|
||||||
@@ -0,0 +1,18 @@
|
|||||||
|
# Change: Optimize Kafka Consumption Performance
|
||||||
|
|
||||||
|
## Why
|
||||||
|
User reports extremely slow Kafka consumption. Current implementation processes and inserts messages one-by-one, which creates a bottleneck at the database network round-trip time (RTT).
|
||||||
|
|
||||||
|
## What Changes
|
||||||
|
- **New Requirement**: Implement Batch Processing for Kafka messages.
|
||||||
|
- **Refactor**: Decouple message parsing from insertion in `processor`.
|
||||||
|
- **Logic**:
|
||||||
|
- Accumulate messages in a buffer (e.g., 500ms or 500 items).
|
||||||
|
- Perform Batch Insert into PostgreSQL.
|
||||||
|
- Implement Row-by-Row fallback for batch failures (to isolate bad data).
|
||||||
|
- Handle DB connection errors with retry loop at batch level.
|
||||||
|
|
||||||
|
## Impact
|
||||||
|
- Affected specs: `onoffline`
|
||||||
|
- Affected code: `src/index.js`, `src/processor/index.js`
|
||||||
|
- Performance: Expected 10x-100x throughput increase.
|
||||||
@@ -0,0 +1,13 @@
|
|||||||
|
## ADDED Requirements
|
||||||
|
### Requirement: 批量消费与写入
|
||||||
|
系统 SHALL 对 Kafka 消息进行缓冲,并按批次写入数据库,以提高吞吐量。
|
||||||
|
|
||||||
|
#### Scenario: 批量写入
|
||||||
|
- **GIVEN** 短时间内收到多条消息 (e.g., 500条)
|
||||||
|
- **WHEN** 缓冲区满或超时 (e.g., 200ms)
|
||||||
|
- **THEN** 执行一次批量数据库插入操作
|
||||||
|
|
||||||
|
#### Scenario: 写入失败降级
|
||||||
|
- **GIVEN** 批量写入因数据错误失败 (非连接错误)
|
||||||
|
- **WHEN** 捕获异常
|
||||||
|
- **THEN** 自动降级为逐条写入,以隔离错误数据并确保有效数据入库
|
||||||
@@ -0,0 +1,5 @@
|
|||||||
|
## 1. Implementation
|
||||||
|
- [ ] Refactor `src/processor/index.js` to export `parseMessageToRows`
|
||||||
|
- [ ] Implement `BatchProcessor` logic in `src/index.js`
|
||||||
|
- [ ] Update `handleMessage` to use `BatchProcessor`
|
||||||
|
- [ ] Verify performance improvement
|
||||||
@@ -0,0 +1,11 @@
|
|||||||
|
# Proposal: Refactor Partition Indexes
|
||||||
|
|
||||||
|
## Goal
|
||||||
|
利用 PostgreSQL 默认的支持,改变每日分区创立时的索引策略,不再在代码中对每个分区单独创建索引。
|
||||||
|
|
||||||
|
## Context
|
||||||
|
当前 `PartitionManager` 在动态创建子分区后,会调用 `ensurePartitionIndexes` 显式地在子分区上创建六个单列索引。由于我们使用的是 PostgreSQL 11+,且我们在初始化脚本中的主分区表 `onoffline.onoffline_record` 上已经创建了所有的索引,此主表上的索引会自动应用于所有的子分区,不需要我们在创建分区时另外手动添加。
|
||||||
|
|
||||||
|
## Proposed Changes
|
||||||
|
1. 在 `src/db/partitionManager.js` 中移除子分区显式创建索引的方法 `ensurePartitionIndexes` 以及针对已有子分区的循环索引检查函数 `ensureIndexesForExistingPartitions`。
|
||||||
|
2. 在更新分区流程 `ensurePartitions` 以及 `ensurePartitionsForTimestamps` 中,移除对 `ensurePartitionIndexes` 的调用。
|
||||||
@@ -0,0 +1,11 @@
|
|||||||
|
# Spec Delta: onoffline-backend
|
||||||
|
|
||||||
|
## MODIFIED Requirements
|
||||||
|
|
||||||
|
### Requirement: 数据库分区策略
|
||||||
|
系统 SHALL 使用 Range Partitioning 按天分区,并自动维护未来 30 天的分区表,子表依赖 PostgreSQL 原生机制继承主表索引。
|
||||||
|
|
||||||
|
#### Scenario: 分区预创建
|
||||||
|
- **GIVEN** 系统启动或每日凌晨
|
||||||
|
- **WHEN** 运行分区维护任务
|
||||||
|
- **THEN** 确保数据库中存在未来 30 天的分区表,无需对子表显式创建单列索引
|
||||||
@@ -0,0 +1,6 @@
|
|||||||
|
# Tasks: Refactor Partition Indexes
|
||||||
|
|
||||||
|
- [x] refactor `src/db/partitionManager.js`: remove `ensurePartitionIndexes` and `ensureIndexesForExistingPartitions`.
|
||||||
|
- [x] refactor `src/db/partitionManager.js`: update `ensurePartitions` and `ensurePartitionsForTimestamps` to remove calls to `ensurePartitionIndexes`.
|
||||||
|
- [x] refactor `src/db/initializer.js` (and any other occurrences) to reflect the removal.
|
||||||
|
- [x] update openspec requirements to clarify that index propagation relies on PostgreSQL parent-table indexes.
|
||||||
@@ -0,0 +1,14 @@
|
|||||||
|
# Change: remove runtime db provisioning
|
||||||
|
|
||||||
|
## Why
|
||||||
|
当前服务在运行时承担了建库、建表和分区维护职责,导致服务职责边界不清晰,也会引入启动阶段 DDL 风险。现已将该能力剥离到根目录 `SQL_Script/`,需要通过 OpenSpec 正式记录为规范变更。
|
||||||
|
|
||||||
|
## What Changes
|
||||||
|
- 移除服务启动阶段的数据库初始化与定时分区维护要求。
|
||||||
|
- 移除服务在写入失败时自动创建缺失分区的要求。
|
||||||
|
- 明确数据库结构与分区维护由外部脚本(`SQL_Script/`)负责。
|
||||||
|
- 保留服务的核心职责:Kafka 消费、解析、写库、重试与监控。
|
||||||
|
|
||||||
|
## Impact
|
||||||
|
- Affected specs: `openspec/specs/onoffline/spec.md`
|
||||||
|
- Affected code: `src/index.js`, `src/config/config.js`, `src/db/initializer.js`, `src/db/partitionManager.js`, `scripts/init_db.sql`, `scripts/verify_partitions.js`, `../SQL_Script/*`
|
||||||
@@ -0,0 +1,32 @@
|
|||||||
|
## MODIFIED Requirements
|
||||||
|
|
||||||
|
### Requirement: 数据库分区策略
|
||||||
|
系统 SHALL 使用 Range Partitioning 按天分区;运行服务本身 SHALL NOT 执行建库、建表、分区创建或定时分区维护。
|
||||||
|
|
||||||
|
#### Scenario: 服务启动不执行 DDL
|
||||||
|
- **GIVEN** 服务进程启动
|
||||||
|
- **WHEN** 进入 bootstrap 过程
|
||||||
|
- **THEN** 仅初始化消费、处理、监控相关能力,不执行数据库创建、表结构初始化与分区创建
|
||||||
|
|
||||||
|
#### Scenario: 分区由外部脚本维护
|
||||||
|
- **GIVEN** 需要创建数据库对象或新增未来分区
|
||||||
|
- **WHEN** 执行外部 SQL/JS 工具
|
||||||
|
- **THEN** 通过根目录 `SQL_Script/` 完成建库和分区维护,而不是由服务运行时自动执行
|
||||||
|
|
||||||
|
### Requirement: 批量消费与写入
|
||||||
|
系统 SHALL 对 Kafka 消息进行缓冲,并按批次写入数据库,以提高吞吐量;当写入失败时,系统 SHALL 执行连接恢复重试与降级策略,但不在运行时创建数据库分区。
|
||||||
|
|
||||||
|
#### Scenario: 批量写入
|
||||||
|
- **GIVEN** 短时间内收到多条消息 (e.g., 500条)
|
||||||
|
- **WHEN** 缓冲区满或超时 (e.g., 200ms)
|
||||||
|
- **THEN** 执行一次批量数据库插入操作
|
||||||
|
|
||||||
|
#### Scenario: 写入失败降级
|
||||||
|
- **GIVEN** 批量写入因数据错误失败 (非连接错误)
|
||||||
|
- **WHEN** 捕获异常
|
||||||
|
- **THEN** 自动降级为逐条写入,以隔离错误数据并确保有效数据入库
|
||||||
|
|
||||||
|
#### Scenario: 分区缺失错误处理
|
||||||
|
- **GIVEN** 写入时数据库返回分区缺失错误
|
||||||
|
- **WHEN** 服务处理该错误
|
||||||
|
- **THEN** 服务记录错误并按既有错误处理机制处理,不在运行时执行分区创建
|
||||||
@@ -0,0 +1,12 @@
|
|||||||
|
## 1. Implementation
|
||||||
|
- [x] 1.1 Remove runtime DB initialization from bootstrap flow (`src/index.js`).
|
||||||
|
- [x] 1.2 Remove scheduled partition maintenance job from runtime service.
|
||||||
|
- [x] 1.3 Remove runtime missing-partition auto-fix behavior.
|
||||||
|
- [x] 1.4 Remove legacy DB provisioning modules and scripts from service project.
|
||||||
|
- [x] 1.5 Add external SQL/JS provisioning scripts under root `SQL_Script/` for DB/schema/partition management.
|
||||||
|
- [x] 1.6 Update project docs to point DB provisioning to `SQL_Script/`.
|
||||||
|
|
||||||
|
## 2. Validation
|
||||||
|
- [x] 2.1 Run `npm run lint` in `bls-onoffline-backend`.
|
||||||
|
- [x] 2.2 Run `npm run build` in `bls-onoffline-backend`.
|
||||||
|
- [x] 2.3 Run `openspec validate remove-runtime-db-provisioning --strict`.
|
||||||
31
bls-register-backend/openspec/project.md
Normal file
31
bls-register-backend/openspec/project.md
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
# Project Context
|
||||||
|
|
||||||
|
## Purpose
|
||||||
|
[Describe your project's purpose and goals]
|
||||||
|
|
||||||
|
## Tech Stack
|
||||||
|
- [List your primary technologies]
|
||||||
|
- [e.g., TypeScript, React, Node.js]
|
||||||
|
|
||||||
|
## Project Conventions
|
||||||
|
|
||||||
|
### Code Style
|
||||||
|
[Describe your code style preferences, formatting rules, and naming conventions]
|
||||||
|
|
||||||
|
### Architecture Patterns
|
||||||
|
[Document your architectural decisions and patterns]
|
||||||
|
|
||||||
|
### Testing Strategy
|
||||||
|
[Explain your testing approach and requirements]
|
||||||
|
|
||||||
|
### Git Workflow
|
||||||
|
[Describe your branching strategy and commit conventions]
|
||||||
|
|
||||||
|
## Domain Context
|
||||||
|
[Add domain-specific knowledge that AI assistants need to understand]
|
||||||
|
|
||||||
|
## Important Constraints
|
||||||
|
[List any technical, business, or regulatory constraints]
|
||||||
|
|
||||||
|
## External Dependencies
|
||||||
|
[Document key external services, APIs, or systems]
|
||||||
103
bls-register-backend/openspec/specs/onoffline/spec.md
Normal file
103
bls-register-backend/openspec/specs/onoffline/spec.md
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
# Spec: onoffline-backend
|
||||||
|
|
||||||
|
## Purpose
|
||||||
|
从 Kafka 消费设备上下线事件并按规则写入 PostgreSQL 分区表,确保高可靠性、幂等写入和错误恢复能力。
|
||||||
|
## Requirements
|
||||||
|
### Requirement: 消费并落库
|
||||||
|
系统 SHALL 从 blwlog4Nodejs-rcu-onoffline-topic-0 消费消息,并写入 log_platform.onoffline.onoffline_record。
|
||||||
|
|
||||||
|
#### Scenario: 非重启数据写入
|
||||||
|
- **GIVEN** RebootReason 为空或不存在
|
||||||
|
- **WHEN** 消息被处理
|
||||||
|
- **THEN** current_status 等于 CurrentStatus (截断至 255 字符)
|
||||||
|
|
||||||
|
### Requirement: 重启数据处理
|
||||||
|
系统 SHALL 在 RebootReason 非空时强制 current_status 为 on。
|
||||||
|
|
||||||
|
#### Scenario: 重启数据写入
|
||||||
|
- **GIVEN** RebootReason 为非空值
|
||||||
|
- **WHEN** 消息被处理
|
||||||
|
- **THEN** current_status 等于 on
|
||||||
|
|
||||||
|
### Requirement: 空值保留
|
||||||
|
系统 SHALL 保留上游空值,不对字段进行补 0。
|
||||||
|
|
||||||
|
#### Scenario: 空值写入
|
||||||
|
- **GIVEN** LauncherVersion 或 RebootReason 为空字符串
|
||||||
|
- **WHEN** 消息被处理
|
||||||
|
- **THEN** 数据库存储值为对应的空字符串
|
||||||
|
|
||||||
|
### Requirement: 数据库分区策略
|
||||||
|
系统 SHALL 使用 Range Partitioning 按天分区,并自动维护未来 30 天的分区表,子表依赖 PostgreSQL 原生机制继承主表索引。
|
||||||
|
|
||||||
|
#### Scenario: 分区预创建
|
||||||
|
- **GIVEN** 系统启动或每日凌晨
|
||||||
|
- **WHEN** 运行分区维护任务
|
||||||
|
- **THEN** 确保数据库中存在未来 30 天的分区表,无需对子表显式创建单列索引
|
||||||
|
|
||||||
|
### Requirement: 消费可靠性 (At-Least-Once)
|
||||||
|
系统 SHALL 仅在数据成功写入数据库后,才向 Kafka 提交消费位点。
|
||||||
|
|
||||||
|
#### Scenario: 逐条确认与顺序提交
|
||||||
|
- **GIVEN** 并发处理多条消息 (Offset 1, 2, 3)
|
||||||
|
- **WHEN** Offset 2 先完成,Offset 1 尚未完成
|
||||||
|
- **THEN** 系统不提交 Offset 2,直到 Offset 1 也完成,才提交 Offset 3 (即 1, 2, 3 都完成)
|
||||||
|
|
||||||
|
### Requirement: 数据库离线保护
|
||||||
|
系统 SHALL 在数据库连接丢失时暂停消费,防止数据堆积或丢失。
|
||||||
|
|
||||||
|
#### Scenario: 数据库断连
|
||||||
|
- **GIVEN** 数据库连接失败 (ECONNREFUSED 等)
|
||||||
|
- **WHEN** 消费者尝试写入
|
||||||
|
- **THEN** 暂停 Kafka 消费 1 分钟,并进入轮询检测模式,直到数据库恢复
|
||||||
|
|
||||||
|
### Requirement: 幂等写入
|
||||||
|
系统 SHALL 处理重复消费的数据,防止主键冲突。
|
||||||
|
|
||||||
|
#### Scenario: 重复数据处理
|
||||||
|
- **GIVEN** Kafka 重新投递已处理过的消息
|
||||||
|
- **WHEN** 尝试写入数据库
|
||||||
|
- **THEN** 使用 `ON CONFLICT DO NOTHING` 忽略冲突,视为处理成功
|
||||||
|
|
||||||
|
### Requirement: 性能与日志
|
||||||
|
系统 SHALL 最小化正常运行时的日志输出。
|
||||||
|
|
||||||
|
#### Scenario: 正常运行日志
|
||||||
|
- **GIVEN** 数据正常处理
|
||||||
|
- **WHEN** 写入成功
|
||||||
|
- **THEN** 不输出单条日志,仅每分钟输出聚合统计 (Pulled/Inserted)
|
||||||
|
|
||||||
|
### Requirement: 字段长度限制与截断
|
||||||
|
系统 SHALL 将部分变长字段截断至数据库允许的最大长度 (VARCHAR(255)),防止写入失败。
|
||||||
|
|
||||||
|
#### Scenario: 超长字段处理
|
||||||
|
- **GIVEN** LauncherVersion, CurrentStatus 或 RebootReason 超过 255 字符
|
||||||
|
- **WHEN** 消息被处理
|
||||||
|
- **THEN** 字段被截断为前 255 个字符并入库
|
||||||
|
|
||||||
|
### Requirement: 时间戳单位自动识别
|
||||||
|
系统 SHALL 自动识别 UnixTime 字段是秒还是毫秒,并统一转换为毫秒。
|
||||||
|
|
||||||
|
#### Scenario: 秒级时间戳转换
|
||||||
|
- **GIVEN** UnixTime < 100000000000 (约 1973 年前)
|
||||||
|
- **WHEN** 解析时间戳
|
||||||
|
- **THEN** 自动乘以 1000 转换为毫秒
|
||||||
|
|
||||||
|
### Requirement: 批量消费与写入
|
||||||
|
系统 SHALL 对 Kafka 消息进行缓冲,并按批次写入数据库,以提高吞吐量;当写入失败时,系统 SHALL 执行连接恢复重试与降级策略,但不在运行时创建数据库分区。
|
||||||
|
|
||||||
|
#### Scenario: 批量写入
|
||||||
|
- **GIVEN** 短时间内收到多条消息 (e.g., 500条)
|
||||||
|
- **WHEN** 缓冲区满或超时 (e.g., 200ms)
|
||||||
|
- **THEN** 执行一次批量数据库插入操作
|
||||||
|
|
||||||
|
#### Scenario: 写入失败降级
|
||||||
|
- **GIVEN** 批量写入因数据错误失败 (非连接错误)
|
||||||
|
- **WHEN** 捕获异常
|
||||||
|
- **THEN** 自动降级为逐条写入,以隔离错误数据并确保有效数据入库
|
||||||
|
|
||||||
|
#### Scenario: 分区缺失错误处理
|
||||||
|
- **GIVEN** 写入时数据库返回分区缺失错误
|
||||||
|
- **WHEN** 服务处理该错误
|
||||||
|
- **THEN** 服务记录错误并按既有错误处理机制处理,不在运行时执行分区创建
|
||||||
|
|
||||||
11
bls-register-backend/openspec/specs/onoffline/status.md
Normal file
11
bls-register-backend/openspec/specs/onoffline/status.md
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
|
||||||
|
## Implementation Status
|
||||||
|
- **Date**: 2026-02-04
|
||||||
|
- **Status**: Completed
|
||||||
|
- **Notes**:
|
||||||
|
- 已完成核心消费逻辑、分区管理、数据库幂等写入。
|
||||||
|
- 已处理数据库连接泄露 (EADDRINUSE) 问题,增加了离线保护机制。
|
||||||
|
- 已修复时间戳单位问题 (Seconds -> MS)。
|
||||||
|
- 已将关键字段长度扩展至 VARCHAR(255) 并增加了代码层截断保护。
|
||||||
|
- 验证了数据积压消费能力。
|
||||||
|
- 本阶段开发任务已归档。
|
||||||
13
bls-register-backend/out.log
Normal file
13
bls-register-backend/out.log
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
{"level":"info","message":"Starting register consumer","timestamp":1773480367035,"context":{"env":"development","kafka":{"brokers":["kafka.blv-oa.com:9092"],"topic":"blwlog4Nodejs-rcu-register-topic","groupId":"bls-register-consumer-probe-1773480366464"},"db":{"host":"10.8.8.80","port":5434,"database":"log_platform","schema":"rcu_info","table":"rcu_info_events_g5","roomStatusSchema":"room_status","roomStatusTable":"room_status_moment_g5"},"flushIntervalMs":3000}}
|
||||||
|
{"level":"info","message":"Kafka Consumer rebalancing","timestamp":1773480367119,"context":{"groupId":"bls-register-consumer-probe-1773480366464","clientId":"bls-register-producer"}}
|
||||||
|
{"level":"info","message":"Kafka Consumer connected","timestamp":1773480367237,"context":{"groupId":"bls-register-consumer-probe-1773480366464","clientId":"bls-register-producer"}}
|
||||||
|
{"level":"info","message":"Kafka Consumer rebalanced","timestamp":1773480367237,"context":{"clientId":"bls-register-producer","groupId":"bls-register-consumer-probe-1773480366464"}}
|
||||||
|
{"level":"info","message":"Run counters","timestamp":1773480377038,"context":{"kafkaPulled":23,"dbInserted":23,"parseError":0,"dbFailed":0}}
|
||||||
|
{"level":"info","message":"Run counters","timestamp":1773480387038,"context":{"kafkaPulled":37,"dbInserted":31,"parseError":0,"dbFailed":0}}
|
||||||
|
|
||||||
|
[probe] published topic=blwlog4Nodejs-rcu-register-topic ts_ms=1773480366464 hotel_id=1172 room_id=515
|
||||||
|
[probe-db] event_rows=1
|
||||||
|
[probe-db] event.ts_ms=1773480366464 room_id=515 app_version=v1.2
|
||||||
|
[probe-db] event.udp_raw=YWJjZGVm
|
||||||
|
[probe-db] room_status_rows=1
|
||||||
|
[probe-db] room_status.room_id=515 register_ts_ms=1773480366464 upgrade_ts_ms=1773480367698
|
||||||
3526
bls-register-backend/package-lock.json
generated
Normal file
3526
bls-register-backend/package-lock.json
generated
Normal file
File diff suppressed because it is too large
Load Diff
27
bls-register-backend/package.json
Normal file
27
bls-register-backend/package.json
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
{
|
||||||
|
"name": "bls-register-backend",
|
||||||
|
"version": "1.0.0",
|
||||||
|
"type": "module",
|
||||||
|
"private": true,
|
||||||
|
"scripts": {
|
||||||
|
"dev": "node src/index.js",
|
||||||
|
"build": "vite build --ssr src/index.js --outDir dist",
|
||||||
|
"test": "vitest run",
|
||||||
|
"lint": "node scripts/lint.js",
|
||||||
|
"spec:lint": "openspec validate --specs --strict --no-interactive",
|
||||||
|
"spec:validate": "openspec validate --specs --no-interactive",
|
||||||
|
"start": "node dist/index.js"
|
||||||
|
},
|
||||||
|
"dependencies": {
|
||||||
|
"dotenv": "^16.4.5",
|
||||||
|
"kafka-node": "^5.0.0",
|
||||||
|
"node-cron": "^4.2.1",
|
||||||
|
"pg": "^8.11.5",
|
||||||
|
"redis": "^4.6.13",
|
||||||
|
"zod": "^4.3.6"
|
||||||
|
},
|
||||||
|
"devDependencies": {
|
||||||
|
"vite": "^5.4.0",
|
||||||
|
"vitest": "^4.0.18"
|
||||||
|
}
|
||||||
|
}
|
||||||
1
bls-register-backend/probe.log
Normal file
1
bls-register-backend/probe.log
Normal file
@@ -0,0 +1 @@
|
|||||||
|
[probe] published topic=blwlog4Nodejs-rcu-register-topic ts_ms=1773480366464 hotel_id=1172 room_id=515
|
||||||
41
bls-register-backend/scripts/lint.js
Normal file
41
bls-register-backend/scripts/lint.js
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
import fs from 'fs';
import path from 'path';
import { fileURLToPath } from 'url';
import { spawnSync } from 'child_process';

// Resolve the project root relative to this script so the linter works
// regardless of the current working directory.
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const projectRoot = path.resolve(__dirname, '..');

// Directories whose .js files are syntax-checked.
const targets = ['src', 'tests'];

/**
 * Recursively collect all .js files under `dir`.
 * @param {string} dir - Directory to scan; a missing directory yields [].
 * @returns {string[]} Paths of every .js file found.
 */
const collectFiles = (dir) => {
  if (!fs.existsSync(dir)) {
    return [];
  }
  const entries = fs.readdirSync(dir, { withFileTypes: true });
  return entries.flatMap((entry) => {
    const fullPath = path.join(dir, entry.name);
    if (entry.isDirectory()) {
      return collectFiles(fullPath);
    }
    if (entry.isFile() && fullPath.endsWith('.js')) {
      return [fullPath];
    }
    return [];
  });
};

const files = targets.flatMap((target) => collectFiles(path.join(projectRoot, target)));

const failures = [];

// Run `node --check` on each file; child output is inherited so syntax
// errors are printed directly to this process's stdout/stderr.
files.forEach((file) => {
  const result = spawnSync(process.execPath, ['--check', file], { stdio: 'inherit' });
  if (result.status !== 0) {
    failures.push(file);
  }
});

if (failures.length > 0) {
  // Fix: the original exited 1 without summarizing which files failed,
  // leaving only interleaved child stderr. Report them explicitly.
  console.error(`lint: ${failures.length} file(s) failed syntax check:`);
  failures.forEach((file) => console.error(`  - ${file}`));
  process.exit(1);
}
|
||||||
66
bls-register-backend/scripts/publishProbe.js
Normal file
66
bls-register-backend/scripts/publishProbe.js
Normal file
@@ -0,0 +1,66 @@
|
|||||||
|
import dotenv from 'dotenv';
import kafka from 'kafka-node';

dotenv.config();

// CLI: node scripts/publishProbe.js [tsMs] [roomId] [hotelId]
// Publishes one synthetic register event so the consumer/DB path can be
// verified end to end.
const probeTs = Number(process.argv[2] || Date.now());
const probeRoom = process.argv[3] || `PROBE-${probeTs}`;
const probeHotelId = Number(process.argv[4] || 1085);

const topic = process.env.KAFKA_TOPIC || process.env.KAFKA_TOPICS || 'blwlog4Nodejs-rcu-register-topic';
const kafkaHost = (process.env.KAFKA_BROKERS || '')
  .split(',')
  .map((s) => s.trim())
  .filter(Boolean)
  .join(',');
const saslEnabled = process.env.KAFKA_SASL_ENABLED === 'true';
const sslEnabled = process.env.KAFKA_SSL_ENABLED === 'true';

/**
 * Build kafka-node client options from the environment, attaching SASL only
 * when it is enabled and complete credentials are present, and SSL only when
 * explicitly enabled.
 */
const buildClientOptions = () => {
  const options = {
    kafkaHost,
    clientId: process.env.KAFKA_CLIENT_ID || 'bls-register-producer'
  };
  if (saslEnabled && process.env.KAFKA_SASL_USERNAME && process.env.KAFKA_SASL_PASSWORD) {
    options.sasl = {
      mechanism: process.env.KAFKA_SASL_MECHANISM || 'plain',
      username: process.env.KAFKA_SASL_USERNAME,
      password: process.env.KAFKA_SASL_PASSWORD
    };
  }
  if (sslEnabled) {
    options.sslOptions = { rejectUnauthorized: false };
  }
  return options;
};

// Probe payload. Several fields deliberately embed NUL characters —
// presumably to exercise downstream NUL handling; confirm against consumer.
const payload = {
  ts_ms: probeTs,
  upgrade_ts_ms: probeTs + 1234,
  hotel_id: Number.isFinite(probeHotelId) ? probeHotelId : 1085,
  room_id: probeRoom,
  device_id: `probe-device-${probeTs}`,
  is_send: 0,
  udp_raw: 'abc\u0000def',
  extra: { source: 'probe', note: 'with\\u0000nul' },
  app_version: 'v1\u0000.2',
  launcher_version: 'launcher-1',
  config_version: 'cfg-1'
};

const client = new kafka.KafkaClient(buildClientOptions());
const producer = new kafka.Producer(client);

producer.on('ready', () => {
  producer.send([{ topic, messages: JSON.stringify(payload) }], (err) => {
    if (err) {
      console.error(`[probe] publish failed: ${err.message}`);
      process.exit(1);
    }
    console.log(`[probe] published topic=${topic} ts_ms=${probeTs} hotel_id=${payload.hotel_id} room_id=${probeRoom}`);
    // Close producer, then client, then exit cleanly.
    producer.close(() => client.close(() => process.exit(0)));
  });
});

producer.on('error', (err) => {
  console.error(`[probe] producer error: ${err.message}`);
  process.exit(1);
});
|
||||||
7
bls-register-backend/scripts/run-30s.ps1
Normal file
7
bls-register-backend/scripts/run-30s.ps1
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
$ErrorActionPreference = "Stop"
Remove-Item -Force out.log, err.log -ErrorAction SilentlyContinue
# Fix: Start-Process rejects using the same file for both -RedirectStandardOutput
# and -RedirectStandardError, so stderr is captured separately and merged into
# out.log after the process is stopped.
$p = Start-Process -FilePath node -ArgumentList 'src/index.js' -WorkingDirectory (Get-Location).Path -RedirectStandardOutput 'out.log' -RedirectStandardError 'err.log' -PassThru
Start-Sleep -Seconds 30
Stop-Process -Id $p.Id -Force
Start-Sleep -Seconds 1
if (Test-Path err.log) {
    Get-Content err.log | Add-Content -Path out.log
    Remove-Item -Force err.log -ErrorAction SilentlyContinue
}
Add-Content -Path out.log -Value "[runner] stopped after 30s"
|
||||||
67
bls-register-backend/scripts/verifyProbeInDb.js
Normal file
67
bls-register-backend/scripts/verifyProbeInDb.js
Normal file
@@ -0,0 +1,67 @@
|
|||||||
|
import dotenv from 'dotenv';
import pg from 'pg';

dotenv.config();

// CLI: node scripts/verifyProbeInDb.js <probeTs> <probeRoom> <probeHotelId>
// Checks that a previously published probe event landed in the G5 event
// table and (optionally) the room-status table.
const probeTs = Number(process.argv[2]);
const probeRoom = process.argv[3];
const probeHotelId = Number(process.argv[4]);

if (!Number.isFinite(probeTs) || !probeRoom || !Number.isFinite(probeHotelId)) {
  console.error('Usage: node scripts/verifyProbeInDb.js <probeTs> <probeRoom> <probeHotelId>');
  process.exit(1);
}

// Single-connection pool against the G5 PostgreSQL instance.
const pool = new pg.Pool({
  host: process.env.POSTGRES_HOST_G5,
  port: Number(process.env.POSTGRES_PORT_G5 || 5434),
  user: process.env.POSTGRES_USER_G5,
  password: process.env.POSTGRES_PASSWORD_G5,
  database: process.env.POSTGRES_DATABASE_G5,
  max: 1
});

/**
 * Query both tables for the probe row and print what was found.
 * A missing room_status row is reported but not treated as a failure.
 */
const main = async () => {
  const eventResult = await pool.query(
    `SELECT ts_ms, room_id, app_version, udp_raw
     FROM rcu_info.rcu_info_events_g5
     WHERE ts_ms = $1 AND room_id = $2
     ORDER BY write_ts_ms DESC
     LIMIT 1`,
    [probeTs, probeRoom]
  );

  const statusResult = await pool.query(
    `SELECT hotel_id, room_id, app_version, launcher_version, config_version, upgrade_ts_ms, register_ts_ms
     FROM room_status.room_status_moment_g5
     WHERE hotel_id = $1 AND room_id = $2
     LIMIT 1`,
    [probeHotelId, probeRoom]
  );

  console.log(`[probe-db] event_rows=${eventResult.rowCount}`);
  if (eventResult.rowCount > 0) {
    const row = eventResult.rows[0];
    console.log(`[probe-db] event.ts_ms=${row.ts_ms} room_id=${row.room_id} app_version=${row.app_version}`);
    console.log(`[probe-db] event.udp_raw=${row.udp_raw}`);
  }

  console.log(`[probe-db] room_status_rows=${statusResult.rowCount}`);
  if (statusResult.rowCount > 0) {
    const row = statusResult.rows[0];
    console.log(`[probe-db] room_status.room_id=${row.room_id} register_ts_ms=${row.register_ts_ms} upgrade_ts_ms=${row.upgrade_ts_ms}`);
  } else {
    console.log('[probe-db] room_status row not found (expected behavior when key does not exist)');
  }
};

// Fix: the original duplicated `await pool.end()` in both the then and catch
// handlers. try/catch/finally guarantees the pool is closed exactly once on
// every path while preserving the success/failure exit codes.
try {
  await main();
} catch (err) {
  console.error(`[probe-db] verify failed: ${err.message}`);
  process.exitCode = 1;
} finally {
  await pool.end();
}
|
||||||
36
bls-register-backend/scripts/verify_data.js
Normal file
36
bls-register-backend/scripts/verify_data.js
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
|
||||||
|
import { config } from '../src/config/config.js';
import dbManager from '../src/db/databaseManager.js';
import { logger } from '../src/utils/logger.js';

/**
 * Ad-hoc verification: print the total row count and the 5 most recent rows
 * of the configured register table.
 *
 * Fix: the original awaited `dbManager.pool.connect()` OUTSIDE the try block
 * and invoked `verifyData()` as a floating promise, so a connection failure
 * produced an unhandled rejection and skipped pool cleanup. The connect now
 * happens inside try, the release is guarded, and failures set a non-zero
 * exit code.
 */
const verifyData = async () => {
  let client;
  try {
    client = await dbManager.pool.connect();
    console.log('Verifying data in database...');

    // Count total rows
    const countSql = `SELECT count(*) FROM ${config.db.schema}.${config.db.table}`;
    const countRes = await client.query(countSql);
    console.log(`Total rows in ${config.db.schema}.${config.db.table}: ${countRes.rows[0].count}`);

    // Check recent rows
    const recentSql = `
      SELECT * FROM ${config.db.schema}.${config.db.table}
      ORDER BY ts_ms DESC
      LIMIT 5
    `;
    const recentRes = await client.query(recentSql);
    console.log('Recent 5 rows:');
    recentRes.rows.forEach((row) => {
      console.log(JSON.stringify(row));
    });
  } catch (err) {
    console.error('Error verifying data:', err);
    process.exitCode = 1;
  } finally {
    // Release the client only if connect succeeded, then drain the pool so
    // the script can exit naturally.
    client?.release();
    await dbManager.pool.end();
  }
};

verifyData();
|
||||||
50
bls-register-backend/spec/onoffline-spec.md
Normal file
50
bls-register-backend/spec/onoffline-spec.md
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
bls-onoffline-backend 规格说明
|
||||||
|
|
||||||
|
1. Kafka 数据结构
|
||||||
|
{
|
||||||
|
"HotelCode": "1085",
|
||||||
|
"MAC": "00:1A:2B:3C:4D:5E",
|
||||||
|
"HostNumber": "091123987456",
|
||||||
|
"RoomNumber": "8888房",
|
||||||
|
"EndPoint": "50.2.60.1:6543",
|
||||||
|
"CurrentStatus": "on",
|
||||||
|
"CurrentTime": "2026-02-02T10:30:00Z",
|
||||||
|
"UnixTime": 1770000235000,
|
||||||
|
"LauncherVersion": "1.0.0",
|
||||||
|
"RebootReason": "1"
|
||||||
|
}
|
||||||
|
|
||||||
|
2. Kafka 主题
|
||||||
|
Topic:blwlog4Nodejs-rcu-onoffline-topic-0
|
||||||
|
|
||||||
|
3. 数据库结构
|
||||||
|
数据库:log_platform
|
||||||
|
表:onoffline_record
|
||||||
|
字段:
|
||||||
|
guid varchar(32)
|
||||||
|
ts_ms int8
|
||||||
|
write_ts_ms int8
|
||||||
|
hotel_id int2
|
||||||
|
mac varchar(21)
|
||||||
|
device_id varchar(64)
|
||||||
|
room_id varchar(64)
|
||||||
|
ip varchar(21)
|
||||||
|
current_status varchar(255)
launcher_version varchar(255)
reboot_reason varchar(255)
|
||||||
|
主键:(ts_ms, mac, device_id, room_id)
|
||||||
|
按 ts_ms 每日分区
|
||||||
|
|
||||||
|
G5库结构(双写,临时接入):
|
||||||
|
库同为:log_platform
|
||||||
|
表:onoffline_record_g5
|
||||||
|
差异字段:
|
||||||
|
- guid 为 int4,由库自己生成。
|
||||||
|
- record_source 固定为 CRICS。
|
||||||
|
- current_status 为 int2,on映射为1,off映射为2,其余为0。
|
||||||
|
支持通过环境变量开关双写。
|
||||||
|
|
||||||
|
4. 数据处理规则
|
||||||
|
非重启数据:reboot_reason 为空或不存在,current_status 取 CurrentStatus
|
||||||
|
重启数据:reboot_reason 不为空,current_status 固定为 on
|
||||||
|
其余字段直接按 Kafka 原值落库,空值不补 0
|
||||||
64
bls-register-backend/src/config/config.js
Normal file
64
bls-register-backend/src/config/config.js
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
import dotenv from 'dotenv';

dotenv.config();

/**
 * Parse an environment value as a number.
 * @param {string|undefined} value - Raw env value.
 * @param {number} defaultValue - Used when the value is missing or not finite.
 * @returns {number}
 */
const parseNumber = (value, defaultValue) => {
  const parsed = Number(value);
  return Number.isFinite(parsed) ? parsed : defaultValue;
};

/**
 * Split a comma-separated environment value into trimmed, non-empty items.
 * @param {string|undefined} value
 * @returns {string[]}
 */
const parseList = (value) =>
  (value || '')
    .split(',')
    .map((item) => item.trim())
    .filter(Boolean);

// Fix: honor KAFKA_SASL_ENABLED. Previously SASL credentials were applied
// whenever username/password were set, so KAFKA_SASL_ENABLED=false could not
// disable SASL (scripts/publishProbe.js already gates on this flag). Leaving
// the flag unset preserves the old credentials-only behavior.
const saslExplicitlyDisabled = process.env.KAFKA_SASL_ENABLED === 'false';
const sasl =
  !saslExplicitlyDisabled && process.env.KAFKA_SASL_USERNAME && process.env.KAFKA_SASL_PASSWORD
    ? {
        mechanism: process.env.KAFKA_SASL_MECHANISM || 'plain',
        username: process.env.KAFKA_SASL_USERNAME,
        password: process.env.KAFKA_SASL_PASSWORD
      }
    : undefined;

// Central runtime configuration, sourced entirely from the environment.
// db points at the G5 PostgreSQL instance (POSTGRES_*_G5 variables).
export const config = {
  env: process.env.NODE_ENV || 'development',
  port: parseNumber(process.env.PORT, 3001),
  kafka: {
    brokers: parseList(process.env.KAFKA_BROKERS),
    topic: process.env.KAFKA_TOPIC || process.env.KAFKA_TOPICS || 'blwlog4Nodejs-rcu-register-topic',
    groupId: process.env.KAFKA_GROUP_ID || 'bls-register-consumer',
    clientId: process.env.KAFKA_CLIENT_ID || 'bls-register-consumer-client',
    consumerInstances: parseNumber(process.env.KAFKA_CONSUMER_INSTANCES, 1),
    maxInFlight: parseNumber(process.env.KAFKA_MAX_IN_FLIGHT, 20000),
    fetchMaxBytes: parseNumber(process.env.KAFKA_FETCH_MAX_BYTES, 50 * 1024 * 1024),
    fetchMinBytes: parseNumber(process.env.KAFKA_FETCH_MIN_BYTES, 256 * 1024),
    fetchMaxWaitMs: parseNumber(process.env.KAFKA_FETCH_MAX_WAIT_MS, 100),
    fromOffset: process.env.KAFKA_FROM_OFFSET || 'latest',
    autoCommitIntervalMs: parseNumber(process.env.KAFKA_AUTO_COMMIT_INTERVAL_MS, 5000),
    commitIntervalMs: parseNumber(process.env.KAFKA_COMMIT_INTERVAL_MS, 200),
    commitOnAttempt: process.env.KAFKA_COMMIT_ON_ATTEMPT === 'true',
    batchSize: parseNumber(process.env.KAFKA_BATCH_SIZE, 5000),
    batchTimeoutMs: parseNumber(process.env.KAFKA_BATCH_TIMEOUT_MS, 50),
    flushIntervalMs: parseNumber(process.env.KAFKA_FLUSH_INTERVAL_MS, 3000),
    logMessages: process.env.KAFKA_LOG_MESSAGES === 'true',
    sasl
  },
  db: {
    host: process.env.POSTGRES_HOST_G5,
    port: parseNumber(process.env.POSTGRES_PORT_G5, 5434),
    user: process.env.POSTGRES_USER_G5,
    password: process.env.POSTGRES_PASSWORD_G5,
    database: process.env.POSTGRES_DATABASE_G5,
    max: parseNumber(process.env.POSTGRES_MAX_CONNECTIONS_G5, 6),
    ssl: process.env.POSTGRES_SSL_G5 === 'true' ? { rejectUnauthorized: false } : undefined,
    schema: process.env.DB_SCHEMA || 'rcu_info',
    table: process.env.DB_TABLE || 'rcu_info_events_g5',
    roomStatusSchema: process.env.DB_ROOM_STATUS_SCHEMA || 'room_status',
    roomStatusTable: process.env.DB_ROOM_STATUS_TABLE || 'room_status_moment_g5'
  },
  redis: {
    host: process.env.REDIS_HOST || 'localhost',
    port: parseNumber(process.env.REDIS_PORT, 6379),
    password: process.env.REDIS_PASSWORD || undefined,
    db: parseNumber(process.env.REDIS_DB, 0),
    projectName: process.env.REDIS_PROJECT_NAME || 'bls-onoffline',
    apiBaseUrl: process.env.REDIS_API_BASE_URL || `http://localhost:${parseNumber(process.env.PORT, 3001)}`
  }
};
|
||||||
242
bls-register-backend/src/db/databaseManager.js
Normal file
242
bls-register-backend/src/db/databaseManager.js
Normal file
@@ -0,0 +1,242 @@
|
|||||||
|
import pg from 'pg';
|
||||||
|
import { config } from '../config/config.js';
|
||||||
|
import { logger } from '../utils/logger.js';
|
||||||
|
|
||||||
|
const { Pool } = pg;
|
||||||
|
|
||||||
|
// Insert column order for the register-event table. This order is
// significant: it must line up positionally with the 33 typed UNNEST
// parameters ($1::int8[] .. $33::text[]) in insertRegisterRows, which also
// builds its column-major params by mapping over this array.
const registerColumns = [
  'ts_ms',
  'hotel_id',
  'room_id',
  'device_id',
  'write_ts_ms',
  'is_send',
  'udp_raw',
  'extra',
  'ip_type',
  'model_num',
  'server_ip',
  'ip',
  'subnet_mask',
  'gateway',
  'dns',
  'app_version',
  'rcu_time',
  'launcher_version',
  'mac',
  'room_type_id',
  'config_version',
  'room_status',
  'season',
  'sys_lock_status',
  'authorization_time',
  'authorization_days',
  'room_num_remark',
  'room_type_remark',
  'room_remark',
  'mcu_name',
  'central_control_name',
  'configure_hotel_name',
  'configure_room_type_name'
];
|
||||||
|
|
||||||
|
// Column order for room-status writes. Must line up positionally with the
// seven typed UNNEST parameters ($1::int2[] .. $7::int8[]) used by
// updateRoomStatusRows.
const roomStatusColumns = [
  'hotel_id',
  'room_id',
  'app_version',
  'launcher_version',
  'config_version',
  'upgrade_ts_ms',
  'register_ts_ms'
];
|
||||||
|
|
||||||
|
export class DatabaseManager {
|
||||||
|
constructor(dbConfig) {
|
||||||
|
this.pool = new Pool({
|
||||||
|
host: dbConfig.host,
|
||||||
|
port: dbConfig.port,
|
||||||
|
user: dbConfig.user,
|
||||||
|
password: dbConfig.password,
|
||||||
|
database: dbConfig.database,
|
||||||
|
max: dbConfig.max,
|
||||||
|
ssl: dbConfig.ssl
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
async insertRegisterRows({ schema, table, rows }) {
|
||||||
|
if (!rows || rows.length === 0) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const statement = `
|
||||||
|
INSERT INTO ${schema}.${table} (${registerColumns.join(', ')})
|
||||||
|
SELECT *
|
||||||
|
FROM UNNEST(
|
||||||
|
$1::int8[],
|
||||||
|
$2::int2[],
|
||||||
|
$3::text[],
|
||||||
|
$4::text[],
|
||||||
|
$5::int8[],
|
||||||
|
$6::int2[],
|
||||||
|
$7::text[],
|
||||||
|
$8::jsonb[],
|
||||||
|
$9::int2[],
|
||||||
|
$10::text[],
|
||||||
|
$11::text[],
|
||||||
|
$12::text[],
|
||||||
|
$13::text[],
|
||||||
|
$14::text[],
|
||||||
|
$15::text[],
|
||||||
|
$16::text[],
|
||||||
|
$17::text[],
|
||||||
|
$18::text[],
|
||||||
|
$19::text[],
|
||||||
|
$20::int8[],
|
||||||
|
$21::text[],
|
||||||
|
$22::int4[],
|
||||||
|
$23::int4[],
|
||||||
|
$24::int4[],
|
||||||
|
$25::text[],
|
||||||
|
$26::text[],
|
||||||
|
$27::text[],
|
||||||
|
$28::text[],
|
||||||
|
$29::text[],
|
||||||
|
$30::text[],
|
||||||
|
$31::text[],
|
||||||
|
$32::text[],
|
||||||
|
$33::text[]
|
||||||
|
)
|
||||||
|
ON CONFLICT DO NOTHING
|
||||||
|
`;
|
||||||
|
|
||||||
|
try {
|
||||||
|
const params = registerColumns.map((column) => rows.map((row) => row[column] ?? null));
|
||||||
|
await this.pool.query(statement, params);
|
||||||
|
} catch (error) {
|
||||||
|
logger.error('Register table insert failed', {
|
||||||
|
error: error?.message,
|
||||||
|
schema,
|
||||||
|
table,
|
||||||
|
rowsLength: rows.length
|
||||||
|
});
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async updateRoomStatusRows({ schema, table, rows }) {
|
||||||
|
if (!rows || rows.length === 0) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const statement = `
|
||||||
|
WITH incoming AS (
|
||||||
|
SELECT *
|
||||||
|
FROM UNNEST(
|
||||||
|
$1::int2[],
|
||||||
|
$2::text[],
|
||||||
|
$3::text[],
|
||||||
|
$4::text[],
|
||||||
|
$5::text[],
|
||||||
|
$6::int8[],
|
||||||
|
$7::int8[]
|
||||||
|
) AS u(${roomStatusColumns.join(', ')})
|
||||||
|
), dedup AS (
|
||||||
|
SELECT DISTINCT ON (hotel_id, room_id)
|
||||||
|
hotel_id,
|
||||||
|
room_id,
|
||||||
|
app_version,
|
||||||
|
launcher_version,
|
||||||
|
config_version,
|
||||||
|
upgrade_ts_ms,
|
||||||
|
register_ts_ms
|
||||||
|
FROM incoming
|
||||||
|
ORDER BY hotel_id, room_id, register_ts_ms DESC
|
||||||
|
), existing AS (
|
||||||
|
SELECT i.*, t.device_id
|
||||||
|
FROM dedup i
|
||||||
|
INNER JOIN ${schema}.${table} t
|
||||||
|
ON t.hotel_id = i.hotel_id
|
||||||
|
AND t.room_id = i.room_id
|
||||||
|
)
|
||||||
|
INSERT INTO ${schema}.${table} (
|
||||||
|
hotel_id,
|
||||||
|
room_id,
|
||||||
|
device_id,
|
||||||
|
app_version,
|
||||||
|
launcher_version,
|
||||||
|
config_version,
|
||||||
|
upgrade_ts_ms,
|
||||||
|
register_ts_ms
|
||||||
|
)
|
||||||
|
SELECT
|
||||||
|
hotel_id,
|
||||||
|
room_id,
|
||||||
|
device_id,
|
||||||
|
app_version,
|
||||||
|
launcher_version,
|
||||||
|
config_version,
|
||||||
|
upgrade_ts_ms,
|
||||||
|
register_ts_ms
|
||||||
|
FROM existing
|
||||||
|
ON CONFLICT (hotel_id, room_id) DO UPDATE
|
||||||
|
SET
|
||||||
|
app_version = EXCLUDED.app_version,
|
||||||
|
launcher_version = EXCLUDED.launcher_version,
|
||||||
|
config_version = EXCLUDED.config_version,
|
||||||
|
upgrade_ts_ms = EXCLUDED.upgrade_ts_ms,
|
||||||
|
register_ts_ms = EXCLUDED.register_ts_ms
|
||||||
|
`;
|
||||||
|
|
||||||
|
try {
|
||||||
|
const params = roomStatusColumns.map((column) => rows.map((row) => row[column] ?? null));
|
||||||
|
await this.pool.query(statement, params);
|
||||||
|
} catch (error) {
|
||||||
|
logger.error('Room status table update failed', {
|
||||||
|
error: error?.message,
|
||||||
|
schema,
|
||||||
|
table,
|
||||||
|
rowsLength: rows.length
|
||||||
|
});
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async checkConnection() {
|
||||||
|
let client;
|
||||||
|
try {
|
||||||
|
const connectPromise = this.pool.connect();
|
||||||
|
|
||||||
|
// Create a timeout promise that rejects after 5000ms
|
||||||
|
const timeoutPromise = new Promise((_, reject) => {
|
||||||
|
setTimeout(() => reject(new Error('Connection timeout')), 5000);
|
||||||
|
});
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Race the connection attempt against the timeout
|
||||||
|
client = await Promise.race([connectPromise, timeoutPromise]);
|
||||||
|
} catch (raceError) {
|
||||||
|
// If we timed out, the connectPromise might still resolve later.
|
||||||
|
// We must ensure that if it does, the client is released back to the pool immediately.
|
||||||
|
connectPromise.then(c => c.release()).catch(() => {});
|
||||||
|
throw raceError;
|
||||||
|
}
|
||||||
|
|
||||||
|
await client.query('SELECT 1');
|
||||||
|
return true;
|
||||||
|
} catch (err) {
|
||||||
|
logger.error('Database check connection failed', { error: err.message });
|
||||||
|
return false;
|
||||||
|
} finally {
|
||||||
|
if (client) {
|
||||||
|
client.release();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async close() {
|
||||||
|
await this.pool.end();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const dbManager = new DatabaseManager(config.db);
|
||||||
|
export default dbManager;
|
||||||
121
bls-register-backend/src/db/g5DatabaseManager.js
Normal file
121
bls-register-backend/src/db/g5DatabaseManager.js
Normal file
@@ -0,0 +1,121 @@
|
|||||||
|
import pg from 'pg';
|
||||||
|
import { config } from '../config/config.js';
|
||||||
|
import { logger } from '../utils/logger.js';
|
||||||
|
|
||||||
|
const { Pool } = pg;
|
||||||
|
|
||||||
|
const g5Columns = [
|
||||||
|
'ts_ms',
|
||||||
|
'write_ts_ms',
|
||||||
|
'hotel_id',
|
||||||
|
'mac',
|
||||||
|
'device_id',
|
||||||
|
'room_id',
|
||||||
|
'ip',
|
||||||
|
'current_status',
|
||||||
|
'launcher_version',
|
||||||
|
'reboot_reason',
|
||||||
|
'record_source'
|
||||||
|
];
|
||||||
|
|
||||||
|
export class G5DatabaseManager {
|
||||||
|
constructor(dbConfig) {
|
||||||
|
if (!dbConfig.enabled) return;
|
||||||
|
this.pool = new Pool({
|
||||||
|
host: dbConfig.host,
|
||||||
|
port: dbConfig.port,
|
||||||
|
user: dbConfig.user,
|
||||||
|
password: dbConfig.password,
|
||||||
|
database: dbConfig.database,
|
||||||
|
max: dbConfig.max,
|
||||||
|
ssl: dbConfig.ssl
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
async insertRows({ schema, table, rows }) {
|
||||||
|
if (!this.pool || !rows || rows.length === 0) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const statement = `
|
||||||
|
INSERT INTO ${schema}.${table} (${g5Columns.join(', ')})
|
||||||
|
SELECT *
|
||||||
|
FROM UNNEST(
|
||||||
|
$1::int8[],
|
||||||
|
$2::int8[],
|
||||||
|
$3::int2[],
|
||||||
|
$4::text[],
|
||||||
|
$5::text[],
|
||||||
|
$6::text[],
|
||||||
|
$7::text[],
|
||||||
|
$8::int2[],
|
||||||
|
$9::text[],
|
||||||
|
$10::text[],
|
||||||
|
$11::text[]
|
||||||
|
)
|
||||||
|
ON CONFLICT DO NOTHING
|
||||||
|
`;
|
||||||
|
|
||||||
|
try {
|
||||||
|
const params = g5Columns.map((column) => {
|
||||||
|
return rows.map((row) => {
|
||||||
|
if (column === 'record_source') {
|
||||||
|
return 'CRICS';
|
||||||
|
}
|
||||||
|
if (column === 'current_status') {
|
||||||
|
// current_status in G5 is int2
|
||||||
|
if (row.current_status === 'on') return 1;
|
||||||
|
if (row.current_status === 'off') return 2;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
return row[column] ?? null;
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
await this.pool.query(statement, params);
|
||||||
|
} catch (error) {
|
||||||
|
logger.error('G5 Database insert failed', {
|
||||||
|
error: error?.message,
|
||||||
|
schema,
|
||||||
|
table,
|
||||||
|
rowsLength: rows.length
|
||||||
|
});
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async checkConnection() {
|
||||||
|
if (!this.pool) return true; // Pretend it's ok if disabled
|
||||||
|
let client;
|
||||||
|
try {
|
||||||
|
const connectPromise = this.pool.connect();
|
||||||
|
const timeoutPromise = new Promise((_, reject) => {
|
||||||
|
setTimeout(() => reject(new Error('Connection timeout')), 5000);
|
||||||
|
});
|
||||||
|
try {
|
||||||
|
client = await Promise.race([connectPromise, timeoutPromise]);
|
||||||
|
} catch (raceError) {
|
||||||
|
connectPromise.then(c => c.release()).catch(() => { });
|
||||||
|
throw raceError;
|
||||||
|
}
|
||||||
|
await client.query('SELECT 1');
|
||||||
|
return true;
|
||||||
|
} catch (err) {
|
||||||
|
logger.error('G5 Database check connection failed', { error: err.message });
|
||||||
|
return false;
|
||||||
|
} finally {
|
||||||
|
if (client) {
|
||||||
|
client.release();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async close() {
|
||||||
|
if (this.pool) {
|
||||||
|
await this.pool.end();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const g5DbManager = new G5DatabaseManager(config.g5db);
|
||||||
|
export default g5DbManager;
|
||||||
271
bls-register-backend/src/index.js
Normal file
271
bls-register-backend/src/index.js
Normal file
@@ -0,0 +1,271 @@
|
|||||||
|
import cron from 'node-cron';
|
||||||
|
import { config } from './config/config.js';
|
||||||
|
import dbManager from './db/databaseManager.js';
|
||||||
|
import { createKafkaConsumers } from './kafka/consumer.js';
|
||||||
|
import { parseMessageToRows } from './processor/index.js';
|
||||||
|
import { MetricCollector } from './utils/metricCollector.js';
|
||||||
|
import { logger } from './utils/logger.js';
|
||||||
|
|
||||||
|
const NETWORK_CODES = new Set([
|
||||||
|
'ECONNREFUSED',
|
||||||
|
'ECONNRESET',
|
||||||
|
'EPIPE',
|
||||||
|
'ETIMEDOUT',
|
||||||
|
'ENOTFOUND',
|
||||||
|
'EHOSTUNREACH',
|
||||||
|
'ENETUNREACH',
|
||||||
|
'57P03',
|
||||||
|
'08006',
|
||||||
|
'08001',
|
||||||
|
'08000',
|
||||||
|
'08003'
|
||||||
|
]);
|
||||||
|
|
||||||
|
const isDbConnectionError = (err) => {
|
||||||
|
if (typeof err?.code === 'string' && NETWORK_CODES.has(err.code)) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
const message = typeof err?.message === 'string' ? err.message.toLowerCase() : '';
|
||||||
|
return (
|
||||||
|
message.includes('connection timeout') ||
|
||||||
|
message.includes('connection terminated') ||
|
||||||
|
message.includes('connection refused') ||
|
||||||
|
message.includes('terminating connection') ||
|
||||||
|
message.includes('econnrefused') ||
|
||||||
|
message.includes('econnreset') ||
|
||||||
|
message.includes('etimedout') ||
|
||||||
|
message.includes('could not connect') ||
|
||||||
|
message.includes('the database system is starting up') ||
|
||||||
|
message.includes('no pg_hba.conf entry')
|
||||||
|
);
|
||||||
|
};
|
||||||
|
|
||||||
|
const sleep = (ms) => new Promise((resolve) => setTimeout(resolve, ms));
|
||||||
|
|
||||||
|
const bootstrap = async () => {
|
||||||
|
logger.info('Starting register consumer', {
|
||||||
|
env: config.env,
|
||||||
|
kafka: {
|
||||||
|
brokers: config.kafka.brokers,
|
||||||
|
topic: config.kafka.topic,
|
||||||
|
groupId: config.kafka.groupId
|
||||||
|
},
|
||||||
|
db: {
|
||||||
|
host: config.db.host,
|
||||||
|
port: config.db.port,
|
||||||
|
database: config.db.database,
|
||||||
|
schema: config.db.schema,
|
||||||
|
table: config.db.table,
|
||||||
|
roomStatusSchema: config.db.roomStatusSchema,
|
||||||
|
roomStatusTable: config.db.roomStatusTable
|
||||||
|
},
|
||||||
|
flushIntervalMs: config.kafka.flushIntervalMs
|
||||||
|
});
|
||||||
|
|
||||||
|
const metricCollector = new MetricCollector();
|
||||||
|
const totals = {
|
||||||
|
kafkaPulled: 0,
|
||||||
|
dbInserted: 0,
|
||||||
|
parseError: 0,
|
||||||
|
dbFailed: 0
|
||||||
|
};
|
||||||
|
const flushIntervalMs = Math.max(3000, Number.isFinite(config.kafka.flushIntervalMs) ? config.kafka.flushIntervalMs : 3000);
|
||||||
|
|
||||||
|
const queue = [];
|
||||||
|
let flushTimer = null;
|
||||||
|
let flushing = false;
|
||||||
|
const runCounterTimer = setInterval(() => {
|
||||||
|
logger.info('Run counters', {
|
||||||
|
kafkaPulled: totals.kafkaPulled,
|
||||||
|
dbInserted: totals.dbInserted,
|
||||||
|
parseError: totals.parseError,
|
||||||
|
dbFailed: totals.dbFailed
|
||||||
|
});
|
||||||
|
}, 10000);
|
||||||
|
|
||||||
|
const handleError = (error, message) => {
|
||||||
|
logger.error('Kafka processing error', {
|
||||||
|
error: error?.message,
|
||||||
|
type: error?.type,
|
||||||
|
topic: message?.topic,
|
||||||
|
partition: message?.partition,
|
||||||
|
offset: message?.offset
|
||||||
|
});
|
||||||
|
};
|
||||||
|
|
||||||
|
cron.schedule('* * * * *', () => {
|
||||||
|
const metrics = metricCollector.getAndReset();
|
||||||
|
const flushAvgMs = metrics.batch_flush_count > 0
|
||||||
|
? (metrics.batch_flush_ms_sum / metrics.batch_flush_count).toFixed(1)
|
||||||
|
: '0.0';
|
||||||
|
const dbAvgMs = metrics.db_insert_count > 0
|
||||||
|
? (metrics.db_insert_ms_sum / metrics.db_insert_count).toFixed(1)
|
||||||
|
: '0.0';
|
||||||
|
logger.info('Minute metrics', {
|
||||||
|
kafkaPulled: metrics.kafka_pulled,
|
||||||
|
parseError: metrics.parse_error,
|
||||||
|
dbInserted: metrics.db_inserted,
|
||||||
|
dbFailed: metrics.db_failed,
|
||||||
|
flushAvgMs,
|
||||||
|
dbAvgMs
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
const processValidRowsWithRetry = async (registerRows, roomStatusRows) => {
|
||||||
|
const startedAt = Date.now();
|
||||||
|
while (true) {
|
||||||
|
try {
|
||||||
|
await dbManager.insertRegisterRows({
|
||||||
|
schema: config.db.schema,
|
||||||
|
table: config.db.table,
|
||||||
|
rows: registerRows
|
||||||
|
});
|
||||||
|
await dbManager.updateRoomStatusRows({
|
||||||
|
schema: config.db.roomStatusSchema,
|
||||||
|
table: config.db.roomStatusTable,
|
||||||
|
rows: roomStatusRows
|
||||||
|
});
|
||||||
|
|
||||||
|
metricCollector.increment('db_insert_count', 1);
|
||||||
|
metricCollector.increment('db_insert_ms_sum', Date.now() - startedAt);
|
||||||
|
metricCollector.increment('db_inserted', registerRows.length);
|
||||||
|
totals.dbInserted += registerRows.length;
|
||||||
|
return;
|
||||||
|
} catch (err) {
|
||||||
|
if (!isDbConnectionError(err)) {
|
||||||
|
throw err;
|
||||||
|
}
|
||||||
|
logger.warn('Database unavailable, retrying in 5s', { error: err?.message });
|
||||||
|
await sleep(5000);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const scheduleFlush = () => {
|
||||||
|
if (flushTimer) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
flushTimer = setTimeout(() => {
|
||||||
|
flushTimer = null;
|
||||||
|
void flushQueue();
|
||||||
|
}, flushIntervalMs);
|
||||||
|
};
|
||||||
|
|
||||||
|
const flushQueue = async () => {
|
||||||
|
if (flushing) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
if (queue.length === 0) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
flushing = true;
|
||||||
|
const startedAt = Date.now();
|
||||||
|
const currentBatch = queue.splice(0, queue.length);
|
||||||
|
|
||||||
|
const parsedItems = [];
|
||||||
|
for (const item of currentBatch) {
|
||||||
|
try {
|
||||||
|
const parsed = parseMessageToRows(item.message);
|
||||||
|
parsedItems.push({ item, parsed });
|
||||||
|
} catch (err) {
|
||||||
|
metricCollector.increment('parse_error');
|
||||||
|
totals.parseError += 1;
|
||||||
|
handleError(err, item.message);
|
||||||
|
item.resolve();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const insertParsedItems = async (items) => {
|
||||||
|
if (items.length === 0) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const registerRows = items.flatMap((it) => it.parsed.registerRows);
|
||||||
|
const roomStatusRows = items.flatMap((it) => it.parsed.roomStatusRows);
|
||||||
|
|
||||||
|
try {
|
||||||
|
await processValidRowsWithRetry(registerRows, roomStatusRows);
|
||||||
|
} catch (err) {
|
||||||
|
if (items.length > 1) {
|
||||||
|
const mid = Math.floor(items.length / 2);
|
||||||
|
await insertParsedItems(items.slice(0, mid));
|
||||||
|
await insertParsedItems(items.slice(mid));
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
metricCollector.increment('db_failed', 1);
|
||||||
|
totals.dbFailed += 1;
|
||||||
|
handleError(err, items[0].item.message);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if (parsedItems.length > 0) {
|
||||||
|
await insertParsedItems(parsedItems);
|
||||||
|
|
||||||
|
for (const parsedItem of parsedItems) {
|
||||||
|
parsedItem.item.resolve();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
metricCollector.increment('batch_flush_count', 1);
|
||||||
|
metricCollector.increment('batch_flush_ms_sum', Date.now() - startedAt);
|
||||||
|
|
||||||
|
flushing = false;
|
||||||
|
|
||||||
|
if (queue.length > 0) {
|
||||||
|
scheduleFlush();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
const handleMessage = (message) => {
|
||||||
|
metricCollector.increment('kafka_pulled');
|
||||||
|
totals.kafkaPulled += 1;
|
||||||
|
return new Promise((resolve) => {
|
||||||
|
queue.push({ message, resolve });
|
||||||
|
scheduleFlush();
|
||||||
|
});
|
||||||
|
};
|
||||||
|
|
||||||
|
const consumers = createKafkaConsumers({
|
||||||
|
kafkaConfig: config.kafka,
|
||||||
|
onMessage: handleMessage,
|
||||||
|
onError: handleError
|
||||||
|
});
|
||||||
|
|
||||||
|
const shutdown = async (signal) => {
|
||||||
|
logger.info(`Received ${signal}, shutting down...`);
|
||||||
|
try {
|
||||||
|
if (flushTimer) {
|
||||||
|
clearTimeout(flushTimer);
|
||||||
|
flushTimer = null;
|
||||||
|
}
|
||||||
|
clearInterval(runCounterTimer);
|
||||||
|
await flushQueue();
|
||||||
|
|
||||||
|
if (consumers && consumers.length > 0) {
|
||||||
|
await Promise.all(consumers.map((consumer) => new Promise((resolve) => consumer.close(true, resolve))));
|
||||||
|
}
|
||||||
|
|
||||||
|
await dbManager.close();
|
||||||
|
logger.info('Run summary', {
|
||||||
|
kafkaPulled: totals.kafkaPulled,
|
||||||
|
dbInserted: totals.dbInserted,
|
||||||
|
parseError: totals.parseError,
|
||||||
|
dbFailed: totals.dbFailed
|
||||||
|
});
|
||||||
|
process.exit(0);
|
||||||
|
} catch (err) {
|
||||||
|
logger.error('Error during shutdown', { error: err?.message });
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
process.on('SIGTERM', () => shutdown('SIGTERM'));
|
||||||
|
process.on('SIGINT', () => shutdown('SIGINT'));
|
||||||
|
};
|
||||||
|
|
||||||
|
bootstrap().catch((error) => {
|
||||||
|
logger.error('Service bootstrap failed', { error: error?.message });
|
||||||
|
process.exit(1);
|
||||||
|
});
|
||||||
175
bls-register-backend/src/kafka/consumer.js
Normal file
175
bls-register-backend/src/kafka/consumer.js
Normal file
@@ -0,0 +1,175 @@
|
|||||||
|
import kafka from 'kafka-node';
|
||||||
|
import { logger } from '../utils/logger.js';
|
||||||
|
|
||||||
|
const { ConsumerGroup } = kafka;
|
||||||
|
|
||||||
|
import { OffsetTracker } from './offsetTracker.js';
|
||||||
|
|
||||||
|
const createOneConsumer = ({ kafkaConfig, onMessage, onError, instanceIndex }) => {
|
||||||
|
const kafkaHost = kafkaConfig.brokers.join(',');
|
||||||
|
const clientId = instanceIndex === 0 ? kafkaConfig.clientId : `${kafkaConfig.clientId}-${instanceIndex}`;
|
||||||
|
const id = `${clientId}-${process.pid}-${Date.now()}`;
|
||||||
|
const maxInFlight = Number.isFinite(kafkaConfig.maxInFlight) ? kafkaConfig.maxInFlight : 5000;
|
||||||
|
const commitIntervalMs = Number.isFinite(kafkaConfig.commitIntervalMs) ? kafkaConfig.commitIntervalMs : 200;
|
||||||
|
let inFlight = 0;
|
||||||
|
|
||||||
|
const tracker = new OffsetTracker();
|
||||||
|
let pendingCommits = new Map(); // key: `${topic}-${partition}` -> { topic, partition, offset }
|
||||||
|
let commitTimer = null;
|
||||||
|
|
||||||
|
const flushCommits = () => {
|
||||||
|
if (pendingCommits.size === 0) return;
|
||||||
|
const batch = pendingCommits;
|
||||||
|
pendingCommits = new Map();
|
||||||
|
|
||||||
|
consumer.sendOffsetCommitRequest(
|
||||||
|
Array.from(batch.values()),
|
||||||
|
(err) => {
|
||||||
|
if (err) {
|
||||||
|
for (const [k, v] of batch.entries()) {
|
||||||
|
pendingCommits.set(k, v);
|
||||||
|
}
|
||||||
|
logger.error('Kafka commit failed', { error: err?.message, count: batch.size });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
);
|
||||||
|
};
|
||||||
|
|
||||||
|
const scheduleCommitFlush = () => {
|
||||||
|
if (commitTimer) return;
|
||||||
|
commitTimer = setTimeout(() => {
|
||||||
|
commitTimer = null;
|
||||||
|
flushCommits();
|
||||||
|
}, commitIntervalMs);
|
||||||
|
};
|
||||||
|
|
||||||
|
const consumer = new ConsumerGroup(
|
||||||
|
{
|
||||||
|
kafkaHost,
|
||||||
|
groupId: kafkaConfig.groupId,
|
||||||
|
clientId,
|
||||||
|
id,
|
||||||
|
fromOffset: kafkaConfig.fromOffset || 'latest',
|
||||||
|
protocol: ['roundrobin'],
|
||||||
|
outOfRangeOffset: 'latest',
|
||||||
|
autoCommit: false,
|
||||||
|
autoCommitIntervalMs: kafkaConfig.autoCommitIntervalMs,
|
||||||
|
fetchMaxBytes: kafkaConfig.fetchMaxBytes,
|
||||||
|
fetchMinBytes: kafkaConfig.fetchMinBytes,
|
||||||
|
fetchMaxWaitMs: kafkaConfig.fetchMaxWaitMs,
|
||||||
|
sasl: kafkaConfig.sasl
|
||||||
|
},
|
||||||
|
kafkaConfig.topic
|
||||||
|
);
|
||||||
|
|
||||||
|
const tryResume = () => {
|
||||||
|
if (inFlight < maxInFlight && consumer.paused) {
|
||||||
|
consumer.resume();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
consumer.on('message', (message) => {
|
||||||
|
inFlight += 1;
|
||||||
|
tracker.add(message.topic, message.partition, message.offset);
|
||||||
|
|
||||||
|
if (inFlight >= maxInFlight) {
|
||||||
|
consumer.pause();
|
||||||
|
}
|
||||||
|
Promise.resolve(onMessage(message))
|
||||||
|
.then(() => {})
|
||||||
|
.catch((error) => {
|
||||||
|
logger.error('Kafka message handling failed', { error: error?.message });
|
||||||
|
if (onError) {
|
||||||
|
onError(error, message);
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.finally(() => {
|
||||||
|
const commitOffset = tracker.markDone(message.topic, message.partition, message.offset);
|
||||||
|
if (commitOffset !== null) {
|
||||||
|
const key = `${message.topic}-${message.partition}`;
|
||||||
|
pendingCommits.set(key, {
|
||||||
|
topic: message.topic,
|
||||||
|
partition: message.partition,
|
||||||
|
offset: commitOffset,
|
||||||
|
metadata: 'm'
|
||||||
|
});
|
||||||
|
scheduleCommitFlush();
|
||||||
|
}
|
||||||
|
inFlight -= 1;
|
||||||
|
tryResume();
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
consumer.on('error', (error) => {
|
||||||
|
logger.error('Kafka consumer error', { error: error?.message });
|
||||||
|
if (onError) {
|
||||||
|
onError(error);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
consumer.on('connect', () => {
|
||||||
|
logger.info(`Kafka Consumer connected`, {
|
||||||
|
groupId: kafkaConfig.groupId,
|
||||||
|
clientId: clientId
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
consumer.on('rebalancing', () => {
|
||||||
|
logger.info(`Kafka Consumer rebalancing`, {
|
||||||
|
groupId: kafkaConfig.groupId,
|
||||||
|
clientId: clientId
|
||||||
|
});
|
||||||
|
tracker.clear();
|
||||||
|
pendingCommits.clear();
|
||||||
|
if (commitTimer) {
|
||||||
|
clearTimeout(commitTimer);
|
||||||
|
commitTimer = null;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
consumer.on('rebalanced', () => {
|
||||||
|
logger.info('Kafka Consumer rebalanced', { clientId, groupId: kafkaConfig.groupId });
|
||||||
|
});
|
||||||
|
|
||||||
|
consumer.on('error', (err) => {
|
||||||
|
logger.error('Kafka Consumer Error', { error: err.message });
|
||||||
|
});
|
||||||
|
|
||||||
|
consumer.on('offsetOutOfRange', (err) => {
|
||||||
|
logger.warn('Offset out of range', { error: err.message, topic: err.topic, partition: err.partition });
|
||||||
|
});
|
||||||
|
|
||||||
|
|
||||||
|
consumer.on('offsetOutOfRange', (error) => {
|
||||||
|
logger.warn(`Kafka Consumer offset out of range`, {
|
||||||
|
error: error?.message,
|
||||||
|
groupId: kafkaConfig.groupId,
|
||||||
|
clientId: clientId
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
consumer.on('close', () => {
|
||||||
|
if (commitTimer) {
|
||||||
|
clearTimeout(commitTimer);
|
||||||
|
commitTimer = null;
|
||||||
|
}
|
||||||
|
flushCommits();
|
||||||
|
logger.warn(`Kafka Consumer closed`, {
|
||||||
|
groupId: kafkaConfig.groupId,
|
||||||
|
clientId: clientId
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
return consumer;
|
||||||
|
};
|
||||||
|
|
||||||
|
export const createKafkaConsumers = ({ kafkaConfig, onMessage, onError }) => {
|
||||||
|
const instances = Number.isFinite(kafkaConfig.consumerInstances) ? kafkaConfig.consumerInstances : 1;
|
||||||
|
const count = Math.max(1, instances);
|
||||||
|
return Array.from({ length: count }, (_, idx) =>
|
||||||
|
createOneConsumer({ kafkaConfig, onMessage, onError, instanceIndex: idx })
|
||||||
|
);
|
||||||
|
};
|
||||||
|
|
||||||
|
export const createKafkaConsumer = ({ kafkaConfig, onMessage, onError }) =>
|
||||||
|
createKafkaConsumers({ kafkaConfig, onMessage, onError })[0];
|
||||||
53
bls-register-backend/src/kafka/offsetTracker.js
Normal file
53
bls-register-backend/src/kafka/offsetTracker.js
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
export class OffsetTracker {
|
||||||
|
constructor() {
|
||||||
|
// Map<topic-partition, { nextCommitOffset: number|null, done: Set<number> }>
|
||||||
|
this.partitions = new Map();
|
||||||
|
}
|
||||||
|
|
||||||
|
// Called when a message is received (before processing)
|
||||||
|
add(topic, partition, offset) {
|
||||||
|
const key = `${topic}-${partition}`;
|
||||||
|
if (!this.partitions.has(key)) {
|
||||||
|
this.partitions.set(key, { nextCommitOffset: null, done: new Set() });
|
||||||
|
}
|
||||||
|
const state = this.partitions.get(key);
|
||||||
|
const numericOffset = Number(offset);
|
||||||
|
if (!Number.isFinite(numericOffset)) return;
|
||||||
|
if (state.nextCommitOffset === null) {
|
||||||
|
state.nextCommitOffset = numericOffset;
|
||||||
|
} else if (numericOffset < state.nextCommitOffset) {
|
||||||
|
state.nextCommitOffset = numericOffset;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Called when a message is successfully processed
|
||||||
|
// Returns the next offset to commit (if any advancement is possible), or null
|
||||||
|
markDone(topic, partition, offset) {
|
||||||
|
const key = `${topic}-${partition}`;
|
||||||
|
const state = this.partitions.get(key);
|
||||||
|
if (!state) return null;
|
||||||
|
|
||||||
|
const numericOffset = Number(offset);
|
||||||
|
if (!Number.isFinite(numericOffset)) return null;
|
||||||
|
|
||||||
|
state.done.add(numericOffset);
|
||||||
|
|
||||||
|
if (state.nextCommitOffset === null) {
|
||||||
|
state.nextCommitOffset = numericOffset;
|
||||||
|
}
|
||||||
|
|
||||||
|
let advanced = false;
|
||||||
|
while (state.nextCommitOffset !== null && state.done.has(state.nextCommitOffset)) {
|
||||||
|
state.done.delete(state.nextCommitOffset);
|
||||||
|
state.nextCommitOffset += 1;
|
||||||
|
advanced = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if (!advanced) return null;
|
||||||
|
return state.nextCommitOffset;
|
||||||
|
}
|
||||||
|
|
||||||
|
clear() {
|
||||||
|
this.partitions.clear();
|
||||||
|
}
|
||||||
|
}
|
||||||
288
bls-register-backend/src/processor/index.js
Normal file
288
bls-register-backend/src/processor/index.js
Normal file
@@ -0,0 +1,288 @@
|
|||||||
|
import { kafkaPayloadSchema } from '../schema/kafkaPayload.js';
|
||||||
|
|
||||||
|
const parseKafkaPayload = (value) => {
|
||||||
|
const raw = Buffer.isBuffer(value) ? value.toString('utf8') : value;
|
||||||
|
if (typeof raw !== 'string') {
|
||||||
|
throw new Error('Invalid kafka message value');
|
||||||
|
}
|
||||||
|
return JSON.parse(raw);
|
||||||
|
};
|
||||||
|
|
||||||
|
const normalizeText = (value, maxLength) => {
|
||||||
|
if (value === undefined || value === null) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
const str = String(value).replace(/\u0000/g, '');
|
||||||
|
if (maxLength && str.length > maxLength) {
|
||||||
|
return str.substring(0, maxLength);
|
||||||
|
}
|
||||||
|
return str;
|
||||||
|
};
|
||||||
|
|
||||||
|
const sanitizeJsonValue = (value) => {
|
||||||
|
if (value === undefined || value === null) {
|
||||||
|
return value;
|
||||||
|
}
|
||||||
|
if (typeof value === 'string') {
|
||||||
|
return value.replace(/\u0000/g, '');
|
||||||
|
}
|
||||||
|
if (Array.isArray(value)) {
|
||||||
|
return value.map((item) => sanitizeJsonValue(item));
|
||||||
|
}
|
||||||
|
if (typeof value === 'object') {
|
||||||
|
const out = {};
|
||||||
|
for (const [k, v] of Object.entries(value)) {
|
||||||
|
out[k] = sanitizeJsonValue(v);
|
||||||
|
}
|
||||||
|
return out;
|
||||||
|
}
|
||||||
|
return value;
|
||||||
|
};
|
||||||
|
|
||||||
|
const isLikelyBase64 = (text) => {
|
||||||
|
if (!text || text.length % 4 !== 0) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
return /^[A-Za-z0-9+/]+={0,2}$/.test(text);
|
||||||
|
};
|
||||||
|
|
||||||
|
const normalizeInteger = (value) => {
|
||||||
|
if (value === undefined || value === null || value === '') {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
const numeric = typeof value === 'number' ? value : Number(value);
|
||||||
|
if (!Number.isFinite(numeric)) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
return Math.trunc(numeric);
|
||||||
|
};
|
||||||
|
|
||||||
|
const inRangeOr = (value, min, max, fallback) => {
|
||||||
|
if (typeof value !== 'number' || Number.isNaN(value) || value < min || value > max) {
|
||||||
|
return fallback;
|
||||||
|
}
|
||||||
|
return value;
|
||||||
|
};
|
||||||
|
|
||||||
|
const normalizeTsMs = (value) => {
|
||||||
|
const numeric = normalizeInteger(value);
|
||||||
|
if (numeric === null) {
|
||||||
|
return Date.now();
|
||||||
|
}
|
||||||
|
if (numeric > 0 && numeric < 100000000000) {
|
||||||
|
return numeric * 1000;
|
||||||
|
}
|
||||||
|
return numeric;
|
||||||
|
};
|
||||||
|
|
||||||
|
const normalizeUdpRaw = (value) => {
|
||||||
|
if (value === undefined || value === null) {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
if (typeof value === 'string') {
|
||||||
|
const text = value.replace(/\u0000/g, '');
|
||||||
|
if (isLikelyBase64(text)) {
|
||||||
|
return text;
|
||||||
|
}
|
||||||
|
return Buffer.from(text, 'utf8').toString('base64');
|
||||||
|
}
|
||||||
|
if (Buffer.isBuffer(value)) {
|
||||||
|
return value.toString('base64');
|
||||||
|
}
|
||||||
|
if (Array.isArray(value)) {
|
||||||
|
return Buffer.from(value).toString('base64');
|
||||||
|
}
|
||||||
|
return Buffer.from(String(value), 'utf8').toString('base64');
|
||||||
|
};
|
||||||
|
|
||||||
|
const normalizeExtra = (value) => {
|
||||||
|
if (value === undefined || value === null || value === '') {
|
||||||
|
return null;
|
||||||
|
}
|
||||||
|
if (typeof value === 'object') {
|
||||||
|
return sanitizeJsonValue(value);
|
||||||
|
}
|
||||||
|
if (typeof value === 'string') {
|
||||||
|
try {
|
||||||
|
const parsed = JSON.parse(value);
|
||||||
|
if (parsed && typeof parsed === 'object') {
|
||||||
|
return sanitizeJsonValue(parsed);
|
||||||
|
}
|
||||||
|
return sanitizeJsonValue({ value: parsed });
|
||||||
|
} catch {
|
||||||
|
return sanitizeJsonValue({ raw: value });
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return sanitizeJsonValue({ raw: String(value) });
|
||||||
|
};
|
||||||
|
|
||||||
|
const pick = (payload, snakeKey, pascalKey) => {
|
||||||
|
if (payload[snakeKey] !== undefined) {
|
||||||
|
return payload[snakeKey];
|
||||||
|
}
|
||||||
|
if (payload[pascalKey] !== undefined) {
|
||||||
|
return payload[pascalKey];
|
||||||
|
}
|
||||||
|
return undefined;
|
||||||
|
};
|
||||||
|
|
||||||
|
export const buildRowsFromMessageValue = (value) => {
|
||||||
|
const payload = parseKafkaPayload(value);
|
||||||
|
return buildRowsFromPayload(payload);
|
||||||
|
};
|
||||||
|
|
||||||
|
/**
 * Maps a raw producer payload into two row sets:
 *  - `registerRows`: one row for the rcu_info events table, with every field
 *    coerced/truncated to the column types (varchar limits, int2/int4 ranges);
 *  - `roomStatusRows`: one row for the room-status snapshot table.
 * Out-of-range integers fall back to the given default; text is truncated to
 * the column length. Throws (zod) if the payload fails schema validation.
 */
export const buildRowsFromPayload = (rawPayload) => {
  // Collect the known fields via pick(). Both key arguments are currently the
  // same snake_case name, so this is effectively rawPayload[key] for each —
  // pick() exists to allow a PascalCase fallback if a producer ever needs it.
  const normalizedInput = {
    ts_ms: pick(rawPayload, 'ts_ms', 'ts_ms'),
    upgrade_ts_ms: pick(rawPayload, 'upgrade_ts_ms', 'upgrade_ts_ms'),
    hotel_id: pick(rawPayload, 'hotel_id', 'hotel_id'),
    room_id: pick(rawPayload, 'room_id', 'room_id'),
    device_id: pick(rawPayload, 'device_id', 'device_id'),
    is_send: pick(rawPayload, 'is_send', 'is_send'),
    udp_raw: pick(rawPayload, 'udp_raw', 'udp_raw'),
    extra: pick(rawPayload, 'extra', 'extra'),
    ip_type: pick(rawPayload, 'ip_type', 'ip_type'),
    model_num: pick(rawPayload, 'model_num', 'model_num'),
    server_ip: pick(rawPayload, 'server_ip', 'server_ip'),
    ip: pick(rawPayload, 'ip', 'ip'),
    subnet_mask: pick(rawPayload, 'subnet_mask', 'subnet_mask'),
    gateway: pick(rawPayload, 'gateway', 'gateway'),
    dns: pick(rawPayload, 'dns', 'dns'),
    app_version: pick(rawPayload, 'app_version', 'app_version'),
    rcu_time: pick(rawPayload, 'rcu_time', 'rcu_time'),
    launcher_version: pick(rawPayload, 'launcher_version', 'launcher_version'),
    mac: pick(rawPayload, 'mac', 'mac'),
    room_type_id: pick(rawPayload, 'room_type_id', 'room_type_id'),
    config_version: pick(rawPayload, 'config_version', 'config_version'),
    room_status: pick(rawPayload, 'room_status', 'room_status'),
    season: pick(rawPayload, 'season', 'season'),
    sys_lock_status: pick(rawPayload, 'sys_lock_status', 'sys_lock_status'),
    authorization_time: pick(rawPayload, 'authorization_time', 'authorization_time'),
    authorization_days: pick(rawPayload, 'authorization_days', 'authorization_days'),
    room_num_remark: pick(rawPayload, 'room_num_remark', 'room_num_remark'),
    room_type_remark: pick(rawPayload, 'room_type_remark', 'room_type_remark'),
    room_remark: pick(rawPayload, 'room_remark', 'room_remark'),
    mcu_name: pick(rawPayload, 'mcu_name', 'mcu_name'),
    central_control_name: pick(rawPayload, 'central_control_name', 'central_control_name'),
    configure_hotel_name: pick(rawPayload, 'configure_hotel_name', 'configure_hotel_name'),
    configure_room_type_name: pick(rawPayload, 'configure_room_type_name', 'configure_room_type_name')
  };

  // Validate and coerce via the shared zod schema (throws on failure).
  const payload = kafkaPayloadSchema.parse(normalizedInput);

  // Fields shared by both output rows.
  const tsMs = normalizeTsMs(payload.ts_ms);
  // hotel_id column is int2, so clamp to the smallint range (default 0).
  const hotelId = inRangeOr(normalizeInteger(payload.hotel_id), -32768, 32767, 0);
  const roomId = normalizeText(payload.room_id, 50) || '';

  // Row for the rcu_info events table. The numeric limits passed to
  // normalizeText() mirror the varchar lengths of the target columns.
  const registerRow = {
    ts_ms: tsMs,
    hotel_id: hotelId,
    room_id: roomId,
    device_id: normalizeText(payload.device_id, 64),
    write_ts_ms: Date.now(),
    // int2: 1 = downlink, 0 = report (defaults to 0 when absent/out of range).
    is_send: inRangeOr(normalizeInteger(payload.is_send), -32768, 32767, 0),
    udp_raw: normalizeUdpRaw(payload.udp_raw),
    extra: normalizeExtra(payload.extra),
    ip_type: inRangeOr(normalizeInteger(payload.ip_type), -32768, 32767, null),
    model_num: normalizeText(payload.model_num, 32),
    server_ip: normalizeText(payload.server_ip, 21),
    ip: normalizeText(payload.ip, 21),
    subnet_mask: normalizeText(payload.subnet_mask, 15),
    gateway: normalizeText(payload.gateway, 15),
    dns: normalizeText(payload.dns, 15),
    app_version: normalizeText(payload.app_version, 64),
    rcu_time: normalizeText(payload.rcu_time, 25),
    launcher_version: normalizeText(payload.launcher_version, 64),
    mac: normalizeText(payload.mac, 17),
    room_type_id: normalizeInteger(payload.room_type_id),
    config_version: normalizeText(payload.config_version, 32),
    // int4 columns: clamp to the 32-bit signed range, null when invalid.
    room_status: inRangeOr(normalizeInteger(payload.room_status), -2147483648, 2147483647, null),
    season: inRangeOr(normalizeInteger(payload.season), -2147483648, 2147483647, null),
    sys_lock_status: inRangeOr(normalizeInteger(payload.sys_lock_status), -2147483648, 2147483647, null),
    authorization_time: normalizeText(payload.authorization_time, 10),
    authorization_days: normalizeText(payload.authorization_days, 10),
    room_num_remark: normalizeText(payload.room_num_remark, 255),
    room_type_remark: normalizeText(payload.room_type_remark, 64),
    room_remark: normalizeText(payload.room_remark, 64),
    mcu_name: normalizeText(payload.mcu_name, 255),
    central_control_name: normalizeText(payload.central_control_name, 255),
    configure_hotel_name: normalizeText(payload.configure_hotel_name, 255),
    configure_room_type_name: normalizeText(payload.configure_room_type_name, 255)
  };

  // Row for the room-status snapshot: version info plus the register/upgrade
  // timestamps, keyed by hotel + room.
  const roomStatusUpdateRow = {
    hotel_id: hotelId,
    room_id: roomId,
    app_version: registerRow.app_version,
    launcher_version: registerRow.launcher_version,
    config_version: registerRow.config_version,
    upgrade_ts_ms: normalizeTsMs(payload.upgrade_ts_ms),
    register_ts_ms: tsMs
  };

  return {
    registerRows: [registerRow],
    roomStatusRows: [roomStatusUpdateRow]
  };
};
|
||||||
|
|
||||||
|
/**
 * Parses a Kafka message into database rows.
 * Throws an Error tagged `type = 'PARSE_ERROR'` for missing/invalid JSON and
 * `type = 'VALIDATION_ERROR'` when the payload fails the zod schema, so the
 * caller can route failures to the retry/error queue.
 */
export const parseMessageToRows = (message) => {
  // Kafka tombstones / empty messages have a null value; previously this
  // crashed with a raw TypeError on .toString() instead of a typed error.
  if (message?.value === undefined || message?.value === null) {
    const error = new Error('JSON Parse Error: message value is empty');
    error.type = 'PARSE_ERROR';
    throw error;
  }

  const rawValue = message.value.toString();

  let payload;
  try {
    payload = JSON.parse(rawValue);
  } catch (e) {
    // Preserve the original exception for diagnostics via `cause`.
    const error = new Error(`JSON Parse Error: ${e.message}`, { cause: e });
    error.type = 'PARSE_ERROR';
    throw error;
  }

  // Fast pre-validation; buildRowsFromPayload() parses again internally,
  // but failing here yields a VALIDATION_ERROR with the full issue list.
  const validationResult = kafkaPayloadSchema.safeParse(payload);

  if (!validationResult.success) {
    const error = new Error(`Schema Validation Failed: ${JSON.stringify(validationResult.error.errors)}`);
    error.type = 'VALIDATION_ERROR';
    throw error;
  }

  return buildRowsFromPayload(payload);
};
|
||||||
|
|
||||||
|
/**
 * Processes one Kafka message end-to-end: parse + validate, insert register
 * rows, then update the room-status snapshot. Returns the number of register
 * rows written.
 * Parse/validation errors propagate with their own `type`; database failures
 * are rethrown tagged `type = 'DB_ERROR'` with a compact `dbContext` sample
 * for diagnostics.
 */
export const processKafkaMessage = async ({ message, dbManager, config }) => {
  // Let PARSE_ERROR / VALIDATION_ERROR propagate as-is — the previous
  // try/catch around this call only rethrew and added nothing.
  const rows = parseMessageToRows(message);

  try {
    await dbManager.insertRegisterRows({ schema: config.db.schema, table: config.db.table, rows: rows.registerRows });
    await dbManager.updateRoomStatusRows({
      schema: config.db.roomStatusSchema,
      table: config.db.roomStatusTable,
      rows: rows.roomStatusRows
    });
  } catch (error) {
    error.type = 'DB_ERROR';
    // Attach a minimal sample (keys only, no free-form text fields) so the
    // error queue payload stays small.
    const sample = rows?.registerRows?.[0];
    error.dbContext = {
      rowsLength: rows?.registerRows?.length || 0,
      sampleRow: sample
        ? {
            ts_ms: sample.ts_ms,
            hotel_id: sample.hotel_id,
            device_id: sample.device_id,
            room_id: sample.room_id
          }
        : null
    };
    throw error;
  }

  return rows.registerRows.length;
};
|
||||||
83
bls-register-backend/src/processor/udpParser.js
Normal file
83
bls-register-backend/src/processor/udpParser.js
Normal file
@@ -0,0 +1,83 @@
|
|||||||
|
/**
 * Cleans a hex string for Buffer.from(..., 'hex'): trims whitespace, drops a
 * leading "0x", removes internal spaces, and left-pads odd-length input with
 * one '0'. Non-strings become ''.
 */
const normalizeHex = (hex) => {
  if (typeof hex !== 'string') {
    return '';
  }
  const stripped = hex.trim().replace(/^0x/i, '').replace(/\s+/g, '');
  return stripped.length % 2 === 1 ? `0${stripped}` : stripped;
};
|
||||||
|
|
||||||
|
const toHex = (value) => `0x${value.toString(16).padStart(2, '0')}`;
|
||||||
|
|
||||||
|
const readUInt16 = (buffer, offset) => buffer.readUInt16BE(offset);
|
||||||
|
|
||||||
|
/**
 * Parses a 0x36 status-report frame from a hex string.
 * Layout (as implemented here):
 *   byte 0      — system lock status
 *   byte 7      — device report count
 *   from byte 8 — `reportCount` 6-byte device records
 *   next byte   — fault count
 *   then        — `faultCount` 6-byte fault records
 * Truncated frames stop early; missing header bytes yield nulls.
 */
export const parse0x36 = (udpRaw) => {
  const hex = normalizeHex(udpRaw);
  const bytes = hex ? Buffer.from(hex, 'hex') : Buffer.alloc(0);

  const sysLockStatus = bytes.length > 0 ? bytes[0] : null;
  const reportCount = bytes.length > 7 ? bytes[7] : null;

  const devices = [];
  let cursor = 8;
  let remaining = reportCount || 0;
  // Each device record needs 6 bytes (cursor .. cursor+5).
  while (remaining > 0 && cursor + 5 < bytes.length) {
    devices.push({
      dev_type: bytes[cursor],
      dev_addr: bytes[cursor + 1],
      dev_loop: readUInt16(bytes, cursor + 2),
      dev_data: readUInt16(bytes, cursor + 4)
    });
    cursor += 6;
    remaining -= 1;
  }

  const faultCount = cursor < bytes.length ? bytes[cursor] : null;
  cursor += 1;

  const faults = [];
  remaining = faultCount || 0;
  while (remaining > 0 && cursor + 5 < bytes.length) {
    faults.push({
      fault_dev_type: bytes[cursor],
      fault_dev_addr: bytes[cursor + 1],
      fault_dev_loop: readUInt16(bytes, cursor + 2),
      error_type: bytes[cursor + 4],
      error_data: bytes[cursor + 5]
    });
    cursor += 6;
    remaining -= 1;
  }

  return {
    sysLockStatus,
    reportCount,
    faultCount,
    devices,
    faults
  };
};
|
||||||
|
|
||||||
|
/**
 * Parses a 0x0F downlink control frame: byte 0 is the record count, followed
 * by 6-byte control-parameter records. Truncated frames stop early.
 */
export const parse0x0fDownlink = (udpRaw) => {
  const hex = normalizeHex(udpRaw);
  const bytes = hex ? Buffer.from(hex, 'hex') : Buffer.alloc(0);

  const controlCount = bytes.length > 0 ? bytes[0] : null;

  const controlParams = [];
  let cursor = 1;
  let remaining = controlCount || 0;
  while (remaining > 0 && cursor + 5 < bytes.length) {
    controlParams.push({
      dev_type: bytes[cursor],
      dev_addr: bytes[cursor + 1],
      loop: readUInt16(bytes, cursor + 2),
      // NOTE(review): `type` is read big-endian (high byte first), yet
      // `type_l` is the byte at cursor+4 and `type_h` the byte at cursor+5 —
      // under BE those are the high and low bytes respectively. Confirm the
      // intended byte order against the protocol spec.
      type: readUInt16(bytes, cursor + 4),
      type_l: bytes[cursor + 4],
      type_h: bytes[cursor + 5]
    });
    cursor += 6;
    remaining -= 1;
  }

  return {
    controlCount,
    controlParams
  };
};
|
||||||
|
|
||||||
|
/**
 * Parses a 0x0F acknowledgement frame; the ack code is byte 1, formatted as
 * a "0x.." string, or null when the frame is shorter than two bytes.
 */
export const parse0x0fAck = (udpRaw) => {
  const hex = normalizeHex(udpRaw);
  const bytes = hex ? Buffer.from(hex, 'hex') : Buffer.alloc(0);
  return { ackCode: bytes.length > 1 ? toHex(bytes[1]) : null };
};
|
||||||
53
bls-register-backend/src/redis/errorQueue.js
Normal file
53
bls-register-backend/src/redis/errorQueue.js
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
import { logger } from '../utils/logger.js';
|
||||||
|
|
||||||
|
export const buildErrorQueueKey = (projectName) => `${projectName}_error_queue`;
|
||||||
|
|
||||||
|
/**
 * Serializes `payload` and appends it to the Redis error queue.
 * Logs and rethrows on Redis failure so the caller can react.
 */
export const enqueueError = async (client, queueKey, payload) => {
  const serialized = JSON.stringify(payload);
  try {
    await client.rPush(queueKey, serialized);
  } catch (error) {
    logger.error('Redis enqueue error failed', { error: error?.message });
    throw error;
  }
};
|
||||||
|
|
||||||
|
/**
 * Long-running retry worker: blocks on the Redis error queue, replays each
 * failed item through `handler`, and re-queues failures with an incremented
 * attempt counter until `maxAttempts`, after which the payload is reported
 * via `redisIntegration.error` and dropped. Never returns under normal
 * operation (infinite loop).
 */
export const startErrorRetryWorker = async ({
  client,
  queueKey,
  handler,
  redisIntegration,
  maxAttempts = 5
}) => {
  while (true) {
    // Blocking pop with timeout 0 = wait forever for the next item.
    // NOTE(review): assumes a node-redis v4 client where blPop resolves to
    // { key, element } — confirm against the client version in use.
    const result = await client.blPop(queueKey, 0);
    const raw = result?.element;
    if (!raw) {
      continue;
    }
    let item;
    try {
      item = JSON.parse(raw);
    } catch (error) {
      // Malformed payloads cannot be retried; report and drop them.
      logger.error('Invalid error payload', { error: error?.message });
      await redisIntegration.error('Invalid error payload', { module: 'redis', stack: error?.message });
      continue;
    }
    const attempts = item.attempts || 0;
    try {
      await handler(item);
    } catch (error) {
      logger.error('Retry handler failed', { error: error?.message, stack: error?.stack });
      // Re-queue with bookkeeping; give up once maxAttempts is reached.
      const nextPayload = {
        ...item,
        attempts: attempts + 1,
        lastError: error?.message,
        lastAttemptAt: Date.now()
      };
      if (nextPayload.attempts >= maxAttempts) {
        await redisIntegration.error('Retry attempts exceeded', { module: 'retry', stack: JSON.stringify(nextPayload) });
      } else {
        await enqueueError(client, queueKey, nextPayload);
      }
    }
  }
};
|
||||||
14
bls-register-backend/src/redis/redisClient.js
Normal file
14
bls-register-backend/src/redis/redisClient.js
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
import { createClient } from 'redis';
|
||||||
|
|
||||||
|
/**
 * Creates and connects a Redis client from { host, port, password, db }.
 * Resolves with the connected client; rejects if the connection fails.
 */
export const createRedisClient = async (config) => {
  const options = {
    socket: {
      host: config.host,
      port: config.port
    },
    password: config.password,
    database: config.db
  };
  const client = createClient(options);
  await client.connect();
  return client;
};
|
||||||
40
bls-register-backend/src/redis/redisIntegration.js
Normal file
40
bls-register-backend/src/redis/redisIntegration.js
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
/**
 * Pushes structured log entries and periodic heartbeats into Redis lists
 * consumed by the monitoring platform.
 * Fixes vs. previous version: the heartbeat interval no longer leaves a
 * floating promise (an unhandled rejection could crash the process),
 * repeated startHeartbeat() calls no longer stack intervals, and
 * stopHeartbeat() allows clean shutdown.
 */
export class RedisIntegration {
  /**
   * @param client      connected Redis client (rPush is the only method used)
   * @param projectName logical project name, used to build list keys
   * @param apiBaseUrl  base URL reported in heartbeats
   */
  constructor(client, projectName, apiBaseUrl) {
    this.client = client;
    this.projectName = projectName;
    this.apiBaseUrl = apiBaseUrl;
    this.heartbeatKey = '项目心跳';
    this.logKey = `${projectName}_项目控制台`;
    this.heartbeatTimer = null;
  }

  // Shared writer for info/error entries (same shape, different level).
  async #pushLog(level, message, context) {
    const payload = {
      timestamp: new Date().toISOString(),
      level,
      message,
      metadata: context || undefined
    };
    await this.client.rPush(this.logKey, JSON.stringify(payload));
  }

  /** Appends an info-level entry to the project console list. */
  async info(message, context) {
    await this.#pushLog('info', message, context);
  }

  /** Appends an error-level entry to the project console list. */
  async error(message, context) {
    await this.#pushLog('error', message, context);
  }

  /**
   * Starts pushing a heartbeat payload every 3 s. Idempotent: calling it
   * again while running is a no-op.
   */
  startHeartbeat() {
    if (this.heartbeatTimer) {
      return;
    }
    this.heartbeatTimer = setInterval(() => {
      const payload = {
        projectName: this.projectName,
        apiBaseUrl: this.apiBaseUrl,
        lastActiveAt: Date.now()
      };
      // Best-effort: swallow transient Redis failures instead of leaving an
      // unhandled rejection inside the timer callback.
      Promise.resolve(this.client.rPush(this.heartbeatKey, JSON.stringify(payload))).catch(() => {});
    }, 3000);
  }

  /** Stops the heartbeat timer, if running (for graceful shutdown). */
  stopHeartbeat() {
    if (this.heartbeatTimer) {
      clearInterval(this.heartbeatTimer);
      this.heartbeatTimer = null;
    }
  }
}
|
||||||
55
bls-register-backend/src/schema/kafkaPayload.js
Normal file
55
bls-register-backend/src/schema/kafkaPayload.js
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
import { z } from 'zod';
|
||||||
|
|
||||||
|
/**
 * Coerces a value to a finite number for schema preprocessing.
 * Nullish/empty-string input becomes null; numbers pass through unchanged;
 * anything else is Number()-converted, with non-finite results mapped to null.
 */
const toNumber = (value) => {
  if (value === null || value === undefined || value === '') {
    return null;
  }
  if (typeof value === 'number') {
    return value;
  }
  const coerced = Number(value);
  return Number.isFinite(coerced) ? coerced : null;
};
|
||||||
|
|
||||||
|
// Stringifies any non-nullish value; null/undefined pass through unchanged
// (unlike String(), which would produce "null"/"undefined").
const toStringAllowEmpty = (value) =>
  value === undefined || value === null ? value : String(value);
|
||||||
|
|
||||||
|
// Field builders shared by every schema entry; semantics are identical to the
// previous per-field definitions (preprocess + nullable + optional), just DRY.
const nullableNumberField = () =>
  z.preprocess(toNumber, z.number().nullable()).optional().nullable();
const nullableStringField = () =>
  z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable();

/**
 * Validation schema for the register payload coming off Kafka.
 * Every field is optional and nullable; numbers/strings are coerced first by
 * toNumber / toStringAllowEmpty. udp_raw and extra accept any shape — they
 * are normalized later by the processor.
 */
export const kafkaPayloadSchema = z.object({
  ts_ms: nullableNumberField(),
  upgrade_ts_ms: nullableNumberField(),
  hotel_id: nullableNumberField(),
  room_id: nullableStringField(),
  device_id: nullableStringField(),
  is_send: nullableNumberField(),
  udp_raw: z.any().optional().nullable(),
  extra: z.any().optional().nullable(),
  ip_type: nullableNumberField(),
  model_num: nullableStringField(),
  server_ip: nullableStringField(),
  ip: nullableStringField(),
  subnet_mask: nullableStringField(),
  gateway: nullableStringField(),
  dns: nullableStringField(),
  app_version: nullableStringField(),
  rcu_time: nullableStringField(),
  launcher_version: nullableStringField(),
  mac: nullableStringField(),
  room_type_id: nullableNumberField(),
  config_version: nullableStringField(),
  room_status: nullableNumberField(),
  season: nullableNumberField(),
  sys_lock_status: nullableNumberField(),
  authorization_time: nullableStringField(),
  authorization_days: nullableStringField(),
  room_num_remark: nullableStringField(),
  room_type_remark: nullableStringField(),
  room_remark: nullableStringField(),
  mcu_name: nullableStringField(),
  central_control_name: nullableStringField(),
  configure_hotel_name: nullableStringField(),
  configure_room_type_name: nullableStringField()
});
|
||||||
21
bls-register-backend/src/utils/logger.js
Normal file
21
bls-register-backend/src/utils/logger.js
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
// Serializes one log entry as a single JSON line; `context` is included only
// when truthy so bare entries stay compact.
const format = (level, message, context) => {
  const entry = {
    level,
    message,
    timestamp: Date.now()
  };
  if (context) {
    entry.context = context;
  }
  return JSON.stringify(entry);
};
|
||||||
|
|
||||||
|
/**
 * Minimal JSON-lines logger: info goes to stdout, warn/error to stderr.
 * Each call writes exactly one newline-terminated JSON record.
 */
export const logger = {
  info: (message, context) => process.stdout.write(`${format('info', message, context)}\n`),
  error: (message, context) => process.stderr.write(`${format('error', message, context)}\n`),
  warn: (message, context) => process.stderr.write(`${format('warn', message, context)}\n`)
};
|
||||||
43
bls-register-backend/src/utils/metricCollector.js
Normal file
43
bls-register-backend/src/utils/metricCollector.js
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
/**
 * In-memory counters for the consumer pipeline, drained periodically via
 * getAndReset(). `metrics` holds a fixed set of named counters; `keyed`
 * holds per-key sub-counters (e.g. counts per hotel).
 * Fixes: direct `hasOwnProperty` call replaced with Object.hasOwn
 * (no-prototype-builtins), JSON round-trip deep copy replaced with
 * structuredClone.
 */
export class MetricCollector {
  constructor() {
    this.reset();
  }

  /** Zeroes all counters and clears keyed sub-counters. */
  reset() {
    this.metrics = {
      kafka_pulled: 0,
      parse_error: 0,
      db_inserted: 0,
      db_failed: 0,
      db_insert_count: 0,
      db_insert_ms_sum: 0,
      batch_flush_count: 0,
      batch_flush_ms_sum: 0
    };
    this.keyed = {};
  }

  /**
   * Adds `count` to a known counter; unknown metric names are ignored so a
   * typo cannot create stray counters.
   */
  increment(metric, count = 1) {
    if (Object.hasOwn(this.metrics, metric)) {
      this.metrics[metric] += count;
    }
  }

  /** Adds `count` to `keyed[metric][key]`, creating buckets on demand. */
  incrementKeyed(metric, key, count = 1) {
    if (!key) return;
    const bucket = (this.keyed[metric] ??= {});
    bucket[key] = (bucket[key] ?? 0) + count;
  }

  /**
   * Returns a snapshot of all counters (keyed counters deep-copied) and
   * resets the collector atomically from the caller's perspective.
   */
  getAndReset() {
    const snapshot = { ...this.metrics, keyed: structuredClone(this.keyed) };
    this.reset();
    return snapshot;
  }
}
|
||||||
3
bls-register-backend/src/utils/uuid.js
Normal file
3
bls-register-backend/src/utils/uuid.js
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
import { randomUUID } from 'crypto';
|
||||||
|
|
||||||
|
export const createGuid = () => randomUUID().replace(/-/g, '');
|
||||||
54
bls-register-backend/tests/processor.test.js
Normal file
54
bls-register-backend/tests/processor.test.js
Normal file
@@ -0,0 +1,54 @@
|
|||||||
|
import { describe, it, expect } from 'vitest';
|
||||||
|
import { buildRowsFromPayload } from '../src/processor/index.js';
|
||||||
|
|
||||||
|
// Unit tests for buildRowsFromPayload: field mapping, int2 clamping,
// udp_raw base64 conversion, and NUL-byte stripping.
describe('Register Processor', () => {
  // Minimal valid payload shared by all cases; individual tests override fields.
  const basePayload = {
    ts_ms: 1770000235000,
    upgrade_ts_ms: 1770001235000,
    hotel_id: 1085,
    room_id: '8888',
    device_id: '091123987456',
    is_send: 0,
    app_version: '2.1.0',
    launcher_version: '1.0.0',
    config_version: 'cfg-v8'
  };

  it('should map payload into register and room-status rows', () => {
    const rows = buildRowsFromPayload(basePayload);

    // Exactly one row per output table for a single payload.
    expect(rows.registerRows).toHaveLength(1);
    expect(rows.roomStatusRows).toHaveLength(1);

    expect(rows.registerRows[0].hotel_id).toBe(1085);
    expect(rows.registerRows[0].room_id).toBe('8888');
    expect(rows.registerRows[0].device_id).toBe('091123987456');

    // Room-status row carries the register/upgrade timestamps through.
    expect(rows.roomStatusRows[0].register_ts_ms).toBe(1770000235000);
    expect(rows.roomStatusRows[0].upgrade_ts_ms).toBe(1770001235000);
  });

  it('should force hotel_id to 0 when out of int2 range', () => {
    // 60000 > 32767 (smallint max) → falls back to the default 0.
    const rows = buildRowsFromPayload({ ...basePayload, hotel_id: 60000 });
    expect(rows.registerRows[0].hotel_id).toBe(0);
    expect(rows.roomStatusRows[0].hotel_id).toBe(0);
  });

  it('should convert udp_raw byte array to base64 text', () => {
    const rows = buildRowsFromPayload({
      ...basePayload,
      udp_raw: [1, 2, 3, 4]
    });
    // Buffer.from([1,2,3,4]).toString('base64') === 'AQIDBA=='.
    expect(rows.registerRows[0].udp_raw).toBe('AQIDBA==');
  });

  it('should strip NUL bytes from text fields', () => {
    // NUL bytes are rejected by PostgreSQL text columns and must be removed.
    const rows = buildRowsFromPayload({
      ...basePayload,
      app_version: 'v1\u0000\u0000.2',
      room_num_remark: 'A\u0000B'
    });
    expect(rows.registerRows[0].app_version).toBe('v1.2');
    expect(rows.registerRows[0].room_num_remark).toBe('AB');
  });
});
|
||||||
5
bls-register-backend/verify.log
Normal file
5
bls-register-backend/verify.log
Normal file
@@ -0,0 +1,5 @@
|
|||||||
|
[probe-db] event_rows=1
|
||||||
|
[probe-db] event.ts_ms=1773480366464 room_id=515 app_version=v1.2
|
||||||
|
[probe-db] event.udp_raw=YWJjZGVm
|
||||||
|
[probe-db] room_status_rows=1
|
||||||
|
[probe-db] room_status.room_id=515 register_ts_ms=1773480366464 upgrade_ts_ms=1773480367698
|
||||||
12
bls-register-backend/vite.config.js
Normal file
12
bls-register-backend/vite.config.js
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
import { defineConfig } from 'vite';
|
||||||
|
|
||||||
|
export default defineConfig({
  build: {
    // Build a server-side (SSR) bundle with src/index.js as the entry point.
    ssr: 'src/index.js',
    outDir: 'dist',
    // Emit syntax suitable for Node 18.
    target: 'node18',
    rollupOptions: {
      // Keep runtime dependencies external so they are loaded from
      // node_modules at runtime instead of being bundled.
      // NOTE(review): the list names 'kafka-node' — confirm this matches the
      // Kafka client package the project actually imports.
      external: ['dotenv', 'kafka-node', 'pg', 'redis']
    }
  }
});
|
||||||
91
docs/project.md
Normal file
91
docs/project.md
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
|
||||||
|
# 0xB1命令
|
||||||
|
## 模式
|
||||||
|
- rcu_info
|
||||||
|
### 数据表
|
||||||
|
- rcu_info_events
|
||||||
|
|
||||||
|
#### 基础字段
|
||||||
|
| 字段名 | 类型 | 备注 |
|
||||||
|
| --- | --- | --- |
|
||||||
|
| guid | int8 | 8位整数,由数据库自己生成 |
|
||||||
|
| ts_ms | int8 | 事件发生的时间戳(毫秒级 Unix 时间),作为分区键和主键的一部分。 |
|
||||||
|
| hotel_id | int2 | 酒店Code,smallint 类型,范围 [0, 32767],标识所属酒店。 |
|
||||||
|
| room_id | varchar(50) | 房间号,字符串类型,长度 1~50,标识具体房间。 |
|
||||||
|
| device_id | varchar(64) | 设备唯一标识符,最长64字符,以CRICS拼接字段为准。 |
|
||||||
|
| write_ts_ms | int8 | 写入数据库的时间戳(毫秒级 Unix 时间) |
|
||||||
|
|
||||||
|
#### 信息字段
|
||||||
|
| 字段名 | 类型 | 备注 |
|
||||||
|
| --- | --- | --- |
|
||||||
|
| is_send | int2 | 1:下发,0:上报 默认0 |
|
||||||
|
| udp_raw | text | UDP原始数据(使用Base64编码) |
|
||||||
|
| extra | jsonb | 扩展字段(JSON格式) |
|
||||||
|
|
||||||
|
#### 数据字段
|
||||||
|
| 字段名 | 类型 | 备注 |
|
||||||
|
| --- | --- | --- |
|
||||||
|
| ip_type | int2 | IP类型 |
|
||||||
|
| model_num | varchar(32) | 机型编号 |
|
||||||
|
| server_ip | varchar(21) | 服务器IP |
|
||||||
|
| ip | varchar(21) | IP:Port |
|
||||||
|
| subnet_mask | varchar(15) | 子网掩码 |
|
||||||
|
| gateway | varchar(15) | 网关 |
|
||||||
|
| dns | varchar(15) | DNS |
|
||||||
|
| app_version | varchar(64) | 软件版本号 |
|
||||||
|
| rcu_time | varchar(25) | RCU时间 |
|
||||||
|
| launcher_version | varchar(64) | Launcher版本号 |
|
||||||
|
| mac | varchar(17) | MAC地址 |
|
||||||
|
| room_type_id | int8 | 房间类型id |
|
||||||
|
| config_version | varchar(32) | 配置版本号 |
|
||||||
|
| room_status | int4 | 房间状态 |
|
||||||
|
| season | int4 | 季节 |
|
||||||
|
| sys_lock_status | int4 | 系统锁定状态 |
|
||||||
|
| authorization_time | varchar(10) | 授权时间 |
|
||||||
|
| authorization_days | varchar(10) | 授权天数 |
|
||||||
|
| room_num_remark | varchar(255) | 房号备注 |
|
||||||
|
| room_type_remark | varchar(64) | 房型备注 |
|
||||||
|
| room_remark | varchar(64) | 房间备注 |
|
||||||
|
| mcu_name | varchar(255) | MCU名称 |
|
||||||
|
| central_control_name | varchar(255) | 中控机型名称 |
|
||||||
|
| configure_hotel_name | varchar(255) | 配置数据酒店名称 |
|
||||||
|
| configure_room_type_name | varchar(255) | 配置数据房型别名 |
|
||||||
|
|
||||||
|
|
||||||
|
#### 生产服务器的推送的数据用于kafka的C#类:
|
||||||
|
public class RegisterInfo
|
||||||
|
{
|
||||||
|
public long ts_ms { get; set; }
|
||||||
|
public long upgrade_ts_ms { get; set; }
|
||||||
|
public long hotel_id { get; set; }
|
||||||
|
public string room_id { get; set; }
|
||||||
|
public string device_id { get; set; }
|
||||||
|
public ushort is_send { get; set; }
|
||||||
|
public byte[] udp_raw { get; set; }
|
||||||
|
public string extra { get; set; }
|
||||||
|
public int ip_type { get; set; }
|
||||||
|
public string model_num { get; set; }
|
||||||
|
public string server_ip { get; set; }
|
||||||
|
public string ip { get; set; }
|
||||||
|
public string subnet_mask { get; set; }
|
||||||
|
public string gateway { get; set; }
|
||||||
|
public string dns { get; set; }
|
||||||
|
public string app_version { get; set; }
|
||||||
|
public string rcu_time { get; set; }
|
||||||
|
public string launcher_version { get; set; }
|
||||||
|
public string mac { get; set; }
|
||||||
|
public int room_type_id { get; set; }
|
||||||
|
public string config_version { get; set; }
|
||||||
|
public int room_status { get; set; }
|
||||||
|
public string season { get; set; }
|
||||||
|
public int sys_lock_status { get; set; }
|
||||||
|
public long authorization_time { get; set; }
|
||||||
|
public long authorization_days { get; set; }
|
||||||
|
public string room_num_remark { get; set; }
|
||||||
|
public string room_type_remark { get; set; }
|
||||||
|
public string room_remark { get; set; }
|
||||||
|
public string mcu_name { get; set; }
|
||||||
|
public string central_control_name { get; set; }
|
||||||
|
public string configure_hotel_name { get; set; }
|
||||||
|
public string configure_room_type_name { get; set; }
|
||||||
|
}
|
||||||
92
docs/rcu_info_events_g5.sql
Normal file
92
docs/rcu_info_events_g5.sql
Normal file
@@ -0,0 +1,92 @@
|
|||||||
|
/*
|
||||||
|
Navicat Premium Dump SQL
|
||||||
|
|
||||||
|
Source Server : FnOS 80
|
||||||
|
Source Server Type : PostgreSQL
|
||||||
|
Source Server Version : 150017 (150017)
|
||||||
|
Source Host : 10.8.8.80:5434
|
||||||
|
Source Catalog : log_platform
|
||||||
|
Source Schema : rcu_info
|
||||||
|
|
||||||
|
Target Server Type : PostgreSQL
|
||||||
|
Target Server Version : 150017 (150017)
|
||||||
|
File Encoding : 65001
|
||||||
|
|
||||||
|
Date: 12/03/2026 17:36:43
|
||||||
|
*/
|
||||||
|
|
||||||
|
|
||||||
|
-- ----------------------------
-- Table structure for rcu_info_events_g5
-- ----------------------------
-- NOTE(review): the guid default references sequence
-- "rcu_info".rcu_info_events_g5_guid_seq, which this dump does not create —
-- it must already exist before running this DDL.
DROP TABLE IF EXISTS "rcu_info"."rcu_info_events_g5";
CREATE TABLE "rcu_info"."rcu_info_events_g5" (
  "guid" int8 NOT NULL DEFAULT nextval('"rcu_info".rcu_info_events_g5_guid_seq'::regclass),
  -- Event timestamp (epoch ms); part of the primary key.
  "ts_ms" int8 NOT NULL,
  "hotel_id" int2 NOT NULL,
  "room_id" varchar(50) COLLATE "pg_catalog"."default" NOT NULL,
  "device_id" varchar(64) COLLATE "pg_catalog"."default",
  -- Defaults to the insert time in epoch ms.
  "write_ts_ms" int8 DEFAULT ((EXTRACT(epoch FROM now()) * (1000)::numeric))::bigint,
  -- 1 = downlink, 0 = report.
  "is_send" int2 DEFAULT 0,
  -- Raw UDP frame, base64-encoded.
  "udp_raw" text COLLATE "pg_catalog"."default",
  "extra" jsonb,
  "ip_type" int2,
  "model_num" varchar(32) COLLATE "pg_catalog"."default",
  "server_ip" varchar(21) COLLATE "pg_catalog"."default",
  "ip" varchar(21) COLLATE "pg_catalog"."default",
  "subnet_mask" varchar(15) COLLATE "pg_catalog"."default",
  "gateway" varchar(15) COLLATE "pg_catalog"."default",
  "dns" varchar(15) COLLATE "pg_catalog"."default",
  "app_version" varchar(64) COLLATE "pg_catalog"."default",
  "rcu_time" varchar(25) COLLATE "pg_catalog"."default",
  "launcher_version" varchar(64) COLLATE "pg_catalog"."default",
  "mac" varchar(17) COLLATE "pg_catalog"."default",
  "room_type_id" int8,
  "config_version" varchar(32) COLLATE "pg_catalog"."default",
  "room_status" int4,
  "season" int4,
  "sys_lock_status" int4,
  "authorization_time" varchar(10) COLLATE "pg_catalog"."default",
  "authorization_days" varchar(10) COLLATE "pg_catalog"."default",
  "room_num_remark" varchar(255) COLLATE "pg_catalog"."default",
  "room_type_remark" varchar(64) COLLATE "pg_catalog"."default",
  "room_remark" varchar(64) COLLATE "pg_catalog"."default",
  "mcu_name" varchar(255) COLLATE "pg_catalog"."default",
  "central_control_name" varchar(255) COLLATE "pg_catalog"."default",
  "configure_hotel_name" varchar(255) COLLATE "pg_catalog"."default",
  "configure_room_type_name" varchar(255) COLLATE "pg_catalog"."default"
)
;

-- ----------------------------
-- Indexes structure for table rcu_info_events_g5
-- ----------------------------
-- Partial indexes (WHERE ... IS NOT NULL) keep index size down for sparse columns.
CREATE INDEX "idx_rcu_info_g5_app_ver" ON "rcu_info"."rcu_info_events_g5" USING btree (
  "app_version" COLLATE "pg_catalog"."default" "pg_catalog"."text_ops" ASC NULLS LAST
) WHERE app_version IS NOT NULL;
CREATE INDEX "idx_rcu_info_g5_central_ctrl_name" ON "rcu_info"."rcu_info_events_g5" USING btree (
  "central_control_name" COLLATE "pg_catalog"."default" "pg_catalog"."text_ops" ASC NULLS LAST
) WHERE central_control_name IS NOT NULL;
CREATE INDEX "idx_rcu_info_g5_device_id" ON "rcu_info"."rcu_info_events_g5" USING btree (
  "device_id" COLLATE "pg_catalog"."default" "pg_catalog"."text_ops" ASC NULLS LAST
) WHERE device_id IS NOT NULL;
CREATE INDEX "idx_rcu_info_g5_is_send" ON "rcu_info"."rcu_info_events_g5" USING btree (
  "is_send" "pg_catalog"."int2_ops" ASC NULLS LAST
);
CREATE INDEX "idx_rcu_info_g5_launcher_ver" ON "rcu_info"."rcu_info_events_g5" USING btree (
  "launcher_version" COLLATE "pg_catalog"."default" "pg_catalog"."text_ops" ASC NULLS LAST
) WHERE launcher_version IS NOT NULL;
CREATE INDEX "idx_rcu_info_g5_mac" ON "rcu_info"."rcu_info_events_g5" USING btree (
  "mac" COLLATE "pg_catalog"."default" "pg_catalog"."text_ops" ASC NULLS LAST
) WHERE mac IS NOT NULL;
CREATE INDEX "idx_rcu_info_g5_sys_lock" ON "rcu_info"."rcu_info_events_g5" USING btree (
  "sys_lock_status" "pg_catalog"."int4_ops" ASC NULLS LAST
);
-- Descending ts_ms index serves "latest events" queries.
CREATE INDEX "rcu_info_events_g5_ts_ms_idx" ON "rcu_info"."rcu_info_events_g5" USING btree (
  "ts_ms" "pg_catalog"."int8_ops" DESC NULLS FIRST
);

-- ----------------------------
-- Primary Key structure for table rcu_info_events_g5
-- ----------------------------
ALTER TABLE "rcu_info"."rcu_info_events_g5" ADD CONSTRAINT "rcu_info_events_g5_pkey" PRIMARY KEY ("hotel_id", "room_id", "ts_ms", "guid");
|
||||||
91
docs/room_status_moment_g5.sql
Normal file
91
docs/room_status_moment_g5.sql
Normal file
@@ -0,0 +1,91 @@
|
|||||||
|
/*
|
||||||
|
Navicat Premium Dump SQL
|
||||||
|
|
||||||
|
Source Server : FnOS 80
|
||||||
|
Source Server Type : PostgreSQL
|
||||||
|
Source Server Version : 150017 (150017)
|
||||||
|
Source Host : 10.8.8.80:5434
|
||||||
|
Source Catalog : log_platform
|
||||||
|
Source Schema : room_status
|
||||||
|
|
||||||
|
Target Server Type : PostgreSQL
|
||||||
|
Target Server Version : 150017 (150017)
|
||||||
|
File Encoding : 65001
|
||||||
|
|
||||||
|
Date: 14/03/2026 09:58:21
|
||||||
|
*/
|
||||||
|
|
||||||
|
|
||||||
|
-- ----------------------------
|
||||||
|
-- Table structure for room_status_moment_g5
|
||||||
|
-- ----------------------------
|
||||||
|
DROP TABLE IF EXISTS "room_status"."room_status_moment_g5";
|
||||||
|
CREATE TABLE "room_status"."room_status_moment_g5" (
|
||||||
|
"hotel_id" int2 NOT NULL,
|
||||||
|
"room_id" text COLLATE "pg_catalog"."default" NOT NULL,
|
||||||
|
"device_id" text COLLATE "pg_catalog"."default" NOT NULL,
|
||||||
|
"ts_ms" int8 NOT NULL DEFAULT ((EXTRACT(epoch FROM clock_timestamp()) * (1000)::numeric))::bigint,
|
||||||
|
"sys_lock_status" int2,
|
||||||
|
"online_status" int2,
|
||||||
|
"launcher_version" text COLLATE "pg_catalog"."default",
|
||||||
|
"app_version" text COLLATE "pg_catalog"."default",
|
||||||
|
"config_version" text COLLATE "pg_catalog"."default",
|
||||||
|
"register_ts_ms" int8,
|
||||||
|
"upgrade_ts_ms" int8,
|
||||||
|
"config_ts_ms" int8,
|
||||||
|
"ip" text COLLATE "pg_catalog"."default",
|
||||||
|
"pms_status" int2,
|
||||||
|
"power_state" int2,
|
||||||
|
"cardless_state" int2,
|
||||||
|
"service_mask" int8,
|
||||||
|
"insert_card" int2,
|
||||||
|
"bright_g" int2,
|
||||||
|
"agreement_ver" text COLLATE "pg_catalog"."default",
|
||||||
|
"air_address" _text COLLATE "pg_catalog"."default",
|
||||||
|
"air_state" _int2,
|
||||||
|
"air_model" _int2,
|
||||||
|
"air_speed" _int2,
|
||||||
|
"air_set_temp" _int2,
|
||||||
|
"air_now_temp" _int2,
|
||||||
|
"air_solenoid_valve" _int2,
|
||||||
|
"elec_address" _text COLLATE "pg_catalog"."default",
|
||||||
|
"elec_voltage" _float8,
|
||||||
|
"elec_ampere" _float8,
|
||||||
|
"elec_power" _float8,
|
||||||
|
"elec_phase" _float8,
|
||||||
|
"elec_energy" _float8,
|
||||||
|
"elec_sum_energy" _float8,
|
||||||
|
"carbon_state" int2,
|
||||||
|
"dev_loops" jsonb,
|
||||||
|
"energy_carbon_sum" float8,
|
||||||
|
"energy_nocard_sum" float8,
|
||||||
|
"external_device" jsonb DEFAULT '{}'::jsonb,
|
||||||
|
"faulty_device_count" jsonb DEFAULT '{}'::jsonb
|
||||||
|
)
|
||||||
|
WITH (fillfactor=90)
|
||||||
|
TABLESPACE "ts_hot"
|
||||||
|
;
|
||||||
|
|
||||||
|
-- ----------------------------
|
||||||
|
-- Indexes structure for table room_status_moment_g5
|
||||||
|
-- ----------------------------
|
||||||
|
CREATE INDEX "idx_rsm_g5_dashboard_query" ON "room_status"."room_status_moment_g5" USING btree (
|
||||||
|
"hotel_id" "pg_catalog"."int2_ops" ASC NULLS LAST,
|
||||||
|
"online_status" "pg_catalog"."int2_ops" ASC NULLS LAST,
|
||||||
|
"power_state" "pg_catalog"."int2_ops" ASC NULLS LAST
|
||||||
|
);
|
||||||
|
|
||||||
|
-- ----------------------------
|
||||||
|
-- Triggers structure for table room_status_moment_g5
|
||||||
|
-- ----------------------------
|
||||||
|
CREATE TRIGGER "trg_update_rsm_ts_ms" BEFORE UPDATE ON "room_status"."room_status_moment_g5"
|
||||||
|
FOR EACH ROW
|
||||||
|
EXECUTE PROCEDURE "room_status"."update_ts_ms_g5"();
|
||||||
|
CREATE TRIGGER "trigger_room_status_change" AFTER UPDATE ON "room_status"."room_status_moment_g5"
|
||||||
|
FOR EACH ROW
|
||||||
|
EXECUTE PROCEDURE "room_status"."handle_room_status_change"();
|
||||||
|
|
||||||
|
-- ----------------------------
|
||||||
|
-- Primary Key structure for table room_status_moment_g5
|
||||||
|
-- ----------------------------
|
||||||
|
ALTER TABLE "room_status"."room_status_moment_g5" ADD CONSTRAINT "room_status_moment_g5_pkey" PRIMARY KEY ("hotel_id", "room_id");
|
||||||
51
docs/template/bls-onoffline-backend/.env
vendored
Normal file
51
docs/template/bls-onoffline-backend/.env
vendored
Normal file
@@ -0,0 +1,51 @@
|
|||||||
|
KAFKA_BROKERS=kafka.blv-oa.com:9092
|
||||||
|
KAFKA_CLIENT_ID=bls-onoffline-producer
|
||||||
|
KAFKA_GROUP_ID=bls-onoffline-consumer
|
||||||
|
KAFKA_TOPICS=blwlog4Nodejs-rcu-onoffline-topic-0
|
||||||
|
KAFKA_AUTO_COMMIT=false
|
||||||
|
KAFKA_AUTO_COMMIT_INTERVAL_MS=5000
|
||||||
|
KAFKA_SASL_ENABLED=true
|
||||||
|
KAFKA_SASL_MECHANISM=plain
|
||||||
|
KAFKA_SASL_USERNAME=blwmomo
|
||||||
|
KAFKA_SASL_PASSWORD=blwmomo
|
||||||
|
KAFKA_SSL_ENABLED=false
|
||||||
|
KAFKA_CONSUMER_INSTANCES=3
|
||||||
|
KAFKA_MAX_IN_FLIGHT=5000
|
||||||
|
KAFKA_BATCH_SIZE=1000
|
||||||
|
KAFKA_BATCH_TIMEOUT_MS=20
|
||||||
|
KAFKA_COMMIT_INTERVAL_MS=200
|
||||||
|
KAFKA_COMMIT_ON_ATTEMPT=true
|
||||||
|
KAFKA_FETCH_MAX_BYTES=10485760
|
||||||
|
KAFKA_FETCH_MAX_WAIT_MS=100
|
||||||
|
KAFKA_FETCH_MIN_BYTES=1
|
||||||
|
|
||||||
|
#POSTGRES_HOST=10.8.8.109
|
||||||
|
#POSTGRES_PORT=5433
|
||||||
|
#POSTGRES_DATABASE=log_platform
|
||||||
|
#POSTGRES_USER=log_admin
|
||||||
|
#POSTGRES_PASSWORD=YourActualStrongPasswordForPostgres!
|
||||||
|
#POSTGRES_MAX_CONNECTIONS=6
|
||||||
|
#POSTGRES_IDLE_TIMEOUT_MS=30000
|
||||||
|
#DB_SCHEMA=onoffline
|
||||||
|
#DB_TABLE=onoffline_record
|
||||||
|
|
||||||
|
# =========================
|
||||||
|
# PostgreSQL 配置 G5库专用
|
||||||
|
# =========================
|
||||||
|
POSTGRES_HOST_G5=10.8.8.80
|
||||||
|
POSTGRES_PORT_G5=5434
|
||||||
|
POSTGRES_DATABASE_G5=log_platform
|
||||||
|
POSTGRES_USER_G5=log_admin
|
||||||
|
POSTGRES_PASSWORD_G5=H3IkLUt8K!x
|
||||||
|
POSTGRES_IDLE_TIMEOUT_MS_G5=30000
|
||||||
|
|
||||||
|
PORT=3001
|
||||||
|
LOG_LEVEL=info
|
||||||
|
|
||||||
|
# Redis connection
|
||||||
|
REDIS_HOST=10.8.8.109
|
||||||
|
REDIS_PORT=6379
|
||||||
|
REDIS_PASSWORD=
|
||||||
|
REDIS_DB=15
|
||||||
|
REDIS_CONNECT_TIMEOUT_MS=5000
|
||||||
|
REDIS_PROJECT_NAME=bls-onoffline
|
||||||
31
docs/template/bls-onoffline-backend/.env.example
vendored
Normal file
31
docs/template/bls-onoffline-backend/.env.example
vendored
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
# Server Configuration
|
||||||
|
PORT=3001
|
||||||
|
NODE_ENV=development
|
||||||
|
|
||||||
|
# Kafka Configuration
|
||||||
|
KAFKA_BROKERS=localhost:9092
|
||||||
|
KAFKA_TOPIC=blwlog4Nodejs-rcu-onoffline-topic
|
||||||
|
KAFKA_GROUP_ID=bls-onoffline-group
|
||||||
|
KAFKA_CLIENT_ID=bls-onoffline-client
|
||||||
|
KAFKA_CONSUMER_INSTANCES=1
|
||||||
|
# KAFKA_SASL_USERNAME=
|
||||||
|
# KAFKA_SASL_PASSWORD=
|
||||||
|
# KAFKA_SASL_MECHANISM=plain
|
||||||
|
|
||||||
|
# Database Configuration (PostgreSQL)
|
||||||
|
DB_HOST=localhost
|
||||||
|
DB_PORT=5432
|
||||||
|
DB_USER=postgres
|
||||||
|
DB_PASSWORD=password
|
||||||
|
DB_DATABASE=log_platform
|
||||||
|
DB_SCHEMA=public
|
||||||
|
DB_TABLE=onoffline_record
|
||||||
|
DB_MAX_CONNECTIONS=10
|
||||||
|
|
||||||
|
# Redis Configuration
|
||||||
|
REDIS_HOST=localhost
|
||||||
|
REDIS_PORT=6379
|
||||||
|
REDIS_PASSWORD=
|
||||||
|
REDIS_DB=0
|
||||||
|
REDIS_PROJECT_NAME=bls-onoffline
|
||||||
|
REDIS_API_BASE_URL=http://localhost:3001
|
||||||
18
docs/template/bls-onoffline-backend/AGENTS.md
vendored
Normal file
18
docs/template/bls-onoffline-backend/AGENTS.md
vendored
Normal file
@@ -0,0 +1,18 @@
|
|||||||
|
<!-- OPENSPEC:START -->
|
||||||
|
# OpenSpec Instructions
|
||||||
|
|
||||||
|
These instructions are for AI assistants working in this project.
|
||||||
|
|
||||||
|
Always open `@/openspec/AGENTS.md` when the request:
|
||||||
|
- Mentions planning or proposals (words like proposal, spec, change, plan)
|
||||||
|
- Introduces new capabilities, breaking changes, architecture shifts, or big performance/security work
|
||||||
|
- Sounds ambiguous and you need the authoritative spec before coding
|
||||||
|
|
||||||
|
Use `@/openspec/AGENTS.md` to learn:
|
||||||
|
- How to create and apply change proposals
|
||||||
|
- Spec format and conventions
|
||||||
|
- Project structure and guidelines
|
||||||
|
|
||||||
|
Keep this managed block so 'openspec update' can refresh the instructions.
|
||||||
|
|
||||||
|
<!-- OPENSPEC:END -->
|
||||||
30
docs/template/bls-onoffline-backend/README.md
vendored
Normal file
30
docs/template/bls-onoffline-backend/README.md
vendored
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
bls-onoffline-backend
|
||||||
|
|
||||||
|
安装与运行
|
||||||
|
- Node.js 22+
|
||||||
|
- npm install
|
||||||
|
- npm run dev
|
||||||
|
|
||||||
|
构建与测试
|
||||||
|
- npm run build
|
||||||
|
- npm run test
|
||||||
|
- npm run lint
|
||||||
|
|
||||||
|
规范校验
|
||||||
|
- npm run spec:lint
|
||||||
|
- npm run spec:validate
|
||||||
|
|
||||||
|
环境变量
|
||||||
|
- 复制 .env.example 为 .env 并按实际环境配置
|
||||||
|
|
||||||
|
数据库初始化
|
||||||
|
- 运行服务前请先通过根目录 SQL_Script 下脚本完成建库与分区维护
|
||||||
|
- `../SQL_Script/create_database.sql`:建库(psql)
|
||||||
|
- `../SQL_Script/create_schema_and_parent_table.sql`:建 schema 与主分区表
|
||||||
|
- `../SQL_Script/create_partition_for_day.sql`:按日建分区模板
|
||||||
|
- `../SQL_Script/generate_init_sql.js`:生成建库+建表 SQL
|
||||||
|
- `../SQL_Script/generate_partition_sql.js`:生成单日分区 SQL
|
||||||
|
- `../SQL_Script/generate_partition_range_sql.js`:生成批量分区 SQL
|
||||||
|
|
||||||
|
规范说明
|
||||||
|
- 规格文件位于 spec/onoffline-spec.md
|
||||||
1086
docs/template/bls-onoffline-backend/dist/index.js
vendored
Normal file
1086
docs/template/bls-onoffline-backend/dist/index.js
vendored
Normal file
File diff suppressed because it is too large
Load Diff
22
docs/template/bls-onoffline-backend/ecosystem.config.cjs
vendored
Normal file
22
docs/template/bls-onoffline-backend/ecosystem.config.cjs
vendored
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
module.exports = {
|
||||||
|
apps: [{
|
||||||
|
name: 'bls-onoffline',
|
||||||
|
script: 'dist/index.js',
|
||||||
|
instances: 1,
|
||||||
|
exec_mode: 'fork',
|
||||||
|
autorestart: true,
|
||||||
|
watch: false,
|
||||||
|
max_memory_restart: '1G',
|
||||||
|
env_file: '.env',
|
||||||
|
env: {
|
||||||
|
NODE_ENV: 'production',
|
||||||
|
PORT: 3001
|
||||||
|
},
|
||||||
|
error_file: './logs/error.log',
|
||||||
|
out_file: './logs/out.log',
|
||||||
|
log_date_format: 'YYYY-MM-DD HH:mm:ss Z',
|
||||||
|
merge_logs: true,
|
||||||
|
kill_timeout: 5000,
|
||||||
|
time: true
|
||||||
|
}]
|
||||||
|
};
|
||||||
456
docs/template/bls-onoffline-backend/openspec/AGENTS.md
vendored
Normal file
456
docs/template/bls-onoffline-backend/openspec/AGENTS.md
vendored
Normal file
@@ -0,0 +1,456 @@
|
|||||||
|
# OpenSpec Instructions
|
||||||
|
|
||||||
|
Instructions for AI coding assistants using OpenSpec for spec-driven development.
|
||||||
|
|
||||||
|
## TL;DR Quick Checklist
|
||||||
|
|
||||||
|
- Search existing work: `openspec spec list --long`, `openspec list` (use `rg` only for full-text search)
|
||||||
|
- Decide scope: new capability vs modify existing capability
|
||||||
|
- Pick a unique `change-id`: kebab-case, verb-led (`add-`, `update-`, `remove-`, `refactor-`)
|
||||||
|
- Scaffold: `proposal.md`, `tasks.md`, `design.md` (only if needed), and delta specs per affected capability
|
||||||
|
- Write deltas: use `## ADDED|MODIFIED|REMOVED|RENAMED Requirements`; include at least one `#### Scenario:` per requirement
|
||||||
|
- Validate: `openspec validate [change-id] --strict` and fix issues
|
||||||
|
- Request approval: Do not start implementation until proposal is approved
|
||||||
|
|
||||||
|
## Three-Stage Workflow
|
||||||
|
|
||||||
|
### Stage 1: Creating Changes
|
||||||
|
Create proposal when you need to:
|
||||||
|
- Add features or functionality
|
||||||
|
- Make breaking changes (API, schema)
|
||||||
|
- Change architecture or patterns
|
||||||
|
- Optimize performance (changes behavior)
|
||||||
|
- Update security patterns
|
||||||
|
|
||||||
|
Triggers (examples):
|
||||||
|
- "Help me create a change proposal"
|
||||||
|
- "Help me plan a change"
|
||||||
|
- "Help me create a proposal"
|
||||||
|
- "I want to create a spec proposal"
|
||||||
|
- "I want to create a spec"
|
||||||
|
|
||||||
|
Loose matching guidance:
|
||||||
|
- Contains one of: `proposal`, `change`, `spec`
|
||||||
|
- With one of: `create`, `plan`, `make`, `start`, `help`
|
||||||
|
|
||||||
|
Skip proposal for:
|
||||||
|
- Bug fixes (restore intended behavior)
|
||||||
|
- Typos, formatting, comments
|
||||||
|
- Dependency updates (non-breaking)
|
||||||
|
- Configuration changes
|
||||||
|
- Tests for existing behavior
|
||||||
|
|
||||||
|
**Workflow**
|
||||||
|
1. Review `openspec/project.md`, `openspec list`, and `openspec list --specs` to understand current context.
|
||||||
|
2. Choose a unique verb-led `change-id` and scaffold `proposal.md`, `tasks.md`, optional `design.md`, and spec deltas under `openspec/changes/<id>/`.
|
||||||
|
3. Draft spec deltas using `## ADDED|MODIFIED|REMOVED Requirements` with at least one `#### Scenario:` per requirement.
|
||||||
|
4. Run `openspec validate <id> --strict` and resolve any issues before sharing the proposal.
|
||||||
|
|
||||||
|
### Stage 2: Implementing Changes
|
||||||
|
Track these steps as TODOs and complete them one by one.
|
||||||
|
1. **Read proposal.md** - Understand what's being built
|
||||||
|
2. **Read design.md** (if exists) - Review technical decisions
|
||||||
|
3. **Read tasks.md** - Get implementation checklist
|
||||||
|
4. **Implement tasks sequentially** - Complete in order
|
||||||
|
5. **Confirm completion** - Ensure every item in `tasks.md` is finished before updating statuses
|
||||||
|
6. **Update checklist** - After all work is done, set every task to `- [x]` so the list reflects reality
|
||||||
|
7. **Approval gate** - Do not start implementation until the proposal is reviewed and approved
|
||||||
|
|
||||||
|
### Stage 3: Archiving Changes
|
||||||
|
After deployment, create separate PR to:
|
||||||
|
- Move `changes/[name]/` → `changes/archive/YYYY-MM-DD-[name]/`
|
||||||
|
- Update `specs/` if capabilities changed
|
||||||
|
- Use `openspec archive <change-id> --skip-specs --yes` for tooling-only changes (always pass the change ID explicitly)
|
||||||
|
- Run `openspec validate --strict` to confirm the archived change passes checks
|
||||||
|
|
||||||
|
## Before Any Task
|
||||||
|
|
||||||
|
**Context Checklist:**
|
||||||
|
- [ ] Read relevant specs in `specs/[capability]/spec.md`
|
||||||
|
- [ ] Check pending changes in `changes/` for conflicts
|
||||||
|
- [ ] Read `openspec/project.md` for conventions
|
||||||
|
- [ ] Run `openspec list` to see active changes
|
||||||
|
- [ ] Run `openspec list --specs` to see existing capabilities
|
||||||
|
|
||||||
|
**Before Creating Specs:**
|
||||||
|
- Always check if capability already exists
|
||||||
|
- Prefer modifying existing specs over creating duplicates
|
||||||
|
- Use `openspec show [spec]` to review current state
|
||||||
|
- If request is ambiguous, ask 1–2 clarifying questions before scaffolding
|
||||||
|
|
||||||
|
### Search Guidance
|
||||||
|
- Enumerate specs: `openspec spec list --long` (or `--json` for scripts)
|
||||||
|
- Enumerate changes: `openspec list` (or `openspec change list --json` - deprecated but available)
|
||||||
|
- Show details:
|
||||||
|
- Spec: `openspec show <spec-id> --type spec` (use `--json` for filters)
|
||||||
|
- Change: `openspec show <change-id> --json --deltas-only`
|
||||||
|
- Full-text search (use ripgrep): `rg -n "Requirement:|Scenario:" openspec/specs`
|
||||||
|
|
||||||
|
## Quick Start
|
||||||
|
|
||||||
|
### CLI Commands
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Essential commands
|
||||||
|
openspec list # List active changes
|
||||||
|
openspec list --specs # List specifications
|
||||||
|
openspec show [item] # Display change or spec
|
||||||
|
openspec validate [item] # Validate changes or specs
|
||||||
|
openspec archive <change-id> [--yes|-y] # Archive after deployment (add --yes for non-interactive runs)
|
||||||
|
|
||||||
|
# Project management
|
||||||
|
openspec init [path] # Initialize OpenSpec
|
||||||
|
openspec update [path] # Update instruction files
|
||||||
|
|
||||||
|
# Interactive mode
|
||||||
|
openspec show # Prompts for selection
|
||||||
|
openspec validate # Bulk validation mode
|
||||||
|
|
||||||
|
# Debugging
|
||||||
|
openspec show [change] --json --deltas-only
|
||||||
|
openspec validate [change] --strict
|
||||||
|
```
|
||||||
|
|
||||||
|
### Command Flags
|
||||||
|
|
||||||
|
- `--json` - Machine-readable output
|
||||||
|
- `--type change|spec` - Disambiguate items
|
||||||
|
- `--strict` - Comprehensive validation
|
||||||
|
- `--no-interactive` - Disable prompts
|
||||||
|
- `--skip-specs` - Archive without spec updates
|
||||||
|
- `--yes`/`-y` - Skip confirmation prompts (non-interactive archive)
|
||||||
|
|
||||||
|
## Directory Structure
|
||||||
|
|
||||||
|
```
|
||||||
|
openspec/
|
||||||
|
├── project.md # Project conventions
|
||||||
|
├── specs/ # Current truth - what IS built
|
||||||
|
│ └── [capability]/ # Single focused capability
|
||||||
|
│ ├── spec.md # Requirements and scenarios
|
||||||
|
│ └── design.md # Technical patterns
|
||||||
|
├── changes/ # Proposals - what SHOULD change
|
||||||
|
│ ├── [change-name]/
|
||||||
|
│ │ ├── proposal.md # Why, what, impact
|
||||||
|
│ │ ├── tasks.md # Implementation checklist
|
||||||
|
│ │ ├── design.md # Technical decisions (optional; see criteria)
|
||||||
|
│ │ └── specs/ # Delta changes
|
||||||
|
│ │ └── [capability]/
|
||||||
|
│ │ └── spec.md # ADDED/MODIFIED/REMOVED
|
||||||
|
│ └── archive/ # Completed changes
|
||||||
|
```
|
||||||
|
|
||||||
|
## Creating Change Proposals
|
||||||
|
|
||||||
|
### Decision Tree
|
||||||
|
|
||||||
|
```
|
||||||
|
New request?
|
||||||
|
├─ Bug fix restoring spec behavior? → Fix directly
|
||||||
|
├─ Typo/format/comment? → Fix directly
|
||||||
|
├─ New feature/capability? → Create proposal
|
||||||
|
├─ Breaking change? → Create proposal
|
||||||
|
├─ Architecture change? → Create proposal
|
||||||
|
└─ Unclear? → Create proposal (safer)
|
||||||
|
```
|
||||||
|
|
||||||
|
### Proposal Structure
|
||||||
|
|
||||||
|
1. **Create directory:** `changes/[change-id]/` (kebab-case, verb-led, unique)
|
||||||
|
|
||||||
|
2. **Write proposal.md:**
|
||||||
|
```markdown
|
||||||
|
# Change: [Brief description of change]
|
||||||
|
|
||||||
|
## Why
|
||||||
|
[1-2 sentences on problem/opportunity]
|
||||||
|
|
||||||
|
## What Changes
|
||||||
|
- [Bullet list of changes]
|
||||||
|
- [Mark breaking changes with **BREAKING**]
|
||||||
|
|
||||||
|
## Impact
|
||||||
|
- Affected specs: [list capabilities]
|
||||||
|
- Affected code: [key files/systems]
|
||||||
|
```
|
||||||
|
|
||||||
|
3. **Create spec deltas:** `specs/[capability]/spec.md`
|
||||||
|
```markdown
|
||||||
|
## ADDED Requirements
|
||||||
|
### Requirement: New Feature
|
||||||
|
The system SHALL provide...
|
||||||
|
|
||||||
|
#### Scenario: Success case
|
||||||
|
- **WHEN** user performs action
|
||||||
|
- **THEN** expected result
|
||||||
|
|
||||||
|
## MODIFIED Requirements
|
||||||
|
### Requirement: Existing Feature
|
||||||
|
[Complete modified requirement]
|
||||||
|
|
||||||
|
## REMOVED Requirements
|
||||||
|
### Requirement: Old Feature
|
||||||
|
**Reason**: [Why removing]
|
||||||
|
**Migration**: [How to handle]
|
||||||
|
```
|
||||||
|
If multiple capabilities are affected, create multiple delta files under `changes/[change-id]/specs/<capability>/spec.md`—one per capability.
|
||||||
|
|
||||||
|
4. **Create tasks.md:**
|
||||||
|
```markdown
|
||||||
|
## 1. Implementation
|
||||||
|
- [ ] 1.1 Create database schema
|
||||||
|
- [ ] 1.2 Implement API endpoint
|
||||||
|
- [ ] 1.3 Add frontend component
|
||||||
|
- [ ] 1.4 Write tests
|
||||||
|
```
|
||||||
|
|
||||||
|
5. **Create design.md when needed:**
|
||||||
|
Create `design.md` if any of the following apply; otherwise omit it:
|
||||||
|
- Cross-cutting change (multiple services/modules) or a new architectural pattern
|
||||||
|
- New external dependency or significant data model changes
|
||||||
|
- Security, performance, or migration complexity
|
||||||
|
- Ambiguity that benefits from technical decisions before coding
|
||||||
|
|
||||||
|
Minimal `design.md` skeleton:
|
||||||
|
```markdown
|
||||||
|
## Context
|
||||||
|
[Background, constraints, stakeholders]
|
||||||
|
|
||||||
|
## Goals / Non-Goals
|
||||||
|
- Goals: [...]
|
||||||
|
- Non-Goals: [...]
|
||||||
|
|
||||||
|
## Decisions
|
||||||
|
- Decision: [What and why]
|
||||||
|
- Alternatives considered: [Options + rationale]
|
||||||
|
|
||||||
|
## Risks / Trade-offs
|
||||||
|
- [Risk] → Mitigation
|
||||||
|
|
||||||
|
## Migration Plan
|
||||||
|
[Steps, rollback]
|
||||||
|
|
||||||
|
## Open Questions
|
||||||
|
- [...]
|
||||||
|
```
|
||||||
|
|
||||||
|
## Spec File Format
|
||||||
|
|
||||||
|
### Critical: Scenario Formatting
|
||||||
|
|
||||||
|
**CORRECT** (use #### headers):
|
||||||
|
```markdown
|
||||||
|
#### Scenario: User login success
|
||||||
|
- **WHEN** valid credentials provided
|
||||||
|
- **THEN** return JWT token
|
||||||
|
```
|
||||||
|
|
||||||
|
**WRONG** (don't use bullets or bold):
|
||||||
|
```markdown
|
||||||
|
- **Scenario: User login** ❌
|
||||||
|
**Scenario**: User login ❌
|
||||||
|
### Scenario: User login ❌
|
||||||
|
```
|
||||||
|
|
||||||
|
Every requirement MUST have at least one scenario.
|
||||||
|
|
||||||
|
### Requirement Wording
|
||||||
|
- Use SHALL/MUST for normative requirements (avoid should/may unless intentionally non-normative)
|
||||||
|
|
||||||
|
### Delta Operations
|
||||||
|
|
||||||
|
- `## ADDED Requirements` - New capabilities
|
||||||
|
- `## MODIFIED Requirements` - Changed behavior
|
||||||
|
- `## REMOVED Requirements` - Deprecated features
|
||||||
|
- `## RENAMED Requirements` - Name changes
|
||||||
|
|
||||||
|
Headers matched with `trim(header)` - whitespace ignored.
|
||||||
|
|
||||||
|
#### When to use ADDED vs MODIFIED
|
||||||
|
- ADDED: Introduces a new capability or sub-capability that can stand alone as a requirement. Prefer ADDED when the change is orthogonal (e.g., adding "Slash Command Configuration") rather than altering the semantics of an existing requirement.
|
||||||
|
- MODIFIED: Changes the behavior, scope, or acceptance criteria of an existing requirement. Always paste the full, updated requirement content (header + all scenarios). The archiver will replace the entire requirement with what you provide here; partial deltas will drop previous details.
|
||||||
|
- RENAMED: Use when only the name changes. If you also change behavior, use RENAMED (name) plus MODIFIED (content) referencing the new name.
|
||||||
|
|
||||||
|
Common pitfall: Using MODIFIED to add a new concern without including the previous text. This causes loss of detail at archive time. If you aren’t explicitly changing the existing requirement, add a new requirement under ADDED instead.
|
||||||
|
|
||||||
|
Authoring a MODIFIED requirement correctly:
|
||||||
|
1) Locate the existing requirement in `openspec/specs/<capability>/spec.md`.
|
||||||
|
2) Copy the entire requirement block (from `### Requirement: ...` through its scenarios).
|
||||||
|
3) Paste it under `## MODIFIED Requirements` and edit to reflect the new behavior.
|
||||||
|
4) Ensure the header text matches exactly (whitespace-insensitive) and keep at least one `#### Scenario:`.
|
||||||
|
|
||||||
|
Example for RENAMED:
|
||||||
|
```markdown
|
||||||
|
## RENAMED Requirements
|
||||||
|
- FROM: `### Requirement: Login`
|
||||||
|
- TO: `### Requirement: User Authentication`
|
||||||
|
```
|
||||||
|
|
||||||
|
## Troubleshooting
|
||||||
|
|
||||||
|
### Common Errors
|
||||||
|
|
||||||
|
**"Change must have at least one delta"**
|
||||||
|
- Check `changes/[name]/specs/` exists with .md files
|
||||||
|
- Verify files have operation prefixes (## ADDED Requirements)
|
||||||
|
|
||||||
|
**"Requirement must have at least one scenario"**
|
||||||
|
- Check scenarios use `#### Scenario:` format (4 hashtags)
|
||||||
|
- Don't use bullet points or bold for scenario headers
|
||||||
|
|
||||||
|
**Silent scenario parsing failures**
|
||||||
|
- Exact format required: `#### Scenario: Name`
|
||||||
|
- Debug with: `openspec show [change] --json --deltas-only`
|
||||||
|
|
||||||
|
### Validation Tips
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# Always use strict mode for comprehensive checks
|
||||||
|
openspec validate [change] --strict
|
||||||
|
|
||||||
|
# Debug delta parsing
|
||||||
|
openspec show [change] --json | jq '.deltas'
|
||||||
|
|
||||||
|
# Check specific requirement
|
||||||
|
openspec show [spec] --json -r 1
|
||||||
|
```
|
||||||
|
|
||||||
|
## Happy Path Script
|
||||||
|
|
||||||
|
```bash
|
||||||
|
# 1) Explore current state
|
||||||
|
openspec spec list --long
|
||||||
|
openspec list
|
||||||
|
# Optional full-text search:
|
||||||
|
# rg -n "Requirement:|Scenario:" openspec/specs
|
||||||
|
# rg -n "^#|Requirement:" openspec/changes
|
||||||
|
|
||||||
|
# 2) Choose change id and scaffold
|
||||||
|
CHANGE=add-two-factor-auth
|
||||||
|
mkdir -p openspec/changes/$CHANGE/{specs/auth}
|
||||||
|
printf "## Why\n...\n\n## What Changes\n- ...\n\n## Impact\n- ...\n" > openspec/changes/$CHANGE/proposal.md
|
||||||
|
printf "## 1. Implementation\n- [ ] 1.1 ...\n" > openspec/changes/$CHANGE/tasks.md
|
||||||
|
|
||||||
|
# 3) Add deltas (example)
|
||||||
|
cat > openspec/changes/$CHANGE/specs/auth/spec.md << 'EOF'
|
||||||
|
## ADDED Requirements
|
||||||
|
### Requirement: Two-Factor Authentication
|
||||||
|
Users MUST provide a second factor during login.
|
||||||
|
|
||||||
|
#### Scenario: OTP required
|
||||||
|
- **WHEN** valid credentials are provided
|
||||||
|
- **THEN** an OTP challenge is required
|
||||||
|
EOF
|
||||||
|
|
||||||
|
# 4) Validate
|
||||||
|
openspec validate $CHANGE --strict
|
||||||
|
```
|
||||||
|
|
||||||
|
## Multi-Capability Example
|
||||||
|
|
||||||
|
```
|
||||||
|
openspec/changes/add-2fa-notify/
|
||||||
|
├── proposal.md
|
||||||
|
├── tasks.md
|
||||||
|
└── specs/
|
||||||
|
├── auth/
|
||||||
|
│ └── spec.md # ADDED: Two-Factor Authentication
|
||||||
|
└── notifications/
|
||||||
|
└── spec.md # ADDED: OTP email notification
|
||||||
|
```
|
||||||
|
|
||||||
|
auth/spec.md
|
||||||
|
```markdown
|
||||||
|
## ADDED Requirements
|
||||||
|
### Requirement: Two-Factor Authentication
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
notifications/spec.md
|
||||||
|
```markdown
|
||||||
|
## ADDED Requirements
|
||||||
|
### Requirement: OTP Email Notification
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
## Best Practices
|
||||||
|
|
||||||
|
### Simplicity First
|
||||||
|
- Default to <100 lines of new code
|
||||||
|
- Single-file implementations until proven insufficient
|
||||||
|
- Avoid frameworks without clear justification
|
||||||
|
- Choose boring, proven patterns
|
||||||
|
|
||||||
|
### Complexity Triggers
|
||||||
|
Only add complexity with:
|
||||||
|
- Performance data showing current solution too slow
|
||||||
|
- Concrete scale requirements (>1000 users, >100MB data)
|
||||||
|
- Multiple proven use cases requiring abstraction
|
||||||
|
|
||||||
|
### Clear References
|
||||||
|
- Use `file.ts:42` format for code locations
|
||||||
|
- Reference specs as `specs/auth/spec.md`
|
||||||
|
- Link related changes and PRs
|
||||||
|
|
||||||
|
### Capability Naming
|
||||||
|
- Use verb-noun: `user-auth`, `payment-capture`
|
||||||
|
- Single purpose per capability
|
||||||
|
- 10-minute understandability rule
|
||||||
|
- Split if description needs "AND"
|
||||||
|
|
||||||
|
### Change ID Naming
|
||||||
|
- Use kebab-case, short and descriptive: `add-two-factor-auth`
|
||||||
|
- Prefer verb-led prefixes: `add-`, `update-`, `remove-`, `refactor-`
|
||||||
|
- Ensure uniqueness; if taken, append `-2`, `-3`, etc.
|
||||||
|
|
||||||
|
## Tool Selection Guide
|
||||||
|
|
||||||
|
| Task | Tool | Why |
|
||||||
|
|------|------|-----|
|
||||||
|
| Find files by pattern | Glob | Fast pattern matching |
|
||||||
|
| Search code content | Grep | Optimized regex search |
|
||||||
|
| Read specific files | Read | Direct file access |
|
||||||
|
| Explore unknown scope | Task | Multi-step investigation |
|
||||||
|
|
||||||
|
## Error Recovery
|
||||||
|
|
||||||
|
### Change Conflicts
|
||||||
|
1. Run `openspec list` to see active changes
|
||||||
|
2. Check for overlapping specs
|
||||||
|
3. Coordinate with change owners
|
||||||
|
4. Consider combining proposals
|
||||||
|
|
||||||
|
### Validation Failures
|
||||||
|
1. Run with `--strict` flag
|
||||||
|
2. Check JSON output for details
|
||||||
|
3. Verify spec file format
|
||||||
|
4. Ensure scenarios properly formatted
|
||||||
|
|
||||||
|
### Missing Context
|
||||||
|
1. Read project.md first
|
||||||
|
2. Check related specs
|
||||||
|
3. Review recent archives
|
||||||
|
4. Ask for clarification
|
||||||
|
|
||||||
|
## Quick Reference
|
||||||
|
|
||||||
|
### Stage Indicators
|
||||||
|
- `changes/` - Proposed, not yet built
|
||||||
|
- `specs/` - Built and deployed
|
||||||
|
- `archive/` - Completed changes
|
||||||
|
|
||||||
|
### File Purposes
|
||||||
|
- `proposal.md` - Why and what
|
||||||
|
- `tasks.md` - Implementation steps
|
||||||
|
- `design.md` - Technical decisions
|
||||||
|
- `spec.md` - Requirements and behavior
|
||||||
|
|
||||||
|
### CLI Essentials
|
||||||
|
```bash
|
||||||
|
openspec list # What's in progress?
|
||||||
|
openspec show [item] # View details
|
||||||
|
openspec validate --strict # Is it correct?
|
||||||
|
openspec archive <change-id> [--yes|-y] # Mark complete (add --yes for automation)
|
||||||
|
```
|
||||||
|
|
||||||
|
Remember: Specs are truth. Changes are proposals. Keep them in sync.
|
||||||
@@ -0,0 +1,17 @@
|
|||||||
|
# Change: Fix Kafka Partitioning and Schema Issues
|
||||||
|
|
||||||
|
## Why
|
||||||
|
Production deployment revealed issues with data ingestion:
|
||||||
|
1. Kafka Topic name changed to include partition suffix.
|
||||||
|
2. Legacy data contains second-level timestamps (1970s) causing partition lookup failures in PostgreSQL (which expects ms).
|
||||||
|
3. Variable-length fields (reboot reason, status) exceeded VARCHAR(10) limits, causing crashes.
|
||||||
|
|
||||||
|
## What Changes
|
||||||
|
- **Modified Requirement**: Update Kafka Topic to `blwlog4Nodejs-rcu-onoffline-topic-0`.
|
||||||
|
- **New Requirement**: Implement heuristic timestamp conversion (Sec -> MS) for values < 100B.
|
||||||
|
- **New Requirement**: Truncate specific fields to VARCHAR(255) to prevent DB rejection.
|
||||||
|
- **Modified Requirement**: Update DB Schema to VARCHAR(255) for robustness.
|
||||||
|
|
||||||
|
## Impact
|
||||||
|
- Affected specs: `onoffline`
|
||||||
|
- Affected code: `src/processor/index.js`, `scripts/init_db.sql`
|
||||||
@@ -0,0 +1,25 @@
|
|||||||
|
## MODIFIED Requirements
|
||||||
|
### Requirement: 消费并落库
|
||||||
|
系统 SHALL 从 blwlog4Nodejs-rcu-onoffline-topic-0 消费消息,并写入 log_platform.onoffline.onoffline_record。
|
||||||
|
|
||||||
|
#### Scenario: 非重启数据写入
|
||||||
|
- **GIVEN** RebootReason 为空或不存在
|
||||||
|
- **WHEN** 消息被处理
|
||||||
|
- **THEN** current_status 等于 CurrentStatus (截断至 255 字符)
|
||||||
|
|
||||||
|
## ADDED Requirements
|
||||||
|
### Requirement: 字段长度限制与截断
|
||||||
|
系统 SHALL 将部分变长字段截断至数据库允许的最大长度 (VARCHAR(255)),防止写入失败。
|
||||||
|
|
||||||
|
#### Scenario: 超长字段处理
|
||||||
|
- **GIVEN** LauncherVersion, CurrentStatus 或 RebootReason 超过 255 字符
|
||||||
|
- **WHEN** 消息被处理
|
||||||
|
- **THEN** 字段被截断为前 255 个字符并入库
|
||||||
|
|
||||||
|
### Requirement: 时间戳单位自动识别
|
||||||
|
系统 SHALL 自动识别 UnixTime 字段是秒还是毫秒,并统一转换为毫秒。
|
||||||
|
|
||||||
|
#### Scenario: 秒级时间戳转换
|
||||||
|
- **GIVEN** UnixTime < 100000000000 (约 1973 年前)
|
||||||
|
- **WHEN** 解析时间戳
|
||||||
|
- **THEN** 自动乘以 1000 转换为毫秒
|
||||||
@@ -0,0 +1,6 @@
|
|||||||
|
## 1. Implementation
|
||||||
|
- [x] Update Kafka Topic in .env and config
|
||||||
|
- [x] Implement timestamp unit detection and conversion in processor
|
||||||
|
- [x] Implement field truncation logic in processor
|
||||||
|
- [x] Update database schema definition (init_db.sql) to VARCHAR(255)
|
||||||
|
- [x] Verify data ingestion with production stream
|
||||||
@@ -0,0 +1,18 @@
|
|||||||
|
# Change: Optimize Kafka Consumption Performance
|
||||||
|
|
||||||
|
## Why
|
||||||
|
User reports extremely slow Kafka consumption. Current implementation processes and inserts messages one-by-one, which creates a bottleneck at the database network round-trip time (RTT).
|
||||||
|
|
||||||
|
## What Changes
|
||||||
|
- **New Requirement**: Implement Batch Processing for Kafka messages.
|
||||||
|
- **Refactor**: Decouple message parsing from insertion in `processor`.
|
||||||
|
- **Logic**:
|
||||||
|
- Accumulate messages in a buffer (e.g., 500ms or 500 items).
|
||||||
|
- Perform Batch Insert into PostgreSQL.
|
||||||
|
- Implement Row-by-Row fallback for batch failures (to isolate bad data).
|
||||||
|
- Handle DB connection errors with retry loop at batch level.
|
||||||
|
|
||||||
|
## Impact
|
||||||
|
- Affected specs: `onoffline`
|
||||||
|
- Affected code: `src/index.js`, `src/processor/index.js`
|
||||||
|
- Performance: Expected 10x-100x throughput increase.
|
||||||
@@ -0,0 +1,13 @@
|
|||||||
|
## ADDED Requirements
|
||||||
|
### Requirement: 批量消费与写入
|
||||||
|
系统 SHALL 对 Kafka 消息进行缓冲,并按批次写入数据库,以提高吞吐量。
|
||||||
|
|
||||||
|
#### Scenario: 批量写入
|
||||||
|
- **GIVEN** 短时间内收到多条消息 (e.g., 500条)
|
||||||
|
- **WHEN** 缓冲区满或超时 (e.g., 200ms)
|
||||||
|
- **THEN** 执行一次批量数据库插入操作
|
||||||
|
|
||||||
|
#### Scenario: 写入失败降级
|
||||||
|
- **GIVEN** 批量写入因数据错误失败 (非连接错误)
|
||||||
|
- **WHEN** 捕获异常
|
||||||
|
- **THEN** 自动降级为逐条写入,以隔离错误数据并确保有效数据入库
|
||||||
@@ -0,0 +1,5 @@
|
|||||||
|
## 1. Implementation
|
||||||
|
- [ ] Refactor `src/processor/index.js` to export `parseMessageToRows`
|
||||||
|
- [ ] Implement `BatchProcessor` logic in `src/index.js`
|
||||||
|
- [ ] Update `handleMessage` to use `BatchProcessor`
|
||||||
|
- [ ] Verify performance improvement
|
||||||
@@ -0,0 +1,11 @@
|
|||||||
|
# Proposal: Refactor Partition Indexes
|
||||||
|
|
||||||
|
## Goal
|
||||||
|
利用 PostgreSQL 默认的支持,改变每日分区创立时的索引策略,不再在代码中对每个分区单独创建索引。
|
||||||
|
|
||||||
|
## Context
|
||||||
|
当前 `PartitionManager` 在动态创建子分区后,会隐式调用查询在子分区上创建六个单列索引。由于我们使用的是 PostgreSQL 11+,且我们在初始化脚本中的主分区表 `onoffline.onoffline_record` 上已经创建了所有的索引,此主表上的索引会自动应用于所有的子分区,不需要我们在创建分区时另外手动添加。
|
||||||
|
|
||||||
|
## Proposed Changes
|
||||||
|
1. 在 `src/db/partitionManager.js` 中移除子分区显式创建索引的方法 `ensurePartitionIndexes` 以及针对已有子分区的循环索引检查函数 `ensureIndexesForExistingPartitions`。
|
||||||
|
2. 在更新分区流程 `ensurePartitions` 以及 `ensurePartitionsForTimestamps` 中,移除对 `ensurePartitionIndexes` 的调用。
|
||||||
@@ -0,0 +1,11 @@
|
|||||||
|
# Spec Delta: onoffline-backend
|
||||||
|
|
||||||
|
## MODIFIED Requirements
|
||||||
|
|
||||||
|
### Requirement: 数据库分区策略
|
||||||
|
系统 SHALL 使用 Range Partitioning 按天分区,并自动维护未来 30 天的分区表,子表依赖 PostgreSQL 原生机制继承主表索引。
|
||||||
|
|
||||||
|
#### Scenario: 分区预创建
|
||||||
|
- **GIVEN** 系统启动或每日凌晨
|
||||||
|
- **WHEN** 运行分区维护任务
|
||||||
|
- **THEN** 确保数据库中存在未来 30 天的分区表,无需对子表显式创建单列索引
|
||||||
@@ -0,0 +1,6 @@
|
|||||||
|
# Tasks: Refactor Partition Indexes
|
||||||
|
|
||||||
|
- [x] refactor `src/db/partitionManager.js`: remove `ensurePartitionIndexes` and `ensureIndexesForExistingPartitions`.
|
||||||
|
- [x] refactor `src/db/partitionManager.js`: update `ensurePartitions` and `ensurePartitionsForTimestamps` to remove calls to `ensurePartitionIndexes`.
|
||||||
|
- [x] refactor `src/db/initializer.js` (and any other occurrences) to reflect the removal.
|
||||||
|
- [x] update openspec requirements to clarify that index propagation relies on PostgreSQL parent-table indexes.
|
||||||
@@ -0,0 +1,14 @@
|
|||||||
|
# Change: remove runtime db provisioning
|
||||||
|
|
||||||
|
## Why
|
||||||
|
当前服务在运行时承担了建库、建表和分区维护职责,导致服务职责边界不清晰,也会引入启动阶段 DDL 风险。现已将该能力剥离到根目录 `SQL_Script/`,需要通过 OpenSpec 正式记录为规范变更。
|
||||||
|
|
||||||
|
## What Changes
|
||||||
|
- 移除服务启动阶段的数据库初始化与定时分区维护要求。
|
||||||
|
- 移除服务在写入失败时自动创建缺失分区的要求。
|
||||||
|
- 明确数据库结构与分区维护由外部脚本(`SQL_Script/`)负责。
|
||||||
|
- 保留服务的核心职责:Kafka 消费、解析、写库、重试与监控。
|
||||||
|
|
||||||
|
## Impact
|
||||||
|
- Affected specs: `openspec/specs/onoffline/spec.md`
|
||||||
|
- Affected code: `src/index.js`, `src/config/config.js`, `src/db/initializer.js`, `src/db/partitionManager.js`, `scripts/init_db.sql`, `scripts/verify_partitions.js`, `../SQL_Script/*`
|
||||||
@@ -0,0 +1,32 @@
|
|||||||
|
## MODIFIED Requirements
|
||||||
|
|
||||||
|
### Requirement: 数据库分区策略
|
||||||
|
系统 SHALL 使用 Range Partitioning 按天分区;运行服务本身 SHALL NOT 执行建库、建表、分区创建或定时分区维护。
|
||||||
|
|
||||||
|
#### Scenario: 服务启动不执行 DDL
|
||||||
|
- **GIVEN** 服务进程启动
|
||||||
|
- **WHEN** 进入 bootstrap 过程
|
||||||
|
- **THEN** 仅初始化消费、处理、监控相关能力,不执行数据库创建、表结构初始化与分区创建
|
||||||
|
|
||||||
|
#### Scenario: 分区由外部脚本维护
|
||||||
|
- **GIVEN** 需要创建数据库对象或新增未来分区
|
||||||
|
- **WHEN** 执行外部 SQL/JS 工具
|
||||||
|
- **THEN** 通过根目录 `SQL_Script/` 完成建库和分区维护,而不是由服务运行时自动执行
|
||||||
|
|
||||||
|
### Requirement: 批量消费与写入
|
||||||
|
系统 SHALL 对 Kafka 消息进行缓冲,并按批次写入数据库,以提高吞吐量;当写入失败时,系统 SHALL 执行连接恢复重试与降级策略,但不在运行时创建数据库分区。
|
||||||
|
|
||||||
|
#### Scenario: 批量写入
|
||||||
|
- **GIVEN** 短时间内收到多条消息 (e.g., 500条)
|
||||||
|
- **WHEN** 缓冲区满或超时 (e.g., 200ms)
|
||||||
|
- **THEN** 执行一次批量数据库插入操作
|
||||||
|
|
||||||
|
#### Scenario: 写入失败降级
|
||||||
|
- **GIVEN** 批量写入因数据错误失败 (非连接错误)
|
||||||
|
- **WHEN** 捕获异常
|
||||||
|
- **THEN** 自动降级为逐条写入,以隔离错误数据并确保有效数据入库
|
||||||
|
|
||||||
|
#### Scenario: 分区缺失错误处理
|
||||||
|
- **GIVEN** 写入时数据库返回分区缺失错误
|
||||||
|
- **WHEN** 服务处理该错误
|
||||||
|
- **THEN** 服务记录错误并按既有错误处理机制处理,不在运行时执行分区创建
|
||||||
@@ -0,0 +1,12 @@
|
|||||||
|
## 1. Implementation
|
||||||
|
- [x] 1.1 Remove runtime DB initialization from bootstrap flow (`src/index.js`).
|
||||||
|
- [x] 1.2 Remove scheduled partition maintenance job from runtime service.
|
||||||
|
- [x] 1.3 Remove runtime missing-partition auto-fix behavior.
|
||||||
|
- [x] 1.4 Remove legacy DB provisioning modules and scripts from service project.
|
||||||
|
- [x] 1.5 Add external SQL/JS provisioning scripts under root `SQL_Script/` for DB/schema/partition management.
|
||||||
|
- [x] 1.6 Update project docs to point DB provisioning to `SQL_Script/`.
|
||||||
|
|
||||||
|
## 2. Validation
|
||||||
|
- [x] 2.1 Run `npm run lint` in `bls-onoffline-backend`.
|
||||||
|
- [x] 2.2 Run `npm run build` in `bls-onoffline-backend`.
|
||||||
|
- [x] 2.3 Run `openspec validate remove-runtime-db-provisioning --strict`.
|
||||||
31
docs/template/bls-onoffline-backend/openspec/project.md
vendored
Normal file
31
docs/template/bls-onoffline-backend/openspec/project.md
vendored
Normal file
@@ -0,0 +1,31 @@
|
|||||||
|
# Project Context
|
||||||
|
|
||||||
|
## Purpose
|
||||||
|
[Describe your project's purpose and goals]
|
||||||
|
|
||||||
|
## Tech Stack
|
||||||
|
- [List your primary technologies]
|
||||||
|
- [e.g., TypeScript, React, Node.js]
|
||||||
|
|
||||||
|
## Project Conventions
|
||||||
|
|
||||||
|
### Code Style
|
||||||
|
[Describe your code style preferences, formatting rules, and naming conventions]
|
||||||
|
|
||||||
|
### Architecture Patterns
|
||||||
|
[Document your architectural decisions and patterns]
|
||||||
|
|
||||||
|
### Testing Strategy
|
||||||
|
[Explain your testing approach and requirements]
|
||||||
|
|
||||||
|
### Git Workflow
|
||||||
|
[Describe your branching strategy and commit conventions]
|
||||||
|
|
||||||
|
## Domain Context
|
||||||
|
[Add domain-specific knowledge that AI assistants need to understand]
|
||||||
|
|
||||||
|
## Important Constraints
|
||||||
|
[List any technical, business, or regulatory constraints]
|
||||||
|
|
||||||
|
## External Dependencies
|
||||||
|
[Document key external services, APIs, or systems]
|
||||||
103
docs/template/bls-onoffline-backend/openspec/specs/onoffline/spec.md
vendored
Normal file
103
docs/template/bls-onoffline-backend/openspec/specs/onoffline/spec.md
vendored
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
# Spec: onoffline-backend
|
||||||
|
|
||||||
|
## Purpose
|
||||||
|
从 Kafka 消费设备上下线事件并按规则写入 PostgreSQL 分区表,确保高可靠性、幂等写入和错误恢复能力。
|
||||||
|
## Requirements
|
||||||
|
### Requirement: 消费并落库
|
||||||
|
系统 SHALL 从 blwlog4Nodejs-rcu-onoffline-topic-0 消费消息,并写入 log_platform.onoffline.onoffline_record。
|
||||||
|
|
||||||
|
#### Scenario: 非重启数据写入
|
||||||
|
- **GIVEN** RebootReason 为空或不存在
|
||||||
|
- **WHEN** 消息被处理
|
||||||
|
- **THEN** current_status 等于 CurrentStatus (截断至 255 字符)
|
||||||
|
|
||||||
|
### Requirement: 重启数据处理
|
||||||
|
系统 SHALL 在 RebootReason 非空时强制 current_status 为 on。
|
||||||
|
|
||||||
|
#### Scenario: 重启数据写入
|
||||||
|
- **GIVEN** RebootReason 为非空值
|
||||||
|
- **WHEN** 消息被处理
|
||||||
|
- **THEN** current_status 等于 on
|
||||||
|
|
||||||
|
### Requirement: 空值保留
|
||||||
|
系统 SHALL 保留上游空值,不对字段进行补 0。
|
||||||
|
|
||||||
|
#### Scenario: 空值写入
|
||||||
|
- **GIVEN** LauncherVersion 或 RebootReason 为空字符串
|
||||||
|
- **WHEN** 消息被处理
|
||||||
|
- **THEN** 数据库存储值为对应的空字符串
|
||||||
|
|
||||||
|
### Requirement: 数据库分区策略
|
||||||
|
系统 SHALL 使用 Range Partitioning 按天分区,并自动维护未来 30 天的分区表,子表依赖 PostgreSQL 原生机制继承主表索引。
|
||||||
|
|
||||||
|
#### Scenario: 分区预创建
|
||||||
|
- **GIVEN** 系统启动或每日凌晨
|
||||||
|
- **WHEN** 运行分区维护任务
|
||||||
|
- **THEN** 确保数据库中存在未来 30 天的分区表,无需对子表显式创建单列索引
|
||||||
|
|
||||||
|
### Requirement: 消费可靠性 (At-Least-Once)
|
||||||
|
系统 SHALL 仅在数据成功写入数据库后,才向 Kafka 提交消费位点。
|
||||||
|
|
||||||
|
#### Scenario: 逐条确认与顺序提交
|
||||||
|
- **GIVEN** 并发处理多条消息 (Offset 1, 2, 3)
|
||||||
|
- **WHEN** Offset 2 先完成,Offset 1 尚未完成
|
||||||
|
- **THEN** 系统不提交 Offset 2,直到 Offset 1 也完成,才提交 Offset 3 (即 1, 2, 3 都完成)
|
||||||
|
|
||||||
|
### Requirement: 数据库离线保护
|
||||||
|
系统 SHALL 在数据库连接丢失时暂停消费,防止数据堆积或丢失。
|
||||||
|
|
||||||
|
#### Scenario: 数据库断连
|
||||||
|
- **GIVEN** 数据库连接失败 (ECONNREFUSED 等)
|
||||||
|
- **WHEN** 消费者尝试写入
|
||||||
|
- **THEN** 暂停 Kafka 消费 1 分钟,并进入轮询检测模式,直到数据库恢复
|
||||||
|
|
||||||
|
### Requirement: 幂等写入
|
||||||
|
系统 SHALL 处理重复消费的数据,防止主键冲突。
|
||||||
|
|
||||||
|
#### Scenario: 重复数据处理
|
||||||
|
- **GIVEN** Kafka 重新投递已处理过的消息
|
||||||
|
- **WHEN** 尝试写入数据库
|
||||||
|
- **THEN** 使用 `ON CONFLICT DO NOTHING` 忽略冲突,视为处理成功
|
||||||
|
|
||||||
|
### Requirement: 性能与日志
|
||||||
|
系统 SHALL 最小化正常运行时的日志输出。
|
||||||
|
|
||||||
|
#### Scenario: 正常运行日志
|
||||||
|
- **GIVEN** 数据正常处理
|
||||||
|
- **WHEN** 写入成功
|
||||||
|
- **THEN** 不输出单条日志,仅每分钟输出聚合统计 (Pulled/Inserted)
|
||||||
|
|
||||||
|
### Requirement: 字段长度限制与截断
|
||||||
|
系统 SHALL 将部分变长字段截断至数据库允许的最大长度 (VARCHAR(255)),防止写入失败。
|
||||||
|
|
||||||
|
#### Scenario: 超长字段处理
|
||||||
|
- **GIVEN** LauncherVersion, CurrentStatus 或 RebootReason 超过 255 字符
|
||||||
|
- **WHEN** 消息被处理
|
||||||
|
- **THEN** 字段被截断为前 255 个字符并入库
|
||||||
|
|
||||||
|
### Requirement: 时间戳单位自动识别
|
||||||
|
系统 SHALL 自动识别 UnixTime 字段是秒还是毫秒,并统一转换为毫秒。
|
||||||
|
|
||||||
|
#### Scenario: 秒级时间戳转换
|
||||||
|
- **GIVEN** UnixTime < 100000000000 (约 1973 年前)
|
||||||
|
- **WHEN** 解析时间戳
|
||||||
|
- **THEN** 自动乘以 1000 转换为毫秒
|
||||||
|
|
||||||
|
### Requirement: 批量消费与写入
|
||||||
|
系统 SHALL 对 Kafka 消息进行缓冲,并按批次写入数据库,以提高吞吐量;当写入失败时,系统 SHALL 执行连接恢复重试与降级策略,但不在运行时创建数据库分区。
|
||||||
|
|
||||||
|
#### Scenario: 批量写入
|
||||||
|
- **GIVEN** 短时间内收到多条消息 (e.g., 500条)
|
||||||
|
- **WHEN** 缓冲区满或超时 (e.g., 200ms)
|
||||||
|
- **THEN** 执行一次批量数据库插入操作
|
||||||
|
|
||||||
|
#### Scenario: 写入失败降级
|
||||||
|
- **GIVEN** 批量写入因数据错误失败 (非连接错误)
|
||||||
|
- **WHEN** 捕获异常
|
||||||
|
- **THEN** 自动降级为逐条写入,以隔离错误数据并确保有效数据入库
|
||||||
|
|
||||||
|
#### Scenario: 分区缺失错误处理
|
||||||
|
- **GIVEN** 写入时数据库返回分区缺失错误
|
||||||
|
- **WHEN** 服务处理该错误
|
||||||
|
- **THEN** 服务记录错误并按既有错误处理机制处理,不在运行时执行分区创建
|
||||||
|
|
||||||
11
docs/template/bls-onoffline-backend/openspec/specs/onoffline/status.md
vendored
Normal file
11
docs/template/bls-onoffline-backend/openspec/specs/onoffline/status.md
vendored
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
|
||||||
|
## Implementation Status
|
||||||
|
- **Date**: 2026-02-04
|
||||||
|
- **Status**: Completed
|
||||||
|
- **Notes**:
|
||||||
|
- 已完成核心消费逻辑、分区管理、数据库幂等写入。
|
||||||
|
- 已处理数据库连接泄露 (EADDRINUSE) 问题,增加了离线保护机制。
|
||||||
|
- 已修复时间戳单位问题 (Seconds -> MS)。
|
||||||
|
- 已将关键字段长度扩展至 VARCHAR(255) 并增加了代码层截断保护。
|
||||||
|
- 验证了数据积压消费能力。
|
||||||
|
- 本阶段开发任务已归档。
|
||||||
3526
docs/template/bls-onoffline-backend/package-lock.json
generated
vendored
Normal file
3526
docs/template/bls-onoffline-backend/package-lock.json
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
27
docs/template/bls-onoffline-backend/package.json
vendored
Normal file
27
docs/template/bls-onoffline-backend/package.json
vendored
Normal file
@@ -0,0 +1,27 @@
|
|||||||
|
{
|
||||||
|
"name": "bls-onoffline-backend",
|
||||||
|
"version": "1.0.0",
|
||||||
|
"type": "module",
|
||||||
|
"private": true,
|
||||||
|
"scripts": {
|
||||||
|
"dev": "node src/index.js",
|
||||||
|
"build": "vite build --ssr src/index.js --outDir dist",
|
||||||
|
"test": "vitest run",
|
||||||
|
"lint": "node scripts/lint.js",
|
||||||
|
"spec:lint": "openspec validate --specs --strict --no-interactive",
|
||||||
|
"spec:validate": "openspec validate --specs --no-interactive",
|
||||||
|
"start": "node dist/index.js"
|
||||||
|
},
|
||||||
|
"dependencies": {
|
||||||
|
"dotenv": "^16.4.5",
|
||||||
|
"kafka-node": "^5.0.0",
|
||||||
|
"node-cron": "^4.2.1",
|
||||||
|
"pg": "^8.11.5",
|
||||||
|
"redis": "^4.6.13",
|
||||||
|
"zod": "^4.3.6"
|
||||||
|
},
|
||||||
|
"devDependencies": {
|
||||||
|
"vite": "^5.4.0",
|
||||||
|
"vitest": "^4.0.18"
|
||||||
|
}
|
||||||
|
}
|
||||||
41
docs/template/bls-onoffline-backend/scripts/lint.js
vendored
Normal file
41
docs/template/bls-onoffline-backend/scripts/lint.js
vendored
Normal file
@@ -0,0 +1,41 @@
|
|||||||
|
import fs from 'fs';
|
||||||
|
import path from 'path';
|
||||||
|
import { fileURLToPath } from 'url';
|
||||||
|
import { spawnSync } from 'child_process';
|
||||||
|
|
||||||
|
const __filename = fileURLToPath(import.meta.url);
|
||||||
|
const __dirname = path.dirname(__filename);
|
||||||
|
const projectRoot = path.resolve(__dirname, '..');
|
||||||
|
const targets = ['src', 'tests'];
|
||||||
|
|
||||||
|
const collectFiles = (dir) => {
|
||||||
|
if (!fs.existsSync(dir)) {
|
||||||
|
return [];
|
||||||
|
}
|
||||||
|
const entries = fs.readdirSync(dir, { withFileTypes: true });
|
||||||
|
return entries.flatMap((entry) => {
|
||||||
|
const fullPath = path.join(dir, entry.name);
|
||||||
|
if (entry.isDirectory()) {
|
||||||
|
return collectFiles(fullPath);
|
||||||
|
}
|
||||||
|
if (entry.isFile() && fullPath.endsWith('.js')) {
|
||||||
|
return [fullPath];
|
||||||
|
}
|
||||||
|
return [];
|
||||||
|
});
|
||||||
|
};
|
||||||
|
|
||||||
|
const files = targets.flatMap((target) => collectFiles(path.join(projectRoot, target)));
|
||||||
|
|
||||||
|
const failures = [];
|
||||||
|
|
||||||
|
files.forEach((file) => {
|
||||||
|
const result = spawnSync(process.execPath, ['--check', file], { stdio: 'inherit' });
|
||||||
|
if (result.status !== 0) {
|
||||||
|
failures.push(file);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
if (failures.length > 0) {
|
||||||
|
process.exit(1);
|
||||||
|
}
|
||||||
36
docs/template/bls-onoffline-backend/scripts/verify_data.js
vendored
Normal file
36
docs/template/bls-onoffline-backend/scripts/verify_data.js
vendored
Normal file
@@ -0,0 +1,36 @@
|
|||||||
|
|
||||||
|
import { config } from '../src/config/config.js';
|
||||||
|
import dbManager from '../src/db/databaseManager.js';
|
||||||
|
import { logger } from '../src/utils/logger.js';
|
||||||
|
|
||||||
|
const verifyData = async () => {
|
||||||
|
const client = await dbManager.pool.connect();
|
||||||
|
try {
|
||||||
|
console.log('Verifying data in database...');
|
||||||
|
|
||||||
|
// Count total rows
|
||||||
|
const countSql = `SELECT count(*) FROM ${config.db.schema}.${config.db.table}`;
|
||||||
|
const countRes = await client.query(countSql);
|
||||||
|
console.log(`Total rows in ${config.db.schema}.${config.db.table}: ${countRes.rows[0].count}`);
|
||||||
|
|
||||||
|
// Check recent rows
|
||||||
|
const recentSql = `
|
||||||
|
SELECT * FROM ${config.db.schema}.${config.db.table}
|
||||||
|
ORDER BY ts_ms DESC
|
||||||
|
LIMIT 5
|
||||||
|
`;
|
||||||
|
const recentRes = await client.query(recentSql);
|
||||||
|
console.log('Recent 5 rows:');
|
||||||
|
recentRes.rows.forEach(row => {
|
||||||
|
console.log(JSON.stringify(row));
|
||||||
|
});
|
||||||
|
|
||||||
|
} catch (err) {
|
||||||
|
console.error('Error verifying data:', err);
|
||||||
|
} finally {
|
||||||
|
client.release();
|
||||||
|
await dbManager.pool.end();
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
verifyData();
|
||||||
50
docs/template/bls-onoffline-backend/spec/onoffline-spec.md
vendored
Normal file
50
docs/template/bls-onoffline-backend/spec/onoffline-spec.md
vendored
Normal file
@@ -0,0 +1,50 @@
|
|||||||
|
bls-onoffline-backend 规格说明
|
||||||
|
|
||||||
|
1. Kafka 数据结构
|
||||||
|
{
|
||||||
|
"HotelCode": "1085",
|
||||||
|
"MAC": "00:1A:2B:3C:4D:5E",
|
||||||
|
"HostNumber": "091123987456",
|
||||||
|
"RoomNumber": "8888房",
|
||||||
|
"EndPoint": "50.2.60.1:6543",
|
||||||
|
"CurrentStatus": "on",
|
||||||
|
"CurrentTime": "2026-02-02T10:30:00Z",
|
||||||
|
"UnixTime": 1770000235000,
|
||||||
|
"LauncherVersion": "1.0.0",
|
||||||
|
"RebootReason": "1"
|
||||||
|
}
|
||||||
|
|
||||||
|
2. Kafka 主题
|
||||||
|
Topic:blwlog4Nodejs-rcu-onoffline-topic
|
||||||
|
|
||||||
|
3. 数据库结构
|
||||||
|
数据库:log_platform
|
||||||
|
表:onoffline_record
|
||||||
|
字段:
|
||||||
|
guid varchar(32)
|
||||||
|
ts_ms int8
|
||||||
|
write_ts_ms int8
|
||||||
|
hotel_id int2
|
||||||
|
mac varchar(21)
|
||||||
|
device_id varchar(64)
|
||||||
|
room_id varchar(64)
|
||||||
|
ip varchar(21)
|
||||||
|
current_status varchar(10)
|
||||||
|
launcher_version varchar(10)
|
||||||
|
reboot_reason varchar(10)
|
||||||
|
主键:(ts_ms, mac, device_id, room_id)
|
||||||
|
按 ts_ms 每日分区
|
||||||
|
|
||||||
|
G5库结构(双写,临时接入):
|
||||||
|
库同为:log_platform
|
||||||
|
表:onoffline_record_g5
|
||||||
|
差异字段:
|
||||||
|
- guid 为 int4,由库自己生成。
|
||||||
|
- record_source 固定为 CRICS。
|
||||||
|
- current_status 为 int2,on映射为1,off映射为2,其余为0。
|
||||||
|
支持通过环境变量开关双写。
|
||||||
|
|
||||||
|
4. 数据处理规则
|
||||||
|
非重启数据:reboot_reason 为空或不存在,current_status 取 CurrentStatus
|
||||||
|
重启数据:reboot_reason 不为空,current_status 固定为 on
|
||||||
|
其余字段直接按 Kafka 原值落库,空值不补 0
|
||||||
72
docs/template/bls-onoffline-backend/src/config/config.js
vendored
Normal file
72
docs/template/bls-onoffline-backend/src/config/config.js
vendored
Normal file
@@ -0,0 +1,72 @@
|
|||||||
|
import dotenv from 'dotenv';
|
||||||
|
|
||||||
|
dotenv.config();
|
||||||
|
|
||||||
|
const parseNumber = (value, defaultValue) => {
|
||||||
|
const parsed = Number(value);
|
||||||
|
return Number.isFinite(parsed) ? parsed : defaultValue;
|
||||||
|
};
|
||||||
|
|
||||||
|
const parseList = (value) =>
|
||||||
|
(value || '')
|
||||||
|
.split(',')
|
||||||
|
.map((item) => item.trim())
|
||||||
|
.filter(Boolean);
|
||||||
|
|
||||||
|
export const config = {
|
||||||
|
env: process.env.NODE_ENV || 'development',
|
||||||
|
port: parseNumber(process.env.PORT, 3001),
|
||||||
|
kafka: {
|
||||||
|
brokers: parseList(process.env.KAFKA_BROKERS),
|
||||||
|
topic: process.env.KAFKA_TOPIC || process.env.KAFKA_TOPICS || 'blwlog4Nodejs-rcu-onoffline-topic',
|
||||||
|
groupId: process.env.KAFKA_GROUP_ID || 'bls-onoffline-group',
|
||||||
|
clientId: process.env.KAFKA_CLIENT_ID || 'bls-onoffline-client',
|
||||||
|
consumerInstances: parseNumber(process.env.KAFKA_CONSUMER_INSTANCES, 1),
|
||||||
|
maxInFlight: parseNumber(process.env.KAFKA_MAX_IN_FLIGHT, 20000),
|
||||||
|
fetchMaxBytes: parseNumber(process.env.KAFKA_FETCH_MAX_BYTES, 50 * 1024 * 1024),
|
||||||
|
fetchMinBytes: parseNumber(process.env.KAFKA_FETCH_MIN_BYTES, 256 * 1024),
|
||||||
|
fetchMaxWaitMs: parseNumber(process.env.KAFKA_FETCH_MAX_WAIT_MS, 100),
|
||||||
|
autoCommitIntervalMs: parseNumber(process.env.KAFKA_AUTO_COMMIT_INTERVAL_MS, 5000),
|
||||||
|
commitIntervalMs: parseNumber(process.env.KAFKA_COMMIT_INTERVAL_MS, 200),
|
||||||
|
commitOnAttempt: process.env.KAFKA_COMMIT_ON_ATTEMPT === 'true',
|
||||||
|
batchSize: parseNumber(process.env.KAFKA_BATCH_SIZE, 5000),
|
||||||
|
batchTimeoutMs: parseNumber(process.env.KAFKA_BATCH_TIMEOUT_MS, 50),
|
||||||
|
logMessages: process.env.KAFKA_LOG_MESSAGES === 'true',
|
||||||
|
sasl: process.env.KAFKA_SASL_USERNAME && process.env.KAFKA_SASL_PASSWORD ? {
|
||||||
|
mechanism: process.env.KAFKA_SASL_MECHANISM || 'plain',
|
||||||
|
username: process.env.KAFKA_SASL_USERNAME,
|
||||||
|
password: process.env.KAFKA_SASL_PASSWORD
|
||||||
|
} : undefined
|
||||||
|
},
|
||||||
|
db: {
|
||||||
|
host: process.env.DB_HOST || process.env.POSTGRES_HOST || 'localhost',
|
||||||
|
port: parseNumber(process.env.DB_PORT || process.env.POSTGRES_PORT, 5432),
|
||||||
|
user: process.env.DB_USER || process.env.POSTGRES_USER || 'postgres',
|
||||||
|
password: process.env.DB_PASSWORD || process.env.POSTGRES_PASSWORD || '',
|
||||||
|
database: process.env.DB_DATABASE || process.env.POSTGRES_DATABASE || 'log_platform',
|
||||||
|
max: parseNumber(process.env.DB_MAX_CONNECTIONS || process.env.POSTGRES_MAX_CONNECTIONS, 10),
|
||||||
|
ssl: process.env.DB_SSL === 'true' ? { rejectUnauthorized: false } : undefined,
|
||||||
|
schema: process.env.DB_SCHEMA || 'onoffline',
|
||||||
|
table: process.env.DB_TABLE || 'onoffline_record'
|
||||||
|
},
|
||||||
|
g5db: {
|
||||||
|
enabled: !!process.env.POSTGRES_HOST_G5,
|
||||||
|
host: process.env.POSTGRES_HOST_G5,
|
||||||
|
port: parseNumber(process.env.POSTGRES_PORT_G5, 5434),
|
||||||
|
user: process.env.POSTGRES_USER_G5,
|
||||||
|
password: process.env.POSTGRES_PASSWORD_G5,
|
||||||
|
database: process.env.POSTGRES_DATABASE_G5,
|
||||||
|
max: parseNumber(process.env.POSTGRES_MAX_CONNECTIONS_G5, 3),
|
||||||
|
ssl: process.env.POSTGRES_SSL_G5 === 'true' ? { rejectUnauthorized: false } : undefined,
|
||||||
|
schema: process.env.DB_SCHEMA_G5 || 'onoffline',
|
||||||
|
table: process.env.DB_TABLE_G5 || 'onoffline_record_g5'
|
||||||
|
},
|
||||||
|
redis: {
|
||||||
|
host: process.env.REDIS_HOST || 'localhost',
|
||||||
|
port: parseNumber(process.env.REDIS_PORT, 6379),
|
||||||
|
password: process.env.REDIS_PASSWORD || undefined,
|
||||||
|
db: parseNumber(process.env.REDIS_DB, 0),
|
||||||
|
projectName: process.env.REDIS_PROJECT_NAME || 'bls-onoffline',
|
||||||
|
apiBaseUrl: process.env.REDIS_API_BASE_URL || `http://localhost:${parseNumber(process.env.PORT, 3001)}`
|
||||||
|
}
|
||||||
|
};
|
||||||
108
docs/template/bls-onoffline-backend/src/db/databaseManager.js
vendored
Normal file
108
docs/template/bls-onoffline-backend/src/db/databaseManager.js
vendored
Normal file
@@ -0,0 +1,108 @@
|
|||||||
|
import pg from 'pg';
|
||||||
|
import { config } from '../config/config.js';
|
||||||
|
import { logger } from '../utils/logger.js';
|
||||||
|
|
||||||
|
const { Pool } = pg;
|
||||||
|
|
||||||
|
const columns = [
|
||||||
|
'guid',
|
||||||
|
'ts_ms',
|
||||||
|
'write_ts_ms',
|
||||||
|
'hotel_id',
|
||||||
|
'mac',
|
||||||
|
'device_id',
|
||||||
|
'room_id',
|
||||||
|
'ip',
|
||||||
|
'current_status',
|
||||||
|
'launcher_version',
|
||||||
|
'reboot_reason'
|
||||||
|
];
|
||||||
|
|
||||||
|
export class DatabaseManager {
|
||||||
|
constructor(dbConfig) {
|
||||||
|
this.pool = new Pool({
|
||||||
|
host: dbConfig.host,
|
||||||
|
port: dbConfig.port,
|
||||||
|
user: dbConfig.user,
|
||||||
|
password: dbConfig.password,
|
||||||
|
database: dbConfig.database,
|
||||||
|
max: dbConfig.max,
|
||||||
|
ssl: dbConfig.ssl
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
async insertRows({ schema, table, rows }) {
|
||||||
|
if (!rows || rows.length === 0) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
const statement = `
|
||||||
|
INSERT INTO ${schema}.${table} (${columns.join(', ')})
|
||||||
|
SELECT *
|
||||||
|
FROM UNNEST(
|
||||||
|
$1::text[],
|
||||||
|
$2::int8[],
|
||||||
|
$3::int8[],
|
||||||
|
$4::int2[],
|
||||||
|
$5::text[],
|
||||||
|
$6::text[],
|
||||||
|
$7::text[],
|
||||||
|
$8::text[],
|
||||||
|
$9::text[],
|
||||||
|
$10::text[],
|
||||||
|
$11::text[]
|
||||||
|
)
|
||||||
|
ON CONFLICT DO NOTHING
|
||||||
|
`;
|
||||||
|
try {
|
||||||
|
const params = columns.map((column) => rows.map((row) => row[column] ?? null));
|
||||||
|
await this.pool.query(statement, params);
|
||||||
|
} catch (error) {
|
||||||
|
logger.error('Database insert failed', {
|
||||||
|
error: error?.message,
|
||||||
|
schema,
|
||||||
|
table,
|
||||||
|
rowsLength: rows.length
|
||||||
|
});
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async checkConnection() {
|
||||||
|
let client;
|
||||||
|
try {
|
||||||
|
const connectPromise = this.pool.connect();
|
||||||
|
|
||||||
|
// Create a timeout promise that rejects after 5000ms
|
||||||
|
const timeoutPromise = new Promise((_, reject) => {
|
||||||
|
setTimeout(() => reject(new Error('Connection timeout')), 5000);
|
||||||
|
});
|
||||||
|
|
||||||
|
try {
|
||||||
|
// Race the connection attempt against the timeout
|
||||||
|
client = await Promise.race([connectPromise, timeoutPromise]);
|
||||||
|
} catch (raceError) {
|
||||||
|
// If we timed out, the connectPromise might still resolve later.
|
||||||
|
// We must ensure that if it does, the client is released back to the pool immediately.
|
||||||
|
connectPromise.then(c => c.release()).catch(() => {});
|
||||||
|
throw raceError;
|
||||||
|
}
|
||||||
|
|
||||||
|
await client.query('SELECT 1');
|
||||||
|
return true;
|
||||||
|
} catch (err) {
|
||||||
|
logger.error('Database check connection failed', { error: err.message });
|
||||||
|
return false;
|
||||||
|
} finally {
|
||||||
|
if (client) {
|
||||||
|
client.release();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async close() {
|
||||||
|
await this.pool.end();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const dbManager = new DatabaseManager(config.db);
|
||||||
|
export default dbManager;
|
||||||
121
docs/template/bls-onoffline-backend/src/db/g5DatabaseManager.js
vendored
Normal file
121
docs/template/bls-onoffline-backend/src/db/g5DatabaseManager.js
vendored
Normal file
@@ -0,0 +1,121 @@
|
|||||||
|
import pg from 'pg';
|
||||||
|
import { config } from '../config/config.js';
|
||||||
|
import { logger } from '../utils/logger.js';
|
||||||
|
|
||||||
|
const { Pool } = pg;
|
||||||
|
|
||||||
|
const g5Columns = [
|
||||||
|
'ts_ms',
|
||||||
|
'write_ts_ms',
|
||||||
|
'hotel_id',
|
||||||
|
'mac',
|
||||||
|
'device_id',
|
||||||
|
'room_id',
|
||||||
|
'ip',
|
||||||
|
'current_status',
|
||||||
|
'launcher_version',
|
||||||
|
'reboot_reason',
|
||||||
|
'record_source'
|
||||||
|
];
|
||||||
|
|
||||||
|
export class G5DatabaseManager {
|
||||||
|
constructor(dbConfig) {
|
||||||
|
if (!dbConfig.enabled) return;
|
||||||
|
this.pool = new Pool({
|
||||||
|
host: dbConfig.host,
|
||||||
|
port: dbConfig.port,
|
||||||
|
user: dbConfig.user,
|
||||||
|
password: dbConfig.password,
|
||||||
|
database: dbConfig.database,
|
||||||
|
max: dbConfig.max,
|
||||||
|
ssl: dbConfig.ssl
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
async insertRows({ schema, table, rows }) {
|
||||||
|
if (!this.pool || !rows || rows.length === 0) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
const statement = `
|
||||||
|
INSERT INTO ${schema}.${table} (${g5Columns.join(', ')})
|
||||||
|
SELECT *
|
||||||
|
FROM UNNEST(
|
||||||
|
$1::int8[],
|
||||||
|
$2::int8[],
|
||||||
|
$3::int2[],
|
||||||
|
$4::text[],
|
||||||
|
$5::text[],
|
||||||
|
$6::text[],
|
||||||
|
$7::text[],
|
||||||
|
$8::int2[],
|
||||||
|
$9::text[],
|
||||||
|
$10::text[],
|
||||||
|
$11::text[]
|
||||||
|
)
|
||||||
|
ON CONFLICT DO NOTHING
|
||||||
|
`;
|
||||||
|
|
||||||
|
try {
|
||||||
|
const params = g5Columns.map((column) => {
|
||||||
|
return rows.map((row) => {
|
||||||
|
if (column === 'record_source') {
|
||||||
|
return 'CRICS';
|
||||||
|
}
|
||||||
|
if (column === 'current_status') {
|
||||||
|
// current_status in G5 is int2
|
||||||
|
if (row.current_status === 'on') return 1;
|
||||||
|
if (row.current_status === 'off') return 2;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
return row[column] ?? null;
|
||||||
|
});
|
||||||
|
});
|
||||||
|
|
||||||
|
await this.pool.query(statement, params);
|
||||||
|
} catch (error) {
|
||||||
|
logger.error('G5 Database insert failed', {
|
||||||
|
error: error?.message,
|
||||||
|
schema,
|
||||||
|
table,
|
||||||
|
rowsLength: rows.length
|
||||||
|
});
|
||||||
|
throw error;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async checkConnection() {
|
||||||
|
if (!this.pool) return true; // Pretend it's ok if disabled
|
||||||
|
let client;
|
||||||
|
try {
|
||||||
|
const connectPromise = this.pool.connect();
|
||||||
|
const timeoutPromise = new Promise((_, reject) => {
|
||||||
|
setTimeout(() => reject(new Error('Connection timeout')), 5000);
|
||||||
|
});
|
||||||
|
try {
|
||||||
|
client = await Promise.race([connectPromise, timeoutPromise]);
|
||||||
|
} catch (raceError) {
|
||||||
|
connectPromise.then(c => c.release()).catch(() => { });
|
||||||
|
throw raceError;
|
||||||
|
}
|
||||||
|
await client.query('SELECT 1');
|
||||||
|
return true;
|
||||||
|
} catch (err) {
|
||||||
|
logger.error('G5 Database check connection failed', { error: err.message });
|
||||||
|
return false;
|
||||||
|
} finally {
|
||||||
|
if (client) {
|
||||||
|
client.release();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async close() {
|
||||||
|
if (this.pool) {
|
||||||
|
await this.pool.end();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const g5DbManager = new G5DatabaseManager(config.g5db);
|
||||||
|
export default g5DbManager;
|
||||||
469
docs/template/bls-onoffline-backend/src/index.js
vendored
Normal file
469
docs/template/bls-onoffline-backend/src/index.js
vendored
Normal file
@@ -0,0 +1,469 @@
|
|||||||
|
import cron from 'node-cron';
|
||||||
|
import { config } from './config/config.js';
|
||||||
|
import dbManager from './db/databaseManager.js';
|
||||||
|
import g5DbManager from './db/g5DatabaseManager.js';
|
||||||
|
import { createKafkaConsumers } from './kafka/consumer.js';
|
||||||
|
import { parseMessageToRows } from './processor/index.js';
|
||||||
|
import { createRedisClient } from './redis/redisClient.js';
|
||||||
|
import { RedisIntegration } from './redis/redisIntegration.js';
|
||||||
|
import { buildErrorQueueKey, enqueueError, startErrorRetryWorker } from './redis/errorQueue.js';
|
||||||
|
import { MetricCollector } from './utils/metricCollector.js';
|
||||||
|
import { logger } from './utils/logger.js';
|
||||||
|
|
||||||
|
// Application entry point: logs the effective config, wires up metrics,
// Redis logging/heartbeat, the per-minute metrics cron, batching state,
// Kafka consumers, the error-retry worker and graceful shutdown.
const bootstrap = async () => {
  // Log startup config (masked: credentials are deliberately omitted)
  logger.info('Starting application with config', {
    env: process.env.NODE_ENV,
    db: {
      host: config.db.host,
      port: config.db.port,
      user: config.db.user,
      database: config.db.database,
      schema: config.db.schema
    },
    kafka: {
      brokers: config.kafka.brokers,
      topic: config.kafka.topic,
      groupId: config.kafka.groupId
    },
    redis: {
      host: config.redis.host,
      port: config.redis.port
    }
  });

  // In-memory counters, reported and reset once per minute by the cron
  // job below.
  const metricCollector = new MetricCollector();

  // Redis client + integration used for remote log shipping and the
  // liveness heartbeat. Must exist before the metrics cron references it.
  const redisClient = await createRedisClient(config.redis);
  const redisIntegration = new RedisIntegration(
    redisClient,
    config.redis.projectName,
    config.redis.apiBaseUrl
  );
  redisIntegration.startHeartbeat();

  // 1.1 Metric reporting cron job (every minute).
  cron.schedule('* * * * *', async () => {
    const metrics = metricCollector.getAndReset();
    // Guard the averages against division by zero when nothing ran.
    const flushAvgMs = metrics.batch_flush_count > 0 ? (metrics.batch_flush_ms_sum / metrics.batch_flush_count).toFixed(1) : '0.0';
    const dbAvgMs = metrics.db_insert_count > 0 ? (metrics.db_insert_ms_sum / metrics.db_insert_count).toFixed(1) : '0.0';
    const report = `[Metrics] Pulled:${metrics.kafka_pulled} ParseErr:${metrics.parse_error} Inserted:${metrics.db_inserted} Failed:${metrics.db_failed} FlushAvg:${flushAvgMs}ms DbAvg:${dbAvgMs}ms`;
    console.log(report);
    logger.info(report);

    try {
      await redisIntegration.info('Minute Metrics', metrics);
    } catch (err) {
      // Metrics shipping is best-effort; never crash the scheduler.
      logger.error('Failed to report metrics to Redis', { error: err?.message });
    }
  });

  // Redis list key used to park messages that failed processing.
  const errorQueueKey = buildErrorQueueKey(config.redis.projectName);
|
||||||
|
|
||||||
|
/**
 * Central handler for Kafka processing failures.
 *
 * Logs the error locally and to Redis (best-effort), and — when the
 * failing message is available — enqueues its payload plus metadata onto
 * the Redis error queue so the retry worker can re-process it later.
 *
 * @param {Error} error   failure, optionally tagged with a `type`
 * @param {object} [message] original Kafka message (value/topic/partition/offset/key)
 */
const handleError = async (error, message) => {
  logger.error('Kafka processing error', {
    error: error?.message,
    type: error?.type,
    stack: error?.stack
  });
  try {
    await redisIntegration.error('Kafka processing error', {
      module: 'kafka',
      stack: error?.stack || error?.message
    });
  } catch (redisError) {
    // Remote logging is best-effort only.
    logger.error('Redis error log failed', { error: redisError?.message });
  }
  if (message) {
    // Normalize the value to a string so it serializes cleanly in Redis.
    const messageValue = Buffer.isBuffer(message.value)
      ? message.value.toString('utf8')
      : message.value;
    try {
      await enqueueError(redisClient, errorQueueKey, {
        attempts: 0,
        value: messageValue,
        meta: {
          topic: message.topic,
          partition: message.partition,
          offset: message.offset,
          key: message.key
        },
        timestamp: Date.now()
      });
    } catch (queueErr) {
      // Renamed from `enqueueError`: the old catch binding shadowed the
      // imported enqueueError() helper, which is confusing and fragile.
      logger.error('Enqueue error payload failed', { error: queueErr?.message });
    }
  }
};
|
||||||
|
|
||||||
|
  // Batch tuning knobs with safe fallbacks when config values are absent
  // or non-numeric.
  const configuredBatchSize = Number.isFinite(config.kafka.batchSize) ? config.kafka.batchSize : 1000;
  const configuredBatchTimeoutMs = Number.isFinite(config.kafka.batchTimeoutMs) ? config.kafka.batchTimeoutMs : 20;
  const configuredMaxInFlight = Number.isFinite(config.kafka.maxInFlight) ? config.kafka.maxInFlight : 5000;

  // A batch never exceeds the consumer's in-flight cap and never drops
  // below 10; the flush timeout is at least 1 ms.
  const BATCH_SIZE = Math.max(10, Math.min(configuredBatchSize, configuredMaxInFlight));
  const BATCH_TIMEOUT_MS = Math.max(1, configuredBatchTimeoutMs);
  // When true, a failed batch is still treated as consumed after one
  // attempt (at-most-once delivery semantics).
  const commitOnAttempt = config.kafka.commitOnAttempt === true;

  // Per topic-partition batching state:
  // Map<partitionKey, { items: [], timer: Timeout|null, flushing: Promise|null }>
  const batchStates = new Map();
|
||||||
|
|
||||||
|
// Derive the batching key for a message. Payloads replayed from the
// error queue carry no topic/partition metadata and all share the
// dedicated 'retry' bucket.
const partitionKeyFromMessage = (message) => {
  const hasOrigin = message?.topic !== undefined && message?.partition !== undefined;
  return hasOrigin ? `${message.topic}-${message.partition}` : 'retry';
};
|
||||||
|
|
||||||
|
// Format an epoch-milliseconds timestamp (number or numeric string) as
// a 'YYYYMMDD' day key in local time. Returns null for non-numeric or
// unparsable input.
const dayKeyFromTsMs = (tsMs) => {
  const ms = typeof tsMs === 'string' ? Number(tsMs) : tsMs;
  if (!Number.isFinite(ms)) return null;
  const date = new Date(ms);
  if (Number.isNaN(date.getTime())) return null;
  const month = String(date.getMonth() + 1).padStart(2, '0');
  const day = String(date.getDate()).padStart(2, '0');
  return `${date.getFullYear()}${month}${day}`;
};
|
||||||
|
|
||||||
|
// Lazily create and return the batch accumulator for a partition key.
const getBatchState = (key) => {
  let state = batchStates.get(key);
  if (!state) {
    state = { items: [], timer: null, flushing: null };
    batchStates.set(key, state);
  }
  return state;
};
|
||||||
|
|
||||||
|
// Decide whether an insert failure looks like a retryable network or
// connectivity problem rather than a data error. Checks well-known
// socket error codes and PostgreSQL connection SQLSTATE codes first,
// then falls back to scanning the error message text.
const isDbConnectionError = (err) => {
  const NETWORK_CODES = new Set([
    'ECONNREFUSED', 'ECONNRESET', 'EPIPE', 'ETIMEDOUT', 'ENOTFOUND',
    'EHOSTUNREACH', 'ENETUNREACH',
    '57P03', '08006', '08001', '08000', '08003'
  ]);
  if (typeof err?.code === 'string' && NETWORK_CODES.has(err.code)) {
    return true;
  }

  const text = typeof err?.message === 'string' ? err.message.toLowerCase() : '';
  if (text === '') return false;
  const fragments = [
    'connection timeout',
    'connection terminated',
    'connection refused',
    'terminating connection',
    'econnrefused',
    'econnreset',
    'etimedout',
    'could not connect',
    'the database system is starting up',
    'no pg_hba.conf entry'
  ];
  return fragments.some((fragment) => text.includes(fragment));
};
|
||||||
|
|
||||||
|
  // Insert rows into the primary DB (and, when enabled, the G5 DB),
  // retrying forever while the failure looks like a connectivity outage.
  // Non-network errors propagate to the caller. G5 mirror failures are
  // logged but intentionally never block or fail the primary insert.
  const insertRowsWithRetry = async (rows) => {
    const startedAt = Date.now();
    while (true) {
      try {
        const promises = [
          dbManager.insertRows({ schema: config.db.schema, table: config.db.table, rows })
        ];
        if (config.g5db.enabled) {
          promises.push(g5DbManager.insertRows({ schema: config.g5db.schema, table: config.g5db.table, rows }).catch(e => {
            // Secondary (G5) write is best-effort only.
            logger.error('G5 Database insert failed but non-blocking', { error: e.message });
          }));
        }
        await Promise.all(promises);

        metricCollector.increment('db_insert_count', 1);
        metricCollector.increment('db_insert_ms_sum', Date.now() - startedAt);
        return;
      } catch (err) {
        if (isDbConnectionError(err)) {
          // Database unreachable: wait, then poll checkConnection() until
          // connectivity returns before retrying the same rows.
          logger.error('Database offline during batch insert. Retrying in 5s...', { error: err.message });
          await new Promise(r => setTimeout(r, 5000));
          while (!(await dbManager.checkConnection())) {
            logger.warn('Database still offline. Waiting 5s...');
            await new Promise(r => setTimeout(r, 5000));
          }
          continue;
        }
        // Data/SQL errors are not retryable here; let the caller decide.
        throw err;
      }
    }
  };
|
||||||
|
|
||||||
|
// Single-shot insert into the primary DB (plus the optional G5 mirror)
// with no retry; failures of the primary insert bubble up to the caller.
const insertRowsOnce = async (rows) => {
  const startedAt = Date.now();
  const tasks = [
    dbManager.insertRows({ schema: config.db.schema, table: config.db.table, rows })
  ];
  if (config.g5db.enabled) {
    const mirrorTask = g5DbManager
      .insertRows({ schema: config.g5db.schema, table: config.g5db.table, rows })
      .catch(e => {
        // Mirror write is best-effort; swallow after logging.
        logger.error('G5 Database insert failed in insertOnce', { error: e.message });
      });
    tasks.push(mirrorTask);
  }
  await Promise.all(tasks);
  metricCollector.increment('db_insert_count', 1);
  metricCollector.increment('db_insert_ms_sum', Date.now() - startedAt);
};
|
||||||
|
|
||||||
|
// Mark every pending item of a flushed batch as successfully inserted:
// bump the insert counters (total, per-day, per-partition) and resolve
// each message's promise so the consumer can advance.
const resolveInsertedItems = (partitionKey, items) => {
  let insertedRows = 0;
  items.forEach((pending) => {
    insertedRows += pending.rows.length;
    const dayKey = dayKeyFromTsMs(pending.rows?.[0]?.ts_ms);
    if (dayKey) {
      metricCollector.incrementKeyed('db_inserted_by_day', dayKey, pending.rows.length);
    }
    pending.item.resolve();
  });
  metricCollector.increment('db_inserted', insertedRows);
  metricCollector.incrementKeyed('db_inserted_by_partition', partitionKey, insertedRows);
};
|
||||||
|
|
||||||
|
// Record a permanently failed item: bump the failure metrics, route the
// payload to the error queue via handleError, then resolve the item so
// the message is treated as consumed.
const handleFailedItem = async (partitionKey, p, err) => {
  metricCollector.increment('db_failed');
  metricCollector.incrementKeyed('db_failed_by_partition', partitionKey, 1);
  const failedDay = dayKeyFromTsMs(p.rows?.[0]?.ts_ms);
  if (failedDay) {
    metricCollector.incrementKeyed('db_failed_by_day', failedDay, 1);
  }
  await handleError(err, p.item.message);
  p.item.resolve();
};
|
||||||
|
|
||||||
|
  // Insert a list of pending items ({ item, rows }), degrading on failure:
  //  - commitOnAttempt mode: one attempt for the whole batch; on failure
  //    every item is marked failed (at-most-once).
  //  - otherwise: retry the whole batch through connectivity outages; on
  //    a non-network failure bisect the batch recursively until the
  //    poison item(s) are isolated and routed to the error queue, while
  //    the healthy halves still get inserted.
  const insertItemsDegraded = async (partitionKey, items) => {
    if (items.length === 0) return;
    const rows = items.flatMap(p => p.rows);
    if (commitOnAttempt) {
      try {
        await insertRowsOnce(rows);
        resolveInsertedItems(partitionKey, items);
      } catch (err) {
        // Single attempt failed: mark every item failed, no bisecting.
        for (const item of items) {
          await handleFailedItem(partitionKey, item, err);
        }
      }
      return;
    }
    try {
      await insertRowsWithRetry(rows);
      resolveInsertedItems(partitionKey, items);
      return;
    } catch (err) {
      if (items.length === 1) {
        // Base case: one last retry for the lone item, then give up on it.
        try {
          await insertRowsWithRetry(items[0].rows);
          resolveInsertedItems(partitionKey, items);
        } catch (innerErr) {
          await handleFailedItem(partitionKey, items[0], innerErr);
        }
        return;
      }
      // Bisect: insert each half independently to isolate bad rows.
      const mid = Math.floor(items.length / 2);
      await insertItemsDegraded(partitionKey, items.slice(0, mid));
      await insertItemsDegraded(partitionKey, items.slice(mid));
    }
  };
|
||||||
|
|
||||||
|
  // Flush the accumulated batch for one partition key: parse each queued
  // message, insert the resulting rows (degraded on failure), and resolve
  // every item's promise. Only one flush runs per key at a time; callers
  // during a flush get the in-progress promise. On a connectivity error
  // (when not in commitOnAttempt mode) the unprocessed items are requeued
  // and the flush is retried after 5 s; any other error marks the items
  // as consumed and routes them to the error queue.
  const flushBatchForKey = async (partitionKey) => {
    const state = getBatchState(partitionKey);
    if (state.flushing) return state.flushing;

    state.flushing = (async () => {
      // Cancel the pending timeout flush; we are flushing now.
      if (state.timer) {
        clearTimeout(state.timer);
        state.timer = null;
      }

      if (state.items.length === 0) return;

      const startedAt = Date.now();
      // Swap the queue so new messages accumulate in a fresh batch.
      const currentBatch = state.items;
      state.items = [];

      const pendingDbItems = [];
      const unresolvedItems = [];

      try {
        // Phase 1: parse/validate. Bad messages are resolved immediately
        // after being counted and queued to the error path.
        for (const item of currentBatch) {
          try {
            const rows = parseMessageToRows(item.message);
            pendingDbItems.push({ item, rows });
            unresolvedItems.push(item);
          } catch (err) {
            metricCollector.increment('parse_error');
            metricCollector.incrementKeyed('parse_error_by_partition', partitionKey, 1);
            logger.error('Message processing failed (Parse/Validation)', { error: err.message });
            await handleError(err, item.message);
            item.resolve();
          }
        }

        // Phase 2: insert, tracking per-day insert latency when the first
        // row carries a usable timestamp.
        if (pendingDbItems.length > 0) {
          const firstTs = pendingDbItems[0]?.rows?.[0]?.ts_ms;
          const dayKey = dayKeyFromTsMs(firstTs);
          if (dayKey) {
            const dayStartMs = Date.now();
            await insertItemsDegraded(partitionKey, pendingDbItems);
            metricCollector.incrementKeyed('db_insert_ms_sum_by_day', dayKey, Date.now() - dayStartMs);
          } else {
            await insertItemsDegraded(partitionKey, pendingDbItems);
          }
        }

        metricCollector.increment('batch_flush_count', 1);
        metricCollector.increment('batch_flush_ms_sum', Date.now() - startedAt);
      } catch (err) {
        if (!commitOnAttempt && isDbConnectionError(err)) {
          // Network outage: requeue unprocessed items ahead of any newly
          // arrived ones and retry the flush in 5 s without resolving.
          state.items = unresolvedItems.concat(state.items);
          if (!state.timer) {
            state.timer = setTimeout(() => {
              state.timer = null;
              flushBatchForKey(partitionKey);
            }, 5000);
          }
          return;
        }

        logger.error('Batch flush failed (non-network). Marking as consumed', {
          error: err?.message,
          partitionKey,
          batchSize: currentBatch.length
        });

        // Non-retryable failure: send each item to the error queue
        // (best-effort) and resolve it so offsets can advance.
        for (const item of unresolvedItems) {
          try {
            await handleError(err, item.message);
          } catch { }
          item.resolve();
        }
      }
    })().finally(() => {
      state.flushing = null;
      // Messages may have queued up while flushing: flush again now if a
      // full batch is waiting, otherwise re-arm the timeout flush.
      if (state.items.length > 0) {
        if (state.items.length >= BATCH_SIZE) {
          flushBatchForKey(partitionKey);
        } else if (!state.timer) {
          state.timer = setTimeout(() => {
            state.timer = null;
            flushBatchForKey(partitionKey);
          }, BATCH_TIMEOUT_MS);
        }
      }
    });

    return state.flushing;
  };
|
||||||
|
|
||||||
|
const handleMessage = (message) => {
|
||||||
|
if (message.topic) {
|
||||||
|
metricCollector.increment('kafka_pulled');
|
||||||
|
metricCollector.incrementKeyed('kafka_pulled_by_partition', `${message.topic}-${message.partition}`, 1);
|
||||||
|
}
|
||||||
|
|
||||||
|
// const messageValue = Buffer.isBuffer(message.value)
|
||||||
|
// ? message.value.toString('utf8')
|
||||||
|
// : message.value;
|
||||||
|
// const messageKey = Buffer.isBuffer(message.key)
|
||||||
|
// ? message.key.toString('utf8')
|
||||||
|
// : message.key;
|
||||||
|
|
||||||
|
// const logDetails = {
|
||||||
|
// topic: message.topic,
|
||||||
|
// partition: message.partition,
|
||||||
|
// offset: message.offset,
|
||||||
|
// key: messageKey,
|
||||||
|
// value: config.kafka.logMessages ? messageValue : undefined,
|
||||||
|
// valueLength: !config.kafka.logMessages && typeof messageValue === 'string' ? messageValue.length : null
|
||||||
|
// };
|
||||||
|
|
||||||
|
// logger.info('Kafka message received', logDetails);
|
||||||
|
|
||||||
|
const partitionKey = partitionKeyFromMessage(message);
|
||||||
|
const state = getBatchState(partitionKey);
|
||||||
|
|
||||||
|
return new Promise((resolve, reject) => {
|
||||||
|
state.items.push({ message, resolve, reject });
|
||||||
|
if (state.items.length >= BATCH_SIZE) {
|
||||||
|
flushBatchForKey(partitionKey);
|
||||||
|
} else if (!state.timer) {
|
||||||
|
state.timer = setTimeout(() => {
|
||||||
|
state.timer = null;
|
||||||
|
flushBatchForKey(partitionKey);
|
||||||
|
}, BATCH_TIMEOUT_MS);
|
||||||
|
}
|
||||||
|
});
|
||||||
|
};
|
||||||
|
|
||||||
|
  // Start one ConsumerGroup per configured instance, all funnelling into
  // the shared batching pipeline above.
  const consumers = createKafkaConsumers({
    kafkaConfig: config.kafka,
    onMessage: handleMessage,
    onError: handleError
  });

  // Start retry worker (non-blocking): drains the Redis error queue and
  // re-feeds payloads through the normal message path.
  startErrorRetryWorker({
    client: redisClient,
    queueKey: errorQueueKey,
    redisIntegration,
    handler: async (item) => {
      if (!item?.value) {
        throw new Error('Missing value in retry payload');
      }
      // Retried payloads carry no topic/partition, so they land in the
      // shared 'retry' batch bucket.
      await handleMessage({ value: item.value });
    }
  }).catch(err => {
    logger.error('Retry worker failed', { error: err?.message });
  });
|
||||||
|
|
||||||
|
// Graceful Shutdown Logic
|
||||||
|
  // Graceful shutdown: close external resources in dependency order
  // (consumers first so no new work arrives), then exit — 0 on success,
  // 1 when any close step fails.
  const shutdown = async (signal) => {
    logger.info(`Received ${signal}, shutting down...`);

    try {
      // 1. Close Kafka consumers (force-close, wait for each callback).
      if (consumers && consumers.length > 0) {
        await Promise.all(consumers.map(c => new Promise((resolve) => c.close(true, resolve))));
        logger.info('Kafka consumer closed', { count: consumers.length });
      }

      // 2. Stop Redis heartbeat (if method exists, otherwise just close client)
      // redisIntegration.stopHeartbeat(); // Assuming implementation or just rely on client close

      // 3. Close the Redis client.
      await redisClient.quit();
      logger.info('Redis client closed');

      // 4. Close both database pools (primary and G5).
      await dbManager.close();
      await g5DbManager.close();
      logger.info('Database connection closed');

      process.exit(0);
    } catch (err) {
      logger.error('Error during shutdown', { error: err?.message });
      process.exit(1);
    }
  };
|
||||||
|
|
||||||
|
  // Install signal handlers for graceful termination.
  process.on('SIGTERM', () => shutdown('SIGTERM'));
  process.on('SIGINT', () => shutdown('SIGINT'));
|
||||||
|
};
|
||||||
|
|
||||||
|
// Kick off the service; a bootstrap failure is fatal.
bootstrap().catch((error) => {
  logger.error('Service bootstrap failed', { error: error?.message });
  process.exit(1);
});
|
||||||
175
docs/template/bls-onoffline-backend/src/kafka/consumer.js
vendored
Normal file
175
docs/template/bls-onoffline-backend/src/kafka/consumer.js
vendored
Normal file
@@ -0,0 +1,175 @@
|
|||||||
|
import kafka from 'kafka-node';
|
||||||
|
import { logger } from '../utils/logger.js';
|
||||||
|
|
||||||
|
const { ConsumerGroup } = kafka;
|
||||||
|
|
||||||
|
import { OffsetTracker } from './offsetTracker.js';
|
||||||
|
|
||||||
|
/**
 * Create a single kafka-node ConsumerGroup that feeds messages into
 * onMessage with manual, batched offset commits.
 *
 * Flow control: up to maxInFlight messages may be outstanding; beyond
 * that the consumer pauses until enough promises settle. Offsets are
 * tracked per partition and only contiguous, fully-processed ranges are
 * committed, at most once every commitIntervalMs.
 *
 * Fixes vs. previous version:
 *  - the 'error' handler was registered three times and the
 *    'offsetOutOfRange' handler twice; each is now registered once;
 *  - a failed commit no longer clobbers a newer pending offset queued
 *    while the request was in flight, and a retry flush is scheduled.
 */
const createOneConsumer = ({ kafkaConfig, onMessage, onError, instanceIndex }) => {
  const kafkaHost = kafkaConfig.brokers.join(',');
  const clientId = instanceIndex === 0 ? kafkaConfig.clientId : `${kafkaConfig.clientId}-${instanceIndex}`;
  const id = `${clientId}-${process.pid}-${Date.now()}`;
  const maxInFlight = Number.isFinite(kafkaConfig.maxInFlight) ? kafkaConfig.maxInFlight : 5000;
  const commitIntervalMs = Number.isFinite(kafkaConfig.commitIntervalMs) ? kafkaConfig.commitIntervalMs : 200;
  let inFlight = 0;

  const tracker = new OffsetTracker();
  let pendingCommits = new Map(); // key: `${topic}-${partition}` -> { topic, partition, offset }
  let commitTimer = null;

  // Send all queued offsets in one commit request. On failure the batch
  // is restored for a later attempt — but never over a newer offset
  // queued while the request was in flight.
  const flushCommits = () => {
    if (pendingCommits.size === 0) return;
    const batch = pendingCommits;
    pendingCommits = new Map();

    consumer.sendOffsetCommitRequest(
      Array.from(batch.values()),
      (err) => {
        if (err) {
          for (const [k, v] of batch.entries()) {
            if (!pendingCommits.has(k)) {
              pendingCommits.set(k, v);
            }
          }
          logger.error('Kafka commit failed', { error: err?.message, count: batch.size });
          // Retry instead of waiting for the next markDone to trigger it.
          scheduleCommitFlush();
        }
      }
    );
  };

  // Debounce commits: at most one flush per commitIntervalMs.
  const scheduleCommitFlush = () => {
    if (commitTimer) return;
    commitTimer = setTimeout(() => {
      commitTimer = null;
      flushCommits();
    }, commitIntervalMs);
  };

  const consumer = new ConsumerGroup(
    {
      kafkaHost,
      groupId: kafkaConfig.groupId,
      clientId,
      id,
      fromOffset: 'earliest',
      protocol: ['roundrobin'],
      outOfRangeOffset: 'latest',
      autoCommit: false, // offsets are committed manually via flushCommits
      autoCommitIntervalMs: kafkaConfig.autoCommitIntervalMs,
      fetchMaxBytes: kafkaConfig.fetchMaxBytes,
      fetchMinBytes: kafkaConfig.fetchMinBytes,
      fetchMaxWaitMs: kafkaConfig.fetchMaxWaitMs,
      sasl: kafkaConfig.sasl
    },
    kafkaConfig.topic
  );

  // Resume fetching once back under the in-flight cap.
  const tryResume = () => {
    if (inFlight < maxInFlight && consumer.paused) {
      consumer.resume();
    }
  };

  consumer.on('message', (message) => {
    inFlight += 1;
    tracker.add(message.topic, message.partition, message.offset);

    if (inFlight >= maxInFlight) {
      consumer.pause(); // backpressure: stop fetching until drained
    }
    Promise.resolve(onMessage(message))
      .then(() => {})
      .catch((error) => {
        logger.error('Kafka message handling failed', { error: error?.message });
        if (onError) {
          onError(error, message);
        }
      })
      .finally(() => {
        // Queue a commit only when the contiguous done-range advanced.
        const commitOffset = tracker.markDone(message.topic, message.partition, message.offset);
        if (commitOffset !== null) {
          const key = `${message.topic}-${message.partition}`;
          pendingCommits.set(key, {
            topic: message.topic,
            partition: message.partition,
            offset: commitOffset,
            metadata: 'm'
          });
          scheduleCommitFlush();
        }
        inFlight -= 1;
        tryResume();
      });
  });

  // Single consolidated error handler (was registered three times).
  consumer.on('error', (error) => {
    logger.error('Kafka consumer error', { error: error?.message });
    if (onError) {
      onError(error);
    }
  });

  consumer.on('connect', () => {
    logger.info(`Kafka Consumer connected`, {
      groupId: kafkaConfig.groupId,
      clientId: clientId
    });
  });

  consumer.on('rebalancing', () => {
    logger.info(`Kafka Consumer rebalancing`, {
      groupId: kafkaConfig.groupId,
      clientId: clientId
    });
    // Partitions may move away: drop all local offset state so stale
    // offsets are never committed after reassignment.
    tracker.clear();
    pendingCommits.clear();
    if (commitTimer) {
      clearTimeout(commitTimer);
      commitTimer = null;
    }
  });

  consumer.on('rebalanced', () => {
    logger.info('Kafka Consumer rebalanced', { clientId, groupId: kafkaConfig.groupId });
  });

  // Single consolidated offsetOutOfRange handler (was registered twice).
  consumer.on('offsetOutOfRange', (error) => {
    logger.warn(`Kafka Consumer offset out of range`, {
      error: error?.message,
      topic: error?.topic,
      partition: error?.partition,
      groupId: kafkaConfig.groupId,
      clientId: clientId
    });
  });

  consumer.on('close', () => {
    // Best-effort final commit of whatever is still queued.
    if (commitTimer) {
      clearTimeout(commitTimer);
      commitTimer = null;
    }
    flushCommits();
    logger.warn(`Kafka Consumer closed`, {
      groupId: kafkaConfig.groupId,
      clientId: clientId
    });
  });

  return consumer;
};
|
||||||
|
|
||||||
|
/**
 * Create the configured number of consumer instances (minimum one),
 * all sharing the same message and error callbacks.
 */
export const createKafkaConsumers = ({ kafkaConfig, onMessage, onError }) => {
  const requested = Number.isFinite(kafkaConfig.consumerInstances) ? kafkaConfig.consumerInstances : 1;
  const total = Math.max(1, requested);
  const consumers = [];
  for (let idx = 0; idx < total; idx += 1) {
    consumers.push(createOneConsumer({ kafkaConfig, onMessage, onError, instanceIndex: idx }));
  }
  return consumers;
};
|
||||||
|
|
||||||
|
// Backwards-compatible single-consumer helper: returns the first
// instance created by createKafkaConsumers.
export const createKafkaConsumer = ({ kafkaConfig, onMessage, onError }) => {
  const [first] = createKafkaConsumers({ kafkaConfig, onMessage, onError });
  return first;
};
|
||||||
53
docs/template/bls-onoffline-backend/src/kafka/offsetTracker.js
vendored
Normal file
53
docs/template/bls-onoffline-backend/src/kafka/offsetTracker.js
vendored
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
/**
 * Tracks per-partition processing completion so that only contiguous
 * runs of finished offsets are committed (manual commit mode).
 *
 * Per `${topic}-${partition}` we keep:
 *  - nextCommitOffset: the lowest offset not yet known to be done;
 *  - done: the set of finished offsets at or above nextCommitOffset.
 *
 * Fix vs. previous version: markDone() now ignores offsets below the
 * commit floor (duplicate/stale deliveries). Previously they were added
 * to the `done` Set and could never be drained, leaking memory.
 */
export class OffsetTracker {
  constructor() {
    // Map<`${topic}-${partition}`, { nextCommitOffset: number|null, done: Set<number> }>
    this.partitions = new Map();
  }

  // Called when a message is received (before processing). Seeds or
  // lowers the commit floor so out-of-order delivery cannot skip gaps.
  add(topic, partition, offset) {
    const key = `${topic}-${partition}`;
    if (!this.partitions.has(key)) {
      this.partitions.set(key, { nextCommitOffset: null, done: new Set() });
    }
    const state = this.partitions.get(key);
    const numericOffset = Number(offset);
    if (!Number.isFinite(numericOffset)) return;
    if (state.nextCommitOffset === null || numericOffset < state.nextCommitOffset) {
      state.nextCommitOffset = numericOffset;
    }
  }

  // Called when a message is successfully processed.
  // Returns the new commit offset when the contiguous done-range
  // advanced, or null when there is nothing new to commit.
  markDone(topic, partition, offset) {
    const key = `${topic}-${partition}`;
    const state = this.partitions.get(key);
    if (!state) return null;

    const numericOffset = Number(offset);
    if (!Number.isFinite(numericOffset)) return null;

    if (state.nextCommitOffset === null) {
      state.nextCommitOffset = numericOffset;
    } else if (numericOffset < state.nextCommitOffset) {
      // Already covered by an earlier commit; recording it would leave
      // an entry in `done` that the drain loop can never reach.
      return null;
    }

    state.done.add(numericOffset);

    // Advance the floor across every contiguous completed offset.
    let advanced = false;
    while (state.done.has(state.nextCommitOffset)) {
      state.done.delete(state.nextCommitOffset);
      state.nextCommitOffset += 1;
      advanced = true;
    }

    return advanced ? state.nextCommitOffset : null;
  }

  // Drop all tracked state (used on consumer rebalance).
  clear() {
    this.partitions.clear();
  }
}
|
||||||
142
docs/template/bls-onoffline-backend/src/processor/index.js
vendored
Normal file
142
docs/template/bls-onoffline-backend/src/processor/index.js
vendored
Normal file
@@ -0,0 +1,142 @@
|
|||||||
|
import { createGuid } from '../utils/uuid.js';
|
||||||
|
import { kafkaPayloadSchema } from '../schema/kafkaPayload.js';
|
||||||
|
|
||||||
|
// Decode a Kafka message value (Buffer or string) into a JSON object.
// Throws for non-string/non-Buffer input and for malformed JSON.
const parseKafkaPayload = (value) => {
  let raw = value;
  if (Buffer.isBuffer(raw)) {
    raw = raw.toString('utf8');
  }
  if (typeof raw !== 'string') {
    throw new Error('Invalid kafka message value');
  }
  return JSON.parse(raw);
};
|
||||||
|
|
||||||
|
// Convert any value to a string, truncated to maxLength when a positive
// limit is given; null and undefined normalize to null.
const normalizeText = (value, maxLength) => {
  if (value == null) {
    return null;
  }
  const text = String(value);
  return maxLength && text.length > maxLength ? text.substring(0, maxLength) : text;
};
|
||||||
|
|
||||||
|
// Decode a raw Kafka message value (Buffer or string) and build database
// rows from it. Throws on malformed JSON or schema violations.
export const buildRowsFromMessageValue = (value) => {
  const payload = parseKafkaPayload(value);
  return buildRowsFromPayload(payload);
};
|
||||||
|
|
||||||
|
// Build the database row(s) for one validated register/heartbeat payload.
// Throws (Zod error) when the payload fails kafkaPayloadSchema.
export const buildRowsFromPayload = (rawPayload) => {
  const payload = kafkaPayloadSchema.parse(rawPayload);

  // Database limit is VARCHAR(255)
  const rebootReason = normalizeText(payload.RebootReason, 255);
  const currentStatusRaw = normalizeText(payload.CurrentStatus, 255);
  const hasRebootReason = rebootReason !== null && rebootReason !== '';
  // A non-empty reboot reason forces status to 'on'.
  const currentStatus = hasRebootReason ? 'on' : currentStatusRaw;

  // Derive timestamp: UnixTime -> CurrentTime -> Date.now()
  let tsMs = payload.UnixTime;

  // Heuristic: values below 1e11 are assumed to be seconds, not ms.
  if (typeof tsMs === 'number' && tsMs < 100000000000) {
    tsMs = tsMs * 1000;
  }

  // Fall back to the human-readable CurrentTime field if parseable.
  if (!tsMs && payload.CurrentTime) {
    const parsed = Date.parse(payload.CurrentTime);
    if (!isNaN(parsed)) {
      tsMs = parsed;
    }
  }
  if (!tsMs) {
    tsMs = Date.now();
  }

  // Ensure PK fields are not null (empty string is the sentinel).
  const mac = normalizeText(payload.MAC) || '';
  const deviceId = normalizeText(payload.HostNumber) || '';
  const roomId = normalizeText(payload.RoomNumber) || '';

  // Handle hotel_id boundary for PostgreSQL smallint (-32768 to 32767);
  // non-numeric or out-of-range values collapse to 0.
  let hotelId = payload.HotelCode;
  if (typeof hotelId !== 'number' || Number.isNaN(hotelId) || hotelId < -32768 || hotelId > 32767) {
    hotelId = 0;
  }

  const row = {
    guid: createGuid(),        // generated unique identifier
    ts_ms: tsMs,               // event time (epoch ms, derived above)
    write_ts_ms: Date.now(),   // ingestion time
    hotel_id: hotelId,
    mac: mac,
    device_id: deviceId,
    room_id: roomId,
    ip: normalizeText(payload.EndPoint),
    current_status: currentStatus,
    launcher_version: normalizeText(payload.LauncherVersion, 255),
    reboot_reason: rebootReason
  };

  return [row];
};
|
||||||
|
|
||||||
|
// Parse and validate a Kafka message, converting it to insertable rows.
// Thrown errors are tagged with `type` ('PARSE_ERROR' | 'VALIDATION_ERROR')
// so callers can classify failures.
export const parseMessageToRows = (message) => {
  const rawValue = message.value.toString();

  let payload;
  try {
    payload = JSON.parse(rawValue);
  } catch (e) {
    const error = new Error(`JSON Parse Error: ${e.message}`);
    error.type = 'PARSE_ERROR';
    throw error;
  }

  // Validate up-front so schema failures carry VALIDATION_ERROR.
  // NOTE(review): buildRowsFromPayload runs kafkaPayloadSchema.parse
  // again, so the payload is validated twice — harmless but redundant.
  const validationResult = kafkaPayloadSchema.safeParse(payload);

  if (!validationResult.success) {
    const error = new Error(`Schema Validation Failed: ${JSON.stringify(validationResult.error.errors)}`);
    error.type = 'VALIDATION_ERROR';
    throw error;
  }

  return buildRowsFromPayload(payload);
};
|
||||||
|
|
||||||
|
/**
 * End-to-end handler for a single Kafka message: parse/validate it and
 * insert the resulting rows.
 *
 * @param {{ message: object, dbManager: object, config: object }} args
 * @returns {Promise<number>} number of rows inserted
 * @throws parse/validation errors from parseMessageToRows as-is, or DB
 *         errors tagged with type 'DB_ERROR' plus a dbContext summary.
 */
export const processKafkaMessage = async ({ message, dbManager, config }) => {
  // Parse errors propagate unchanged. (Previously wrapped in a no-op
  // try/catch whose catch block only rethrew.)
  const rows = parseMessageToRows(message);

  try {
    await dbManager.insertRows({ schema: config.db.schema, table: config.db.table, rows });
  } catch (error) {
    // Attach enough context to diagnose the failure without dumping the
    // full row set into logs.
    error.type = 'DB_ERROR';
    const sample = rows?.[0];
    error.dbContext = {
      rowsLength: rows?.length || 0,
      sampleRow: sample
        ? {
            guid: sample.guid,
            ts_ms: sample.ts_ms,
            mac: sample.mac,
            device_id: sample.device_id,
            room_id: sample.room_id,
            current_status: sample.current_status
          }
        : null
    };
    throw error;
  }

  return rows.length;
};
|
||||||
83
docs/template/bls-onoffline-backend/src/processor/udpParser.js
vendored
Normal file
83
docs/template/bls-onoffline-backend/src/processor/udpParser.js
vendored
Normal file
@@ -0,0 +1,83 @@
|
|||||||
|
// Canonicalize a hex string: strip a leading 0x (case-insensitive),
// remove all whitespace, and left-pad with a zero when the digit count
// is odd. Non-string input normalizes to the empty string.
const normalizeHex = (hex) => {
  if (typeof hex !== 'string') {
    return '';
  }
  const stripped = hex.trim().replace(/^0x/i, '').replace(/\s+/g, '');
  return stripped.length % 2 === 1 ? `0${stripped}` : stripped;
};
|
||||||
|
|
||||||
|
// Render a byte value as a 0x-prefixed, zero-padded two-digit hex pair.
const toHex = (value) => {
  const digits = value.toString(16).padStart(2, '0');
  return `0x${digits}`;
};
|
||||||
|
|
||||||
|
// Read a big-endian unsigned 16-bit integer at the given byte offset.
function readUInt16(buffer, offset) {
  return buffer.readUInt16BE(offset);
}
|
||||||
|
|
||||||
|
// Parse a 0x36 status-report UDP payload from its hex-string form.
// Byte layout (offsets into the decoded buffer):
//   [0]    system lock status
//   [7]    device report count, followed by `count` 6-byte records:
//          devType(1) devAddr(1) loop(2, BE) data(2, BE)
//   next   fault count, followed by `count` 6-byte fault records:
//          devType(1) devAddr(1) loop(2, BE) errorType(1) errorData(1)
// Truncated buffers yield nulls / partial arrays rather than throwing.
export const parse0x36 = (udpRaw) => {
  const cleaned = normalizeHex(udpRaw);
  const buffer = cleaned ? Buffer.from(cleaned, 'hex') : Buffer.alloc(0);
  const sysLockStatus = buffer.length > 0 ? buffer[0] : null;
  const reportCount = buffer.length > 7 ? buffer[7] : null;
  let offset = 8;
  const devices = [];
  // Guard `offset + 5 < buffer.length` ensures all 6 record bytes
  // (offset..offset+5) exist before reading.
  for (let i = 0; i < (reportCount || 0) && offset + 5 < buffer.length; i += 1) {
    devices.push({
      dev_type: buffer[offset],
      dev_addr: buffer[offset + 1],
      dev_loop: readUInt16(buffer, offset + 2),
      dev_data: readUInt16(buffer, offset + 4)
    });
    offset += 6;
  }
  // Fault section starts immediately after the last device record.
  const faultCount = offset < buffer.length ? buffer[offset] : null;
  offset += 1;
  const faults = [];
  for (let i = 0; i < (faultCount || 0) && offset + 5 < buffer.length; i += 1) {
    faults.push({
      fault_dev_type: buffer[offset],
      fault_dev_addr: buffer[offset + 1],
      fault_dev_loop: readUInt16(buffer, offset + 2),
      error_type: buffer[offset + 4],
      error_data: buffer[offset + 5]
    });
    offset += 6;
  }
  return {
    sysLockStatus,
    reportCount,
    faultCount,
    devices,
    faults
  };
};
|
||||||
|
|
||||||
|
// Parse an 0x0f downlink (control) frame from a raw hex string.
// Layout: byte 0 is the control record count, followed by <controlCount>
// 6-byte records: dev_type, dev_addr, 16-bit loop (BE), and a 16-bit type
// value whose two raw bytes are also exposed as type_l / type_h.
// NOTE(review): type_l is the byte readUInt16BE treats as the HIGH byte —
// confirm the intended endianness against the device protocol spec.
export const parse0x0fDownlink = (udpRaw) => {
  const hex = normalizeHex(udpRaw);
  const buf = hex ? Buffer.from(hex, 'hex') : Buffer.alloc(0);

  const controlCount = buf.length > 0 ? buf[0] : null;

  let cursor = 1;
  const controlParams = [];
  for (let idx = 0; idx < (controlCount || 0) && cursor + 5 < buf.length; idx += 1) {
    controlParams.push({
      dev_type: buf[cursor],
      dev_addr: buf[cursor + 1],
      loop: readUInt16(buf, cursor + 2),
      type: readUInt16(buf, cursor + 4),
      type_l: buf[cursor + 4],
      type_h: buf[cursor + 5]
    });
    cursor += 6;
  }

  return { controlCount, controlParams };
};
|
||||||
|
|
||||||
|
// Parse an 0x0f acknowledgement frame: the ack code is byte 1, rendered as
// a 0x-prefixed hex literal; frames shorter than 2 bytes yield null.
export const parse0x0fAck = (udpRaw) => {
  const hex = normalizeHex(udpRaw);
  if (!hex) {
    return { ackCode: null };
  }
  const buf = Buffer.from(hex, 'hex');
  return { ackCode: buf.length > 1 ? toHex(buf[1]) : null };
};
|
||||||
53
docs/template/bls-onoffline-backend/src/redis/errorQueue.js
vendored
Normal file
53
docs/template/bls-onoffline-backend/src/redis/errorQueue.js
vendored
Normal file
@@ -0,0 +1,53 @@
|
|||||||
|
import { logger } from '../utils/logger.js';
|
||||||
|
|
||||||
|
// Redis list key that holds a project's failed-message payloads.
export const buildErrorQueueKey = (projectName) => [projectName, 'error_queue'].join('_');
|
||||||
|
|
||||||
|
// Append one failure payload (JSON-encoded) to the tail of the error queue.
// Logs and rethrows on failure so callers can react to Redis being down
// (serialization happens inside the try, so a circular payload is also
// logged rather than escaping silently).
export const enqueueError = async (client, queueKey, payload) => {
  try {
    const serialized = JSON.stringify(payload);
    await client.rPush(queueKey, serialized);
  } catch (err) {
    logger.error('Redis enqueue error failed', { error: err?.message });
    throw err;
  }
};
|
||||||
|
|
||||||
|
/**
 * Long-running retry worker: blocks on the Redis error queue, replays each
 * payload through `handler`, and re-enqueues failures with an incremented
 * attempt counter until `maxAttempts` is reached, at which point the payload
 * is reported via `redisIntegration.error` and dropped.
 *
 * NOTE(review): the returned promise rejects if `client.blPop` (or the
 * reporting calls inside the catch blocks) throws, which silently stops the
 * worker — confirm the caller restarts or supervises it.
 *
 * @param {object} options.client - connected node-redis client (blPop/rPush).
 * @param {string} options.queueKey - error-queue list key (see buildErrorQueueKey).
 * @param {Function} options.handler - async reprocessor for one payload.
 * @param {object} options.redisIntegration - dashboard log channel for give-ups.
 * @param {number} [options.maxAttempts=5] - attempts before dropping a payload.
 */
export const startErrorRetryWorker = async ({
  client,
  queueKey,
  handler,
  redisIntegration,
  maxAttempts = 5
}) => {
  while (true) {
    // Timeout 0 = block indefinitely until an item arrives.
    const result = await client.blPop(queueKey, 0);
    const raw = result?.element;
    if (!raw) {
      continue;
    }
    let item;
    try {
      item = JSON.parse(raw);
    } catch (error) {
      // Malformed JSON can never succeed on retry; report and drop it.
      logger.error('Invalid error payload', { error: error?.message });
      await redisIntegration.error('Invalid error payload', { module: 'redis', stack: error?.message });
      continue;
    }
    const attempts = item.attempts || 0;
    try {
      await handler(item);
    } catch (error) {
      logger.error('Retry handler failed', { error: error?.message, stack: error?.stack });
      // Carry the failure history on the payload itself.
      const nextPayload = {
        ...item,
        attempts: attempts + 1,
        lastError: error?.message,
        lastAttemptAt: Date.now()
      };
      if (nextPayload.attempts >= maxAttempts) {
        // Give up: surface the final payload for manual inspection.
        await redisIntegration.error('Retry attempts exceeded', { module: 'retry', stack: JSON.stringify(nextPayload) });
      } else {
        // Re-queue immediately; note there is no backoff delay between attempts.
        await enqueueError(client, queueKey, nextPayload);
      }
    }
  }
};
|
||||||
14
docs/template/bls-onoffline-backend/src/redis/redisClient.js
vendored
Normal file
14
docs/template/bls-onoffline-backend/src/redis/redisClient.js
vendored
Normal file
@@ -0,0 +1,14 @@
|
|||||||
|
import { createClient } from 'redis';
|
||||||
|
|
||||||
|
/**
 * Build and connect a node-redis client from host/port/password/db config.
 * Resolves with the already-connected client instance.
 */
export const createRedisClient = async (config) => {
  const options = {
    socket: {
      host: config.host,
      port: config.port
    },
    password: config.password,
    database: config.db
  };
  const client = createClient(options);
  await client.connect();
  return client;
};
|
||||||
40
docs/template/bls-onoffline-backend/src/redis/redisIntegration.js
vendored
Normal file
40
docs/template/bls-onoffline-backend/src/redis/redisIntegration.js
vendored
Normal file
@@ -0,0 +1,40 @@
|
|||||||
|
export class RedisIntegration {
  /**
   * Redis-backed log/heartbeat channel for one service instance.
   *
   * @param {object} client - connected node-redis client (must expose rPush).
   * @param {string} projectName - namespaces the console-log list key.
   * @param {string} apiBaseUrl - advertised in heartbeat payloads.
   */
  constructor(client, projectName, apiBaseUrl) {
    this.client = client;
    this.projectName = projectName;
    this.apiBaseUrl = apiBaseUrl;
    // List key names are consumed by an external dashboard — do not rename.
    this.heartbeatKey = '项目心跳';
    this.logKey = `${projectName}_项目控制台`;
    this.heartbeatTimer = null;
  }

  // Shared serializer/writer for info/error entries (previously duplicated).
  async #pushLog(level, message, context) {
    const payload = {
      timestamp: new Date().toISOString(),
      level,
      message,
      metadata: context || undefined
    };
    await this.client.rPush(this.logKey, JSON.stringify(payload));
  }

  /** Append an info-level entry to the project console list. */
  async info(message, context) {
    await this.#pushLog('info', message, context);
  }

  /** Append an error-level entry to the project console list. */
  async error(message, context) {
    await this.#pushLog('error', message, context);
  }

  /**
   * Push a heartbeat entry every 3 seconds.
   *
   * Fixes over the previous version: the rPush promise no longer floats
   * (a Redis outage used to raise unhandled rejections every tick), the
   * interval handle is stored so it can be cleared, and calling this twice
   * no longer starts a second timer.
   *
   * @returns {object} the interval handle (also kept on the instance).
   */
  startHeartbeat() {
    if (this.heartbeatTimer) {
      return this.heartbeatTimer;
    }
    this.heartbeatTimer = setInterval(() => {
      const payload = {
        projectName: this.projectName,
        apiBaseUrl: this.apiBaseUrl,
        lastActiveAt: Date.now()
      };
      this.client
        .rPush(this.heartbeatKey, JSON.stringify(payload))
        .catch(() => {
          // Best-effort: a missed heartbeat is recovered by the next tick.
        });
    }, 3000);
    return this.heartbeatTimer;
  }

  /** Stop the heartbeat loop started by startHeartbeat(). */
  stopHeartbeat() {
    if (this.heartbeatTimer) {
      clearInterval(this.heartbeatTimer);
      this.heartbeatTimer = null;
    }
  }
}
|
||||||
32
docs/template/bls-onoffline-backend/src/schema/kafkaPayload.js
vendored
Normal file
32
docs/template/bls-onoffline-backend/src/schema/kafkaPayload.js
vendored
Normal file
@@ -0,0 +1,32 @@
|
|||||||
|
import { z } from 'zod';
|
||||||
|
|
||||||
|
// Best-effort numeric coercion for payload fields: undefined/null/'' pass
// through untouched, as do values that do not parse to a finite number,
// so Zod can report the original value on validation failure.
const toNumber = (value) => {
  const isBlank = value === undefined || value === null || value === '';
  if (isBlank || typeof value === 'number') {
    return value;
  }
  const parsed = Number(value);
  if (Number.isFinite(parsed)) {
    return parsed;
  }
  return value;
};
|
||||||
|
|
||||||
|
// Stringify any defined value (including '', 0, false); leave
// undefined/null untouched so .nullable()/.optional() still apply.
const toStringAllowEmpty = (value) =>
  value === undefined || value === null ? value : String(value);
|
||||||
|
|
||||||
|
// Optional/nullable string field coerced via toStringAllowEmpty — the shape
// shared by every free-text field in the payload.
const optionalString = () =>
  z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable();

// Zod schema for inbound Kafka register messages. Fields may arrive as
// strings or numbers depending on the producer, so each is coerced first;
// only HotelCode is mandatory.
export const kafkaPayloadSchema = z.object({
  HotelCode: z.preprocess(toNumber, z.number()),
  MAC: optionalString(),
  HostNumber: optionalString(),
  RoomNumber: optionalString(),
  EndPoint: optionalString(),
  CurrentStatus: optionalString(),
  CurrentTime: optionalString(),
  UnixTime: z.preprocess(toNumber, z.number().nullable()).optional().nullable(),
  LauncherVersion: optionalString(),
  RebootReason: optionalString()
});
|
||||||
21
docs/template/bls-onoffline-backend/src/utils/logger.js
vendored
Normal file
21
docs/template/bls-onoffline-backend/src/utils/logger.js
vendored
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
// Serialize one log record as a single JSON line: level, message,
// timestamp (epoch ms), and an optional context object.
const format = (level, message, context) => {
  const record = { level, message, timestamp: Date.now() };
  if (context) {
    record.context = context;
  }
  return JSON.stringify(record);
};

// Bind a level to its output stream once, instead of repeating the
// write boilerplate per method.
const emit = (stream, level) => (message, context) => {
  stream.write(`${format(level, message, context)}\n`);
};

// Minimal structured logger: one JSON object per line.
// info -> stdout; warn/error -> stderr.
export const logger = {
  info: emit(process.stdout, 'info'),
  error: emit(process.stderr, 'error'),
  warn: emit(process.stderr, 'warn')
};
|
||||||
43
docs/template/bls-onoffline-backend/src/utils/metricCollector.js
vendored
Normal file
43
docs/template/bls-onoffline-backend/src/utils/metricCollector.js
vendored
Normal file
@@ -0,0 +1,43 @@
|
|||||||
|
export class MetricCollector {
  /**
   * In-memory counters for pipeline throughput metrics. Counters accumulate
   * between scrapes and are swapped out atomically by getAndReset().
   */
  constructor() {
    this.reset();
  }

  /** Zero all flat counters and drop all keyed counters. */
  reset() {
    this.metrics = {
      kafka_pulled: 0,
      parse_error: 0,
      db_inserted: 0,
      db_failed: 0,
      db_insert_count: 0,
      db_insert_ms_sum: 0,
      batch_flush_count: 0,
      batch_flush_ms_sum: 0
    };
    // Per-key breakdowns: keyed[metric][key] = count.
    this.keyed = {};
  }

  /**
   * Add `count` to a known flat counter. Unknown metric names are ignored
   * so a typo at a call site cannot create unbounded counter sets.
   * (Previously used a direct `hasOwnProperty` call, inconsistent with the
   * prototype-safe form used in incrementKeyed; Object.hasOwn is used for both.)
   */
  increment(metric, count = 1) {
    if (Object.hasOwn(this.metrics, metric)) {
      this.metrics[metric] += count;
    }
  }

  /** Add `count` to a per-key counter, creating the bucket on first use. */
  incrementKeyed(metric, key, count = 1) {
    if (!key) return;
    if (!this.keyed[metric]) {
      this.keyed[metric] = {};
    }
    if (!Object.hasOwn(this.keyed[metric], key)) {
      this.keyed[metric][key] = 0;
    }
    this.keyed[metric][key] += count;
  }

  /**
   * Snapshot all counters and reset them in one step.
   * @returns {object} flat counters plus a detached `keyed` copy.
   */
  getAndReset() {
    const current = { ...this.metrics };
    // Shallow-copy each keyed bucket (values are plain numbers, one level
    // deep) instead of the previous JSON stringify/parse round-trip.
    const keyed = Object.fromEntries(
      Object.entries(this.keyed).map(([metric, counts]) => [metric, { ...counts }])
    );
    this.reset();
    return { ...current, keyed };
  }
}
|
||||||
3
docs/template/bls-onoffline-backend/src/utils/uuid.js
vendored
Normal file
3
docs/template/bls-onoffline-backend/src/utils/uuid.js
vendored
Normal file
@@ -0,0 +1,3 @@
|
|||||||
|
import { randomUUID } from 'crypto';
|
||||||
|
|
||||||
|
// 32-character lowercase hex identifier: a v4 UUID with the dashes removed.
export const createGuid = () => randomUUID().replaceAll('-', '');
|
||||||
45
docs/template/bls-onoffline-backend/tests/processor.test.js
vendored
Normal file
45
docs/template/bls-onoffline-backend/tests/processor.test.js
vendored
Normal file
@@ -0,0 +1,45 @@
|
|||||||
|
import { describe, it, expect } from 'vitest';
|
||||||
|
import { buildRowsFromPayload } from '../src/processor/index.js';
|
||||||
|
|
||||||
|
// Unit tests for buildRowsFromPayload: required-field validation, the
// reboot-triggered current_status override, and empty-string passthrough.
describe('Processor Logic', () => {
  // A fully populated, valid register message used as the baseline.
  const validPayload = {
    HotelCode: '1085',
    MAC: '00:1A:2B:3C:4D:5E',
    HostNumber: '091123987456',
    RoomNumber: '8888房',
    EndPoint: '50.2.60.1:6543',
    CurrentStatus: 'off',
    CurrentTime: '2026-02-02T10:30:00Z',
    UnixTime: 1770000235000,
    LauncherVersion: '1.0.0'
  };

  // Clone the baseline with per-test field overrides.
  const withOverrides = (overrides) => ({ ...validPayload, ...overrides });

  it('should validate required fields', () => {
    expect(() => buildRowsFromPayload({})).toThrow();
    expect(() => buildRowsFromPayload(withOverrides({ HotelCode: undefined }))).toThrow();
  });

  it('should use current_status from payload for non-reboot data', () => {
    const rows = buildRowsFromPayload(withOverrides({ RebootReason: null }));
    expect(rows).toHaveLength(1);
    expect(rows[0].current_status).toBe('off');
    expect(rows[0].reboot_reason).toBeNull();
  });

  it('should override current_status to on for reboot data', () => {
    const rows = buildRowsFromPayload(withOverrides({ CurrentStatus: 'off', RebootReason: '0x01' }));
    expect(rows).toHaveLength(1);
    expect(rows[0].current_status).toBe('on');
    expect(rows[0].reboot_reason).toBe('0x01');
  });

  it('should keep empty optional fields as empty strings', () => {
    const rows = buildRowsFromPayload(withOverrides({ LauncherVersion: '', RebootReason: '' }));
    expect(rows[0].launcher_version).toBe('');
    expect(rows[0].reboot_reason).toBe('');
  });
});
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user