feat: add Kafka consumer and message processing

- Add a Kafka consumer implementation with message handling and error handling.
- Implement an OffsetTracker class for tracking message offsets.
- Add message parsing and database-insert logic to build database rows from Kafka messages.
- Implement UDP packet parsing for the different UDP message types.
- Add a Redis error queue with a retry mechanism for failed messages.
- Implement a Redis client and integration class with logging and heartbeat support.
- Add Zod validation schemas to ensure Kafka message validity.
- Add logging and metric-collection utilities for system monitoring.
- Add a UUID generation utility for unique identifiers.
- Write unit tests for the processor logic.
- Configure the Vite build for the Node.js target.
docs/template/bls-onoffline-backend/.env (vendored, new file, 51 lines)
@@ -0,0 +1,51 @@
KAFKA_BROKERS=kafka.blv-oa.com:9092
KAFKA_CLIENT_ID=bls-onoffline-producer
KAFKA_GROUP_ID=bls-onoffline-consumer
KAFKA_TOPICS=blwlog4Nodejs-rcu-onoffline-topic-0
KAFKA_AUTO_COMMIT=false
KAFKA_AUTO_COMMIT_INTERVAL_MS=5000
KAFKA_SASL_ENABLED=true
KAFKA_SASL_MECHANISM=plain
KAFKA_SASL_USERNAME=blwmomo
KAFKA_SASL_PASSWORD=blwmomo
KAFKA_SSL_ENABLED=false
KAFKA_CONSUMER_INSTANCES=3
KAFKA_MAX_IN_FLIGHT=5000
KAFKA_BATCH_SIZE=1000
KAFKA_BATCH_TIMEOUT_MS=20
KAFKA_COMMIT_INTERVAL_MS=200
KAFKA_COMMIT_ON_ATTEMPT=true
KAFKA_FETCH_MAX_BYTES=10485760
KAFKA_FETCH_MAX_WAIT_MS=100
KAFKA_FETCH_MIN_BYTES=1

#POSTGRES_HOST=10.8.8.109
#POSTGRES_PORT=5433
#POSTGRES_DATABASE=log_platform
#POSTGRES_USER=log_admin
#POSTGRES_PASSWORD=YourActualStrongPasswordForPostgres!
#POSTGRES_MAX_CONNECTIONS=6
#POSTGRES_IDLE_TIMEOUT_MS=30000
#DB_SCHEMA=onoffline
#DB_TABLE=onoffline_record

# =========================
# PostgreSQL configuration (dedicated to the G5 database)
# =========================
POSTGRES_HOST_G5=10.8.8.80
POSTGRES_PORT_G5=5434
POSTGRES_DATABASE_G5=log_platform
POSTGRES_USER_G5=log_admin
POSTGRES_PASSWORD_G5=H3IkLUt8K!x
POSTGRES_IDLE_TIMEOUT_MS_G5=30000

PORT=3001
LOG_LEVEL=info

# Redis connection
REDIS_HOST=10.8.8.109
REDIS_PORT=6379
REDIS_PASSWORD=
REDIS_DB=15
REDIS_CONNECT_TIMEOUT_MS=5000
REDIS_PROJECT_NAME=bls-onoffline
docs/template/bls-onoffline-backend/.env.example (vendored, new file, 31 lines)
@@ -0,0 +1,31 @@
# Server Configuration
PORT=3001
NODE_ENV=development

# Kafka Configuration
KAFKA_BROKERS=localhost:9092
KAFKA_TOPIC=blwlog4Nodejs-rcu-onoffline-topic
KAFKA_GROUP_ID=bls-onoffline-group
KAFKA_CLIENT_ID=bls-onoffline-client
KAFKA_CONSUMER_INSTANCES=1
# KAFKA_SASL_USERNAME=
# KAFKA_SASL_PASSWORD=
# KAFKA_SASL_MECHANISM=plain

# Database Configuration (PostgreSQL)
DB_HOST=localhost
DB_PORT=5432
DB_USER=postgres
DB_PASSWORD=password
DB_DATABASE=log_platform
DB_SCHEMA=public
DB_TABLE=onoffline_record
DB_MAX_CONNECTIONS=10

# Redis Configuration
REDIS_HOST=localhost
REDIS_PORT=6379
REDIS_PASSWORD=
REDIS_DB=0
REDIS_PROJECT_NAME=bls-onoffline
REDIS_API_BASE_URL=http://localhost:3001
docs/template/bls-onoffline-backend/AGENTS.md (vendored, new file, 18 lines)
@@ -0,0 +1,18 @@
<!-- OPENSPEC:START -->
# OpenSpec Instructions

These instructions are for AI assistants working in this project.

Always open `@/openspec/AGENTS.md` when the request:
- Mentions planning or proposals (words like proposal, spec, change, plan)
- Introduces new capabilities, breaking changes, architecture shifts, or big performance/security work
- Sounds ambiguous and you need the authoritative spec before coding

Use `@/openspec/AGENTS.md` to learn:
- How to create and apply change proposals
- Spec format and conventions
- Project structure and guidelines

Keep this managed block so 'openspec update' can refresh the instructions.

<!-- OPENSPEC:END -->
docs/template/bls-onoffline-backend/README.md (vendored, new file, 30 lines)
@@ -0,0 +1,30 @@
bls-onoffline-backend

Install and run
- Node.js 22+
- npm install
- npm run dev

Build and test
- npm run build
- npm run test
- npm run lint

Spec validation
- npm run spec:lint
- npm run spec:validate

Environment variables
- Copy .env.example to .env and adjust it for your environment

Database initialization
- Before starting the service, create the database and maintain partitions with the scripts under the repository-root SQL_Script directory
- `../SQL_Script/create_database.sql`: create the database (run with psql)
- `../SQL_Script/create_schema_and_parent_table.sql`: create the schema and the parent partitioned table
- `../SQL_Script/create_partition_for_day.sql`: template for creating one day's partition
- `../SQL_Script/generate_init_sql.js`: generate the database and table creation SQL
- `../SQL_Script/generate_partition_sql.js`: generate SQL for a single day's partition
- `../SQL_Script/generate_partition_range_sql.js`: generate SQL for a range of partitions

Spec
- The specification lives in spec/onoffline-spec.md
docs/template/bls-onoffline-backend/dist/index.js (vendored, new file, 1086 lines)
File diff suppressed because it is too large.
docs/template/bls-onoffline-backend/ecosystem.config.cjs (vendored, new file, 22 lines)
@@ -0,0 +1,22 @@
module.exports = {
  apps: [{
    name: 'bls-onoffline',
    script: 'dist/index.js',
    instances: 1,
    exec_mode: 'fork',
    autorestart: true,
    watch: false,
    max_memory_restart: '1G',
    env_file: '.env',
    env: {
      NODE_ENV: 'production',
      PORT: 3001
    },
    error_file: './logs/error.log',
    out_file: './logs/out.log',
    log_date_format: 'YYYY-MM-DD HH:mm:ss Z',
    merge_logs: true,
    kill_timeout: 5000,
    time: true
  }]
};
docs/template/bls-onoffline-backend/openspec/AGENTS.md (vendored, new file, 456 lines)
@@ -0,0 +1,456 @@
|
||||
# OpenSpec Instructions
|
||||
|
||||
Instructions for AI coding assistants using OpenSpec for spec-driven development.
|
||||
|
||||
## TL;DR Quick Checklist
|
||||
|
||||
- Search existing work: `openspec spec list --long`, `openspec list` (use `rg` only for full-text search)
|
||||
- Decide scope: new capability vs modify existing capability
|
||||
- Pick a unique `change-id`: kebab-case, verb-led (`add-`, `update-`, `remove-`, `refactor-`)
|
||||
- Scaffold: `proposal.md`, `tasks.md`, `design.md` (only if needed), and delta specs per affected capability
|
||||
- Write deltas: use `## ADDED|MODIFIED|REMOVED|RENAMED Requirements`; include at least one `#### Scenario:` per requirement
|
||||
- Validate: `openspec validate [change-id] --strict` and fix issues
|
||||
- Request approval: Do not start implementation until proposal is approved
|
||||
|
||||
## Three-Stage Workflow
|
||||
|
||||
### Stage 1: Creating Changes
|
||||
Create proposal when you need to:
|
||||
- Add features or functionality
|
||||
- Make breaking changes (API, schema)
|
||||
- Change architecture or patterns
|
||||
- Optimize performance (changes behavior)
|
||||
- Update security patterns
|
||||
|
||||
Triggers (examples):
|
||||
- "Help me create a change proposal"
|
||||
- "Help me plan a change"
|
||||
- "Help me create a proposal"
|
||||
- "I want to create a spec proposal"
|
||||
- "I want to create a spec"
|
||||
|
||||
Loose matching guidance:
|
||||
- Contains one of: `proposal`, `change`, `spec`
|
||||
- With one of: `create`, `plan`, `make`, `start`, `help`
|
||||
|
||||
Skip proposal for:
|
||||
- Bug fixes (restore intended behavior)
|
||||
- Typos, formatting, comments
|
||||
- Dependency updates (non-breaking)
|
||||
- Configuration changes
|
||||
- Tests for existing behavior
|
||||
|
||||
**Workflow**
|
||||
1. Review `openspec/project.md`, `openspec list`, and `openspec list --specs` to understand current context.
|
||||
2. Choose a unique verb-led `change-id` and scaffold `proposal.md`, `tasks.md`, optional `design.md`, and spec deltas under `openspec/changes/<id>/`.
|
||||
3. Draft spec deltas using `## ADDED|MODIFIED|REMOVED Requirements` with at least one `#### Scenario:` per requirement.
|
||||
4. Run `openspec validate <id> --strict` and resolve any issues before sharing the proposal.
|
||||
|
||||
### Stage 2: Implementing Changes
|
||||
Track these steps as TODOs and complete them one by one.
|
||||
1. **Read proposal.md** - Understand what's being built
|
||||
2. **Read design.md** (if exists) - Review technical decisions
|
||||
3. **Read tasks.md** - Get implementation checklist
|
||||
4. **Implement tasks sequentially** - Complete in order
|
||||
5. **Confirm completion** - Ensure every item in `tasks.md` is finished before updating statuses
|
||||
6. **Update checklist** - After all work is done, set every task to `- [x]` so the list reflects reality
|
||||
7. **Approval gate** - Do not start implementation until the proposal is reviewed and approved
|
||||
|
||||
### Stage 3: Archiving Changes
|
||||
After deployment, create separate PR to:
|
||||
- Move `changes/[name]/` → `changes/archive/YYYY-MM-DD-[name]/`
|
||||
- Update `specs/` if capabilities changed
|
||||
- Use `openspec archive <change-id> --skip-specs --yes` for tooling-only changes (always pass the change ID explicitly)
|
||||
- Run `openspec validate --strict` to confirm the archived change passes checks
|
||||
|
||||
## Before Any Task
|
||||
|
||||
**Context Checklist:**
|
||||
- [ ] Read relevant specs in `specs/[capability]/spec.md`
|
||||
- [ ] Check pending changes in `changes/` for conflicts
|
||||
- [ ] Read `openspec/project.md` for conventions
|
||||
- [ ] Run `openspec list` to see active changes
|
||||
- [ ] Run `openspec list --specs` to see existing capabilities
|
||||
|
||||
**Before Creating Specs:**
|
||||
- Always check if capability already exists
|
||||
- Prefer modifying existing specs over creating duplicates
|
||||
- Use `openspec show [spec]` to review current state
|
||||
- If request is ambiguous, ask 1–2 clarifying questions before scaffolding
|
||||
|
||||
### Search Guidance
|
||||
- Enumerate specs: `openspec spec list --long` (or `--json` for scripts)
|
||||
- Enumerate changes: `openspec list` (or `openspec change list --json` - deprecated but available)
|
||||
- Show details:
|
||||
- Spec: `openspec show <spec-id> --type spec` (use `--json` for filters)
|
||||
- Change: `openspec show <change-id> --json --deltas-only`
|
||||
- Full-text search (use ripgrep): `rg -n "Requirement:|Scenario:" openspec/specs`
|
||||
|
||||
## Quick Start
|
||||
|
||||
### CLI Commands
|
||||
|
||||
```bash
|
||||
# Essential commands
|
||||
openspec list # List active changes
|
||||
openspec list --specs # List specifications
|
||||
openspec show [item] # Display change or spec
|
||||
openspec validate [item] # Validate changes or specs
|
||||
openspec archive <change-id> [--yes|-y] # Archive after deployment (add --yes for non-interactive runs)
|
||||
|
||||
# Project management
|
||||
openspec init [path] # Initialize OpenSpec
|
||||
openspec update [path] # Update instruction files
|
||||
|
||||
# Interactive mode
|
||||
openspec show # Prompts for selection
|
||||
openspec validate # Bulk validation mode
|
||||
|
||||
# Debugging
|
||||
openspec show [change] --json --deltas-only
|
||||
openspec validate [change] --strict
|
||||
```
|
||||
|
||||
### Command Flags
|
||||
|
||||
- `--json` - Machine-readable output
|
||||
- `--type change|spec` - Disambiguate items
|
||||
- `--strict` - Comprehensive validation
|
||||
- `--no-interactive` - Disable prompts
|
||||
- `--skip-specs` - Archive without spec updates
|
||||
- `--yes`/`-y` - Skip confirmation prompts (non-interactive archive)
|
||||
|
||||
## Directory Structure
|
||||
|
||||
```
|
||||
openspec/
|
||||
├── project.md # Project conventions
|
||||
├── specs/ # Current truth - what IS built
|
||||
│ └── [capability]/ # Single focused capability
|
||||
│ ├── spec.md # Requirements and scenarios
|
||||
│ └── design.md # Technical patterns
|
||||
├── changes/ # Proposals - what SHOULD change
|
||||
│ ├── [change-name]/
|
||||
│ │ ├── proposal.md # Why, what, impact
|
||||
│ │ ├── tasks.md # Implementation checklist
|
||||
│ │ ├── design.md # Technical decisions (optional; see criteria)
|
||||
│ │ └── specs/ # Delta changes
|
||||
│ │ └── [capability]/
|
||||
│ │ └── spec.md # ADDED/MODIFIED/REMOVED
|
||||
│ └── archive/ # Completed changes
|
||||
```
|
||||
|
||||
## Creating Change Proposals
|
||||
|
||||
### Decision Tree
|
||||
|
||||
```
|
||||
New request?
|
||||
├─ Bug fix restoring spec behavior? → Fix directly
|
||||
├─ Typo/format/comment? → Fix directly
|
||||
├─ New feature/capability? → Create proposal
|
||||
├─ Breaking change? → Create proposal
|
||||
├─ Architecture change? → Create proposal
|
||||
└─ Unclear? → Create proposal (safer)
|
||||
```
|
||||
|
||||
### Proposal Structure
|
||||
|
||||
1. **Create directory:** `changes/[change-id]/` (kebab-case, verb-led, unique)
|
||||
|
||||
2. **Write proposal.md:**
|
||||
```markdown
|
||||
# Change: [Brief description of change]
|
||||
|
||||
## Why
|
||||
[1-2 sentences on problem/opportunity]
|
||||
|
||||
## What Changes
|
||||
- [Bullet list of changes]
|
||||
- [Mark breaking changes with **BREAKING**]
|
||||
|
||||
## Impact
|
||||
- Affected specs: [list capabilities]
|
||||
- Affected code: [key files/systems]
|
||||
```
|
||||
|
||||
3. **Create spec deltas:** `specs/[capability]/spec.md`
|
||||
```markdown
|
||||
## ADDED Requirements
|
||||
### Requirement: New Feature
|
||||
The system SHALL provide...
|
||||
|
||||
#### Scenario: Success case
|
||||
- **WHEN** user performs action
|
||||
- **THEN** expected result
|
||||
|
||||
## MODIFIED Requirements
|
||||
### Requirement: Existing Feature
|
||||
[Complete modified requirement]
|
||||
|
||||
## REMOVED Requirements
|
||||
### Requirement: Old Feature
|
||||
**Reason**: [Why removing]
|
||||
**Migration**: [How to handle]
|
||||
```
|
||||
If multiple capabilities are affected, create multiple delta files under `changes/[change-id]/specs/<capability>/spec.md`—one per capability.
|
||||
|
||||
4. **Create tasks.md:**
|
||||
```markdown
|
||||
## 1. Implementation
|
||||
- [ ] 1.1 Create database schema
|
||||
- [ ] 1.2 Implement API endpoint
|
||||
- [ ] 1.3 Add frontend component
|
||||
- [ ] 1.4 Write tests
|
||||
```
|
||||
|
||||
5. **Create design.md when needed:**
|
||||
Create `design.md` if any of the following apply; otherwise omit it:
|
||||
- Cross-cutting change (multiple services/modules) or a new architectural pattern
|
||||
- New external dependency or significant data model changes
|
||||
- Security, performance, or migration complexity
|
||||
- Ambiguity that benefits from technical decisions before coding
|
||||
|
||||
Minimal `design.md` skeleton:
|
||||
```markdown
|
||||
## Context
|
||||
[Background, constraints, stakeholders]
|
||||
|
||||
## Goals / Non-Goals
|
||||
- Goals: [...]
|
||||
- Non-Goals: [...]
|
||||
|
||||
## Decisions
|
||||
- Decision: [What and why]
|
||||
- Alternatives considered: [Options + rationale]
|
||||
|
||||
## Risks / Trade-offs
|
||||
- [Risk] → Mitigation
|
||||
|
||||
## Migration Plan
|
||||
[Steps, rollback]
|
||||
|
||||
## Open Questions
|
||||
- [...]
|
||||
```
|
||||
|
||||
## Spec File Format
|
||||
|
||||
### Critical: Scenario Formatting
|
||||
|
||||
**CORRECT** (use #### headers):
|
||||
```markdown
|
||||
#### Scenario: User login success
|
||||
- **WHEN** valid credentials provided
|
||||
- **THEN** return JWT token
|
||||
```
|
||||
|
||||
**WRONG** (don't use bullets or bold):
|
||||
```markdown
|
||||
- **Scenario: User login** ❌
|
||||
**Scenario**: User login ❌
|
||||
### Scenario: User login ❌
|
||||
```
|
||||
|
||||
Every requirement MUST have at least one scenario.
|
||||
|
||||
### Requirement Wording
|
||||
- Use SHALL/MUST for normative requirements (avoid should/may unless intentionally non-normative)
|
||||
|
||||
### Delta Operations
|
||||
|
||||
- `## ADDED Requirements` - New capabilities
|
||||
- `## MODIFIED Requirements` - Changed behavior
|
||||
- `## REMOVED Requirements` - Deprecated features
|
||||
- `## RENAMED Requirements` - Name changes
|
||||
|
||||
Headers matched with `trim(header)` - whitespace ignored.
|
||||
|
||||
#### When to use ADDED vs MODIFIED
|
||||
- ADDED: Introduces a new capability or sub-capability that can stand alone as a requirement. Prefer ADDED when the change is orthogonal (e.g., adding "Slash Command Configuration") rather than altering the semantics of an existing requirement.
|
||||
- MODIFIED: Changes the behavior, scope, or acceptance criteria of an existing requirement. Always paste the full, updated requirement content (header + all scenarios). The archiver will replace the entire requirement with what you provide here; partial deltas will drop previous details.
|
||||
- RENAMED: Use when only the name changes. If you also change behavior, use RENAMED (name) plus MODIFIED (content) referencing the new name.
|
||||
|
||||
Common pitfall: Using MODIFIED to add a new concern without including the previous text. This causes loss of detail at archive time. If you aren’t explicitly changing the existing requirement, add a new requirement under ADDED instead.
|
||||
|
||||
Authoring a MODIFIED requirement correctly:
|
||||
1) Locate the existing requirement in `openspec/specs/<capability>/spec.md`.
|
||||
2) Copy the entire requirement block (from `### Requirement: ...` through its scenarios).
|
||||
3) Paste it under `## MODIFIED Requirements` and edit to reflect the new behavior.
|
||||
4) Ensure the header text matches exactly (whitespace-insensitive) and keep at least one `#### Scenario:`.
|
||||
|
||||
Example for RENAMED:
|
||||
```markdown
|
||||
## RENAMED Requirements
|
||||
- FROM: `### Requirement: Login`
|
||||
- TO: `### Requirement: User Authentication`
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Errors
|
||||
|
||||
**"Change must have at least one delta"**
|
||||
- Check `changes/[name]/specs/` exists with .md files
|
||||
- Verify files have operation prefixes (## ADDED Requirements)
|
||||
|
||||
**"Requirement must have at least one scenario"**
|
||||
- Check scenarios use `#### Scenario:` format (4 hashtags)
|
||||
- Don't use bullet points or bold for scenario headers
|
||||
|
||||
**Silent scenario parsing failures**
|
||||
- Exact format required: `#### Scenario: Name`
|
||||
- Debug with: `openspec show [change] --json --deltas-only`
|
||||
|
||||
### Validation Tips
|
||||
|
||||
```bash
|
||||
# Always use strict mode for comprehensive checks
|
||||
openspec validate [change] --strict
|
||||
|
||||
# Debug delta parsing
|
||||
openspec show [change] --json | jq '.deltas'
|
||||
|
||||
# Check specific requirement
|
||||
openspec show [spec] --json -r 1
|
||||
```
|
||||
|
||||
## Happy Path Script
|
||||
|
||||
```bash
|
||||
# 1) Explore current state
|
||||
openspec spec list --long
|
||||
openspec list
|
||||
# Optional full-text search:
|
||||
# rg -n "Requirement:|Scenario:" openspec/specs
|
||||
# rg -n "^#|Requirement:" openspec/changes
|
||||
|
||||
# 2) Choose change id and scaffold
|
||||
CHANGE=add-two-factor-auth
|
||||
mkdir -p openspec/changes/$CHANGE/{specs/auth}
|
||||
printf "## Why\n...\n\n## What Changes\n- ...\n\n## Impact\n- ...\n" > openspec/changes/$CHANGE/proposal.md
|
||||
printf "## 1. Implementation\n- [ ] 1.1 ...\n" > openspec/changes/$CHANGE/tasks.md
|
||||
|
||||
# 3) Add deltas (example)
|
||||
cat > openspec/changes/$CHANGE/specs/auth/spec.md << 'EOF'
|
||||
## ADDED Requirements
|
||||
### Requirement: Two-Factor Authentication
|
||||
Users MUST provide a second factor during login.
|
||||
|
||||
#### Scenario: OTP required
|
||||
- **WHEN** valid credentials are provided
|
||||
- **THEN** an OTP challenge is required
|
||||
EOF
|
||||
|
||||
# 4) Validate
|
||||
openspec validate $CHANGE --strict
|
||||
```
|
||||
|
||||
## Multi-Capability Example
|
||||
|
||||
```
|
||||
openspec/changes/add-2fa-notify/
|
||||
├── proposal.md
|
||||
├── tasks.md
|
||||
└── specs/
|
||||
├── auth/
|
||||
│ └── spec.md # ADDED: Two-Factor Authentication
|
||||
└── notifications/
|
||||
└── spec.md # ADDED: OTP email notification
|
||||
```
|
||||
|
||||
auth/spec.md
|
||||
```markdown
|
||||
## ADDED Requirements
|
||||
### Requirement: Two-Factor Authentication
|
||||
...
|
||||
```
|
||||
|
||||
notifications/spec.md
|
||||
```markdown
|
||||
## ADDED Requirements
|
||||
### Requirement: OTP Email Notification
|
||||
...
|
||||
```
|
||||
|
||||
## Best Practices
|
||||
|
||||
### Simplicity First
|
||||
- Default to <100 lines of new code
|
||||
- Single-file implementations until proven insufficient
|
||||
- Avoid frameworks without clear justification
|
||||
- Choose boring, proven patterns
|
||||
|
||||
### Complexity Triggers
|
||||
Only add complexity with:
|
||||
- Performance data showing current solution too slow
|
||||
- Concrete scale requirements (>1000 users, >100MB data)
|
||||
- Multiple proven use cases requiring abstraction
|
||||
|
||||
### Clear References
|
||||
- Use `file.ts:42` format for code locations
|
||||
- Reference specs as `specs/auth/spec.md`
|
||||
- Link related changes and PRs
|
||||
|
||||
### Capability Naming
|
||||
- Use verb-noun: `user-auth`, `payment-capture`
|
||||
- Single purpose per capability
|
||||
- 10-minute understandability rule
|
||||
- Split if description needs "AND"
|
||||
|
||||
### Change ID Naming
|
||||
- Use kebab-case, short and descriptive: `add-two-factor-auth`
|
||||
- Prefer verb-led prefixes: `add-`, `update-`, `remove-`, `refactor-`
|
||||
- Ensure uniqueness; if taken, append `-2`, `-3`, etc.
|
||||
|
||||
## Tool Selection Guide
|
||||
|
||||
| Task | Tool | Why |
|
||||
|------|------|-----|
|
||||
| Find files by pattern | Glob | Fast pattern matching |
|
||||
| Search code content | Grep | Optimized regex search |
|
||||
| Read specific files | Read | Direct file access |
|
||||
| Explore unknown scope | Task | Multi-step investigation |
|
||||
|
||||
## Error Recovery
|
||||
|
||||
### Change Conflicts
|
||||
1. Run `openspec list` to see active changes
|
||||
2. Check for overlapping specs
|
||||
3. Coordinate with change owners
|
||||
4. Consider combining proposals
|
||||
|
||||
### Validation Failures
|
||||
1. Run with `--strict` flag
|
||||
2. Check JSON output for details
|
||||
3. Verify spec file format
|
||||
4. Ensure scenarios properly formatted
|
||||
|
||||
### Missing Context
|
||||
1. Read project.md first
|
||||
2. Check related specs
|
||||
3. Review recent archives
|
||||
4. Ask for clarification
|
||||
|
||||
## Quick Reference
|
||||
|
||||
### Stage Indicators
|
||||
- `changes/` - Proposed, not yet built
|
||||
- `specs/` - Built and deployed
|
||||
- `archive/` - Completed changes
|
||||
|
||||
### File Purposes
|
||||
- `proposal.md` - Why and what
|
||||
- `tasks.md` - Implementation steps
|
||||
- `design.md` - Technical decisions
|
||||
- `spec.md` - Requirements and behavior
|
||||
|
||||
### CLI Essentials
|
||||
```bash
|
||||
openspec list # What's in progress?
|
||||
openspec show [item] # View details
|
||||
openspec validate --strict # Is it correct?
|
||||
openspec archive <change-id> [--yes|-y] # Mark complete (add --yes for automation)
|
||||
```
|
||||
|
||||
Remember: Specs are truth. Changes are proposals. Keep them in sync.
|
||||
@@ -0,0 +1,17 @@
# Change: Fix Kafka Partitioning and Schema Issues

## Why
Production deployment revealed issues with data ingestion:
1. Kafka Topic name changed to include partition suffix.
2. Legacy data contains second-level timestamps (1970s) causing partition lookup failures in PostgreSQL (which expects ms).
3. Variable-length fields (reboot reason, status) exceeded VARCHAR(10) limits, causing crashes.

## What Changes
- **Modified Requirement**: Update Kafka Topic to `blwlog4Nodejs-rcu-onoffline-topic-0`.
- **New Requirement**: Implement heuristic timestamp conversion (Sec -> MS) for values < 100B.
- **New Requirement**: Truncate specific fields to VARCHAR(255) to prevent DB rejection.
- **Modified Requirement**: Update DB Schema to VARCHAR(255) for robustness.

## Impact
- Affected specs: `onoffline`
- Affected code: `src/processor/index.js`, `scripts/init_db.sql`
@@ -0,0 +1,25 @@
## MODIFIED Requirements
### Requirement: Consume and persist
The system SHALL consume messages from blwlog4Nodejs-rcu-onoffline-topic-0 and write them to log_platform.onoffline.onoffline_record.

#### Scenario: Non-reboot record written
- **GIVEN** RebootReason is empty or absent
- **WHEN** the message is processed
- **THEN** current_status equals CurrentStatus (truncated to 255 characters)

## ADDED Requirements
### Requirement: Field length limits and truncation
The system SHALL truncate selected variable-length fields to the maximum length the database allows (VARCHAR(255)) to prevent insert failures.

#### Scenario: Over-long field handling
- **GIVEN** LauncherVersion, CurrentStatus, or RebootReason exceeds 255 characters
- **WHEN** the message is processed
- **THEN** the field is truncated to its first 255 characters before being stored

### Requirement: Automatic timestamp unit detection
The system SHALL detect whether the UnixTime field is in seconds or milliseconds and normalize it to milliseconds.

#### Scenario: Second-resolution timestamp conversion
- **GIVEN** UnixTime < 100000000000 (which, read as milliseconds, would be before ~1973)
- **WHEN** the timestamp is parsed
- **THEN** it is multiplied by 1000 and converted to milliseconds
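A minimal sketch of the two rules above (truncation and unit detection), assuming standalone helpers rather than the project's actual `src/processor/index.js`; the names `toMilliseconds`, `truncateField`, and both constants are illustrative:

```js
// Illustrative helpers for the two requirements above; names and constants are assumptions.
const MS_THRESHOLD = 100000000000; // below this, UnixTime is treated as seconds
const MAX_FIELD_LENGTH = 255;      // matches the VARCHAR(255) columns

// Normalize UnixTime to milliseconds.
export const toMilliseconds = (unixTime) => {
  const value = Number(unixTime);
  if (!Number.isFinite(value)) return null;
  return value < MS_THRESHOLD ? value * 1000 : value;
};

// Truncate a variable-length field so it cannot exceed the column width.
export const truncateField = (value) => {
  if (typeof value !== 'string') return value;
  return value.length > MAX_FIELD_LENGTH ? value.slice(0, MAX_FIELD_LENGTH) : value;
};
```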
@@ -0,0 +1,6 @@
## 1. Implementation
- [x] Update Kafka Topic in .env and config
- [x] Implement timestamp unit detection and conversion in processor
- [x] Implement field truncation logic in processor
- [x] Update database schema definition (init_db.sql) to VARCHAR(255)
- [x] Verify data ingestion with production stream
@@ -0,0 +1,18 @@
# Change: Optimize Kafka Consumption Performance

## Why
Users report extremely slow Kafka consumption. The current implementation processes and inserts messages one by one, which bottlenecks on the database network round-trip time (RTT).

## What Changes
- **New Requirement**: Implement Batch Processing for Kafka messages.
- **Refactor**: Decouple message parsing from insertion in `processor`.
- **Logic**:
  - Accumulate messages in a buffer (e.g., 500ms or 500 items).
  - Perform Batch Insert into PostgreSQL.
  - Implement Row-by-Row fallback for batch failures (to isolate bad data).
  - Handle DB connection errors with a retry loop at batch level.

## Impact
- Affected specs: `onoffline`
- Affected code: `src/index.js`, `src/processor/index.js`
- Performance: Expected 10x-100x throughput increase.
@@ -0,0 +1,13 @@
## ADDED Requirements
### Requirement: Batched consumption and writes
The system SHALL buffer Kafka messages and write them to the database in batches to increase throughput.

#### Scenario: Batch insert
- **GIVEN** many messages arrive within a short window (e.g., 500 messages)
- **WHEN** the buffer fills or the timeout elapses (e.g., 200ms)
- **THEN** a single batched database insert is executed

#### Scenario: Fallback on write failure
- **GIVEN** a batch insert fails because of bad data (not a connection error)
- **WHEN** the exception is caught
- **THEN** the system falls back to row-by-row inserts so bad rows are isolated and valid rows are still stored
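For illustration only, a buffer along the lines described above might look like the following; `BatchBuffer`, `insertRows`, and `insertRow` are assumed names, not the project's actual `BatchProcessor`:

```js
// Illustrative batch buffer: flush on size or timeout, fall back to per-row inserts on data errors.
export class BatchBuffer {
  constructor({ insertRows, insertRow, maxSize = 500, maxWaitMs = 200 }) {
    this.insertRows = insertRows; // (rows) => Promise, bulk insert
    this.insertRow = insertRow;   // (row) => Promise, single-row insert
    this.maxSize = maxSize;
    this.maxWaitMs = maxWaitMs;
    this.items = [];
    this.timer = null;
  }

  add(row) {
    this.items.push(row);
    if (this.items.length >= this.maxSize) return this.flush();
    if (!this.timer) {
      this.timer = setTimeout(() => this.flush(), this.maxWaitMs);
    }
  }

  async flush() {
    if (this.timer) { clearTimeout(this.timer); this.timer = null; }
    const rows = this.items.splice(0, this.items.length);
    if (rows.length === 0) return;
    try {
      await this.insertRows(rows);      // one batched insert
    } catch (err) {
      for (const row of rows) {         // isolate bad rows one by one
        try { await this.insertRow(row); } catch (rowErr) { /* log and queue the bad row */ }
      }
    }
  }
}
```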
@@ -0,0 +1,5 @@
## 1. Implementation
- [ ] Refactor `src/processor/index.js` to export `parseMessageToRows`
- [ ] Implement `BatchProcessor` logic in `src/index.js`
- [ ] Update `handleMessage` to use `BatchProcessor`
- [ ] Verify performance improvement
@@ -0,0 +1,11 @@
# Proposal: Refactor Partition Indexes

## Goal
Rely on PostgreSQL's built-in index propagation and change the indexing strategy used when daily partitions are created, so the code no longer creates indexes on each partition individually.

## Context
After dynamically creating a child partition, the current `PartitionManager` implicitly issues queries that create six single-column indexes on it. Since we run PostgreSQL 11+ and the init script already creates all indexes on the parent partitioned table `onoffline.onoffline_record`, those parent-table indexes are applied to every child partition automatically, so there is no need to add them manually at partition-creation time.

## Proposed Changes
1. In `src/db/partitionManager.js`, remove the explicit child-partition index method `ensurePartitionIndexes` and the loop that checks indexes on existing partitions, `ensureIndexesForExistingPartitions`.
2. In the partition maintenance flows `ensurePartitions` and `ensurePartitionsForTimestamps`, remove the calls to `ensurePartitionIndexes`.
@@ -0,0 +1,11 @@
# Spec Delta: onoffline-backend

## MODIFIED Requirements

### Requirement: Database partitioning strategy
The system SHALL use daily range partitioning and automatically maintain partition tables for the next 30 days; child tables SHALL rely on PostgreSQL's native mechanism to inherit the parent table's indexes.

#### Scenario: Partition pre-creation
- **GIVEN** the system starts up or it is the daily early-morning run
- **WHEN** the partition maintenance task runs
- **THEN** partition tables for the next 30 days exist in the database, without explicitly creating single-column indexes on the child tables
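As a hedged illustration of the daily range scheme (this is not the actual `SQL_Script/generate_partition_sql.js`; the partition naming convention and numeric `ts_ms` bounds are assumptions), one day's partition DDL could be generated like this:

```js
// Illustrative generator for one day's range partition over ts_ms (epoch milliseconds).
const dailyPartitionSql = (dayUtc) => {
  const start = new Date(Date.UTC(dayUtc.getUTCFullYear(), dayUtc.getUTCMonth(), dayUtc.getUTCDate()));
  const end = new Date(start.getTime() + 24 * 60 * 60 * 1000);
  const suffix = start.toISOString().slice(0, 10).replace(/-/g, ''); // assumed YYYYMMDD naming
  // On PostgreSQL 11+, parent-table indexes propagate to this child automatically.
  return `CREATE TABLE IF NOT EXISTS onoffline.onoffline_record_${suffix}
  PARTITION OF onoffline.onoffline_record
  FOR VALUES FROM (${start.getTime()}) TO (${end.getTime()});`;
};

console.log(dailyPartitionSql(new Date()));
```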
@@ -0,0 +1,6 @@
# Tasks: Refactor Partition Indexes

- [x] refactor `src/db/partitionManager.js`: remove `ensurePartitionIndexes` and `ensureIndexesForExistingPartitions`.
- [x] refactor `src/db/partitionManager.js`: update `ensurePartitions` and `ensurePartitionsForTimestamps` to remove calls to `ensurePartitionIndexes`.
- [x] refactor `src/db/initializer.js` (and any other occurrences) to reflect the removal.
- [x] update openspec requirements to clarify that index propagation relies on PostgreSQL parent-table indexes.
@@ -0,0 +1,14 @@
# Change: remove runtime db provisioning

## Why
The service currently creates the database and tables and maintains partitions at runtime, which blurs its responsibility boundary and introduces DDL risk during startup. That capability has been moved to the repository-root `SQL_Script/`, and the change needs to be formally recorded as a spec change through OpenSpec.

## What Changes
- Remove the requirement that the service initialize the database and run scheduled partition maintenance at startup.
- Remove the requirement that the service automatically create missing partitions on write failures.
- Make explicit that database structure and partition maintenance are handled by external scripts (`SQL_Script/`).
- Keep the service's core responsibilities: Kafka consumption, parsing, database writes, retries, and monitoring.

## Impact
- Affected specs: `openspec/specs/onoffline/spec.md`
- Affected code: `src/index.js`, `src/config/config.js`, `src/db/initializer.js`, `src/db/partitionManager.js`, `scripts/init_db.sql`, `scripts/verify_partitions.js`, `../SQL_Script/*`
@@ -0,0 +1,32 @@
## MODIFIED Requirements

### Requirement: Database partitioning strategy
The system SHALL use daily range partitioning; the running service itself SHALL NOT create databases or tables, create partitions, or perform scheduled partition maintenance.

#### Scenario: No DDL at service startup
- **GIVEN** the service process starts
- **WHEN** the bootstrap sequence runs
- **THEN** only consumption, processing, and monitoring capabilities are initialized; no database creation, table initialization, or partition creation is performed

#### Scenario: Partitions maintained by external scripts
- **GIVEN** database objects need to be created or future partitions added
- **WHEN** the external SQL/JS tooling is executed
- **THEN** database creation and partition maintenance are done via the repository-root `SQL_Script/`, not automatically by the running service

### Requirement: Batched consumption and writes
The system SHALL buffer Kafka messages and write them to the database in batches to increase throughput; on write failures, the system SHALL apply connection-recovery retries and fallback strategies, but SHALL NOT create database partitions at runtime.

#### Scenario: Batch insert
- **GIVEN** many messages arrive within a short window (e.g., 500 messages)
- **WHEN** the buffer fills or the timeout elapses (e.g., 200ms)
- **THEN** a single batched database insert is executed

#### Scenario: Fallback on write failure
- **GIVEN** a batch insert fails because of bad data (not a connection error)
- **WHEN** the exception is caught
- **THEN** the system falls back to row-by-row inserts so bad rows are isolated and valid rows are still stored

#### Scenario: Missing-partition error handling
- **GIVEN** the database returns a missing-partition error on write
- **WHEN** the service handles the error
- **THEN** the service logs the error and handles it through the existing error-handling mechanism; it does not create the partition at runtime
@@ -0,0 +1,12 @@
## 1. Implementation
- [x] 1.1 Remove runtime DB initialization from bootstrap flow (`src/index.js`).
- [x] 1.2 Remove scheduled partition maintenance job from runtime service.
- [x] 1.3 Remove runtime missing-partition auto-fix behavior.
- [x] 1.4 Remove legacy DB provisioning modules and scripts from service project.
- [x] 1.5 Add external SQL/JS provisioning scripts under root `SQL_Script/` for DB/schema/partition management.
- [x] 1.6 Update project docs to point DB provisioning to `SQL_Script/`.

## 2. Validation
- [x] 2.1 Run `npm run lint` in `bls-onoffline-backend`.
- [x] 2.2 Run `npm run build` in `bls-onoffline-backend`.
- [x] 2.3 Run `openspec validate remove-runtime-db-provisioning --strict`.
docs/template/bls-onoffline-backend/openspec/project.md (vendored, new file, 31 lines)
@@ -0,0 +1,31 @@
# Project Context

## Purpose
[Describe your project's purpose and goals]

## Tech Stack
- [List your primary technologies]
- [e.g., TypeScript, React, Node.js]

## Project Conventions

### Code Style
[Describe your code style preferences, formatting rules, and naming conventions]

### Architecture Patterns
[Document your architectural decisions and patterns]

### Testing Strategy
[Explain your testing approach and requirements]

### Git Workflow
[Describe your branching strategy and commit conventions]

## Domain Context
[Add domain-specific knowledge that AI assistants need to understand]

## Important Constraints
[List any technical, business, or regulatory constraints]

## External Dependencies
[Document key external services, APIs, or systems]
docs/template/bls-onoffline-backend/openspec/specs/onoffline/spec.md (vendored, new file, 103 lines)
@@ -0,0 +1,103 @@
# Spec: onoffline-backend

## Purpose
Consume device on/offline events from Kafka and write them to PostgreSQL partitioned tables according to the rules below, ensuring high reliability, idempotent writes, and error recovery.
## Requirements
### Requirement: Consume and persist
The system SHALL consume messages from blwlog4Nodejs-rcu-onoffline-topic-0 and write them to log_platform.onoffline.onoffline_record.

#### Scenario: Non-reboot record written
- **GIVEN** RebootReason is empty or absent
- **WHEN** the message is processed
- **THEN** current_status equals CurrentStatus (truncated to 255 characters)

### Requirement: Reboot record handling
The system SHALL force current_status to on when RebootReason is non-empty.

#### Scenario: Reboot record written
- **GIVEN** RebootReason has a non-empty value
- **WHEN** the message is processed
- **THEN** current_status equals on

### Requirement: Preserve empty values
The system SHALL preserve upstream empty values and not pad fields with 0.

#### Scenario: Empty value written
- **GIVEN** LauncherVersion or RebootReason is an empty string
- **WHEN** the message is processed
- **THEN** the stored value is the corresponding empty string

### Requirement: Database partitioning strategy
The system SHALL use daily range partitioning and automatically maintain partition tables for the next 30 days; child tables rely on PostgreSQL's native mechanism to inherit the parent table's indexes.

#### Scenario: Partition pre-creation
- **GIVEN** the system starts up or it is the daily early-morning run
- **WHEN** the partition maintenance task runs
- **THEN** partition tables for the next 30 days exist in the database, without explicitly creating single-column indexes on the child tables

### Requirement: Consumption reliability (At-Least-Once)
The system SHALL commit consumer offsets to Kafka only after the data has been written to the database successfully.

#### Scenario: Per-message acknowledgement with in-order commits
- **GIVEN** multiple messages are processed concurrently (offsets 1, 2, 3)
- **WHEN** offset 2 finishes first and offset 1 is still in flight
- **THEN** the system does not commit offset 2; only after offset 1 also finishes does it commit offset 3 (i.e., once 1, 2, and 3 are all done)
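The commit introduces an OffsetTracker class; as a rough, illustrative sketch of the in-order commit rule above (not necessarily the actual implementation), completed offsets can be recorded per partition and the committable offset advanced only contiguously:

```js
// Illustrative per-partition offset tracker for at-least-once, in-order commits.
export class OffsetTracker {
  constructor(startOffset) {
    this.nextExpected = startOffset; // lowest offset not yet confirmed as written
    this.done = new Set();           // offsets finished out of order
  }

  // Mark one offset as durably written to the database.
  markDone(offset) {
    this.done.add(offset);
    while (this.done.has(this.nextExpected)) {
      this.done.delete(this.nextExpected);
      this.nextExpected += 1;
    }
  }

  // Next offset to commit: everything below it has been written successfully.
  committableOffset() {
    return this.nextExpected;
  }
}
```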
### Requirement: Database-offline protection
The system SHALL pause consumption when the database connection is lost, to prevent data pile-up or loss.

#### Scenario: Database disconnect
- **GIVEN** the database connection fails (ECONNREFUSED, etc.)
- **WHEN** the consumer attempts to write
- **THEN** Kafka consumption is paused for 1 minute and the service enters a polling mode until the database recovers
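A minimal sketch of that pause-and-poll behaviour, assuming hypothetical `consumer.pause()` / `consumer.resume()` methods and the `checkConnection()` helper shown later in this commit:

```js
// Illustrative pause-and-poll loop; consumer.pause/resume are assumed APIs on the Kafka consumer wrapper.
const PAUSE_MS = 60 * 1000;

export const waitForDatabase = async (consumer, dbManager) => {
  consumer.pause();                                  // stop pulling from Kafka
  // Poll once a minute until the database answers again.
  while (!(await dbManager.checkConnection())) {
    await new Promise((resolve) => setTimeout(resolve, PAUSE_MS));
  }
  consumer.resume();                                 // safe to consume again
};
```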
### Requirement: Idempotent writes
The system SHALL handle re-delivered data and prevent primary-key conflicts.

#### Scenario: Duplicate data handling
- **GIVEN** Kafka re-delivers a message that has already been processed
- **WHEN** the write is attempted
- **THEN** the conflict is ignored with `ON CONFLICT DO NOTHING` and the message is treated as processed successfully

### Requirement: Performance and logging
The system SHALL minimize log output during normal operation.

#### Scenario: Normal-operation logging
- **GIVEN** data is being processed normally
- **WHEN** writes succeed
- **THEN** no per-message logs are emitted; only an aggregated per-minute summary (Pulled/Inserted) is logged

### Requirement: Field length limits and truncation
The system SHALL truncate selected variable-length fields to the maximum length the database allows (VARCHAR(255)) to prevent insert failures.

#### Scenario: Over-long field handling
- **GIVEN** LauncherVersion, CurrentStatus, or RebootReason exceeds 255 characters
- **WHEN** the message is processed
- **THEN** the field is truncated to its first 255 characters before being stored

### Requirement: Automatic timestamp unit detection
The system SHALL detect whether the UnixTime field is in seconds or milliseconds and normalize it to milliseconds.

#### Scenario: Second-resolution timestamp conversion
- **GIVEN** UnixTime < 100000000000 (which, read as milliseconds, would be before ~1973)
- **WHEN** the timestamp is parsed
- **THEN** it is multiplied by 1000 and converted to milliseconds

### Requirement: Batched consumption and writes
The system SHALL buffer Kafka messages and write them to the database in batches to increase throughput; on write failures, the system SHALL apply connection-recovery retries and fallback strategies, but SHALL NOT create database partitions at runtime.

#### Scenario: Batch insert
- **GIVEN** many messages arrive within a short window (e.g., 500 messages)
- **WHEN** the buffer fills or the timeout elapses (e.g., 200ms)
- **THEN** a single batched database insert is executed

#### Scenario: Fallback on write failure
- **GIVEN** a batch insert fails because of bad data (not a connection error)
- **WHEN** the exception is caught
- **THEN** the system falls back to row-by-row inserts so bad rows are isolated and valid rows are still stored

#### Scenario: Missing-partition error handling
- **GIVEN** the database returns a missing-partition error on write
- **WHEN** the service handles the error
- **THEN** the service logs the error and handles it through the existing error-handling mechanism; it does not create the partition at runtime
docs/template/bls-onoffline-backend/openspec/specs/onoffline/status.md (vendored, new file, 11 lines)
@@ -0,0 +1,11 @@
## Implementation Status
- **Date**: 2026-02-04
- **Status**: Completed
- **Notes**:
  - Core consumption logic, partition management, and idempotent database writes are complete.
  - The database connection leak (EADDRINUSE) issue has been handled and an offline-protection mechanism added.
  - The timestamp unit issue (Seconds -> MS) has been fixed.
  - Key fields have been widened to VARCHAR(255) with code-level truncation as protection.
  - Backlog consumption capacity has been verified.
  - This phase's development tasks have been archived.
docs/template/bls-onoffline-backend/package-lock.json (generated, vendored, new file, 3526 lines)
File diff suppressed because it is too large.
docs/template/bls-onoffline-backend/package.json (vendored, new file, 27 lines)
@@ -0,0 +1,27 @@
{
  "name": "bls-onoffline-backend",
  "version": "1.0.0",
  "type": "module",
  "private": true,
  "scripts": {
    "dev": "node src/index.js",
    "build": "vite build --ssr src/index.js --outDir dist",
    "test": "vitest run",
    "lint": "node scripts/lint.js",
    "spec:lint": "openspec validate --specs --strict --no-interactive",
    "spec:validate": "openspec validate --specs --no-interactive",
    "start": "node dist/index.js"
  },
  "dependencies": {
    "dotenv": "^16.4.5",
    "kafka-node": "^5.0.0",
    "node-cron": "^4.2.1",
    "pg": "^8.11.5",
    "redis": "^4.6.13",
    "zod": "^4.3.6"
  },
  "devDependencies": {
    "vite": "^5.4.0",
    "vitest": "^4.0.18"
  }
}
docs/template/bls-onoffline-backend/scripts/lint.js (vendored, new file, 41 lines)
@@ -0,0 +1,41 @@
|
||||
import fs from 'fs';
|
||||
import path from 'path';
|
||||
import { fileURLToPath } from 'url';
|
||||
import { spawnSync } from 'child_process';
|
||||
|
||||
const __filename = fileURLToPath(import.meta.url);
|
||||
const __dirname = path.dirname(__filename);
|
||||
const projectRoot = path.resolve(__dirname, '..');
|
||||
const targets = ['src', 'tests'];
|
||||
|
||||
const collectFiles = (dir) => {
|
||||
if (!fs.existsSync(dir)) {
|
||||
return [];
|
||||
}
|
||||
const entries = fs.readdirSync(dir, { withFileTypes: true });
|
||||
return entries.flatMap((entry) => {
|
||||
const fullPath = path.join(dir, entry.name);
|
||||
if (entry.isDirectory()) {
|
||||
return collectFiles(fullPath);
|
||||
}
|
||||
if (entry.isFile() && fullPath.endsWith('.js')) {
|
||||
return [fullPath];
|
||||
}
|
||||
return [];
|
||||
});
|
||||
};
|
||||
|
||||
const files = targets.flatMap((target) => collectFiles(path.join(projectRoot, target)));
|
||||
|
||||
const failures = [];
|
||||
|
||||
files.forEach((file) => {
|
||||
const result = spawnSync(process.execPath, ['--check', file], { stdio: 'inherit' });
|
||||
if (result.status !== 0) {
|
||||
failures.push(file);
|
||||
}
|
||||
});
|
||||
|
||||
if (failures.length > 0) {
|
||||
process.exit(1);
|
||||
}
|
||||
docs/template/bls-onoffline-backend/scripts/verify_data.js (vendored, new file, 36 lines)
@@ -0,0 +1,36 @@
|
||||
|
||||
import { config } from '../src/config/config.js';
|
||||
import dbManager from '../src/db/databaseManager.js';
|
||||
import { logger } from '../src/utils/logger.js';
|
||||
|
||||
const verifyData = async () => {
|
||||
const client = await dbManager.pool.connect();
|
||||
try {
|
||||
console.log('Verifying data in database...');
|
||||
|
||||
// Count total rows
|
||||
const countSql = `SELECT count(*) FROM ${config.db.schema}.${config.db.table}`;
|
||||
const countRes = await client.query(countSql);
|
||||
console.log(`Total rows in ${config.db.schema}.${config.db.table}: ${countRes.rows[0].count}`);
|
||||
|
||||
// Check recent rows
|
||||
const recentSql = `
|
||||
SELECT * FROM ${config.db.schema}.${config.db.table}
|
||||
ORDER BY ts_ms DESC
|
||||
LIMIT 5
|
||||
`;
|
||||
const recentRes = await client.query(recentSql);
|
||||
console.log('Recent 5 rows:');
|
||||
recentRes.rows.forEach(row => {
|
||||
console.log(JSON.stringify(row));
|
||||
});
|
||||
|
||||
} catch (err) {
|
||||
console.error('Error verifying data:', err);
|
||||
} finally {
|
||||
client.release();
|
||||
await dbManager.pool.end();
|
||||
}
|
||||
};
|
||||
|
||||
verifyData();
|
||||
docs/template/bls-onoffline-backend/spec/onoffline-spec.md (vendored, new file, 50 lines)
@@ -0,0 +1,50 @@
bls-onoffline-backend specification

1. Kafka message structure
{
  "HotelCode": "1085",
  "MAC": "00:1A:2B:3C:4D:5E",
  "HostNumber": "091123987456",
  "RoomNumber": "8888房",
  "EndPoint": "50.2.60.1:6543",
  "CurrentStatus": "on",
  "CurrentTime": "2026-02-02T10:30:00Z",
  "UnixTime": 1770000235000,
  "LauncherVersion": "1.0.0",
  "RebootReason": "1"
}

2. Kafka topic
Topic: blwlog4Nodejs-rcu-onoffline-topic

3. Database structure
Database: log_platform
Table: onoffline_record
Columns:
  guid varchar(32)
  ts_ms int8
  write_ts_ms int8
  hotel_id int2
  mac varchar(21)
  device_id varchar(64)
  room_id varchar(64)
  ip varchar(21)
  current_status varchar(10)
  launcher_version varchar(10)
  reboot_reason varchar(10)
Primary key: (ts_ms, mac, device_id, room_id)
Partitioned daily by ts_ms

G5 database structure (dual write, temporary integration):
Same database: log_platform
Table: onoffline_record_g5
Differing columns:
- guid is int4 and is generated by the database itself.
- record_source is fixed to CRICS.
- current_status is int2; "on" maps to 1, "off" maps to 2, anything else to 0.
Dual write can be toggled via an environment variable.

4. Data processing rules
Non-reboot data: reboot_reason is empty or absent; current_status takes CurrentStatus
Reboot data: reboot_reason is non-empty; current_status is fixed to on
All other fields are stored with their original Kafka values; empty values are not padded with 0
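A tiny sketch of the status rule in section 4, with an illustrative helper name (not the project's actual processor function):

```js
// Illustrative status rule from section 4: a non-empty RebootReason forces "on".
export const resolveCurrentStatus = (msg) => {
  const rebootReason = msg.RebootReason ?? '';
  return rebootReason !== '' ? 'on' : (msg.CurrentStatus ?? '');
};
```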
docs/template/bls-onoffline-backend/src/config/config.js (vendored, new file, 72 lines)
@@ -0,0 +1,72 @@
|
||||
import dotenv from 'dotenv';
|
||||
|
||||
dotenv.config();
|
||||
|
||||
const parseNumber = (value, defaultValue) => {
|
||||
const parsed = Number(value);
|
||||
return Number.isFinite(parsed) ? parsed : defaultValue;
|
||||
};
|
||||
|
||||
const parseList = (value) =>
|
||||
(value || '')
|
||||
.split(',')
|
||||
.map((item) => item.trim())
|
||||
.filter(Boolean);
|
||||
|
||||
export const config = {
|
||||
env: process.env.NODE_ENV || 'development',
|
||||
port: parseNumber(process.env.PORT, 3001),
|
||||
kafka: {
|
||||
brokers: parseList(process.env.KAFKA_BROKERS),
|
||||
topic: process.env.KAFKA_TOPIC || process.env.KAFKA_TOPICS || 'blwlog4Nodejs-rcu-onoffline-topic',
|
||||
groupId: process.env.KAFKA_GROUP_ID || 'bls-onoffline-group',
|
||||
clientId: process.env.KAFKA_CLIENT_ID || 'bls-onoffline-client',
|
||||
consumerInstances: parseNumber(process.env.KAFKA_CONSUMER_INSTANCES, 1),
|
||||
maxInFlight: parseNumber(process.env.KAFKA_MAX_IN_FLIGHT, 20000),
|
||||
fetchMaxBytes: parseNumber(process.env.KAFKA_FETCH_MAX_BYTES, 50 * 1024 * 1024),
|
||||
fetchMinBytes: parseNumber(process.env.KAFKA_FETCH_MIN_BYTES, 256 * 1024),
|
||||
fetchMaxWaitMs: parseNumber(process.env.KAFKA_FETCH_MAX_WAIT_MS, 100),
|
||||
autoCommitIntervalMs: parseNumber(process.env.KAFKA_AUTO_COMMIT_INTERVAL_MS, 5000),
|
||||
commitIntervalMs: parseNumber(process.env.KAFKA_COMMIT_INTERVAL_MS, 200),
|
||||
commitOnAttempt: process.env.KAFKA_COMMIT_ON_ATTEMPT === 'true',
|
||||
batchSize: parseNumber(process.env.KAFKA_BATCH_SIZE, 5000),
|
||||
batchTimeoutMs: parseNumber(process.env.KAFKA_BATCH_TIMEOUT_MS, 50),
|
||||
logMessages: process.env.KAFKA_LOG_MESSAGES === 'true',
|
||||
sasl: process.env.KAFKA_SASL_USERNAME && process.env.KAFKA_SASL_PASSWORD ? {
|
||||
mechanism: process.env.KAFKA_SASL_MECHANISM || 'plain',
|
||||
username: process.env.KAFKA_SASL_USERNAME,
|
||||
password: process.env.KAFKA_SASL_PASSWORD
|
||||
} : undefined
|
||||
},
|
||||
db: {
|
||||
host: process.env.DB_HOST || process.env.POSTGRES_HOST || 'localhost',
|
||||
port: parseNumber(process.env.DB_PORT || process.env.POSTGRES_PORT, 5432),
|
||||
user: process.env.DB_USER || process.env.POSTGRES_USER || 'postgres',
|
||||
password: process.env.DB_PASSWORD || process.env.POSTGRES_PASSWORD || '',
|
||||
database: process.env.DB_DATABASE || process.env.POSTGRES_DATABASE || 'log_platform',
|
||||
max: parseNumber(process.env.DB_MAX_CONNECTIONS || process.env.POSTGRES_MAX_CONNECTIONS, 10),
|
||||
ssl: process.env.DB_SSL === 'true' ? { rejectUnauthorized: false } : undefined,
|
||||
schema: process.env.DB_SCHEMA || 'onoffline',
|
||||
table: process.env.DB_TABLE || 'onoffline_record'
|
||||
},
|
||||
g5db: {
|
||||
enabled: !!process.env.POSTGRES_HOST_G5,
|
||||
host: process.env.POSTGRES_HOST_G5,
|
||||
port: parseNumber(process.env.POSTGRES_PORT_G5, 5434),
|
||||
user: process.env.POSTGRES_USER_G5,
|
||||
password: process.env.POSTGRES_PASSWORD_G5,
|
||||
database: process.env.POSTGRES_DATABASE_G5,
|
||||
max: parseNumber(process.env.POSTGRES_MAX_CONNECTIONS_G5, 3),
|
||||
ssl: process.env.POSTGRES_SSL_G5 === 'true' ? { rejectUnauthorized: false } : undefined,
|
||||
schema: process.env.DB_SCHEMA_G5 || 'onoffline',
|
||||
table: process.env.DB_TABLE_G5 || 'onoffline_record_g5'
|
||||
},
|
||||
redis: {
|
||||
host: process.env.REDIS_HOST || 'localhost',
|
||||
port: parseNumber(process.env.REDIS_PORT, 6379),
|
||||
password: process.env.REDIS_PASSWORD || undefined,
|
||||
db: parseNumber(process.env.REDIS_DB, 0),
|
||||
projectName: process.env.REDIS_PROJECT_NAME || 'bls-onoffline',
|
||||
apiBaseUrl: process.env.REDIS_API_BASE_URL || `http://localhost:${parseNumber(process.env.PORT, 3001)}`
|
||||
}
|
||||
};
|
||||
docs/template/bls-onoffline-backend/src/db/databaseManager.js (vendored, new file, 108 lines)
@@ -0,0 +1,108 @@
|
||||
import pg from 'pg';
|
||||
import { config } from '../config/config.js';
|
||||
import { logger } from '../utils/logger.js';
|
||||
|
||||
const { Pool } = pg;
|
||||
|
||||
const columns = [
|
||||
'guid',
|
||||
'ts_ms',
|
||||
'write_ts_ms',
|
||||
'hotel_id',
|
||||
'mac',
|
||||
'device_id',
|
||||
'room_id',
|
||||
'ip',
|
||||
'current_status',
|
||||
'launcher_version',
|
||||
'reboot_reason'
|
||||
];
|
||||
|
||||
export class DatabaseManager {
|
||||
constructor(dbConfig) {
|
||||
this.pool = new Pool({
|
||||
host: dbConfig.host,
|
||||
port: dbConfig.port,
|
||||
user: dbConfig.user,
|
||||
password: dbConfig.password,
|
||||
database: dbConfig.database,
|
||||
max: dbConfig.max,
|
||||
ssl: dbConfig.ssl
|
||||
});
|
||||
}
|
||||
|
||||
async insertRows({ schema, table, rows }) {
|
||||
if (!rows || rows.length === 0) {
|
||||
return;
|
||||
}
|
||||
const statement = `
|
||||
INSERT INTO ${schema}.${table} (${columns.join(', ')})
|
||||
SELECT *
|
||||
FROM UNNEST(
|
||||
$1::text[],
|
||||
$2::int8[],
|
||||
$3::int8[],
|
||||
$4::int2[],
|
||||
$5::text[],
|
||||
$6::text[],
|
||||
$7::text[],
|
||||
$8::text[],
|
||||
$9::text[],
|
||||
$10::text[],
|
||||
$11::text[]
|
||||
)
|
||||
ON CONFLICT DO NOTHING
|
||||
`;
|
||||
try {
|
||||
const params = columns.map((column) => rows.map((row) => row[column] ?? null));
|
||||
await this.pool.query(statement, params);
|
||||
} catch (error) {
|
||||
logger.error('Database insert failed', {
|
||||
error: error?.message,
|
||||
schema,
|
||||
table,
|
||||
rowsLength: rows.length
|
||||
});
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
async checkConnection() {
|
||||
let client;
|
||||
try {
|
||||
const connectPromise = this.pool.connect();
|
||||
|
||||
// Create a timeout promise that rejects after 5000ms
|
||||
const timeoutPromise = new Promise((_, reject) => {
|
||||
setTimeout(() => reject(new Error('Connection timeout')), 5000);
|
||||
});
|
||||
|
||||
try {
|
||||
// Race the connection attempt against the timeout
|
||||
client = await Promise.race([connectPromise, timeoutPromise]);
|
||||
} catch (raceError) {
|
||||
// If we timed out, the connectPromise might still resolve later.
|
||||
// We must ensure that if it does, the client is released back to the pool immediately.
|
||||
connectPromise.then(c => c.release()).catch(() => {});
|
||||
throw raceError;
|
||||
}
|
||||
|
||||
await client.query('SELECT 1');
|
||||
return true;
|
||||
} catch (err) {
|
||||
logger.error('Database check connection failed', { error: err.message });
|
||||
return false;
|
||||
} finally {
|
||||
if (client) {
|
||||
client.release();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async close() {
|
||||
await this.pool.end();
|
||||
}
|
||||
}
|
||||
|
||||
const dbManager = new DatabaseManager(config.db);
|
||||
export default dbManager;
|
||||
docs/template/bls-onoffline-backend/src/db/g5DatabaseManager.js (vendored, new file, 121 lines)
@@ -0,0 +1,121 @@
|
||||
import pg from 'pg';
|
||||
import { config } from '../config/config.js';
|
||||
import { logger } from '../utils/logger.js';
|
||||
|
||||
const { Pool } = pg;
|
||||
|
||||
const g5Columns = [
|
||||
'ts_ms',
|
||||
'write_ts_ms',
|
||||
'hotel_id',
|
||||
'mac',
|
||||
'device_id',
|
||||
'room_id',
|
||||
'ip',
|
||||
'current_status',
|
||||
'launcher_version',
|
||||
'reboot_reason',
|
||||
'record_source'
|
||||
];
|
||||
|
||||
export class G5DatabaseManager {
|
||||
constructor(dbConfig) {
|
||||
if (!dbConfig.enabled) return;
|
||||
this.pool = new Pool({
|
||||
host: dbConfig.host,
|
||||
port: dbConfig.port,
|
||||
user: dbConfig.user,
|
||||
password: dbConfig.password,
|
||||
database: dbConfig.database,
|
||||
max: dbConfig.max,
|
||||
ssl: dbConfig.ssl
|
||||
});
|
||||
}
|
||||
|
||||
async insertRows({ schema, table, rows }) {
|
||||
if (!this.pool || !rows || rows.length === 0) {
|
||||
return;
|
||||
}
|
||||
|
||||
const statement = `
|
||||
INSERT INTO ${schema}.${table} (${g5Columns.join(', ')})
|
||||
SELECT *
|
||||
FROM UNNEST(
|
||||
$1::int8[],
|
||||
$2::int8[],
|
||||
$3::int2[],
|
||||
$4::text[],
|
||||
$5::text[],
|
||||
$6::text[],
|
||||
$7::text[],
|
||||
$8::int2[],
|
||||
$9::text[],
|
||||
$10::text[],
|
||||
$11::text[]
|
||||
)
|
||||
ON CONFLICT DO NOTHING
|
||||
`;
|
||||
|
||||
try {
|
||||
const params = g5Columns.map((column) => {
|
||||
return rows.map((row) => {
|
||||
if (column === 'record_source') {
|
||||
return 'CRICS';
|
||||
}
|
||||
if (column === 'current_status') {
|
||||
// current_status in G5 is int2
|
||||
if (row.current_status === 'on') return 1;
|
||||
if (row.current_status === 'off') return 2;
|
||||
return 0;
|
||||
}
|
||||
return row[column] ?? null;
|
||||
});
|
||||
});
|
||||
|
||||
await this.pool.query(statement, params);
|
||||
} catch (error) {
|
||||
logger.error('G5 Database insert failed', {
|
||||
error: error?.message,
|
||||
schema,
|
||||
table,
|
||||
rowsLength: rows.length
|
||||
});
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
async checkConnection() {
|
||||
if (!this.pool) return true; // Pretend it's ok if disabled
|
||||
let client;
|
||||
try {
|
||||
const connectPromise = this.pool.connect();
|
||||
const timeoutPromise = new Promise((_, reject) => {
|
||||
setTimeout(() => reject(new Error('Connection timeout')), 5000);
|
||||
});
|
||||
try {
|
||||
client = await Promise.race([connectPromise, timeoutPromise]);
|
||||
} catch (raceError) {
|
||||
connectPromise.then(c => c.release()).catch(() => { });
|
||||
throw raceError;
|
||||
}
|
||||
await client.query('SELECT 1');
|
||||
return true;
|
||||
} catch (err) {
|
||||
logger.error('G5 Database check connection failed', { error: err.message });
|
||||
return false;
|
||||
} finally {
|
||||
if (client) {
|
||||
client.release();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async close() {
|
||||
if (this.pool) {
|
||||
await this.pool.end();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const g5DbManager = new G5DatabaseManager(config.g5db);
|
||||
export default g5DbManager;
|
||||
docs/template/bls-onoffline-backend/src/index.js (vendored, new file, 469 lines)
@@ -0,0 +1,469 @@
|
||||
import cron from 'node-cron';
|
||||
import { config } from './config/config.js';
|
||||
import dbManager from './db/databaseManager.js';
|
||||
import g5DbManager from './db/g5DatabaseManager.js';
|
||||
import { createKafkaConsumers } from './kafka/consumer.js';
|
||||
import { parseMessageToRows } from './processor/index.js';
|
||||
import { createRedisClient } from './redis/redisClient.js';
|
||||
import { RedisIntegration } from './redis/redisIntegration.js';
|
||||
import { buildErrorQueueKey, enqueueError, startErrorRetryWorker } from './redis/errorQueue.js';
|
||||
import { MetricCollector } from './utils/metricCollector.js';
|
||||
import { logger } from './utils/logger.js';
|
||||
|
||||
const bootstrap = async () => {
|
||||
// Log startup config (masked)
|
||||
logger.info('Starting application with config', {
|
||||
env: process.env.NODE_ENV,
|
||||
db: {
|
||||
host: config.db.host,
|
||||
port: config.db.port,
|
||||
user: config.db.user,
|
||||
database: config.db.database,
|
||||
schema: config.db.schema
|
||||
},
|
||||
kafka: {
|
||||
brokers: config.kafka.brokers,
|
||||
topic: config.kafka.topic,
|
||||
groupId: config.kafka.groupId
|
||||
},
|
||||
redis: {
|
||||
host: config.redis.host,
|
||||
port: config.redis.port
|
||||
}
|
||||
});
|
||||
|
||||
// Metric Collector
|
||||
const metricCollector = new MetricCollector();
|
||||
|
||||
// 1.1 Setup Metric Reporting Cron Job (Every minute)
|
||||
// Moved after redisIntegration initialization
|
||||
|
||||
const redisClient = await createRedisClient(config.redis);
|
||||
const redisIntegration = new RedisIntegration(
|
||||
redisClient,
|
||||
config.redis.projectName,
|
||||
config.redis.apiBaseUrl
|
||||
);
|
||||
redisIntegration.startHeartbeat();
|
||||
|
||||
// 1.1 Setup Metric Reporting Cron Job (Every minute)
|
||||
cron.schedule('* * * * *', async () => {
|
||||
const metrics = metricCollector.getAndReset();
|
||||
const flushAvgMs = metrics.batch_flush_count > 0 ? (metrics.batch_flush_ms_sum / metrics.batch_flush_count).toFixed(1) : '0.0';
|
||||
const dbAvgMs = metrics.db_insert_count > 0 ? (metrics.db_insert_ms_sum / metrics.db_insert_count).toFixed(1) : '0.0';
|
||||
const report = `[Metrics] Pulled:${metrics.kafka_pulled} ParseErr:${metrics.parse_error} Inserted:${metrics.db_inserted} Failed:${metrics.db_failed} FlushAvg:${flushAvgMs}ms DbAvg:${dbAvgMs}ms`;
|
||||
console.log(report);
|
||||
logger.info(report);
|
||||
|
||||
try {
|
||||
await redisIntegration.info('Minute Metrics', metrics);
|
||||
} catch (err) {
|
||||
logger.error('Failed to report metrics to Redis', { error: err?.message });
|
||||
}
|
||||
});
|
||||
|
||||
  const errorQueueKey = buildErrorQueueKey(config.redis.projectName);

  const handleError = async (error, message) => {
    logger.error('Kafka processing error', {
      error: error?.message,
      type: error?.type,
      stack: error?.stack
    });
    try {
      await redisIntegration.error('Kafka processing error', {
        module: 'kafka',
        stack: error?.stack || error?.message
      });
    } catch (redisError) {
      logger.error('Redis error log failed', { error: redisError?.message });
    }
    if (message) {
      const messageValue = Buffer.isBuffer(message.value)
        ? message.value.toString('utf8')
        : message.value;
      try {
        await enqueueError(redisClient, errorQueueKey, {
          attempts: 0,
          value: messageValue,
          meta: {
            topic: message.topic,
            partition: message.partition,
            offset: message.offset,
            key: message.key
          },
          timestamp: Date.now()
        });
      } catch (enqueueErr) {
        logger.error('Enqueue error payload failed', { error: enqueueErr?.message });
      }
    }
  };

  const configuredBatchSize = Number.isFinite(config.kafka.batchSize) ? config.kafka.batchSize : 1000;
  const configuredBatchTimeoutMs = Number.isFinite(config.kafka.batchTimeoutMs) ? config.kafka.batchTimeoutMs : 20;
  const configuredMaxInFlight = Number.isFinite(config.kafka.maxInFlight) ? config.kafka.maxInFlight : 5000;

  const BATCH_SIZE = Math.max(10, Math.min(configuredBatchSize, configuredMaxInFlight));
  const BATCH_TIMEOUT_MS = Math.max(1, configuredBatchTimeoutMs);
  const commitOnAttempt = config.kafka.commitOnAttempt === true;

  const batchStates = new Map();

  const partitionKeyFromMessage = (message) => {
    if (message?.topic !== undefined && message?.partition !== undefined) {
      return `${message.topic}-${message.partition}`;
    }
    return 'retry';
  };

  const dayKeyFromTsMs = (tsMs) => {
    const numeric = typeof tsMs === 'string' ? Number(tsMs) : tsMs;
    if (!Number.isFinite(numeric)) return null;
    const d = new Date(numeric);
    if (Number.isNaN(d.getTime())) return null;
    const yyyy = d.getFullYear();
    const mm = String(d.getMonth() + 1).padStart(2, '0');
    const dd = String(d.getDate()).padStart(2, '0');
    return `${yyyy}${mm}${dd}`;
  };

  const getBatchState = (key) => {
    if (!batchStates.has(key)) {
      batchStates.set(key, { items: [], timer: null, flushing: null });
    }
    return batchStates.get(key);
  };

  const isDbConnectionError = (err) => {
    const code = err?.code;
    if (typeof code === 'string') {
      const networkCodes = new Set([
        'ECONNREFUSED',
        'ECONNRESET',
        'EPIPE',
        'ETIMEDOUT',
        'ENOTFOUND',
        'EHOSTUNREACH',
        'ENETUNREACH',
        '57P03',
        '08006',
        '08001',
        '08000',
        '08003'
      ]);
      if (networkCodes.has(code)) return true;
    }

    const message = typeof err?.message === 'string' ? err.message : '';
    if (!message) return false;
    const lower = message.toLowerCase();
    return (
      lower.includes('connection timeout') ||
      lower.includes('connection terminated') ||
      lower.includes('connection refused') ||
      lower.includes('terminating connection') ||
      lower.includes('econnrefused') ||
      lower.includes('econnreset') ||
      lower.includes('etimedout') ||
      lower.includes('could not connect') ||
      lower.includes('the database system is starting up') ||
      lower.includes('no pg_hba.conf entry')
    );
  };

  const insertRowsWithRetry = async (rows) => {
    const startedAt = Date.now();
    while (true) {
      try {
        const promises = [
          dbManager.insertRows({ schema: config.db.schema, table: config.db.table, rows })
        ];
        if (config.g5db.enabled) {
          promises.push(g5DbManager.insertRows({ schema: config.g5db.schema, table: config.g5db.table, rows }).catch(e => {
            logger.error('G5 Database insert failed but non-blocking', { error: e.message });
          }));
        }
        await Promise.all(promises);

        metricCollector.increment('db_insert_count', 1);
        metricCollector.increment('db_insert_ms_sum', Date.now() - startedAt);
        return;
      } catch (err) {
        if (isDbConnectionError(err)) {
          logger.error('Database offline during batch insert. Retrying in 5s...', { error: err.message });
          await new Promise(r => setTimeout(r, 5000));
          while (!(await dbManager.checkConnection())) {
            logger.warn('Database still offline. Waiting 5s...');
            await new Promise(r => setTimeout(r, 5000));
          }
          continue;
        }
        throw err;
      }
    }
  };

  const insertRowsOnce = async (rows) => {
    const startedAt = Date.now();
    const promises = [
      dbManager.insertRows({ schema: config.db.schema, table: config.db.table, rows })
    ];
    if (config.g5db.enabled) {
      promises.push(g5DbManager.insertRows({ schema: config.g5db.schema, table: config.g5db.table, rows }).catch(e => {
        logger.error('G5 Database insert failed in insertOnce', { error: e.message });
      }));
    }
    await Promise.all(promises);
    metricCollector.increment('db_insert_count', 1);
    metricCollector.increment('db_insert_ms_sum', Date.now() - startedAt);
  };

  const resolveInsertedItems = (partitionKey, items) => {
    let insertedRows = 0;
    for (const p of items) {
      insertedRows += p.rows.length;
      const dayKey = dayKeyFromTsMs(p.rows?.[0]?.ts_ms);
      if (dayKey) {
        metricCollector.incrementKeyed('db_inserted_by_day', dayKey, p.rows.length);
      }
      p.item.resolve();
    }
    metricCollector.increment('db_inserted', insertedRows);
    metricCollector.incrementKeyed('db_inserted_by_partition', partitionKey, insertedRows);
  };

  const handleFailedItem = async (partitionKey, p, err) => {
    metricCollector.increment('db_failed');
    metricCollector.incrementKeyed('db_failed_by_partition', partitionKey, 1);
    const dayKey = dayKeyFromTsMs(p.rows?.[0]?.ts_ms);
    if (dayKey) {
      metricCollector.incrementKeyed('db_failed_by_day', dayKey, 1);
    }
    await handleError(err, p.item.message);
    p.item.resolve();
  };

  const insertItemsDegraded = async (partitionKey, items) => {
    if (items.length === 0) return;
    const rows = items.flatMap(p => p.rows);
    if (commitOnAttempt) {
      try {
        await insertRowsOnce(rows);
        resolveInsertedItems(partitionKey, items);
      } catch (err) {
        for (const item of items) {
          await handleFailedItem(partitionKey, item, err);
        }
      }
      return;
    }
    try {
      await insertRowsWithRetry(rows);
      resolveInsertedItems(partitionKey, items);
      return;
    } catch (err) {
      if (items.length === 1) {
        try {
          await insertRowsWithRetry(items[0].rows);
          resolveInsertedItems(partitionKey, items);
        } catch (innerErr) {
          await handleFailedItem(partitionKey, items[0], innerErr);
        }
        return;
      }
      const mid = Math.floor(items.length / 2);
      await insertItemsDegraded(partitionKey, items.slice(0, mid));
      await insertItemsDegraded(partitionKey, items.slice(mid));
    }
  };

  const flushBatchForKey = async (partitionKey) => {
    const state = getBatchState(partitionKey);
    if (state.flushing) return state.flushing;

    state.flushing = (async () => {
      if (state.timer) {
        clearTimeout(state.timer);
        state.timer = null;
      }

      if (state.items.length === 0) return;

      const startedAt = Date.now();
      const currentBatch = state.items;
      state.items = [];

      const pendingDbItems = [];
      const unresolvedItems = [];

      try {
        for (const item of currentBatch) {
          try {
            const rows = parseMessageToRows(item.message);
            pendingDbItems.push({ item, rows });
            unresolvedItems.push(item);
          } catch (err) {
            metricCollector.increment('parse_error');
            metricCollector.incrementKeyed('parse_error_by_partition', partitionKey, 1);
            logger.error('Message processing failed (Parse/Validation)', { error: err.message });
            await handleError(err, item.message);
            item.resolve();
          }
        }

        if (pendingDbItems.length > 0) {
          const firstTs = pendingDbItems[0]?.rows?.[0]?.ts_ms;
          const dayKey = dayKeyFromTsMs(firstTs);
          if (dayKey) {
            const dayStartMs = Date.now();
            await insertItemsDegraded(partitionKey, pendingDbItems);
            metricCollector.incrementKeyed('db_insert_ms_sum_by_day', dayKey, Date.now() - dayStartMs);
          } else {
            await insertItemsDegraded(partitionKey, pendingDbItems);
          }
        }

        metricCollector.increment('batch_flush_count', 1);
        metricCollector.increment('batch_flush_ms_sum', Date.now() - startedAt);
      } catch (err) {
        if (!commitOnAttempt && isDbConnectionError(err)) {
          state.items = unresolvedItems.concat(state.items);
          if (!state.timer) {
            state.timer = setTimeout(() => {
              state.timer = null;
              flushBatchForKey(partitionKey);
            }, 5000);
          }
          return;
        }

        logger.error('Batch flush failed (non-network). Marking as consumed', {
          error: err?.message,
          partitionKey,
          batchSize: currentBatch.length
        });

        for (const item of unresolvedItems) {
          try {
            await handleError(err, item.message);
          } catch { }
          item.resolve();
        }
      }
    })().finally(() => {
      state.flushing = null;
      if (state.items.length > 0) {
        if (state.items.length >= BATCH_SIZE) {
          flushBatchForKey(partitionKey);
        } else if (!state.timer) {
          state.timer = setTimeout(() => {
            state.timer = null;
            flushBatchForKey(partitionKey);
          }, BATCH_TIMEOUT_MS);
        }
      }
    });

    return state.flushing;
  };

  const handleMessage = (message) => {
    if (message.topic) {
      metricCollector.increment('kafka_pulled');
      metricCollector.incrementKeyed('kafka_pulled_by_partition', `${message.topic}-${message.partition}`, 1);
    }

    // const messageValue = Buffer.isBuffer(message.value)
    //   ? message.value.toString('utf8')
    //   : message.value;
    // const messageKey = Buffer.isBuffer(message.key)
    //   ? message.key.toString('utf8')
    //   : message.key;

    // const logDetails = {
    //   topic: message.topic,
    //   partition: message.partition,
    //   offset: message.offset,
    //   key: messageKey,
    //   value: config.kafka.logMessages ? messageValue : undefined,
    //   valueLength: !config.kafka.logMessages && typeof messageValue === 'string' ? messageValue.length : null
    // };

    // logger.info('Kafka message received', logDetails);

    const partitionKey = partitionKeyFromMessage(message);
    const state = getBatchState(partitionKey);

    return new Promise((resolve, reject) => {
      state.items.push({ message, resolve, reject });
      if (state.items.length >= BATCH_SIZE) {
        flushBatchForKey(partitionKey);
      } else if (!state.timer) {
        state.timer = setTimeout(() => {
          state.timer = null;
          flushBatchForKey(partitionKey);
        }, BATCH_TIMEOUT_MS);
      }
    });
  };

  const consumers = createKafkaConsumers({
    kafkaConfig: config.kafka,
    onMessage: handleMessage,
    onError: handleError
  });

  // Start retry worker (non-blocking)
  startErrorRetryWorker({
    client: redisClient,
    queueKey: errorQueueKey,
    redisIntegration,
    handler: async (item) => {
      if (!item?.value) {
        throw new Error('Missing value in retry payload');
      }
      await handleMessage({ value: item.value });
    }
  }).catch(err => {
    logger.error('Retry worker failed', { error: err?.message });
  });

  // Graceful Shutdown Logic
  const shutdown = async (signal) => {
    logger.info(`Received ${signal}, shutting down...`);

    try {
      // 1. Close Kafka Consumer
      if (consumers && consumers.length > 0) {
        await Promise.all(consumers.map(c => new Promise((resolve) => c.close(true, resolve))));
        logger.info('Kafka consumer closed', { count: consumers.length });
      }

      // 2. Stop Redis Heartbeat (if method exists, otherwise just close client)
      // redisIntegration.stopHeartbeat(); // Assuming implementation or just rely on client close

      // 3. Close Redis Client
      await redisClient.quit();
      logger.info('Redis client closed');

      // 4. Close Database Pools
      await dbManager.close();
      await g5DbManager.close();
      logger.info('Database connection closed');

      process.exit(0);
    } catch (err) {
      logger.error('Error during shutdown', { error: err?.message });
      process.exit(1);
    }
  };

  process.on('SIGTERM', () => shutdown('SIGTERM'));
  process.on('SIGINT', () => shutdown('SIGINT'));
};

bootstrap().catch((error) => {
  logger.error('Service bootstrap failed', { error: error?.message });
  process.exit(1);
});
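A quick illustration of how the batching knobs above interact (the numbers below are hypothetical, not taken from this deployment's configuration): BATCH_SIZE is clamped so a single flush never exceeds the consumer's in-flight window, and the flush timer never receives a zero or negative delay.

// Hypothetical values for illustration only.
const kafkaOpts = { batchSize: 2000, maxInFlight: 500, batchTimeoutMs: 0 };
const BATCH_SIZE = Math.max(10, Math.min(kafkaOpts.batchSize, kafkaOpts.maxInFlight));
const BATCH_TIMEOUT_MS = Math.max(1, kafkaOpts.batchTimeoutMs);
// BATCH_SIZE === 500     -> a flush never outgrows the consumer's in-flight window
// BATCH_TIMEOUT_MS === 1 -> the timer is always scheduled with a positive delay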
175
docs/template/bls-onoffline-backend/src/kafka/consumer.js
vendored
Normal file
@@ -0,0 +1,175 @@
import kafka from 'kafka-node';
import { logger } from '../utils/logger.js';
import { OffsetTracker } from './offsetTracker.js';

const { ConsumerGroup } = kafka;

const createOneConsumer = ({ kafkaConfig, onMessage, onError, instanceIndex }) => {
  const kafkaHost = kafkaConfig.brokers.join(',');
  const clientId = instanceIndex === 0 ? kafkaConfig.clientId : `${kafkaConfig.clientId}-${instanceIndex}`;
  const id = `${clientId}-${process.pid}-${Date.now()}`;
  const maxInFlight = Number.isFinite(kafkaConfig.maxInFlight) ? kafkaConfig.maxInFlight : 5000;
  const commitIntervalMs = Number.isFinite(kafkaConfig.commitIntervalMs) ? kafkaConfig.commitIntervalMs : 200;
  let inFlight = 0;

  const tracker = new OffsetTracker();
  let pendingCommits = new Map(); // key: `${topic}-${partition}` -> { topic, partition, offset }
  let commitTimer = null;

  const flushCommits = () => {
    if (pendingCommits.size === 0) return;
    const batch = pendingCommits;
    pendingCommits = new Map();

    consumer.sendOffsetCommitRequest(
      Array.from(batch.values()),
      (err) => {
        if (err) {
          for (const [k, v] of batch.entries()) {
            pendingCommits.set(k, v);
          }
          logger.error('Kafka commit failed', { error: err?.message, count: batch.size });
        }
      }
    );
  };

  const scheduleCommitFlush = () => {
    if (commitTimer) return;
    commitTimer = setTimeout(() => {
      commitTimer = null;
      flushCommits();
    }, commitIntervalMs);
  };

  const consumer = new ConsumerGroup(
    {
      kafkaHost,
      groupId: kafkaConfig.groupId,
      clientId,
      id,
      fromOffset: 'earliest',
      protocol: ['roundrobin'],
      outOfRangeOffset: 'latest',
      autoCommit: false,
      autoCommitIntervalMs: kafkaConfig.autoCommitIntervalMs,
      fetchMaxBytes: kafkaConfig.fetchMaxBytes,
      fetchMinBytes: kafkaConfig.fetchMinBytes,
      fetchMaxWaitMs: kafkaConfig.fetchMaxWaitMs,
      sasl: kafkaConfig.sasl
    },
    kafkaConfig.topic
  );

  const tryResume = () => {
    if (inFlight < maxInFlight && consumer.paused) {
      consumer.resume();
    }
  };

  consumer.on('message', (message) => {
    inFlight += 1;
    tracker.add(message.topic, message.partition, message.offset);

    if (inFlight >= maxInFlight) {
      consumer.pause();
    }
    Promise.resolve(onMessage(message))
      .then(() => {})
      .catch((error) => {
        logger.error('Kafka message handling failed', { error: error?.message });
        if (onError) {
          onError(error, message);
        }
      })
      .finally(() => {
        const commitOffset = tracker.markDone(message.topic, message.partition, message.offset);
        if (commitOffset !== null) {
          const key = `${message.topic}-${message.partition}`;
          pendingCommits.set(key, {
            topic: message.topic,
            partition: message.partition,
            offset: commitOffset,
            metadata: 'm'
          });
          scheduleCommitFlush();
        }
        inFlight -= 1;
        tryResume();
      });
  });

  consumer.on('error', (error) => {
    logger.error('Kafka consumer error', { error: error?.message });
    if (onError) {
      onError(error);
    }
  });

  consumer.on('connect', () => {
    logger.info('Kafka Consumer connected', {
      groupId: kafkaConfig.groupId,
      clientId: clientId
    });
  });

  consumer.on('rebalancing', () => {
    logger.info('Kafka Consumer rebalancing', {
      groupId: kafkaConfig.groupId,
      clientId: clientId
    });
    tracker.clear();
    pendingCommits.clear();
    if (commitTimer) {
      clearTimeout(commitTimer);
      commitTimer = null;
    }
  });

  consumer.on('rebalanced', () => {
    logger.info('Kafka Consumer rebalanced', { clientId, groupId: kafkaConfig.groupId });
  });

  consumer.on('offsetOutOfRange', (error) => {
    logger.warn('Kafka Consumer offset out of range', {
      error: error?.message,
      topic: error?.topic,
      partition: error?.partition,
      groupId: kafkaConfig.groupId,
      clientId: clientId
    });
  });

  consumer.on('close', () => {
    if (commitTimer) {
      clearTimeout(commitTimer);
      commitTimer = null;
    }
    flushCommits();
    logger.warn('Kafka Consumer closed', {
      groupId: kafkaConfig.groupId,
      clientId: clientId
    });
  });

  return consumer;
};

export const createKafkaConsumers = ({ kafkaConfig, onMessage, onError }) => {
  const instances = Number.isFinite(kafkaConfig.consumerInstances) ? kafkaConfig.consumerInstances : 1;
  const count = Math.max(1, instances);
  return Array.from({ length: count }, (_, idx) =>
    createOneConsumer({ kafkaConfig, onMessage, onError, instanceIndex: idx })
  );
};

export const createKafkaConsumer = ({ kafkaConfig, onMessage, onError }) =>
  createKafkaConsumers({ kafkaConfig, onMessage, onError })[0];
53
docs/template/bls-onoffline-backend/src/kafka/offsetTracker.js
vendored
Normal file
@@ -0,0 +1,53 @@
export class OffsetTracker {
  constructor() {
    // Map<topic-partition, { nextCommitOffset: number|null, done: Set<number> }>
    this.partitions = new Map();
  }

  // Called when a message is received (before processing)
  add(topic, partition, offset) {
    const key = `${topic}-${partition}`;
    if (!this.partitions.has(key)) {
      this.partitions.set(key, { nextCommitOffset: null, done: new Set() });
    }
    const state = this.partitions.get(key);
    const numericOffset = Number(offset);
    if (!Number.isFinite(numericOffset)) return;
    if (state.nextCommitOffset === null) {
      state.nextCommitOffset = numericOffset;
    } else if (numericOffset < state.nextCommitOffset) {
      state.nextCommitOffset = numericOffset;
    }
  }

  // Called when a message is successfully processed
  // Returns the next offset to commit (if any advancement is possible), or null
  markDone(topic, partition, offset) {
    const key = `${topic}-${partition}`;
    const state = this.partitions.get(key);
    if (!state) return null;

    const numericOffset = Number(offset);
    if (!Number.isFinite(numericOffset)) return null;

    state.done.add(numericOffset);

    if (state.nextCommitOffset === null) {
      state.nextCommitOffset = numericOffset;
    }

    let advanced = false;
    while (state.nextCommitOffset !== null && state.done.has(state.nextCommitOffset)) {
      state.done.delete(state.nextCommitOffset);
      state.nextCommitOffset += 1;
      advanced = true;
    }

    if (!advanced) return null;
    return state.nextCommitOffset;
  }

  clear() {
    this.partitions.clear();
  }
}
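A minimal sketch of how the tracker above advances the commit watermark when messages finish out of order (topic name and offsets are hypothetical):

// Offsets 10-12 arrive; 11 finishes before 10.
const tracker = new OffsetTracker();
tracker.add('demo-topic', 0, 10);
tracker.add('demo-topic', 0, 11);
tracker.add('demo-topic', 0, 12);
tracker.markDone('demo-topic', 0, 11); // => null (10 is still in flight, nothing safe to commit)
tracker.markDone('demo-topic', 0, 10); // => 12 (10 and 11 are done; 12 is the next offset to commit)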
142
docs/template/bls-onoffline-backend/src/processor/index.js
vendored
Normal file
@@ -0,0 +1,142 @@
import { createGuid } from '../utils/uuid.js';
import { kafkaPayloadSchema } from '../schema/kafkaPayload.js';

const parseKafkaPayload = (value) => {
  const raw = Buffer.isBuffer(value) ? value.toString('utf8') : value;
  if (typeof raw !== 'string') {
    throw new Error('Invalid kafka message value');
  }
  return JSON.parse(raw);
};

const normalizeText = (value, maxLength) => {
  if (value === undefined || value === null) {
    return null;
  }
  const str = String(value);
  if (maxLength && str.length > maxLength) {
    return str.substring(0, maxLength);
  }
  return str;
};

export const buildRowsFromMessageValue = (value) => {
  const payload = parseKafkaPayload(value);
  return buildRowsFromPayload(payload);
};

export const buildRowsFromPayload = (rawPayload) => {
  const payload = kafkaPayloadSchema.parse(rawPayload);

  // Database limit is VARCHAR(255)
  const rebootReason = normalizeText(payload.RebootReason, 255);
  const currentStatusRaw = normalizeText(payload.CurrentStatus, 255);
  const hasRebootReason = rebootReason !== null && rebootReason !== '';
  const currentStatus = hasRebootReason ? 'on' : currentStatusRaw;

  // Derive timestamp: UnixTime -> CurrentTime -> Date.now()
  let tsMs = payload.UnixTime;

  // Heuristic: If timestamp is small (e.g., < 100000000000), assume it's seconds and convert to ms
  if (typeof tsMs === 'number' && tsMs < 100000000000) {
    tsMs = tsMs * 1000;
  }

  if (!tsMs && payload.CurrentTime) {
    const parsed = Date.parse(payload.CurrentTime);
    if (!isNaN(parsed)) {
      tsMs = parsed;
    }
  }
  if (!tsMs) {
    tsMs = Date.now();
  }

  // Ensure PK fields are not null
  const mac = normalizeText(payload.MAC) || '';
  const deviceId = normalizeText(payload.HostNumber) || '';
  const roomId = normalizeText(payload.RoomNumber) || '';

  // Handle hotel_id boundary for PostgreSQL smallint (-32768 to 32767)
  let hotelId = payload.HotelCode;
  if (typeof hotelId !== 'number' || Number.isNaN(hotelId) || hotelId < -32768 || hotelId > 32767) {
    hotelId = 0;
  }

  const row = {
    guid: createGuid(),
    ts_ms: tsMs,
    write_ts_ms: Date.now(),
    hotel_id: hotelId,
    mac: mac,
    device_id: deviceId,
    room_id: roomId,
    ip: normalizeText(payload.EndPoint),
    current_status: currentStatus,
    launcher_version: normalizeText(payload.LauncherVersion, 255),
    reboot_reason: rebootReason
  };

  return [row];
};

export const parseMessageToRows = (message) => {
  const rawValue = message.value.toString();
  // logger.info('Processing message', { offset: message.offset, rawValuePreview: rawValue.substring(0, 100) });

  let payload;
  try {
    payload = JSON.parse(rawValue);
  } catch (e) {
    const error = new Error(`JSON Parse Error: ${e.message}`);
    error.type = 'PARSE_ERROR';
    throw error;
  }

  // logger.info('Payload parsed', { payload });

  const validationResult = kafkaPayloadSchema.safeParse(payload);

  if (!validationResult.success) {
    const error = new Error(`Schema Validation Failed: ${JSON.stringify(validationResult.error.errors)}`);
    error.type = 'VALIDATION_ERROR';
    throw error;
  }

  return buildRowsFromPayload(payload);
};

export const processKafkaMessage = async ({ message, dbManager, config }) => {
  let rows;
  try {
    rows = parseMessageToRows(message);
  } catch (error) {
    throw error;
  }

  try {
    await dbManager.insertRows({ schema: config.db.schema, table: config.db.table, rows });
    // if (rows.length > 0) {
    //   console.log(`Inserted ${rows.length} rows. Sample GUID: ${rows[0].guid}, TS: ${rows[0].ts_ms}`);
    // }
  } catch (error) {
    error.type = 'DB_ERROR';
    const sample = rows?.[0];
    error.dbContext = {
      rowsLength: rows?.length || 0,
      sampleRow: sample
        ? {
            guid: sample.guid,
            ts_ms: sample.ts_ms,
            mac: sample.mac,
            device_id: sample.device_id,
            room_id: sample.room_id,
            current_status: sample.current_status
          }
        : null
    };
    throw error;
  }

  return rows.length;
};
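As a rough worked example of buildRowsFromPayload (all field values below are hypothetical): a non-empty RebootReason forces current_status to 'on', and a seconds-resolution UnixTime is promoted to milliseconds.

// Hypothetical payload, for illustration only.
const rows = buildRowsFromPayload({
  HotelCode: '123',        // coerced to the number 123 by the zod schema
  MAC: 'AA:BB:CC:DD:EE:FF',
  HostNumber: 'H-1',
  RoomNumber: '101',
  CurrentStatus: 'off',
  UnixTime: 1770000235,    // < 100000000000, so treated as seconds -> 1770000235000 ms
  RebootReason: '0x01'
});
// rows[0].hotel_id === 123, rows[0].ts_ms === 1770000235000,
// rows[0].current_status === 'on' (a non-empty RebootReason forces 'on'),
// rows[0].reboot_reason === '0x01'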
83
docs/template/bls-onoffline-backend/src/processor/udpParser.js
vendored
Normal file
@@ -0,0 +1,83 @@
const normalizeHex = (hex) => {
  if (typeof hex !== 'string') {
    return '';
  }
  let cleaned = hex.trim().replace(/^0x/i, '').replace(/\s+/g, '');
  if (cleaned.length % 2 === 1) {
    cleaned = `0${cleaned}`;
  }
  return cleaned;
};

const toHex = (value) => `0x${value.toString(16).padStart(2, '0')}`;

const readUInt16 = (buffer, offset) => buffer.readUInt16BE(offset);

export const parse0x36 = (udpRaw) => {
  const cleaned = normalizeHex(udpRaw);
  const buffer = cleaned ? Buffer.from(cleaned, 'hex') : Buffer.alloc(0);
  const sysLockStatus = buffer.length > 0 ? buffer[0] : null;
  const reportCount = buffer.length > 7 ? buffer[7] : null;
  let offset = 8;
  const devices = [];
  for (let i = 0; i < (reportCount || 0) && offset + 5 < buffer.length; i += 1) {
    devices.push({
      dev_type: buffer[offset],
      dev_addr: buffer[offset + 1],
      dev_loop: readUInt16(buffer, offset + 2),
      dev_data: readUInt16(buffer, offset + 4)
    });
    offset += 6;
  }
  const faultCount = offset < buffer.length ? buffer[offset] : null;
  offset += 1;
  const faults = [];
  for (let i = 0; i < (faultCount || 0) && offset + 5 < buffer.length; i += 1) {
    faults.push({
      fault_dev_type: buffer[offset],
      fault_dev_addr: buffer[offset + 1],
      fault_dev_loop: readUInt16(buffer, offset + 2),
      error_type: buffer[offset + 4],
      error_data: buffer[offset + 5]
    });
    offset += 6;
  }
  return {
    sysLockStatus,
    reportCount,
    faultCount,
    devices,
    faults
  };
};

export const parse0x0fDownlink = (udpRaw) => {
  const cleaned = normalizeHex(udpRaw);
  const buffer = cleaned ? Buffer.from(cleaned, 'hex') : Buffer.alloc(0);
  const controlCount = buffer.length > 0 ? buffer[0] : null;
  let offset = 1;
  const controlParams = [];
  for (let i = 0; i < (controlCount || 0) && offset + 5 < buffer.length; i += 1) {
    const typeValue = readUInt16(buffer, offset + 4);
    controlParams.push({
      dev_type: buffer[offset],
      dev_addr: buffer[offset + 1],
      loop: readUInt16(buffer, offset + 2),
      type: typeValue,
      type_l: buffer[offset + 4],
      type_h: buffer[offset + 5]
    });
    offset += 6;
  }
  return {
    controlCount,
    controlParams
  };
};

export const parse0x0fAck = (udpRaw) => {
  const cleaned = normalizeHex(udpRaw);
  const buffer = cleaned ? Buffer.from(cleaned, 'hex') : Buffer.alloc(0);
  const ackCode = buffer.length > 1 ? toHex(buffer[1]) : null;
  return { ackCode };
};
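A small sketch of parse0x36 on a made-up frame (the hex below is illustrative, not captured device traffic): byte 0 is the lock status, byte 7 the report count, followed by one 6-byte device record and a zero fault count.

// Hypothetical frame: lock=0x01, 1 device report, 0 faults.
const result = parse0x36('01 00 00 00 00 00 00 01 02 05 00 01 00 FF 00');
// result.sysLockStatus === 1, result.reportCount === 1, result.faultCount === 0
// result.devices[0] => { dev_type: 2, dev_addr: 5, dev_loop: 1, dev_data: 255 }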
53
docs/template/bls-onoffline-backend/src/redis/errorQueue.js
vendored
Normal file
@@ -0,0 +1,53 @@
import { logger } from '../utils/logger.js';

export const buildErrorQueueKey = (projectName) => `${projectName}_error_queue`;

export const enqueueError = async (client, queueKey, payload) => {
  try {
    await client.rPush(queueKey, JSON.stringify(payload));
  } catch (error) {
    logger.error('Redis enqueue error failed', { error: error?.message });
    throw error;
  }
};

export const startErrorRetryWorker = async ({
  client,
  queueKey,
  handler,
  redisIntegration,
  maxAttempts = 5
}) => {
  while (true) {
    const result = await client.blPop(queueKey, 0);
    const raw = result?.element;
    if (!raw) {
      continue;
    }
    let item;
    try {
      item = JSON.parse(raw);
    } catch (error) {
      logger.error('Invalid error payload', { error: error?.message });
      await redisIntegration.error('Invalid error payload', { module: 'redis', stack: error?.message });
      continue;
    }
    const attempts = item.attempts || 0;
    try {
      await handler(item);
    } catch (error) {
      logger.error('Retry handler failed', { error: error?.message, stack: error?.stack });
      const nextPayload = {
        ...item,
        attempts: attempts + 1,
        lastError: error?.message,
        lastAttemptAt: Date.now()
      };
      if (nextPayload.attempts >= maxAttempts) {
        await redisIntegration.error('Retry attempts exceeded', { module: 'retry', stack: JSON.stringify(nextPayload) });
      } else {
        await enqueueError(client, queueKey, nextPayload);
      }
    }
  }
};
14
docs/template/bls-onoffline-backend/src/redis/redisClient.js
vendored
Normal file
@@ -0,0 +1,14 @@
import { createClient } from 'redis';

export const createRedisClient = async (config) => {
  const client = createClient({
    socket: {
      host: config.host,
      port: config.port
    },
    password: config.password,
    database: config.db
  });
  await client.connect();
  return client;
};
40
docs/template/bls-onoffline-backend/src/redis/redisIntegration.js
vendored
Normal file
@@ -0,0 +1,40 @@
export class RedisIntegration {
  constructor(client, projectName, apiBaseUrl) {
    this.client = client;
    this.projectName = projectName;
    this.apiBaseUrl = apiBaseUrl;
    this.heartbeatKey = '项目心跳';
    this.logKey = `${projectName}_项目控制台`;
  }

  async info(message, context) {
    const payload = {
      timestamp: new Date().toISOString(),
      level: 'info',
      message,
      metadata: context || undefined
    };
    await this.client.rPush(this.logKey, JSON.stringify(payload));
  }

  async error(message, context) {
    const payload = {
      timestamp: new Date().toISOString(),
      level: 'error',
      message,
      metadata: context || undefined
    };
    await this.client.rPush(this.logKey, JSON.stringify(payload));
  }

  startHeartbeat() {
    setInterval(() => {
      const payload = {
        projectName: this.projectName,
        apiBaseUrl: this.apiBaseUrl,
        lastActiveAt: Date.now()
      };
      this.client.rPush(this.heartbeatKey, JSON.stringify(payload));
    }, 3000);
  }
}
32
docs/template/bls-onoffline-backend/src/schema/kafkaPayload.js
vendored
Normal file
@@ -0,0 +1,32 @@
import { z } from 'zod';

const toNumber = (value) => {
  if (value === undefined || value === null || value === '') {
    return value;
  }
  if (typeof value === 'number') {
    return value;
  }
  const parsed = Number(value);
  return Number.isFinite(parsed) ? parsed : value;
};

const toStringAllowEmpty = (value) => {
  if (value === undefined || value === null) {
    return value;
  }
  return String(value);
};

export const kafkaPayloadSchema = z.object({
  HotelCode: z.preprocess(toNumber, z.number()),
  MAC: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable(),
  HostNumber: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable(),
  RoomNumber: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable(),
  EndPoint: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable(),
  CurrentStatus: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable(),
  CurrentTime: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable(),
  UnixTime: z.preprocess(toNumber, z.number().nullable()).optional().nullable(),
  LauncherVersion: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable(),
  RebootReason: z.preprocess(toStringAllowEmpty, z.string().nullable()).optional().nullable()
});
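For orientation, a hedged example of how the preprocess helpers above coerce loosely typed producer fields (the values are hypothetical):

// Illustrative only: strings become numbers and numbers become strings where the schema expects them.
kafkaPayloadSchema.parse({ HotelCode: '88', RoomNumber: 101, UnixTime: '1770000235' });
// => { HotelCode: 88, RoomNumber: '101', UnixTime: 1770000235 }
kafkaPayloadSchema.safeParse({ RoomNumber: '101' }).success; // false, because HotelCode is required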
21
docs/template/bls-onoffline-backend/src/utils/logger.js
vendored
Normal file
@@ -0,0 +1,21 @@
const format = (level, message, context) => {
  const payload = {
    level,
    message,
    timestamp: Date.now(),
    ...(context ? { context } : {})
  };
  return JSON.stringify(payload);
};

export const logger = {
  info(message, context) {
    process.stdout.write(`${format('info', message, context)}\n`);
  },
  error(message, context) {
    process.stderr.write(`${format('error', message, context)}\n`);
  },
  warn(message, context) {
    process.stderr.write(`${format('warn', message, context)}\n`);
  }
};
43
docs/template/bls-onoffline-backend/src/utils/metricCollector.js
vendored
Normal file
@@ -0,0 +1,43 @@
export class MetricCollector {
  constructor() {
    this.reset();
  }

  reset() {
    this.metrics = {
      kafka_pulled: 0,
      parse_error: 0,
      db_inserted: 0,
      db_failed: 0,
      db_insert_count: 0,
      db_insert_ms_sum: 0,
      batch_flush_count: 0,
      batch_flush_ms_sum: 0
    };
    this.keyed = {};
  }

  increment(metric, count = 1) {
    if (this.metrics.hasOwnProperty(metric)) {
      this.metrics[metric] += count;
    }
  }

  incrementKeyed(metric, key, count = 1) {
    if (!key) return;
    if (!this.keyed[metric]) {
      this.keyed[metric] = {};
    }
    if (!Object.prototype.hasOwnProperty.call(this.keyed[metric], key)) {
      this.keyed[metric][key] = 0;
    }
    this.keyed[metric][key] += count;
  }

  getAndReset() {
    const current = { ...this.metrics };
    const keyed = JSON.parse(JSON.stringify(this.keyed));
    this.reset();
    return { ...current, keyed };
  }
}
3
docs/template/bls-onoffline-backend/src/utils/uuid.js
vendored
Normal file
@@ -0,0 +1,3 @@
import { randomUUID } from 'crypto';

export const createGuid = () => randomUUID().replace(/-/g, '');
45
docs/template/bls-onoffline-backend/tests/processor.test.js
vendored
Normal file
@@ -0,0 +1,45 @@
import { describe, it, expect } from 'vitest';
import { buildRowsFromPayload } from '../src/processor/index.js';

describe('Processor Logic', () => {
  const basePayload = {
    HotelCode: '1085',
    MAC: '00:1A:2B:3C:4D:5E',
    HostNumber: '091123987456',
    RoomNumber: '8888房',
    EndPoint: '50.2.60.1:6543',
    CurrentStatus: 'off',
    CurrentTime: '2026-02-02T10:30:00Z',
    UnixTime: 1770000235000,
    LauncherVersion: '1.0.0'
  };

  it('should validate required fields', () => {
    expect(() => buildRowsFromPayload({})).toThrow();
    expect(() => buildRowsFromPayload({ ...basePayload, HotelCode: undefined })).toThrow();
  });

  it('should use current_status from payload for non-reboot data', () => {
    const rows = buildRowsFromPayload({ ...basePayload, RebootReason: null });
    expect(rows).toHaveLength(1);
    expect(rows[0].current_status).toBe('off');
    expect(rows[0].reboot_reason).toBeNull();
  });

  it('should override current_status to on for reboot data', () => {
    const rows = buildRowsFromPayload({ ...basePayload, CurrentStatus: 'off', RebootReason: '0x01' });
    expect(rows).toHaveLength(1);
    expect(rows[0].current_status).toBe('on');
    expect(rows[0].reboot_reason).toBe('0x01');
  });

  it('should keep empty optional fields as empty strings', () => {
    const rows = buildRowsFromPayload({
      ...basePayload,
      LauncherVersion: '',
      RebootReason: ''
    });
    expect(rows[0].launcher_version).toBe('');
    expect(rows[0].reboot_reason).toBe('');
  });
});
12
docs/template/bls-onoffline-backend/vite.config.js
vendored
Normal file
@@ -0,0 +1,12 @@
import { defineConfig } from 'vite';

export default defineConfig({
  build: {
    ssr: 'src/index.js',
    outDir: 'dist',
    target: 'node18',
    rollupOptions: {
      external: ['dotenv', 'kafka-node', 'pg', 'redis']
    }
  }
});