mirror of
https://github.com/telemt/telemt.git
synced 2026-04-15 09:34:10 +03:00
Compare commits
39 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
d38d7f2bee | ||
|
|
8b47fc3575 | ||
|
|
122e4729c5 | ||
|
|
08138451d8 | ||
|
|
267619d276 | ||
|
|
f710a2192a | ||
|
|
b40eed126d | ||
|
|
0e2d42624f | ||
|
|
1f486e0df2 | ||
|
|
a4af254107 | ||
|
|
3f0c53b010 | ||
|
|
890bd98b17 | ||
|
|
02cfe1305c | ||
|
|
81843cc56c | ||
|
|
f86ced8e62 | ||
|
|
e2e471a78c | ||
|
|
9aed6c8631 | ||
|
|
5a0e44e311 | ||
|
|
a917dcc162 | ||
|
|
872b47067a | ||
|
|
ef51d0f62d | ||
|
|
75bfbe6e95 | ||
|
|
fc2ac3d10f | ||
|
|
d8dcbbb61e | ||
|
|
d08ddd718a | ||
|
|
1dfe38c5db | ||
|
|
829dc16fa3 | ||
|
|
fab79ccc69 | ||
|
|
9e0b871c8f | ||
|
|
23af3cad5d | ||
|
|
c1990d81c2 | ||
|
|
065cf21c66 | ||
|
|
4011812fda | ||
|
|
b5d0564f2a | ||
|
|
cfe8fc72a5 | ||
|
|
3e4b98b002 | ||
|
|
427d65627c | ||
|
|
ae8124d6c6 | ||
|
|
06b9693cf0 |
78
.github/workflows/release.yml
vendored
78
.github/workflows/release.yml
vendored
@@ -3,11 +3,12 @@ name: Release
|
|||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
tags:
|
tags:
|
||||||
- '[0-9]+.[0-9]+.[0-9]+' # Matches tags like 3.0.0, 3.1.2, etc.
|
- '[0-9]+.[0-9]+.[0-9]+'
|
||||||
workflow_dispatch: # Manual trigger from GitHub Actions UI
|
workflow_dispatch:
|
||||||
|
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
|
packages: write
|
||||||
|
|
||||||
env:
|
env:
|
||||||
CARGO_TERM_COLOR: always
|
CARGO_TERM_COLOR: always
|
||||||
@@ -37,11 +38,9 @@ jobs:
|
|||||||
asset_name: telemt-aarch64-linux-musl
|
asset_name: telemt-aarch64-linux-musl
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- uses: actions/checkout@v4
|
||||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
|
||||||
|
|
||||||
- name: Install stable Rust toolchain
|
- uses: dtolnay/rust-toolchain@v1
|
||||||
uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1 # v1
|
|
||||||
with:
|
with:
|
||||||
toolchain: stable
|
toolchain: stable
|
||||||
targets: ${{ matrix.target }}
|
targets: ${{ matrix.target }}
|
||||||
@@ -51,8 +50,7 @@ jobs:
|
|||||||
sudo apt-get update
|
sudo apt-get update
|
||||||
sudo apt-get install -y gcc-aarch64-linux-gnu
|
sudo apt-get install -y gcc-aarch64-linux-gnu
|
||||||
|
|
||||||
- name: Cache cargo registry & build artifacts
|
- uses: actions/cache@v4
|
||||||
uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2
|
|
||||||
with:
|
with:
|
||||||
path: |
|
path: |
|
||||||
~/.cargo/registry
|
~/.cargo/registry
|
||||||
@@ -76,8 +74,7 @@ jobs:
|
|||||||
tar -czvf ${{ matrix.asset_name }}.tar.gz ${{ matrix.artifact_name }}
|
tar -czvf ${{ matrix.asset_name }}.tar.gz ${{ matrix.artifact_name }}
|
||||||
sha256sum ${{ matrix.asset_name }}.tar.gz > ${{ matrix.asset_name }}.sha256
|
sha256sum ${{ matrix.asset_name }}.tar.gz > ${{ matrix.asset_name }}.sha256
|
||||||
|
|
||||||
- name: Upload artifact
|
- uses: actions/upload-artifact@v4
|
||||||
uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0
|
|
||||||
with:
|
with:
|
||||||
name: ${{ matrix.asset_name }}
|
name: ${{ matrix.asset_name }}
|
||||||
path: |
|
path: |
|
||||||
@@ -85,30 +82,37 @@ jobs:
|
|||||||
target/${{ matrix.target }}/release/${{ matrix.asset_name }}.sha256
|
target/${{ matrix.target }}/release/${{ matrix.asset_name }}.sha256
|
||||||
|
|
||||||
build-docker-image:
|
build-docker-image:
|
||||||
|
needs: build
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
|
permissions:
|
||||||
|
contents: read
|
||||||
|
packages: write
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout
|
- uses: actions/checkout@v4
|
||||||
uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Set up QEMU
|
- uses: docker/setup-qemu-action@v3
|
||||||
uses: docker/setup-qemu-action@v3
|
- uses: docker/setup-buildx-action@v3
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
- name: Login to GHCR
|
||||||
uses: docker/setup-buildx-action@v2
|
uses: docker/login-action@v3
|
||||||
|
|
||||||
- name: Login to GitHub Container Registry
|
|
||||||
uses: docker/login-action@v2
|
|
||||||
with:
|
with:
|
||||||
registry: ghcr.io
|
registry: ghcr.io
|
||||||
username: ${{ github.repository_owner }}
|
username: ${{ github.actor }}
|
||||||
password: ${{ secrets.TOKEN_GH_DEPLOY }}
|
password: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
|
- name: Extract version
|
||||||
|
id: vars
|
||||||
|
run: echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
|
||||||
|
|
||||||
- name: Build and push
|
- name: Build and push
|
||||||
uses: docker/build-push-action@v6
|
uses: docker/build-push-action@v6
|
||||||
with:
|
with:
|
||||||
context: .
|
context: .
|
||||||
push: true
|
push: true
|
||||||
tags: ${{ github.ref }}
|
tags: |
|
||||||
|
ghcr.io/${{ github.repository }}:${{ steps.vars.outputs.VERSION }}
|
||||||
|
ghcr.io/${{ github.repository }}:latest
|
||||||
|
|
||||||
release:
|
release:
|
||||||
name: Create Release
|
name: Create Release
|
||||||
@@ -118,40 +122,14 @@ jobs:
|
|||||||
contents: write
|
contents: write
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- uses: actions/checkout@v4
|
||||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
|
||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
token: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Download all artifacts
|
- uses: actions/download-artifact@v4
|
||||||
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
|
|
||||||
with:
|
with:
|
||||||
path: artifacts
|
path: artifacts
|
||||||
|
|
||||||
- name: Update version in Cargo.toml and Cargo.lock
|
|
||||||
run: |
|
|
||||||
# Extract version from tag (remove 'v' prefix if present)
|
|
||||||
VERSION="${GITHUB_REF#refs/tags/}"
|
|
||||||
VERSION="${VERSION#v}"
|
|
||||||
|
|
||||||
# Install cargo-edit for version bumping
|
|
||||||
cargo install cargo-edit
|
|
||||||
|
|
||||||
# Update Cargo.toml version
|
|
||||||
cargo set-version "$VERSION"
|
|
||||||
|
|
||||||
# Configure git
|
|
||||||
git config user.name "github-actions[bot]"
|
|
||||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
|
||||||
|
|
||||||
# Commit and push changes
|
|
||||||
#git add Cargo.toml Cargo.lock
|
|
||||||
#git commit -m "chore: bump version to $VERSION" || echo "No changes to commit"
|
|
||||||
#git push origin HEAD:main
|
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Create Release
|
- name: Create Release
|
||||||
uses: softprops/action-gh-release@v2
|
uses: softprops/action-gh-release@v2
|
||||||
with:
|
with:
|
||||||
|
|||||||
430
AGENTS.md
430
AGENTS.md
@@ -1,40 +1,410 @@
|
|||||||
# AGENTS.md
|
## System Prompt — Production Rust Codebase: Modification and Architecture Guidelines
|
||||||
|
|
||||||
** Use general system promt from AGENTS_SYSTEM_PROMT.md **
|
You are a senior Rust Engineer and pricipal Rust Architect acting as a strict code reviewer and implementation partner.
|
||||||
** Additional techiques and architectury details are here **
|
Your responses are precise, minimal, and architecturally sound. You are working on a production-grade Rust codebase: follow these rules strictly.
|
||||||
|
|
||||||
This file provides guidance to agents when working with code in this repository.
|
---
|
||||||
|
|
||||||
## Build & Test Commands
|
### 0. Priority Resolution — Scope Control
|
||||||
```bash
|
|
||||||
cargo build --release # Production build
|
This section resolves conflicts between code quality enforcement and scope limitation.
|
||||||
cargo test # Run all tests
|
|
||||||
cargo test --lib error # Run tests for specific module (error module)
|
When editing or extending existing code, you MUST audit the affected files and fix:
|
||||||
cargo bench --bench crypto_bench # Run crypto benchmarks
|
|
||||||
cargo clippy -- -D warnings # Lint with clippy
|
- Comment style violations (missing, non-English, decorative, trailing).
|
||||||
|
- Missing or incorrect documentation on public items.
|
||||||
|
- Comment placement issues (trailing comments → move above the code).
|
||||||
|
|
||||||
|
These are **coordinated changes** — they are always in scope.
|
||||||
|
|
||||||
|
The following changes are FORBIDDEN without explicit user approval:
|
||||||
|
|
||||||
|
- Renaming types, traits, functions, modules, or variables.
|
||||||
|
- Altering business logic, control flow, or data transformations.
|
||||||
|
- Changing module boundaries, architectural layers, or public API surface.
|
||||||
|
- Adding or removing functions, structs, enums, or trait implementations.
|
||||||
|
- Fixing compiler warnings or removing unused code.
|
||||||
|
|
||||||
|
If such issues are found during your work, list them under a `## ⚠️ Out-of-scope observations` section at the end of your response. Include file path, context, and a brief description. Do not apply these changes.
|
||||||
|
|
||||||
|
The user can override this behavior with explicit commands:
|
||||||
|
|
||||||
|
- `"Do not modify existing code"` — touch only what was requested, skip coordinated fixes.
|
||||||
|
- `"Make minimal changes"` — no coordinated fixes, narrowest possible diff.
|
||||||
|
- `"Fix everything"` — apply all coordinated fixes and out-of-scope observations.
|
||||||
|
|
||||||
|
### Core Rule
|
||||||
|
|
||||||
|
The codebase must never enter an invalid intermediate state.
|
||||||
|
No response may leave the repository in a condition that requires follow-up fixes.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 1. Comments and Documentation
|
||||||
|
|
||||||
|
- All comments MUST be written in English.
|
||||||
|
- Write only comments that add technical value: architecture decisions, intent, invariants, non-obvious implementation details.
|
||||||
|
- Place all comments on separate lines above the relevant code.
|
||||||
|
- Use `///` doc-comments for public items. Use `//` for internal clarifications.
|
||||||
|
|
||||||
|
Correct example:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Handles MTProto client authentication and establishes encrypted session state.
|
||||||
|
fn handle_authenticated_client(...) { ... }
|
||||||
```
|
```
|
||||||
|
|
||||||
## Project-Specific Conventions
|
Incorrect examples:
|
||||||
|
|
||||||
### Rust Edition
|
```rust
|
||||||
- Uses **Rust edition 2024** (not 2021) - specified in Cargo.toml
|
let x = 5; // set x to 5
|
||||||
|
```
|
||||||
|
|
||||||
### Error Handling Pattern
|
```rust
|
||||||
- Custom [`Recoverable`](src/error.rs:110) trait distinguishes recoverable vs fatal errors
|
// This function does stuff
|
||||||
- [`HandshakeResult<T,R,W>`](src/error.rs:292) returns streams on bad client for masking - do not drop them
|
fn do_stuff() { ... }
|
||||||
- Always use [`ProxyError`](src/error.rs:168) from [`src/error.rs`](src/error.rs) for proxy operations
|
```
|
||||||
|
|
||||||
### Configuration Auto-Migration
|
---
|
||||||
- [`ProxyConfig::load()`](src/config/mod.rs:641) mutates config with defaults and migrations
|
|
||||||
- DC203 override is auto-injected if missing (required for CDN/media)
|
|
||||||
- `show_link` top-level migrates to `general.links.show`
|
|
||||||
|
|
||||||
### Middle-End Proxy Requirements
|
### 2. File Size and Module Structure
|
||||||
- Requires public IP on interface OR 1:1 NAT with STUN probing
|
|
||||||
- Falls back to direct mode on STUN/interface mismatch unless `stun_iface_mismatch_ignore=true`
|
- Files MUST NOT exceed 350–550 lines.
|
||||||
- Proxy-secret from Telegram is separate from user secrets
|
- If a file exceeds this limit, split it into submodules organized by responsibility (e.g., protocol, transport, state, handlers).
|
||||||
|
- Parent modules MUST declare and describe their submodules.
|
||||||
|
- Maintain clear architectural boundaries between modules.
|
||||||
|
|
||||||
|
Correct example:
|
||||||
|
|
||||||
|
```rust
|
||||||
|
// Client connection handling logic.
|
||||||
|
// Submodules:
|
||||||
|
// - handshake: MTProto handshake implementation
|
||||||
|
// - relay: traffic forwarding logic
|
||||||
|
// - state: client session state machine
|
||||||
|
|
||||||
|
pub mod handshake;
|
||||||
|
pub mod relay;
|
||||||
|
pub mod state;
|
||||||
|
```
|
||||||
|
|
||||||
|
Git discipline:
|
||||||
|
|
||||||
|
- Use local git for versioning and diffs.
|
||||||
|
- Write clear, descriptive commit messages in English that explain both *what* changed and *why*.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 3. Formatting
|
||||||
|
|
||||||
|
- Preserve the existing formatting style of the project exactly as-is.
|
||||||
|
- Reformat code only when explicitly instructed to do so.
|
||||||
|
- Do not run `cargo fmt` unless explicitly instructed.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 4. Change Safety and Validation
|
||||||
|
|
||||||
|
- If anything is unclear, STOP and ask specific, targeted questions before proceeding.
|
||||||
|
- List exactly what is ambiguous and offer possible interpretations for the user to choose from.
|
||||||
|
- Prefer clarification over assumptions. Do not guess intent, behavior, or missing requirements.
|
||||||
|
- Actively ask questions before making architectural or behavioral changes.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 5. Warnings and Unused Code
|
||||||
|
|
||||||
|
- Leave all warnings, unused variables, functions, imports, and dead code untouched unless explicitly instructed to modify them.
|
||||||
|
- These may be intentional or part of work-in-progress code.
|
||||||
|
- `todo!()` and `unimplemented!()` are permitted and should not be removed or replaced unless explicitly instructed.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 6. Architectural Integrity
|
||||||
|
|
||||||
|
- Preserve existing architecture unless explicitly instructed to refactor.
|
||||||
|
- Do not introduce hidden behavioral changes.
|
||||||
|
- Do not introduce implicit refactors.
|
||||||
|
- Keep changes minimal, isolated, and intentional.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 7. When Modifying Code
|
||||||
|
|
||||||
|
You MUST:
|
||||||
|
|
||||||
|
- Maintain architectural consistency with the existing codebase.
|
||||||
|
- Document non-obvious logic with comments that describe *why*, not *what*.
|
||||||
|
- Limit changes strictly to the requested scope (plus coordinated fixes per Section 0).
|
||||||
|
- Keep all existing symbol names unless renaming is explicitly requested.
|
||||||
|
- Preserve global formatting as-is
|
||||||
|
- Result every modification in a self-contained, compilable, runnable state of the codebase
|
||||||
|
|
||||||
|
You MUST NOT:
|
||||||
|
|
||||||
|
- Use placeholders: no `// ... rest of code`, no `// implement here`, no `/* TODO */` stubs that replace existing working code. Write full, working implementation. If the implementation is unclear, ask first
|
||||||
|
- Refactor code outside the requested scope
|
||||||
|
- Make speculative improvements
|
||||||
|
- Spawn multiple agents for EDITING
|
||||||
|
- Produce partial changes
|
||||||
|
- Introduce references to entities that are not yet implemented
|
||||||
|
- Leave TODO placeholders in production paths
|
||||||
|
|
||||||
|
Note: `todo!()` and `unimplemented!()` are allowed as idiomatic Rust markers for genuinely unfinished code paths.
|
||||||
|
|
||||||
|
Every change must:
|
||||||
|
- compile,
|
||||||
|
- pass type checks,
|
||||||
|
- have no broken imports,
|
||||||
|
- preserve invariants,
|
||||||
|
- not rely on future patches.
|
||||||
|
|
||||||
|
If the task requires multiple phases:
|
||||||
|
- either implement all required phases,
|
||||||
|
- or explicitly refuse and explain missing dependencies.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 8. Decision Process for Complex Changes
|
||||||
|
|
||||||
|
When facing a non-trivial modification, follow this sequence:
|
||||||
|
|
||||||
|
1. **Clarify**: Restate the task in one sentence to confirm understanding.
|
||||||
|
2. **Assess impact**: Identify which modules, types, and invariants are affected.
|
||||||
|
3. **Propose**: Describe the intended change before implementing it.
|
||||||
|
4. **Implement**: Make the minimal, isolated change.
|
||||||
|
5. **Verify**: Explain why the change preserves existing behavior and architectural integrity.
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 9. Context Awareness
|
||||||
|
|
||||||
|
- When provided with partial code, assume the rest of the codebase exists and functions correctly unless stated otherwise.
|
||||||
|
- Reference existing types, functions, and module structures by their actual names as shown in the provided code.
|
||||||
|
- When the provided context is insufficient to make a safe change, request the missing context explicitly.
|
||||||
|
- Spawn multiple agents for SEARCHING information, code, functions
|
||||||
|
|
||||||
|
---
|
||||||
|
|
||||||
|
### 10. Response Format
|
||||||
|
|
||||||
|
#### Language Policy
|
||||||
|
|
||||||
|
- Code, comments, commit messages, documentation ONLY ON **English**!
|
||||||
|
- Reasoning and explanations in response text on language from promt
|
||||||
|
|
||||||
|
#### Response Structure
|
||||||
|
|
||||||
|
Your response MUST consist of two sections:
|
||||||
|
|
||||||
|
**Section 1: `## Reasoning`**
|
||||||
|
|
||||||
|
- What needs to be done and why.
|
||||||
|
- Which files and modules are affected.
|
||||||
|
- Architectural decisions and their rationale.
|
||||||
|
- Potential risks or side effects.
|
||||||
|
|
||||||
|
**Section 2: `## Changes`**
|
||||||
|
|
||||||
|
- For each modified or created file: the filename on a separate line in backticks, followed by the code block.
|
||||||
|
- For files **under 200 lines**: return the full file with all changes applied.
|
||||||
|
- For files **over 200 lines**: return only the changed functions/blocks with at least 3 lines of surrounding context above and below. If the user requests the full file, provide it.
|
||||||
|
- New files: full file content.
|
||||||
|
- End with a suggested git commit message in English.
|
||||||
|
|
||||||
|
#### Reporting Out-of-Scope Issues
|
||||||
|
|
||||||
|
If during modification you discover issues outside the requested scope (potential bugs, unsafe code, architectural concerns, missing error handling, unused imports, dead code):
|
||||||
|
|
||||||
|
- Do not fix them silently.
|
||||||
|
- List them under `## ⚠️ Out-of-scope observations` at the end of your response.
|
||||||
|
- Include: file path, line/function context, brief description of the issue, and severity estimate.
|
||||||
|
|
||||||
|
#### Splitting Protocol
|
||||||
|
|
||||||
|
If the response exceeds the output limit:
|
||||||
|
|
||||||
|
1. End the current part with: **SPLIT: PART N — CONTINUE? (remaining: file_list)**
|
||||||
|
2. List the files that will be provided in subsequent parts.
|
||||||
|
3. Wait for user confirmation before continuing.
|
||||||
|
4. No single file may be split across parts.
|
||||||
|
|
||||||
|
## 11. Anti-LLM Degeneration Safeguards (Principal-Paranoid, Visionary)
|
||||||
|
|
||||||
|
This section exists to prevent common LLM failure modes: scope creep, semantic drift, cargo-cult refactors, performance regressions, contract breakage, and hidden behavior changes.
|
||||||
|
|
||||||
|
### 11.1 Non-Negotiable Invariants
|
||||||
|
|
||||||
|
- **No semantic drift:** Do not reinterpret requirements, rename concepts, or change meaning of existing terms.
|
||||||
|
- **No “helpful refactors”:** Any refactor not explicitly requested is forbidden.
|
||||||
|
- **No architectural drift:** Do not introduce new layers, patterns, abstractions, or “clean architecture” migrations unless requested.
|
||||||
|
- **No dependency drift:** Do not add crates, features, or versions unless explicitly requested.
|
||||||
|
- **No behavior drift:** If a change could alter runtime behavior, you MUST call it out explicitly in `## Reasoning` and justify it.
|
||||||
|
|
||||||
|
### 11.2 Minimal Surface Area Rule
|
||||||
|
|
||||||
|
- Touch the smallest number of files possible.
|
||||||
|
- Prefer local changes over cross-cutting edits.
|
||||||
|
- Do not “align style” across a file/module—only adjust the modified region.
|
||||||
|
- Do not reorder items, imports, or code unless required for correctness.
|
||||||
|
|
||||||
|
### 11.3 No Implicit Contract Changes
|
||||||
|
|
||||||
|
Contracts include:
|
||||||
|
- public APIs, trait bounds, visibility, error types, timeouts/retries, logging semantics, metrics semantics,
|
||||||
|
- protocol formats, framing, padding, keepalive cadence, state machine transitions,
|
||||||
|
- concurrency guarantees, cancellation behavior, backpressure behavior.
|
||||||
|
|
||||||
|
Rule:
|
||||||
|
- If you change a contract, you MUST update all dependents in the same patch AND document the contract delta explicitly.
|
||||||
|
|
||||||
|
### 11.4 Hot-Path Preservation (Performance Paranoia)
|
||||||
|
|
||||||
|
- Do not introduce extra allocations, cloning, or formatting in hot paths.
|
||||||
|
- Do not add logging/metrics on hot paths unless requested.
|
||||||
|
- Do not add new locks or broaden lock scope.
|
||||||
|
- Prefer `&str` / slices / borrowed data where the codebase already does so.
|
||||||
|
- Avoid `String` building for errors/logs if it changes current patterns.
|
||||||
|
|
||||||
|
If you cannot prove performance neutrality, label it as risk in `## Reasoning`.
|
||||||
|
|
||||||
|
### 11.5 Async / Concurrency Safety (Cancellation & Backpressure)
|
||||||
|
|
||||||
|
- No blocking calls inside async contexts.
|
||||||
|
- Preserve cancellation safety: do not introduce `await` between lock acquisition and critical invariants unless already present.
|
||||||
|
- Preserve backpressure: do not replace bounded channels with unbounded, do not remove flow control.
|
||||||
|
- Do not change task lifecycle semantics (spawn patterns, join handles, shutdown order) unless requested.
|
||||||
|
- Do not introduce `tokio::spawn` / background tasks unless explicitly requested.
|
||||||
|
|
||||||
|
### 11.6 Error Semantics Integrity
|
||||||
|
|
||||||
|
- Do not replace structured errors with generic strings.
|
||||||
|
- Do not widen/narrow error types or change error categories without explicit approval.
|
||||||
|
- Avoid introducing panics in production paths (`unwrap`, `expect`) unless the codebase already treats that path as impossible and documented.
|
||||||
|
|
||||||
|
### 11.7 “No New Abstractions” Default
|
||||||
|
|
||||||
|
Default stance:
|
||||||
|
- No new traits, generics, macros, builder patterns, type-level cleverness, or “frameworking”.
|
||||||
|
- If abstraction is necessary, prefer the smallest possible local helper (private function) and justify it.
|
||||||
|
|
||||||
|
### 11.8 Negative-Diff Protection
|
||||||
|
|
||||||
|
Avoid “diff inflation” patterns:
|
||||||
|
- mass edits,
|
||||||
|
- moving code between files,
|
||||||
|
- rewrapping long lines,
|
||||||
|
- rearranging module order,
|
||||||
|
- renaming for aesthetics.
|
||||||
|
|
||||||
|
If a diff becomes large, STOP and ask before proceeding.
|
||||||
|
|
||||||
|
### 11.9 Consistency with Existing Style (But Not Style Refactors)
|
||||||
|
|
||||||
|
- Follow existing conventions of the touched module (naming, error style, return patterns).
|
||||||
|
- Do not enforce global “best practices” that the codebase does not already use.
|
||||||
|
|
||||||
|
### 11.10 Two-Phase Safety Gate (Plan → Patch)
|
||||||
|
|
||||||
|
For non-trivial changes:
|
||||||
|
1) Provide a micro-plan (1–5 bullets): what files, what functions, what invariants, what risks.
|
||||||
|
2) Implement exactly that plan—no extra improvements.
|
||||||
|
|
||||||
|
### 11.11 Pre-Response Checklist (Hard Gate)
|
||||||
|
|
||||||
|
Before final output, verify internally:
|
||||||
|
|
||||||
|
- No unresolved symbols / broken imports.
|
||||||
|
- No partially updated call sites.
|
||||||
|
- No new public surface changes unless requested.
|
||||||
|
- No transitional states / TODO placeholders replacing working code.
|
||||||
|
- Changes are atomic: the repository remains buildable and runnable.
|
||||||
|
- Any behavior change is explicitly stated.
|
||||||
|
|
||||||
|
If any check fails: fix it before responding.
|
||||||
|
|
||||||
|
### 11.12 Truthfulness Policy (No Hallucinated Claims)
|
||||||
|
|
||||||
|
- Do not claim “this compiles” or “tests pass” unless you actually verified with the available tooling/context.
|
||||||
|
- If verification is not possible, state: “Not executed; reasoning-based consistency check only.”
|
||||||
|
|
||||||
|
### 11.13 Visionary Guardrail: Preserve Optionality
|
||||||
|
|
||||||
|
When multiple valid designs exist, prefer the one that:
|
||||||
|
- minimally constrains future evolution,
|
||||||
|
- preserves existing extension points,
|
||||||
|
- avoids locking the project into a new paradigm,
|
||||||
|
- keeps interfaces stable and implementation local.
|
||||||
|
|
||||||
|
Default to reversible changes.
|
||||||
|
|
||||||
|
### 11.14 Stop Conditions
|
||||||
|
|
||||||
|
STOP and ask targeted questions if:
|
||||||
|
- required context is missing,
|
||||||
|
- a change would cross module boundaries,
|
||||||
|
- a contract might change,
|
||||||
|
- concurrency/protocol invariants are unclear,
|
||||||
|
- the diff is growing beyond a minimal patch.
|
||||||
|
|
||||||
|
No guessing.
|
||||||
|
|
||||||
|
### 12. Invariant Preservation
|
||||||
|
|
||||||
|
You MUST explicitly preserve:
|
||||||
|
- Thread-safety guarantees (`Send` / `Sync` expectations).
|
||||||
|
- Memory safety assumptions (no hidden `unsafe` expansions).
|
||||||
|
- Lock ordering and deadlock invariants.
|
||||||
|
- State machine correctness (no new invalid transitions).
|
||||||
|
- Backward compatibility of serialized formats (if applicable).
|
||||||
|
|
||||||
|
If a change touches concurrency, networking, protocol logic, or state machines,
|
||||||
|
you MUST explain why existing invariants remain valid.
|
||||||
|
|
||||||
|
### 13. Error Handling Policy
|
||||||
|
|
||||||
|
- Do not replace structured errors with generic strings.
|
||||||
|
- Preserve existing error propagation semantics.
|
||||||
|
- Do not widen or narrow error types without approval.
|
||||||
|
- Avoid introducing panics in production paths.
|
||||||
|
- Prefer explicit error mapping over implicit conversions.
|
||||||
|
|
||||||
|
### 14. Test Safety
|
||||||
|
|
||||||
|
- Do not modify existing tests unless the task explicitly requires it.
|
||||||
|
- Do not weaken assertions.
|
||||||
|
- Preserve determinism in testable components.
|
||||||
|
|
||||||
|
### 15. Security Constraints
|
||||||
|
|
||||||
|
- Do not weaken cryptographic assumptions.
|
||||||
|
- Do not modify key derivation logic without explicit request.
|
||||||
|
- Do not change constant-time behavior.
|
||||||
|
- Do not introduce logging of secrets.
|
||||||
|
- Preserve TLS/MTProto protocol correctness.
|
||||||
|
|
||||||
|
### 16. Logging Policy
|
||||||
|
|
||||||
|
- Do not introduce excessive logging in hot paths.
|
||||||
|
- Do not log sensitive data.
|
||||||
|
- Preserve existing log levels and style.
|
||||||
|
|
||||||
|
### 17. Pre-Response Verification Checklist
|
||||||
|
|
||||||
|
Before producing the final answer, verify internally:
|
||||||
|
|
||||||
|
- The change compiles conceptually.
|
||||||
|
- No unresolved symbols exist.
|
||||||
|
- All modified call sites are updated.
|
||||||
|
- No accidental behavioral changes were introduced.
|
||||||
|
- Architectural boundaries remain intact.
|
||||||
|
|
||||||
|
### 18. Atomic Change Principle
|
||||||
|
Every patch must be **atomic and production-safe**.
|
||||||
|
* **Self-contained** — no dependency on future patches or unimplemented components.
|
||||||
|
* **Build-safe** — the project must compile successfully after the change.
|
||||||
|
* **Contract-consistent** — no partial interface or behavioral changes; all dependent code must be updated within the same patch.
|
||||||
|
* **No transitional states** — no placeholders, incomplete refactors, or temporary inconsistencies.
|
||||||
|
|
||||||
|
**Invariant:** After any single patch, the repository remains fully functional and buildable.
|
||||||
|
|
||||||
### TLS Fronting Behavior
|
|
||||||
- Invalid handshakes are transparently proxied to `mask_host` for DPI evasion
|
|
||||||
- `fake_cert_len` is randomized at startup (1024-4096 bytes)
|
|
||||||
- `mask_unix_sock` and `mask_host` are mutually exclusive
|
|
||||||
|
|||||||
@@ -1,410 +0,0 @@
|
|||||||
## System Prompt — Production Rust Codebase: Modification and Architecture Guidelines
|
|
||||||
|
|
||||||
You are a senior Rust Engineer and pricipal Rust Architect acting as a strict code reviewer and implementation partner.
|
|
||||||
Your responses are precise, minimal, and architecturally sound. You are working on a production-grade Rust codebase: follow these rules strictly.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 0. Priority Resolution — Scope Control
|
|
||||||
|
|
||||||
This section resolves conflicts between code quality enforcement and scope limitation.
|
|
||||||
|
|
||||||
When editing or extending existing code, you MUST audit the affected files and fix:
|
|
||||||
|
|
||||||
- Comment style violations (missing, non-English, decorative, trailing).
|
|
||||||
- Missing or incorrect documentation on public items.
|
|
||||||
- Comment placement issues (trailing comments → move above the code).
|
|
||||||
|
|
||||||
These are **coordinated changes** — they are always in scope.
|
|
||||||
|
|
||||||
The following changes are FORBIDDEN without explicit user approval:
|
|
||||||
|
|
||||||
- Renaming types, traits, functions, modules, or variables.
|
|
||||||
- Altering business logic, control flow, or data transformations.
|
|
||||||
- Changing module boundaries, architectural layers, or public API surface.
|
|
||||||
- Adding or removing functions, structs, enums, or trait implementations.
|
|
||||||
- Fixing compiler warnings or removing unused code.
|
|
||||||
|
|
||||||
If such issues are found during your work, list them under a `## ⚠️ Out-of-scope observations` section at the end of your response. Include file path, context, and a brief description. Do not apply these changes.
|
|
||||||
|
|
||||||
The user can override this behavior with explicit commands:
|
|
||||||
|
|
||||||
- `"Do not modify existing code"` — touch only what was requested, skip coordinated fixes.
|
|
||||||
- `"Make minimal changes"` — no coordinated fixes, narrowest possible diff.
|
|
||||||
- `"Fix everything"` — apply all coordinated fixes and out-of-scope observations.
|
|
||||||
|
|
||||||
### Core Rule
|
|
||||||
|
|
||||||
The codebase must never enter an invalid intermediate state.
|
|
||||||
No response may leave the repository in a condition that requires follow-up fixes.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 1. Comments and Documentation
|
|
||||||
|
|
||||||
- All comments MUST be written in English.
|
|
||||||
- Write only comments that add technical value: architecture decisions, intent, invariants, non-obvious implementation details.
|
|
||||||
- Place all comments on separate lines above the relevant code.
|
|
||||||
- Use `///` doc-comments for public items. Use `//` for internal clarifications.
|
|
||||||
|
|
||||||
Correct example:
|
|
||||||
|
|
||||||
```rust
|
|
||||||
// Handles MTProto client authentication and establishes encrypted session state.
|
|
||||||
fn handle_authenticated_client(...) { ... }
|
|
||||||
```
|
|
||||||
|
|
||||||
Incorrect examples:
|
|
||||||
|
|
||||||
```rust
|
|
||||||
let x = 5; // set x to 5
|
|
||||||
```
|
|
||||||
|
|
||||||
```rust
|
|
||||||
// This function does stuff
|
|
||||||
fn do_stuff() { ... }
|
|
||||||
```
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 2. File Size and Module Structure
|
|
||||||
|
|
||||||
- Files MUST NOT exceed 350–550 lines.
|
|
||||||
- If a file exceeds this limit, split it into submodules organized by responsibility (e.g., protocol, transport, state, handlers).
|
|
||||||
- Parent modules MUST declare and describe their submodules.
|
|
||||||
- Maintain clear architectural boundaries between modules.
|
|
||||||
|
|
||||||
Correct example:
|
|
||||||
|
|
||||||
```rust
|
|
||||||
// Client connection handling logic.
|
|
||||||
// Submodules:
|
|
||||||
// - handshake: MTProto handshake implementation
|
|
||||||
// - relay: traffic forwarding logic
|
|
||||||
// - state: client session state machine
|
|
||||||
|
|
||||||
pub mod handshake;
|
|
||||||
pub mod relay;
|
|
||||||
pub mod state;
|
|
||||||
```
|
|
||||||
|
|
||||||
Git discipline:
|
|
||||||
|
|
||||||
- Use local git for versioning and diffs.
|
|
||||||
- Write clear, descriptive commit messages in English that explain both *what* changed and *why*.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 3. Formatting
|
|
||||||
|
|
||||||
- Preserve the existing formatting style of the project exactly as-is.
|
|
||||||
- Reformat code only when explicitly instructed to do so.
|
|
||||||
- Do not run `cargo fmt` unless explicitly instructed.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 4. Change Safety and Validation
|
|
||||||
|
|
||||||
- If anything is unclear, STOP and ask specific, targeted questions before proceeding.
|
|
||||||
- List exactly what is ambiguous and offer possible interpretations for the user to choose from.
|
|
||||||
- Prefer clarification over assumptions. Do not guess intent, behavior, or missing requirements.
|
|
||||||
- Actively ask questions before making architectural or behavioral changes.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 5. Warnings and Unused Code
|
|
||||||
|
|
||||||
- Leave all warnings, unused variables, functions, imports, and dead code untouched unless explicitly instructed to modify them.
|
|
||||||
- These may be intentional or part of work-in-progress code.
|
|
||||||
- `todo!()` and `unimplemented!()` are permitted and should not be removed or replaced unless explicitly instructed.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 6. Architectural Integrity
|
|
||||||
|
|
||||||
- Preserve existing architecture unless explicitly instructed to refactor.
|
|
||||||
- Do not introduce hidden behavioral changes.
|
|
||||||
- Do not introduce implicit refactors.
|
|
||||||
- Keep changes minimal, isolated, and intentional.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 7. When Modifying Code
|
|
||||||
|
|
||||||
You MUST:
|
|
||||||
|
|
||||||
- Maintain architectural consistency with the existing codebase.
|
|
||||||
- Document non-obvious logic with comments that describe *why*, not *what*.
|
|
||||||
- Limit changes strictly to the requested scope (plus coordinated fixes per Section 0).
|
|
||||||
- Keep all existing symbol names unless renaming is explicitly requested.
|
|
||||||
- Preserve global formatting as-is
|
|
||||||
- Ensure every modification leaves the codebase in a self-contained, compilable, runnable state
|
|
||||||
|
|
||||||
You MUST NOT:
|
|
||||||
|
|
||||||
- Use placeholders: no `// ... rest of code`, no `// implement here`, no `/* TODO */` stubs that replace existing working code. Write full, working implementation. If the implementation is unclear, ask first
|
|
||||||
- Refactor code outside the requested scope
|
|
||||||
- Make speculative improvements
|
|
||||||
- Spawn multiple agents for EDITING
|
|
||||||
- Produce partial changes
|
|
||||||
- Introduce references to entities that are not yet implemented
|
|
||||||
- Leave TODO placeholders in production paths
|
|
||||||
|
|
||||||
Note: `todo!()` and `unimplemented!()` are allowed as idiomatic Rust markers for genuinely unfinished code paths.
|
|
||||||
|
|
||||||
Every change must:
|
|
||||||
- compile,
|
|
||||||
- pass type checks,
|
|
||||||
- have no broken imports,
|
|
||||||
- preserve invariants,
|
|
||||||
- not rely on future patches.
|
|
||||||
|
|
||||||
If the task requires multiple phases:
|
|
||||||
- either implement all required phases,
|
|
||||||
- or explicitly refuse and explain missing dependencies.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 8. Decision Process for Complex Changes
|
|
||||||
|
|
||||||
When facing a non-trivial modification, follow this sequence:
|
|
||||||
|
|
||||||
1. **Clarify**: Restate the task in one sentence to confirm understanding.
|
|
||||||
2. **Assess impact**: Identify which modules, types, and invariants are affected.
|
|
||||||
3. **Propose**: Describe the intended change before implementing it.
|
|
||||||
4. **Implement**: Make the minimal, isolated change.
|
|
||||||
5. **Verify**: Explain why the change preserves existing behavior and architectural integrity.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 9. Context Awareness
|
|
||||||
|
|
||||||
- When provided with partial code, assume the rest of the codebase exists and functions correctly unless stated otherwise.
|
|
||||||
- Reference existing types, functions, and module structures by their actual names as shown in the provided code.
|
|
||||||
- When the provided context is insufficient to make a safe change, request the missing context explicitly.
|
|
||||||
- Spawn multiple agents for SEARCHING information, code, and functions
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
### 10. Response Format
|
|
||||||
|
|
||||||
#### Language Policy
|
|
||||||
|
|
||||||
- Code, comments, commit messages, and documentation MUST be written ONLY in **English**!
|
|
||||||
- Reasoning and explanations in the response text use the language of the prompt
|
|
||||||
|
|
||||||
#### Response Structure
|
|
||||||
|
|
||||||
Your response MUST consist of two sections:
|
|
||||||
|
|
||||||
**Section 1: `## Reasoning`**
|
|
||||||
|
|
||||||
- What needs to be done and why.
|
|
||||||
- Which files and modules are affected.
|
|
||||||
- Architectural decisions and their rationale.
|
|
||||||
- Potential risks or side effects.
|
|
||||||
|
|
||||||
**Section 2: `## Changes`**
|
|
||||||
|
|
||||||
- For each modified or created file: the filename on a separate line in backticks, followed by the code block.
|
|
||||||
- For files **under 200 lines**: return the full file with all changes applied.
|
|
||||||
- For files **over 200 lines**: return only the changed functions/blocks with at least 3 lines of surrounding context above and below. If the user requests the full file, provide it.
|
|
||||||
- New files: full file content.
|
|
||||||
- End with a suggested git commit message in English.
|
|
||||||
|
|
||||||
#### Reporting Out-of-Scope Issues
|
|
||||||
|
|
||||||
If during modification you discover issues outside the requested scope (potential bugs, unsafe code, architectural concerns, missing error handling, unused imports, dead code):
|
|
||||||
|
|
||||||
- Do not fix them silently.
|
|
||||||
- List them under `## ⚠️ Out-of-scope observations` at the end of your response.
|
|
||||||
- Include: file path, line/function context, brief description of the issue, and severity estimate.
|
|
||||||
|
|
||||||
#### Splitting Protocol
|
|
||||||
|
|
||||||
If the response exceeds the output limit:
|
|
||||||
|
|
||||||
1. End the current part with: **SPLIT: PART N — CONTINUE? (remaining: file_list)**
|
|
||||||
2. List the files that will be provided in subsequent parts.
|
|
||||||
3. Wait for user confirmation before continuing.
|
|
||||||
4. No single file may be split across parts.
|
|
||||||
|
|
||||||
## 11. Anti-LLM Degeneration Safeguards (Principal-Paranoid, Visionary)
|
|
||||||
|
|
||||||
This section exists to prevent common LLM failure modes: scope creep, semantic drift, cargo-cult refactors, performance regressions, contract breakage, and hidden behavior changes.
|
|
||||||
|
|
||||||
### 11.1 Non-Negotiable Invariants
|
|
||||||
|
|
||||||
- **No semantic drift:** Do not reinterpret requirements, rename concepts, or change meaning of existing terms.
|
|
||||||
- **No “helpful refactors”:** Any refactor not explicitly requested is forbidden.
|
|
||||||
- **No architectural drift:** Do not introduce new layers, patterns, abstractions, or “clean architecture” migrations unless requested.
|
|
||||||
- **No dependency drift:** Do not add crates, features, or versions unless explicitly requested.
|
|
||||||
- **No behavior drift:** If a change could alter runtime behavior, you MUST call it out explicitly in `## Reasoning` and justify it.
|
|
||||||
|
|
||||||
### 11.2 Minimal Surface Area Rule
|
|
||||||
|
|
||||||
- Touch the smallest number of files possible.
|
|
||||||
- Prefer local changes over cross-cutting edits.
|
|
||||||
- Do not “align style” across a file/module—only adjust the modified region.
|
|
||||||
- Do not reorder items, imports, or code unless required for correctness.
|
|
||||||
|
|
||||||
### 11.3 No Implicit Contract Changes
|
|
||||||
|
|
||||||
Contracts include:
|
|
||||||
- public APIs, trait bounds, visibility, error types, timeouts/retries, logging semantics, metrics semantics,
|
|
||||||
- protocol formats, framing, padding, keepalive cadence, state machine transitions,
|
|
||||||
- concurrency guarantees, cancellation behavior, backpressure behavior.
|
|
||||||
|
|
||||||
Rule:
|
|
||||||
- If you change a contract, you MUST update all dependents in the same patch AND document the contract delta explicitly.
|
|
||||||
|
|
||||||
### 11.4 Hot-Path Preservation (Performance Paranoia)
|
|
||||||
|
|
||||||
- Do not introduce extra allocations, cloning, or formatting in hot paths.
|
|
||||||
- Do not add logging/metrics on hot paths unless requested.
|
|
||||||
- Do not add new locks or broaden lock scope.
|
|
||||||
- Prefer `&str` / slices / borrowed data where the codebase already does so.
|
|
||||||
- Avoid `String` building for errors/logs if it changes current patterns.
|
|
||||||
|
|
||||||
If you cannot prove performance neutrality, label it as risk in `## Reasoning`.
|
|
||||||
|
|
||||||
### 11.5 Async / Concurrency Safety (Cancellation & Backpressure)
|
|
||||||
|
|
||||||
- No blocking calls inside async contexts.
|
|
||||||
- Preserve cancellation safety: do not introduce `await` between lock acquisition and critical invariants unless already present.
|
|
||||||
- Preserve backpressure: do not replace bounded channels with unbounded, do not remove flow control.
|
|
||||||
- Do not change task lifecycle semantics (spawn patterns, join handles, shutdown order) unless requested.
|
|
||||||
- Do not introduce `tokio::spawn` / background tasks unless explicitly requested.
|
|
||||||
|
|
||||||
### 11.6 Error Semantics Integrity
|
|
||||||
|
|
||||||
- Do not replace structured errors with generic strings.
|
|
||||||
- Do not widen/narrow error types or change error categories without explicit approval.
|
|
||||||
- Avoid introducing panics in production paths (`unwrap`, `expect`) unless the codebase already treats that path as impossible and documented.
|
|
||||||
|
|
||||||
### 11.7 “No New Abstractions” Default
|
|
||||||
|
|
||||||
Default stance:
|
|
||||||
- No new traits, generics, macros, builder patterns, type-level cleverness, or “frameworking”.
|
|
||||||
- If abstraction is necessary, prefer the smallest possible local helper (private function) and justify it.
|
|
||||||
|
|
||||||
### 11.8 Negative-Diff Protection
|
|
||||||
|
|
||||||
Avoid “diff inflation” patterns:
|
|
||||||
- mass edits,
|
|
||||||
- moving code between files,
|
|
||||||
- rewrapping long lines,
|
|
||||||
- rearranging module order,
|
|
||||||
- renaming for aesthetics.
|
|
||||||
|
|
||||||
If a diff becomes large, STOP and ask before proceeding.
|
|
||||||
|
|
||||||
### 11.9 Consistency with Existing Style (But Not Style Refactors)
|
|
||||||
|
|
||||||
- Follow existing conventions of the touched module (naming, error style, return patterns).
|
|
||||||
- Do not enforce global “best practices” that the codebase does not already use.
|
|
||||||
|
|
||||||
### 11.10 Two-Phase Safety Gate (Plan → Patch)
|
|
||||||
|
|
||||||
For non-trivial changes:
|
|
||||||
1) Provide a micro-plan (1–5 bullets): what files, what functions, what invariants, what risks.
|
|
||||||
2) Implement exactly that plan—no extra improvements.
|
|
||||||
|
|
||||||
### 11.11 Pre-Response Checklist (Hard Gate)
|
|
||||||
|
|
||||||
Before final output, verify internally:
|
|
||||||
|
|
||||||
- No unresolved symbols / broken imports.
|
|
||||||
- No partially updated call sites.
|
|
||||||
- No new public surface changes unless requested.
|
|
||||||
- No transitional states / TODO placeholders replacing working code.
|
|
||||||
- Changes are atomic: the repository remains buildable and runnable.
|
|
||||||
- Any behavior change is explicitly stated.
|
|
||||||
|
|
||||||
If any check fails: fix it before responding.
|
|
||||||
|
|
||||||
### 11.12 Truthfulness Policy (No Hallucinated Claims)
|
|
||||||
|
|
||||||
- Do not claim “this compiles” or “tests pass” unless you actually verified with the available tooling/context.
|
|
||||||
- If verification is not possible, state: “Not executed; reasoning-based consistency check only.”
|
|
||||||
|
|
||||||
### 11.13 Visionary Guardrail: Preserve Optionality
|
|
||||||
|
|
||||||
When multiple valid designs exist, prefer the one that:
|
|
||||||
- minimally constrains future evolution,
|
|
||||||
- preserves existing extension points,
|
|
||||||
- avoids locking the project into a new paradigm,
|
|
||||||
- keeps interfaces stable and implementation local.
|
|
||||||
|
|
||||||
Default to reversible changes.
|
|
||||||
|
|
||||||
### 11.14 Stop Conditions
|
|
||||||
|
|
||||||
STOP and ask targeted questions if:
|
|
||||||
- required context is missing,
|
|
||||||
- a change would cross module boundaries,
|
|
||||||
- a contract might change,
|
|
||||||
- concurrency/protocol invariants are unclear,
|
|
||||||
- the diff is growing beyond a minimal patch.
|
|
||||||
|
|
||||||
No guessing.
|
|
||||||
|
|
||||||
### 12. Invariant Preservation
|
|
||||||
|
|
||||||
You MUST explicitly preserve:
|
|
||||||
- Thread-safety guarantees (`Send` / `Sync` expectations).
|
|
||||||
- Memory safety assumptions (no hidden `unsafe` expansions).
|
|
||||||
- Lock ordering and deadlock invariants.
|
|
||||||
- State machine correctness (no new invalid transitions).
|
|
||||||
- Backward compatibility of serialized formats (if applicable).
|
|
||||||
|
|
||||||
If a change touches concurrency, networking, protocol logic, or state machines,
|
|
||||||
you MUST explain why existing invariants remain valid.
|
|
||||||
|
|
||||||
### 13. Error Handling Policy
|
|
||||||
|
|
||||||
- Do not replace structured errors with generic strings.
|
|
||||||
- Preserve existing error propagation semantics.
|
|
||||||
- Do not widen or narrow error types without approval.
|
|
||||||
- Avoid introducing panics in production paths.
|
|
||||||
- Prefer explicit error mapping over implicit conversions.
|
|
||||||
|
|
||||||
### 14. Test Safety
|
|
||||||
|
|
||||||
- Do not modify existing tests unless the task explicitly requires it.
|
|
||||||
- Do not weaken assertions.
|
|
||||||
- Preserve determinism in testable components.
|
|
||||||
|
|
||||||
### 15. Security Constraints
|
|
||||||
|
|
||||||
- Do not weaken cryptographic assumptions.
|
|
||||||
- Do not modify key derivation logic without explicit request.
|
|
||||||
- Do not change constant-time behavior.
|
|
||||||
- Do not introduce logging of secrets.
|
|
||||||
- Preserve TLS/MTProto protocol correctness.
|
|
||||||
|
|
||||||
### 16. Logging Policy
|
|
||||||
|
|
||||||
- Do not introduce excessive logging in hot paths.
|
|
||||||
- Do not log sensitive data.
|
|
||||||
- Preserve existing log levels and style.
|
|
||||||
|
|
||||||
### 17. Pre-Response Verification Checklist
|
|
||||||
|
|
||||||
Before producing the final answer, verify internally:
|
|
||||||
|
|
||||||
- The change compiles conceptually.
|
|
||||||
- No unresolved symbols exist.
|
|
||||||
- All modified call sites are updated.
|
|
||||||
- No accidental behavioral changes were introduced.
|
|
||||||
- Architectural boundaries remain intact.
|
|
||||||
|
|
||||||
### 18. Atomic Change Principle
|
|
||||||
Every patch must be **atomic and production-safe**.
|
|
||||||
* **Self-contained** — no dependency on future patches or unimplemented components.
|
|
||||||
* **Build-safe** — the project must compile successfully after the change.
|
|
||||||
* **Contract-consistent** — no partial interface or behavioral changes; all dependent code must be updated within the same patch.
|
|
||||||
* **No transitional states** — no placeholders, incomplete refactors, or temporary inconsistencies.
|
|
||||||
|
|
||||||
**Invariant:** After any single patch, the repository remains fully functional and buildable.
|
|
||||||
|
|
||||||
544
Cargo.lock
generated
544
Cargo.lock
generated
@@ -55,6 +55,45 @@ version = "1.0.101"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "5f0e0fee31ef5ed1ba1316088939cea399010ed7731dba877ed44aeb407a75ea"
|
checksum = "5f0e0fee31ef5ed1ba1316088939cea399010ed7731dba877ed44aeb407a75ea"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "asn1-rs"
|
||||||
|
version = "0.5.2"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0"
|
||||||
|
dependencies = [
|
||||||
|
"asn1-rs-derive",
|
||||||
|
"asn1-rs-impl",
|
||||||
|
"displaydoc",
|
||||||
|
"nom",
|
||||||
|
"num-traits",
|
||||||
|
"rusticata-macros",
|
||||||
|
"thiserror 1.0.69",
|
||||||
|
"time",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "asn1-rs-derive"
|
||||||
|
version = "0.4.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn 1.0.109",
|
||||||
|
"synstructure 0.12.6",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "asn1-rs-impl"
|
||||||
|
version = "0.1.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn 1.0.109",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "atomic-waker"
|
name = "atomic-waker"
|
||||||
version = "1.1.2"
|
version = "1.1.2"
|
||||||
@@ -88,6 +127,12 @@ version = "0.8.0"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7"
|
checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "bitflags"
|
||||||
|
version = "1.3.2"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "bitflags"
|
name = "bitflags"
|
||||||
version = "2.10.0"
|
version = "2.10.0"
|
||||||
@@ -155,6 +200,12 @@ version = "1.0.4"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"
|
checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "cfg_aliases"
|
||||||
|
version = "0.1.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "cfg_aliases"
|
name = "cfg_aliases"
|
||||||
version = "0.2.1"
|
version = "0.2.1"
|
||||||
@@ -252,6 +303,15 @@ dependencies = [
|
|||||||
"libc",
|
"libc",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "crc32c"
|
||||||
|
version = "0.6.8"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "3a47af21622d091a8f0fb295b88bc886ac74efcc613efc19f5d0b21de5c89e47"
|
||||||
|
dependencies = [
|
||||||
|
"rustc_version",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "crc32fast"
|
name = "crc32fast"
|
||||||
version = "1.5.0"
|
version = "1.5.0"
|
||||||
@@ -297,6 +357,15 @@ dependencies = [
|
|||||||
"itertools",
|
"itertools",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "crossbeam-channel"
|
||||||
|
version = "0.5.15"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2"
|
||||||
|
dependencies = [
|
||||||
|
"crossbeam-utils",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "crossbeam-deque"
|
name = "crossbeam-deque"
|
||||||
version = "0.8.6"
|
version = "0.8.6"
|
||||||
@@ -369,6 +438,35 @@ dependencies = [
|
|||||||
"parking_lot_core",
|
"parking_lot_core",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "data-encoding"
|
||||||
|
version = "2.10.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "d7a1e2f27636f116493b8b860f5546edb47c8d8f8ea73e1d2a20be88e28d1fea"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "der-parser"
|
||||||
|
version = "8.2.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e"
|
||||||
|
dependencies = [
|
||||||
|
"asn1-rs",
|
||||||
|
"displaydoc",
|
||||||
|
"nom",
|
||||||
|
"num-bigint",
|
||||||
|
"num-traits",
|
||||||
|
"rusticata-macros",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "deranged"
|
||||||
|
version = "0.5.8"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "7cd812cc2bc1d69d4764bd80df88b4317eaef9e773c75226407d9bc0876b211c"
|
||||||
|
dependencies = [
|
||||||
|
"powerfmt",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "digest"
|
name = "digest"
|
||||||
version = "0.10.7"
|
version = "0.10.7"
|
||||||
@@ -388,7 +486,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn",
|
"syn 2.0.114",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -419,6 +517,17 @@ version = "2.3.0"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
|
checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "filetime"
|
||||||
|
version = "0.2.27"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "f98844151eee8917efc50bd9e8318cb963ae8b297431495d3f758616ea5c57db"
|
||||||
|
dependencies = [
|
||||||
|
"cfg-if",
|
||||||
|
"libc",
|
||||||
|
"libredox",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "find-msvc-tools"
|
name = "find-msvc-tools"
|
||||||
version = "0.1.9"
|
version = "0.1.9"
|
||||||
@@ -452,6 +561,15 @@ dependencies = [
|
|||||||
"percent-encoding",
|
"percent-encoding",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "fsevent-sys"
|
||||||
|
version = "4.1.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "76ee7a02da4d231650c7cea31349b889be2f45ddb3ef3032d2ec8185f6313fd2"
|
||||||
|
dependencies = [
|
||||||
|
"libc",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "futures"
|
name = "futures"
|
||||||
version = "0.3.31"
|
version = "0.3.31"
|
||||||
@@ -508,7 +626,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn",
|
"syn 2.0.114",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -756,7 +874,7 @@ dependencies = [
|
|||||||
"tokio",
|
"tokio",
|
||||||
"tokio-rustls",
|
"tokio-rustls",
|
||||||
"tower-service",
|
"tower-service",
|
||||||
"webpki-roots",
|
"webpki-roots 1.0.6",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -926,6 +1044,26 @@ dependencies = [
|
|||||||
"serde_core",
|
"serde_core",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "inotify"
|
||||||
|
version = "0.9.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "f8069d3ec154eb856955c1c0fbffefbf5f3c40a104ec912d4797314c1801abff"
|
||||||
|
dependencies = [
|
||||||
|
"bitflags 1.3.2",
|
||||||
|
"inotify-sys",
|
||||||
|
"libc",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "inotify-sys"
|
||||||
|
version = "0.1.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "e05c02b5e89bff3b946cedeca278abc628fe811e604f027c45a8aa3cf793d0eb"
|
||||||
|
dependencies = [
|
||||||
|
"libc",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "inout"
|
name = "inout"
|
||||||
version = "0.1.4"
|
version = "0.1.4"
|
||||||
@@ -942,6 +1080,15 @@ version = "2.11.0"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130"
|
checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "ipnetwork"
|
||||||
|
version = "0.20.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "bf466541e9d546596ee94f9f69590f89473455f88372423e0008fc1a7daf100e"
|
||||||
|
dependencies = [
|
||||||
|
"serde",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "iri-string"
|
name = "iri-string"
|
||||||
version = "0.7.10"
|
version = "0.7.10"
|
||||||
@@ -988,6 +1135,26 @@ dependencies = [
|
|||||||
"wasm-bindgen",
|
"wasm-bindgen",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "kqueue"
|
||||||
|
version = "1.1.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "eac30106d7dce88daf4a3fcb4879ea939476d5074a9b7ddd0fb97fa4bed5596a"
|
||||||
|
dependencies = [
|
||||||
|
"kqueue-sys",
|
||||||
|
"libc",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "kqueue-sys"
|
||||||
|
version = "1.0.4"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "ed9625ffda8729b85e45cf04090035ac368927b8cebc34898e7c120f52e4838b"
|
||||||
|
dependencies = [
|
||||||
|
"bitflags 1.3.2",
|
||||||
|
"libc",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "lazy_static"
|
name = "lazy_static"
|
||||||
version = "1.5.0"
|
version = "1.5.0"
|
||||||
@@ -1006,6 +1173,17 @@ version = "0.2.181"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "459427e2af2b9c839b132acb702a1c654d95e10f8c326bfc2ad11310e458b1c5"
|
checksum = "459427e2af2b9c839b132acb702a1c654d95e10f8c326bfc2ad11310e458b1c5"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "libredox"
|
||||||
|
version = "0.1.12"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616"
|
||||||
|
dependencies = [
|
||||||
|
"bitflags 2.10.0",
|
||||||
|
"libc",
|
||||||
|
"redox_syscall 0.7.1",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "linux-raw-sys"
|
name = "linux-raw-sys"
|
||||||
version = "0.11.0"
|
version = "0.11.0"
|
||||||
@@ -1073,6 +1251,33 @@ version = "2.8.0"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79"
|
checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "memoffset"
|
||||||
|
version = "0.9.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a"
|
||||||
|
dependencies = [
|
||||||
|
"autocfg",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "minimal-lexical"
|
||||||
|
version = "0.2.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "mio"
|
||||||
|
version = "0.8.11"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c"
|
||||||
|
dependencies = [
|
||||||
|
"libc",
|
||||||
|
"log",
|
||||||
|
"wasi",
|
||||||
|
"windows-sys 0.48.0",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "mio"
|
name = "mio"
|
||||||
version = "1.1.1"
|
version = "1.1.1"
|
||||||
@@ -1084,6 +1289,48 @@ dependencies = [
|
|||||||
"windows-sys 0.61.2",
|
"windows-sys 0.61.2",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "nix"
|
||||||
|
version = "0.28.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4"
|
||||||
|
dependencies = [
|
||||||
|
"bitflags 2.10.0",
|
||||||
|
"cfg-if",
|
||||||
|
"cfg_aliases 0.1.1",
|
||||||
|
"libc",
|
||||||
|
"memoffset",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "nom"
|
||||||
|
version = "7.1.3"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a"
|
||||||
|
dependencies = [
|
||||||
|
"memchr",
|
||||||
|
"minimal-lexical",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "notify"
|
||||||
|
version = "6.1.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "6205bd8bb1e454ad2e27422015fb5e4f2bcc7e08fa8f27058670d208324a4d2d"
|
||||||
|
dependencies = [
|
||||||
|
"bitflags 2.10.0",
|
||||||
|
"crossbeam-channel",
|
||||||
|
"filetime",
|
||||||
|
"fsevent-sys",
|
||||||
|
"inotify",
|
||||||
|
"kqueue",
|
||||||
|
"libc",
|
||||||
|
"log",
|
||||||
|
"mio 0.8.11",
|
||||||
|
"walkdir",
|
||||||
|
"windows-sys 0.48.0",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "nu-ansi-term"
|
name = "nu-ansi-term"
|
||||||
version = "0.50.3"
|
version = "0.50.3"
|
||||||
@@ -1103,6 +1350,12 @@ dependencies = [
|
|||||||
"num-traits",
|
"num-traits",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "num-conv"
|
||||||
|
version = "0.2.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "num-integer"
|
name = "num-integer"
|
||||||
version = "0.1.46"
|
version = "0.1.46"
|
||||||
@@ -1121,6 +1374,15 @@ dependencies = [
|
|||||||
"autocfg",
|
"autocfg",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "oid-registry"
|
||||||
|
version = "0.6.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff"
|
||||||
|
dependencies = [
|
||||||
|
"asn1-rs",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "once_cell"
|
name = "once_cell"
|
||||||
version = "1.21.3"
|
version = "1.21.3"
|
||||||
@@ -1151,7 +1413,7 @@ checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"cfg-if",
|
"cfg-if",
|
||||||
"libc",
|
"libc",
|
||||||
"redox_syscall",
|
"redox_syscall 0.5.18",
|
||||||
"smallvec",
|
"smallvec",
|
||||||
"windows-link",
|
"windows-link",
|
||||||
]
|
]
|
||||||
@@ -1211,6 +1473,12 @@ dependencies = [
|
|||||||
"zerovec",
|
"zerovec",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "powerfmt"
|
||||||
|
version = "0.2.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "ppv-lite86"
|
name = "ppv-lite86"
|
||||||
version = "0.2.21"
|
version = "0.2.21"
|
||||||
@@ -1227,7 +1495,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
|||||||
checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b"
|
checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"syn",
|
"syn 2.0.114",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -1247,7 +1515,7 @@ checksum = "37566cb3fdacef14c0737f9546df7cfeadbfbc9fef10991038bf5015d0c80532"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"bit-set",
|
"bit-set",
|
||||||
"bit-vec",
|
"bit-vec",
|
||||||
"bitflags",
|
"bitflags 2.10.0",
|
||||||
"num-traits",
|
"num-traits",
|
||||||
"rand",
|
"rand",
|
||||||
"rand_chacha",
|
"rand_chacha",
|
||||||
@@ -1271,14 +1539,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
|||||||
checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20"
|
checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"bytes",
|
"bytes",
|
||||||
"cfg_aliases",
|
"cfg_aliases 0.2.1",
|
||||||
"pin-project-lite",
|
"pin-project-lite",
|
||||||
"quinn-proto",
|
"quinn-proto",
|
||||||
"quinn-udp",
|
"quinn-udp",
|
||||||
"rustc-hash",
|
"rustc-hash",
|
||||||
"rustls",
|
"rustls",
|
||||||
"socket2 0.6.2",
|
"socket2 0.6.2",
|
||||||
"thiserror",
|
"thiserror 2.0.18",
|
||||||
"tokio",
|
"tokio",
|
||||||
"tracing",
|
"tracing",
|
||||||
"web-time",
|
"web-time",
|
||||||
@@ -1299,7 +1567,7 @@ dependencies = [
|
|||||||
"rustls",
|
"rustls",
|
||||||
"rustls-pki-types",
|
"rustls-pki-types",
|
||||||
"slab",
|
"slab",
|
||||||
"thiserror",
|
"thiserror 2.0.18",
|
||||||
"tinyvec",
|
"tinyvec",
|
||||||
"tracing",
|
"tracing",
|
||||||
"web-time",
|
"web-time",
|
||||||
@@ -1311,7 +1579,7 @@ version = "0.5.14"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd"
|
checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"cfg_aliases",
|
"cfg_aliases 0.2.1",
|
||||||
"libc",
|
"libc",
|
||||||
"once_cell",
|
"once_cell",
|
||||||
"socket2 0.6.2",
|
"socket2 0.6.2",
|
||||||
@@ -1398,7 +1666,16 @@ version = "0.5.18"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d"
|
checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"bitflags",
|
"bitflags 2.10.0",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "redox_syscall"
|
||||||
|
version = "0.7.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "35985aa610addc02e24fc232012c86fd11f14111180f902b67e2d5331f8ebf2b"
|
||||||
|
dependencies = [
|
||||||
|
"bitflags 2.10.0",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -1465,7 +1742,7 @@ dependencies = [
|
|||||||
"wasm-bindgen",
|
"wasm-bindgen",
|
||||||
"wasm-bindgen-futures",
|
"wasm-bindgen-futures",
|
||||||
"web-sys",
|
"web-sys",
|
||||||
"webpki-roots",
|
"webpki-roots 1.0.6",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -1488,13 +1765,31 @@ version = "2.1.1"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d"
|
checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rustc_version"
|
||||||
|
version = "0.4.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92"
|
||||||
|
dependencies = [
|
||||||
|
"semver",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "rusticata-macros"
|
||||||
|
version = "4.1.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632"
|
||||||
|
dependencies = [
|
||||||
|
"nom",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "rustix"
|
name = "rustix"
|
||||||
version = "1.1.3"
|
version = "1.1.3"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34"
|
checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"bitflags",
|
"bitflags 2.10.0",
|
||||||
"errno",
|
"errno",
|
||||||
"libc",
|
"libc",
|
||||||
"linux-raw-sys",
|
"linux-raw-sys",
|
||||||
@@ -1608,7 +1903,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn",
|
"syn 2.0.114",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -1736,6 +2031,17 @@ version = "2.6.1"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
|
checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "syn"
|
||||||
|
version = "1.0.109"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"unicode-ident",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "syn"
|
name = "syn"
|
||||||
version = "2.0.114"
|
version = "2.0.114"
|
||||||
@@ -1756,6 +2062,18 @@ dependencies = [
|
|||||||
"futures-core",
|
"futures-core",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "synstructure"
|
||||||
|
version = "0.12.6"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn 1.0.109",
|
||||||
|
"unicode-xid",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "synstructure"
|
name = "synstructure"
|
||||||
version = "0.13.2"
|
version = "0.13.2"
|
||||||
@@ -1764,18 +2082,20 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn",
|
"syn 2.0.114",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "telemt"
|
name = "telemt"
|
||||||
version = "3.0.0"
|
version = "3.0.10"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"aes",
|
"aes",
|
||||||
|
"anyhow",
|
||||||
"base64",
|
"base64",
|
||||||
"bytes",
|
"bytes",
|
||||||
"cbc",
|
"cbc",
|
||||||
"chrono",
|
"chrono",
|
||||||
|
"crc32c",
|
||||||
"crc32fast",
|
"crc32fast",
|
||||||
"criterion",
|
"criterion",
|
||||||
"crossbeam-queue",
|
"crossbeam-queue",
|
||||||
@@ -1788,9 +2108,12 @@ dependencies = [
|
|||||||
"httpdate",
|
"httpdate",
|
||||||
"hyper",
|
"hyper",
|
||||||
"hyper-util",
|
"hyper-util",
|
||||||
|
"ipnetwork",
|
||||||
"libc",
|
"libc",
|
||||||
"lru",
|
"lru",
|
||||||
"md-5",
|
"md-5",
|
||||||
|
"nix",
|
||||||
|
"notify",
|
||||||
"num-bigint",
|
"num-bigint",
|
||||||
"num-traits",
|
"num-traits",
|
||||||
"parking_lot",
|
"parking_lot",
|
||||||
@@ -1798,19 +2121,23 @@ dependencies = [
|
|||||||
"rand",
|
"rand",
|
||||||
"regex",
|
"regex",
|
||||||
"reqwest",
|
"reqwest",
|
||||||
|
"rustls",
|
||||||
"serde",
|
"serde",
|
||||||
"serde_json",
|
"serde_json",
|
||||||
"sha1",
|
"sha1",
|
||||||
"sha2",
|
"sha2",
|
||||||
"socket2 0.5.10",
|
"socket2 0.5.10",
|
||||||
"thiserror",
|
"thiserror 2.0.18",
|
||||||
"tokio",
|
"tokio",
|
||||||
|
"tokio-rustls",
|
||||||
"tokio-test",
|
"tokio-test",
|
||||||
"tokio-util",
|
"tokio-util",
|
||||||
"toml",
|
"toml",
|
||||||
"tracing",
|
"tracing",
|
||||||
"tracing-subscriber",
|
"tracing-subscriber",
|
||||||
"url",
|
"url",
|
||||||
|
"webpki-roots 0.26.11",
|
||||||
|
"x509-parser",
|
||||||
"zeroize",
|
"zeroize",
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -1827,13 +2154,33 @@ dependencies = [
|
|||||||
"windows-sys 0.61.2",
|
"windows-sys 0.61.2",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "thiserror"
|
||||||
|
version = "1.0.69"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52"
|
||||||
|
dependencies = [
|
||||||
|
"thiserror-impl 1.0.69",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "thiserror"
|
name = "thiserror"
|
||||||
version = "2.0.18"
|
version = "2.0.18"
|
||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4"
|
checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"thiserror-impl",
|
"thiserror-impl 2.0.18",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "thiserror-impl"
|
||||||
|
version = "1.0.69"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
|
||||||
|
dependencies = [
|
||||||
|
"proc-macro2",
|
||||||
|
"quote",
|
||||||
|
"syn 2.0.114",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -1844,7 +2191,7 @@ checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn",
|
"syn 2.0.114",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -1856,6 +2203,37 @@ dependencies = [
|
|||||||
"cfg-if",
|
"cfg-if",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "time"
|
||||||
|
version = "0.3.47"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c"
|
||||||
|
dependencies = [
|
||||||
|
"deranged",
|
||||||
|
"itoa",
|
||||||
|
"num-conv",
|
||||||
|
"powerfmt",
|
||||||
|
"serde_core",
|
||||||
|
"time-core",
|
||||||
|
"time-macros",
|
||||||
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "time-core"
|
||||||
|
version = "0.1.8"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "time-macros"
|
||||||
|
version = "0.2.27"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215"
|
||||||
|
dependencies = [
|
||||||
|
"num-conv",
|
||||||
|
"time-core",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "tinystr"
|
name = "tinystr"
|
||||||
version = "0.8.2"
|
version = "0.8.2"
|
||||||
@@ -1899,7 +2277,7 @@ checksum = "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"bytes",
|
"bytes",
|
||||||
"libc",
|
"libc",
|
||||||
"mio",
|
"mio 1.1.1",
|
||||||
"parking_lot",
|
"parking_lot",
|
||||||
"pin-project-lite",
|
"pin-project-lite",
|
||||||
"signal-hook-registry",
|
"signal-hook-registry",
|
||||||
@@ -1917,7 +2295,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn",
|
"syn 2.0.114",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -2031,7 +2409,7 @@ version = "0.6.8"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8"
|
checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"bitflags",
|
"bitflags 2.10.0",
|
||||||
"bytes",
|
"bytes",
|
||||||
"futures-util",
|
"futures-util",
|
||||||
"http",
|
"http",
|
||||||
@@ -2074,7 +2452,7 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn",
|
"syn 2.0.114",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -2280,7 +2658,7 @@ dependencies = [
|
|||||||
"bumpalo",
|
"bumpalo",
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn",
|
"syn 2.0.114",
|
||||||
"wasm-bindgen-shared",
|
"wasm-bindgen-shared",
|
||||||
]
|
]
|
||||||
|
|
||||||
@@ -2321,7 +2699,7 @@ version = "0.244.0"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe"
|
checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"bitflags",
|
"bitflags 2.10.0",
|
||||||
"hashbrown 0.15.5",
|
"hashbrown 0.15.5",
|
||||||
"indexmap",
|
"indexmap",
|
||||||
"semver",
|
"semver",
|
||||||
@@ -2347,6 +2725,15 @@ dependencies = [
|
|||||||
"wasm-bindgen",
|
"wasm-bindgen",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "webpki-roots"
|
||||||
|
version = "0.26.11"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9"
|
||||||
|
dependencies = [
|
||||||
|
"webpki-roots 1.0.6",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "webpki-roots"
|
name = "webpki-roots"
|
||||||
version = "1.0.6"
|
version = "1.0.6"
|
||||||
@@ -2386,7 +2773,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn",
|
"syn 2.0.114",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -2397,7 +2784,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn",
|
"syn 2.0.114",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -2424,6 +2811,15 @@ dependencies = [
|
|||||||
"windows-link",
|
"windows-link",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows-sys"
|
||||||
|
version = "0.48.0"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
|
||||||
|
dependencies = [
|
||||||
|
"windows-targets 0.48.5",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "windows-sys"
|
name = "windows-sys"
|
||||||
version = "0.52.0"
|
version = "0.52.0"
|
||||||
@@ -2451,6 +2847,21 @@ dependencies = [
|
|||||||
"windows-link",
|
"windows-link",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows-targets"
|
||||||
|
version = "0.48.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
|
||||||
|
dependencies = [
|
||||||
|
"windows_aarch64_gnullvm 0.48.5",
|
||||||
|
"windows_aarch64_msvc 0.48.5",
|
||||||
|
"windows_i686_gnu 0.48.5",
|
||||||
|
"windows_i686_msvc 0.48.5",
|
||||||
|
"windows_x86_64_gnu 0.48.5",
|
||||||
|
"windows_x86_64_gnullvm 0.48.5",
|
||||||
|
"windows_x86_64_msvc 0.48.5",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "windows-targets"
|
name = "windows-targets"
|
||||||
version = "0.52.6"
|
version = "0.52.6"
|
||||||
@@ -2484,6 +2895,12 @@ dependencies = [
|
|||||||
"windows_x86_64_msvc 0.53.1",
|
"windows_x86_64_msvc 0.53.1",
|
||||||
]
|
]
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_aarch64_gnullvm"
|
||||||
|
version = "0.48.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "windows_aarch64_gnullvm"
|
name = "windows_aarch64_gnullvm"
|
||||||
version = "0.52.6"
|
version = "0.52.6"
|
||||||
@@ -2496,6 +2913,12 @@ version = "0.53.1"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53"
|
checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_aarch64_msvc"
|
||||||
|
version = "0.48.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "windows_aarch64_msvc"
|
name = "windows_aarch64_msvc"
|
||||||
version = "0.52.6"
|
version = "0.52.6"
|
||||||
@@ -2508,6 +2931,12 @@ version = "0.53.1"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006"
|
checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_i686_gnu"
|
||||||
|
version = "0.48.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "windows_i686_gnu"
|
name = "windows_i686_gnu"
|
||||||
version = "0.52.6"
|
version = "0.52.6"
|
||||||
@@ -2532,6 +2961,12 @@ version = "0.53.1"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c"
|
checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_i686_msvc"
|
||||||
|
version = "0.48.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "windows_i686_msvc"
|
name = "windows_i686_msvc"
|
||||||
version = "0.52.6"
|
version = "0.52.6"
|
||||||
@@ -2544,6 +2979,12 @@ version = "0.53.1"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2"
|
checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_x86_64_gnu"
|
||||||
|
version = "0.48.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "windows_x86_64_gnu"
|
name = "windows_x86_64_gnu"
|
||||||
version = "0.52.6"
|
version = "0.52.6"
|
||||||
@@ -2556,6 +2997,12 @@ version = "0.53.1"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499"
|
checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_x86_64_gnullvm"
|
||||||
|
version = "0.48.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "windows_x86_64_gnullvm"
|
name = "windows_x86_64_gnullvm"
|
||||||
version = "0.52.6"
|
version = "0.52.6"
|
||||||
@@ -2568,6 +3015,12 @@ version = "0.53.1"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1"
|
checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "windows_x86_64_msvc"
|
||||||
|
version = "0.48.5"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "windows_x86_64_msvc"
|
name = "windows_x86_64_msvc"
|
||||||
version = "0.52.6"
|
version = "0.52.6"
|
||||||
@@ -2619,7 +3072,7 @@ dependencies = [
|
|||||||
"heck",
|
"heck",
|
||||||
"indexmap",
|
"indexmap",
|
||||||
"prettyplease",
|
"prettyplease",
|
||||||
"syn",
|
"syn 2.0.114",
|
||||||
"wasm-metadata",
|
"wasm-metadata",
|
||||||
"wit-bindgen-core",
|
"wit-bindgen-core",
|
||||||
"wit-component",
|
"wit-component",
|
||||||
@@ -2635,7 +3088,7 @@ dependencies = [
|
|||||||
"prettyplease",
|
"prettyplease",
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn",
|
"syn 2.0.114",
|
||||||
"wit-bindgen-core",
|
"wit-bindgen-core",
|
||||||
"wit-bindgen-rust",
|
"wit-bindgen-rust",
|
||||||
]
|
]
|
||||||
@@ -2647,7 +3100,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
|||||||
checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2"
|
checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2"
|
||||||
dependencies = [
|
dependencies = [
|
||||||
"anyhow",
|
"anyhow",
|
||||||
"bitflags",
|
"bitflags 2.10.0",
|
||||||
"indexmap",
|
"indexmap",
|
||||||
"log",
|
"log",
|
||||||
"serde",
|
"serde",
|
||||||
@@ -2683,6 +3136,23 @@ version = "0.6.2"
|
|||||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9"
|
checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9"
|
||||||
|
|
||||||
|
[[package]]
|
||||||
|
name = "x509-parser"
|
||||||
|
version = "0.15.1"
|
||||||
|
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||||
|
checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da"
|
||||||
|
dependencies = [
|
||||||
|
"asn1-rs",
|
||||||
|
"data-encoding",
|
||||||
|
"der-parser",
|
||||||
|
"lazy_static",
|
||||||
|
"nom",
|
||||||
|
"oid-registry",
|
||||||
|
"rusticata-macros",
|
||||||
|
"thiserror 1.0.69",
|
||||||
|
"time",
|
||||||
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
name = "yoke"
|
name = "yoke"
|
||||||
version = "0.8.1"
|
version = "0.8.1"
|
||||||
@@ -2702,8 +3172,8 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn",
|
"syn 2.0.114",
|
||||||
"synstructure",
|
"synstructure 0.13.2",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -2723,7 +3193,7 @@ checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn",
|
"syn 2.0.114",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -2743,8 +3213,8 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn",
|
"syn 2.0.114",
|
||||||
"synstructure",
|
"synstructure 0.13.2",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -2764,7 +3234,7 @@ checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn",
|
"syn 2.0.114",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
@@ -2797,7 +3267,7 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3"
|
|||||||
dependencies = [
|
dependencies = [
|
||||||
"proc-macro2",
|
"proc-macro2",
|
||||||
"quote",
|
"quote",
|
||||||
"syn",
|
"syn 2.0.114",
|
||||||
]
|
]
|
||||||
|
|
||||||
[[package]]
|
[[package]]
|
||||||
|
|||||||
@@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "telemt"
|
name = "telemt"
|
||||||
version = "3.0.10"
|
version = "3.0.13"
|
||||||
edition = "2024"
|
edition = "2024"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
# ==========================
|
# ==========================
|
||||||
# Stage 1: Build
|
# Stage 1: Build
|
||||||
# ==========================
|
# ==========================
|
||||||
FROM rust:1.85-slim-bookworm AS builder
|
FROM rust:1.88-slim-bookworm AS builder
|
||||||
|
|
||||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||||
pkg-config \
|
pkg-config \
|
||||||
@@ -40,4 +40,4 @@ EXPOSE 443
|
|||||||
EXPOSE 9090
|
EXPOSE 9090
|
||||||
|
|
||||||
ENTRYPOINT ["/app/telemt"]
|
ENTRYPOINT ["/app/telemt"]
|
||||||
CMD ["config.toml"]
|
CMD ["config.toml"]
|
||||||
|
|||||||
@@ -31,7 +31,7 @@
|
|||||||
- Улучшение обработки ошибок в edge-case транспортных сценариях
|
- Улучшение обработки ошибок в edge-case транспортных сценариях
|
||||||
|
|
||||||
Релиз:
|
Релиз:
|
||||||
[3.0.9](https://github.com/telemt/telemt/releases/tag/3.0.9)
|
[3.0.12](https://github.com/telemt/telemt/releases/tag/3.0.12)
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
@@ -69,7 +69,7 @@ Additionally, we implemented a set of robustness enhancements designed to:
|
|||||||
- Improve error handling in edge-case transport scenarios
|
- Improve error handling in edge-case transport scenarios
|
||||||
|
|
||||||
Release:
|
Release:
|
||||||
[3.0.9](https://github.com/telemt/telemt/releases/tag/3.0.9)
|
[3.0.12](https://github.com/telemt/telemt/releases/tag/3.0.12)
|
||||||
|
|
||||||
---
|
---
|
||||||
|
|
||||||
|
|||||||
@@ -38,10 +38,17 @@ me_warmup_stagger_enabled = true
|
|||||||
me_warmup_step_delay_ms = 500 # Base delay between extra connects
|
me_warmup_step_delay_ms = 500 # Base delay between extra connects
|
||||||
me_warmup_step_jitter_ms = 300 # Jitter for warmup delay
|
me_warmup_step_jitter_ms = 300 # Jitter for warmup delay
|
||||||
# Reconnect policy knobs.
|
# Reconnect policy knobs.
|
||||||
me_reconnect_max_concurrent_per_dc = 1 # Parallel reconnects per DC - EXPERIMENTAL! UNSTABLE!
|
me_reconnect_max_concurrent_per_dc = 4 # Parallel reconnects per DC - EXPERIMENTAL! UNSTABLE!
|
||||||
me_reconnect_backoff_base_ms = 500 # Backoff start
|
me_reconnect_backoff_base_ms = 500 # Backoff start
|
||||||
me_reconnect_backoff_cap_ms = 30000 # Backoff cap
|
me_reconnect_backoff_cap_ms = 30000 # Backoff cap
|
||||||
me_reconnect_fast_retry_count = 11 # Quick retries before backoff
|
me_reconnect_fast_retry_count = 11 # Quick retries before backoff
|
||||||
|
update_every = 7200 # Resolve the active updater interval for ME infrastructure refresh tasks.
|
||||||
|
crypto_pending_buffer = 262144 # Max pending ciphertext buffer per client writer (bytes). Controls FakeTLS backpressure vs throughput.
|
||||||
|
max_client_frame = 16777216 # Maximum allowed client MTProto frame size (bytes).
|
||||||
|
desync_all_full = false # Emit full crypto-desync forensic logs for every event. When false, full forensic details are emitted once per key window.
|
||||||
|
me_reinit_drain_timeout_secs = 300 # Drain timeout in seconds for stale ME writers after endpoint map changes. Set to 0 to keep stale writers draining indefinitely (no force-close).
|
||||||
|
auto_degradation_enabled = true # Enable auto-degradation from ME to Direct-DC.
|
||||||
|
degradation_min_unavailable_dc_groups = 2 # Minimum unavailable ME DC groups before degrading.
|
||||||
|
|
||||||
[general.modes]
|
[general.modes]
|
||||||
classic = false
|
classic = false
|
||||||
|
|||||||
@@ -194,6 +194,12 @@ prefer_ipv6 = false
|
|||||||
fast_mode = true
|
fast_mode = true
|
||||||
use_middle_proxy = false
|
use_middle_proxy = false
|
||||||
log_level = "normal"
|
log_level = "normal"
|
||||||
|
desync_all_full = false
|
||||||
|
update_every = 43200
|
||||||
|
hardswap = false
|
||||||
|
me_pool_drain_ttl_secs = 90
|
||||||
|
me_pool_min_fresh_ratio = 0.8
|
||||||
|
me_reinit_drain_timeout_secs = 120
|
||||||
|
|
||||||
[network]
|
[network]
|
||||||
ipv4 = true
|
ipv4 = true
|
||||||
@@ -229,6 +235,7 @@ tls_domain = "{domain}"
|
|||||||
mask = true
|
mask = true
|
||||||
mask_port = 443
|
mask_port = 443
|
||||||
fake_cert_len = 2048
|
fake_cert_len = 2048
|
||||||
|
tls_full_cert_ttl_secs = 90
|
||||||
|
|
||||||
[access]
|
[access]
|
||||||
replay_check_len = 65536
|
replay_check_len = 65536
|
||||||
|
|||||||
@@ -1,4 +1,3 @@
|
|||||||
use std::net::IpAddr;
|
|
||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use ipnetwork::IpNetwork;
|
use ipnetwork::IpNetwork;
|
||||||
use serde::Deserialize;
|
use serde::Deserialize;
|
||||||
@@ -83,7 +82,7 @@ pub(crate) fn default_unknown_dc_log_path() -> Option<String> {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn default_pool_size() -> usize {
|
pub(crate) fn default_pool_size() -> usize {
|
||||||
2
|
8
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn default_keepalive_interval() -> u64 {
|
pub(crate) fn default_keepalive_interval() -> u64 {
|
||||||
@@ -118,10 +117,18 @@ pub(crate) fn default_max_client_frame() -> usize {
|
|||||||
16 * 1024 * 1024
|
16 * 1024 * 1024
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_desync_all_full() -> bool {
|
||||||
|
false
|
||||||
|
}
|
||||||
|
|
||||||
pub(crate) fn default_tls_new_session_tickets() -> u8 {
|
pub(crate) fn default_tls_new_session_tickets() -> u8 {
|
||||||
0
|
0
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_tls_full_cert_ttl_secs() -> u64 {
|
||||||
|
90
|
||||||
|
}
|
||||||
|
|
||||||
pub(crate) fn default_server_hello_delay_min_ms() -> u64 {
|
pub(crate) fn default_server_hello_delay_min_ms() -> u64 {
|
||||||
0
|
0
|
||||||
}
|
}
|
||||||
@@ -136,10 +143,18 @@ pub(crate) fn default_alpn_enforce() -> bool {
|
|||||||
|
|
||||||
pub(crate) fn default_stun_servers() -> Vec<String> {
|
pub(crate) fn default_stun_servers() -> Vec<String> {
|
||||||
vec![
|
vec![
|
||||||
|
"stun.l.google.com:5349".to_string(),
|
||||||
|
"stun1.l.google.com:3478".to_string(),
|
||||||
|
"stun.gmx.net:3478".to_string(),
|
||||||
"stun.l.google.com:19302".to_string(),
|
"stun.l.google.com:19302".to_string(),
|
||||||
|
"stun.1und1.de:3478".to_string(),
|
||||||
"stun1.l.google.com:19302".to_string(),
|
"stun1.l.google.com:19302".to_string(),
|
||||||
"stun2.l.google.com:19302".to_string(),
|
"stun2.l.google.com:19302".to_string(),
|
||||||
|
"stun3.l.google.com:19302".to_string(),
|
||||||
|
"stun4.l.google.com:19302".to_string(),
|
||||||
|
"stun.services.mozilla.com:3478".to_string(),
|
||||||
"stun.stunprotocol.org:3478".to_string(),
|
"stun.stunprotocol.org:3478".to_string(),
|
||||||
|
"stun.nextcloud.com:3478".to_string(),
|
||||||
"stun.voip.eutelia.it:3478".to_string(),
|
"stun.voip.eutelia.it:3478".to_string(),
|
||||||
]
|
]
|
||||||
}
|
}
|
||||||
@@ -156,11 +171,31 @@ pub(crate) fn default_cache_public_ip_path() -> String {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn default_proxy_secret_reload_secs() -> u64 {
|
pub(crate) fn default_proxy_secret_reload_secs() -> u64 {
|
||||||
12 * 60 * 60
|
1 * 60 * 60
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn default_proxy_config_reload_secs() -> u64 {
|
pub(crate) fn default_proxy_config_reload_secs() -> u64 {
|
||||||
12 * 60 * 60
|
1 * 60 * 60
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_update_every_secs() -> u64 {
|
||||||
|
1 * 30 * 60
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_reinit_drain_timeout_secs() -> u64 {
|
||||||
|
120
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_pool_drain_ttl_secs() -> u64 {
|
||||||
|
90
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_me_pool_min_fresh_ratio() -> f32 {
|
||||||
|
0.8
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_hardswap() -> bool {
|
||||||
|
true
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) fn default_ntp_check() -> bool {
|
pub(crate) fn default_ntp_check() -> bool {
|
||||||
|
|||||||
@@ -10,6 +10,12 @@
|
|||||||
//! | `general` | `ad_tag` | Passed on next connection |
|
//! | `general` | `ad_tag` | Passed on next connection |
|
||||||
//! | `general` | `middle_proxy_pool_size` | Passed on next connection |
|
//! | `general` | `middle_proxy_pool_size` | Passed on next connection |
|
||||||
//! | `general` | `me_keepalive_*` | Passed on next connection |
|
//! | `general` | `me_keepalive_*` | Passed on next connection |
|
||||||
|
//! | `general` | `desync_all_full` | Applied immediately |
|
||||||
|
//! | `general` | `update_every` | Applied to ME updater immediately |
|
||||||
|
//! | `general` | `hardswap` | Applied on next ME map update |
|
||||||
|
//! | `general` | `me_pool_drain_ttl_secs` | Applied on next ME map update |
|
||||||
|
//! | `general` | `me_pool_min_fresh_ratio` | Applied on next ME map update |
|
||||||
|
//! | `general` | `me_reinit_drain_timeout_secs`| Applied on next ME map update |
|
||||||
//! | `access` | All user/quota fields | Effective immediately |
|
//! | `access` | All user/quota fields | Effective immediately |
|
||||||
//!
|
//!
|
||||||
//! Fields that require re-binding sockets (`server.port`, `censorship.*`,
|
//! Fields that require re-binding sockets (`server.port`, `censorship.*`,
|
||||||
@@ -34,6 +40,12 @@ pub struct HotFields {
|
|||||||
pub log_level: LogLevel,
|
pub log_level: LogLevel,
|
||||||
pub ad_tag: Option<String>,
|
pub ad_tag: Option<String>,
|
||||||
pub middle_proxy_pool_size: usize,
|
pub middle_proxy_pool_size: usize,
|
||||||
|
pub desync_all_full: bool,
|
||||||
|
pub update_every_secs: u64,
|
||||||
|
pub hardswap: bool,
|
||||||
|
pub me_pool_drain_ttl_secs: u64,
|
||||||
|
pub me_pool_min_fresh_ratio: f32,
|
||||||
|
pub me_reinit_drain_timeout_secs: u64,
|
||||||
pub me_keepalive_enabled: bool,
|
pub me_keepalive_enabled: bool,
|
||||||
pub me_keepalive_interval_secs: u64,
|
pub me_keepalive_interval_secs: u64,
|
||||||
pub me_keepalive_jitter_secs: u64,
|
pub me_keepalive_jitter_secs: u64,
|
||||||
@@ -47,6 +59,12 @@ impl HotFields {
|
|||||||
log_level: cfg.general.log_level.clone(),
|
log_level: cfg.general.log_level.clone(),
|
||||||
ad_tag: cfg.general.ad_tag.clone(),
|
ad_tag: cfg.general.ad_tag.clone(),
|
||||||
middle_proxy_pool_size: cfg.general.middle_proxy_pool_size,
|
middle_proxy_pool_size: cfg.general.middle_proxy_pool_size,
|
||||||
|
desync_all_full: cfg.general.desync_all_full,
|
||||||
|
update_every_secs: cfg.general.effective_update_every_secs(),
|
||||||
|
hardswap: cfg.general.hardswap,
|
||||||
|
me_pool_drain_ttl_secs: cfg.general.me_pool_drain_ttl_secs,
|
||||||
|
me_pool_min_fresh_ratio: cfg.general.me_pool_min_fresh_ratio,
|
||||||
|
me_reinit_drain_timeout_secs: cfg.general.me_reinit_drain_timeout_secs,
|
||||||
me_keepalive_enabled: cfg.general.me_keepalive_enabled,
|
me_keepalive_enabled: cfg.general.me_keepalive_enabled,
|
||||||
me_keepalive_interval_secs: cfg.general.me_keepalive_interval_secs,
|
me_keepalive_interval_secs: cfg.general.me_keepalive_interval_secs,
|
||||||
me_keepalive_jitter_secs: cfg.general.me_keepalive_jitter_secs,
|
me_keepalive_jitter_secs: cfg.general.me_keepalive_jitter_secs,
|
||||||
@@ -175,6 +193,48 @@ fn log_changes(
|
|||||||
);
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if old_hot.desync_all_full != new_hot.desync_all_full {
|
||||||
|
info!(
|
||||||
|
"config reload: desync_all_full: {} → {}",
|
||||||
|
old_hot.desync_all_full, new_hot.desync_all_full,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if old_hot.update_every_secs != new_hot.update_every_secs {
|
||||||
|
info!(
|
||||||
|
"config reload: update_every(effective): {}s → {}s",
|
||||||
|
old_hot.update_every_secs, new_hot.update_every_secs,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if old_hot.hardswap != new_hot.hardswap {
|
||||||
|
info!(
|
||||||
|
"config reload: hardswap: {} → {}",
|
||||||
|
old_hot.hardswap, new_hot.hardswap,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if old_hot.me_pool_drain_ttl_secs != new_hot.me_pool_drain_ttl_secs {
|
||||||
|
info!(
|
||||||
|
"config reload: me_pool_drain_ttl_secs: {}s → {}s",
|
||||||
|
old_hot.me_pool_drain_ttl_secs, new_hot.me_pool_drain_ttl_secs,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (old_hot.me_pool_min_fresh_ratio - new_hot.me_pool_min_fresh_ratio).abs() > f32::EPSILON {
|
||||||
|
info!(
|
||||||
|
"config reload: me_pool_min_fresh_ratio: {:.3} → {:.3}",
|
||||||
|
old_hot.me_pool_min_fresh_ratio, new_hot.me_pool_min_fresh_ratio,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
if old_hot.me_reinit_drain_timeout_secs != new_hot.me_reinit_drain_timeout_secs {
|
||||||
|
info!(
|
||||||
|
"config reload: me_reinit_drain_timeout_secs: {}s → {}s",
|
||||||
|
old_hot.me_reinit_drain_timeout_secs, new_hot.me_reinit_drain_timeout_secs,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
if old_hot.me_keepalive_enabled != new_hot.me_keepalive_enabled
|
if old_hot.me_keepalive_enabled != new_hot.me_keepalive_enabled
|
||||||
|| old_hot.me_keepalive_interval_secs != new_hot.me_keepalive_interval_secs
|
|| old_hot.me_keepalive_interval_secs != new_hot.me_keepalive_interval_secs
|
||||||
|| old_hot.me_keepalive_jitter_secs != new_hot.me_keepalive_jitter_secs
|
|| old_hot.me_keepalive_jitter_secs != new_hot.me_keepalive_jitter_secs
|
||||||
|
|||||||
@@ -117,6 +117,52 @@ impl ProxyConfig {
|
|||||||
let mut config: ProxyConfig =
|
let mut config: ProxyConfig =
|
||||||
toml::from_str(&processed).map_err(|e| ProxyError::Config(e.to_string()))?;
|
toml::from_str(&processed).map_err(|e| ProxyError::Config(e.to_string()))?;
|
||||||
|
|
||||||
|
if let Some(update_every) = config.general.update_every {
|
||||||
|
if update_every == 0 {
|
||||||
|
return Err(ProxyError::Config(
|
||||||
|
"general.update_every must be > 0".to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
let legacy_secret = config.general.proxy_secret_auto_reload_secs;
|
||||||
|
let legacy_config = config.general.proxy_config_auto_reload_secs;
|
||||||
|
let effective = legacy_secret.min(legacy_config);
|
||||||
|
if effective == 0 {
|
||||||
|
return Err(ProxyError::Config(
|
||||||
|
"legacy proxy_*_auto_reload_secs values must be > 0 when general.update_every is not set".to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
if legacy_secret != default_proxy_secret_reload_secs()
|
||||||
|
|| legacy_config != default_proxy_config_reload_secs()
|
||||||
|
{
|
||||||
|
warn!(
|
||||||
|
proxy_secret_auto_reload_secs = legacy_secret,
|
||||||
|
proxy_config_auto_reload_secs = legacy_config,
|
||||||
|
effective_update_every_secs = effective,
|
||||||
|
"proxy_*_auto_reload_secs are deprecated; set general.update_every"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if !(0.0..=1.0).contains(&config.general.me_pool_min_fresh_ratio) {
|
||||||
|
return Err(ProxyError::Config(
|
||||||
|
"general.me_pool_min_fresh_ratio must be within [0.0, 1.0]".to_string(),
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.general.effective_me_pool_force_close_secs() > 0
|
||||||
|
&& config.general.effective_me_pool_force_close_secs()
|
||||||
|
< config.general.me_pool_drain_ttl_secs
|
||||||
|
{
|
||||||
|
warn!(
|
||||||
|
me_pool_drain_ttl_secs = config.general.me_pool_drain_ttl_secs,
|
||||||
|
me_reinit_drain_timeout_secs = config.general.effective_me_pool_force_close_secs(),
|
||||||
|
"force-close timeout is lower than drain TTL; bumping force-close timeout to TTL"
|
||||||
|
);
|
||||||
|
config.general.me_reinit_drain_timeout_secs = config.general.me_pool_drain_ttl_secs;
|
||||||
|
}
|
||||||
|
|
||||||
// Validate secrets.
|
// Validate secrets.
|
||||||
for (user, secret) in &config.access.users {
|
for (user, secret) in &config.access.users {
|
||||||
if !secret.chars().all(|c| c.is_ascii_hexdigit()) || secret.len() != 32 {
|
if !secret.chars().all(|c| c.is_ascii_hexdigit()) || secret.len() != 32 {
|
||||||
@@ -347,4 +393,109 @@ mod tests {
|
|||||||
.unwrap_or(false));
|
.unwrap_or(false));
|
||||||
let _ = std::fs::remove_file(path);
|
let _ = std::fs::remove_file(path);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn update_every_overrides_legacy_fields() {
|
||||||
|
let toml = r#"
|
||||||
|
[general]
|
||||||
|
update_every = 123
|
||||||
|
proxy_secret_auto_reload_secs = 700
|
||||||
|
proxy_config_auto_reload_secs = 800
|
||||||
|
|
||||||
|
[censorship]
|
||||||
|
tls_domain = "example.com"
|
||||||
|
|
||||||
|
[access.users]
|
||||||
|
user = "00000000000000000000000000000000"
|
||||||
|
"#;
|
||||||
|
let dir = std::env::temp_dir();
|
||||||
|
let path = dir.join("telemt_update_every_override_test.toml");
|
||||||
|
std::fs::write(&path, toml).unwrap();
|
||||||
|
let cfg = ProxyConfig::load(&path).unwrap();
|
||||||
|
assert_eq!(cfg.general.effective_update_every_secs(), 123);
|
||||||
|
let _ = std::fs::remove_file(path);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn update_every_fallback_to_legacy_min() {
|
||||||
|
let toml = r#"
|
||||||
|
[general]
|
||||||
|
proxy_secret_auto_reload_secs = 600
|
||||||
|
proxy_config_auto_reload_secs = 120
|
||||||
|
|
||||||
|
[censorship]
|
||||||
|
tls_domain = "example.com"
|
||||||
|
|
||||||
|
[access.users]
|
||||||
|
user = "00000000000000000000000000000000"
|
||||||
|
"#;
|
||||||
|
let dir = std::env::temp_dir();
|
||||||
|
let path = dir.join("telemt_update_every_legacy_min_test.toml");
|
||||||
|
std::fs::write(&path, toml).unwrap();
|
||||||
|
let cfg = ProxyConfig::load(&path).unwrap();
|
||||||
|
assert_eq!(cfg.general.update_every, None);
|
||||||
|
assert_eq!(cfg.general.effective_update_every_secs(), 120);
|
||||||
|
let _ = std::fs::remove_file(path);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn update_every_zero_is_rejected() {
|
||||||
|
let toml = r#"
|
||||||
|
[general]
|
||||||
|
update_every = 0
|
||||||
|
|
||||||
|
[censorship]
|
||||||
|
tls_domain = "example.com"
|
||||||
|
|
||||||
|
[access.users]
|
||||||
|
user = "00000000000000000000000000000000"
|
||||||
|
"#;
|
||||||
|
let dir = std::env::temp_dir();
|
||||||
|
let path = dir.join("telemt_update_every_zero_test.toml");
|
||||||
|
std::fs::write(&path, toml).unwrap();
|
||||||
|
let err = ProxyConfig::load(&path).unwrap_err().to_string();
|
||||||
|
assert!(err.contains("general.update_every must be > 0"));
|
||||||
|
let _ = std::fs::remove_file(path);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn me_pool_min_fresh_ratio_out_of_range_is_rejected() {
|
||||||
|
let toml = r#"
|
||||||
|
[general]
|
||||||
|
me_pool_min_fresh_ratio = 1.5
|
||||||
|
|
||||||
|
[censorship]
|
||||||
|
tls_domain = "example.com"
|
||||||
|
|
||||||
|
[access.users]
|
||||||
|
user = "00000000000000000000000000000000"
|
||||||
|
"#;
|
||||||
|
let dir = std::env::temp_dir();
|
||||||
|
let path = dir.join("telemt_me_pool_min_ratio_invalid_test.toml");
|
||||||
|
std::fs::write(&path, toml).unwrap();
|
||||||
|
let err = ProxyConfig::load(&path).unwrap_err().to_string();
|
||||||
|
assert!(err.contains("general.me_pool_min_fresh_ratio must be within [0.0, 1.0]"));
|
||||||
|
let _ = std::fs::remove_file(path);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn force_close_bumped_when_below_drain_ttl() {
|
||||||
|
let toml = r#"
|
||||||
|
[general]
|
||||||
|
me_pool_drain_ttl_secs = 90
|
||||||
|
me_reinit_drain_timeout_secs = 30
|
||||||
|
|
||||||
|
[censorship]
|
||||||
|
tls_domain = "example.com"
|
||||||
|
|
||||||
|
[access.users]
|
||||||
|
user = "00000000000000000000000000000000"
|
||||||
|
"#;
|
||||||
|
let dir = std::env::temp_dir();
|
||||||
|
let path = dir.join("telemt_force_close_bump_test.toml");
|
||||||
|
std::fs::write(&path, toml).unwrap();
|
||||||
|
let cfg = ProxyConfig::load(&path).unwrap();
|
||||||
|
assert_eq!(cfg.general.me_reinit_drain_timeout_secs, 90);
|
||||||
|
let _ = std::fs::remove_file(path);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -201,6 +201,16 @@ pub struct GeneralConfig {
|
|||||||
#[serde(default = "default_max_client_frame")]
|
#[serde(default = "default_max_client_frame")]
|
||||||
pub max_client_frame: usize,
|
pub max_client_frame: usize,
|
||||||
|
|
||||||
|
/// Emit full crypto-desync forensic logs for every event.
|
||||||
|
/// When false, full forensic details are emitted once per key window.
|
||||||
|
#[serde(default = "default_desync_all_full")]
|
||||||
|
pub desync_all_full: bool,
|
||||||
|
|
||||||
|
/// Enable C-like hard-swap for ME pool generations.
|
||||||
|
/// When true, Telemt prewarms a new generation and switches once full coverage is reached.
|
||||||
|
#[serde(default = "default_hardswap")]
|
||||||
|
pub hardswap: bool,
|
||||||
|
|
||||||
/// Enable staggered warmup of extra ME writers.
|
/// Enable staggered warmup of extra ME writers.
|
||||||
#[serde(default = "default_true")]
|
#[serde(default = "default_true")]
|
||||||
pub me_warmup_stagger_enabled: bool,
|
pub me_warmup_stagger_enabled: bool,
|
||||||
@@ -252,11 +262,33 @@ pub struct GeneralConfig {
|
|||||||
#[serde(default = "default_fast_mode_min_tls_record")]
|
#[serde(default = "default_fast_mode_min_tls_record")]
|
||||||
pub fast_mode_min_tls_record: usize,
|
pub fast_mode_min_tls_record: usize,
|
||||||
|
|
||||||
/// Automatically reload proxy-secret every N seconds.
|
/// Unified ME updater interval in seconds for getProxyConfig/getProxyConfigV6/getProxySecret.
|
||||||
|
/// When omitted, effective value falls back to legacy proxy_*_auto_reload_secs fields.
|
||||||
|
#[serde(default)]
|
||||||
|
pub update_every: Option<u64>,
|
||||||
|
|
||||||
|
/// Drain-TTL in seconds for stale ME writers after endpoint map changes.
|
||||||
|
/// During TTL, stale writers may be used only as fallback for new bindings.
|
||||||
|
#[serde(default = "default_me_pool_drain_ttl_secs")]
|
||||||
|
pub me_pool_drain_ttl_secs: u64,
|
||||||
|
|
||||||
|
/// Minimum desired-DC coverage ratio required before draining stale writers.
|
||||||
|
/// Range: 0.0..=1.0.
|
||||||
|
#[serde(default = "default_me_pool_min_fresh_ratio")]
|
||||||
|
pub me_pool_min_fresh_ratio: f32,
|
||||||
|
|
||||||
|
/// Drain timeout in seconds for stale ME writers after endpoint map changes.
|
||||||
|
/// Set to 0 to keep stale writers draining indefinitely (no force-close).
|
||||||
|
#[serde(default = "default_me_reinit_drain_timeout_secs")]
|
||||||
|
pub me_reinit_drain_timeout_secs: u64,
|
||||||
|
|
||||||
|
/// Deprecated legacy setting; kept for backward compatibility fallback.
|
||||||
|
/// Use `update_every` instead.
|
||||||
#[serde(default = "default_proxy_secret_reload_secs")]
|
#[serde(default = "default_proxy_secret_reload_secs")]
|
||||||
pub proxy_secret_auto_reload_secs: u64,
|
pub proxy_secret_auto_reload_secs: u64,
|
||||||
|
|
||||||
/// Automatically reload proxy-multi.conf every N seconds.
|
/// Deprecated legacy setting; kept for backward compatibility fallback.
|
||||||
|
/// Use `update_every` instead.
|
||||||
#[serde(default = "default_proxy_config_reload_secs")]
|
#[serde(default = "default_proxy_config_reload_secs")]
|
||||||
pub proxy_config_auto_reload_secs: u64,
|
pub proxy_config_auto_reload_secs: u64,
|
||||||
|
|
||||||
@@ -291,7 +323,7 @@ impl Default for GeneralConfig {
|
|||||||
middle_proxy_nat_stun: None,
|
middle_proxy_nat_stun: None,
|
||||||
middle_proxy_nat_stun_servers: Vec::new(),
|
middle_proxy_nat_stun_servers: Vec::new(),
|
||||||
middle_proxy_pool_size: default_pool_size(),
|
middle_proxy_pool_size: default_pool_size(),
|
||||||
middle_proxy_warm_standby: 8,
|
middle_proxy_warm_standby: 16,
|
||||||
me_keepalive_enabled: true,
|
me_keepalive_enabled: true,
|
||||||
me_keepalive_interval_secs: default_keepalive_interval(),
|
me_keepalive_interval_secs: default_keepalive_interval(),
|
||||||
me_keepalive_jitter_secs: default_keepalive_jitter(),
|
me_keepalive_jitter_secs: default_keepalive_jitter(),
|
||||||
@@ -299,7 +331,7 @@ impl Default for GeneralConfig {
|
|||||||
me_warmup_stagger_enabled: true,
|
me_warmup_stagger_enabled: true,
|
||||||
me_warmup_step_delay_ms: default_warmup_step_delay_ms(),
|
me_warmup_step_delay_ms: default_warmup_step_delay_ms(),
|
||||||
me_warmup_step_jitter_ms: default_warmup_step_jitter_ms(),
|
me_warmup_step_jitter_ms: default_warmup_step_jitter_ms(),
|
||||||
me_reconnect_max_concurrent_per_dc: 4,
|
me_reconnect_max_concurrent_per_dc: 8,
|
||||||
me_reconnect_backoff_base_ms: default_reconnect_backoff_base_ms(),
|
me_reconnect_backoff_base_ms: default_reconnect_backoff_base_ms(),
|
||||||
me_reconnect_backoff_cap_ms: default_reconnect_backoff_cap_ms(),
|
me_reconnect_backoff_cap_ms: default_reconnect_backoff_cap_ms(),
|
||||||
me_reconnect_fast_retry_count: 8,
|
me_reconnect_fast_retry_count: 8,
|
||||||
@@ -310,7 +342,13 @@ impl Default for GeneralConfig {
|
|||||||
links: LinksConfig::default(),
|
links: LinksConfig::default(),
|
||||||
crypto_pending_buffer: default_crypto_pending_buffer(),
|
crypto_pending_buffer: default_crypto_pending_buffer(),
|
||||||
max_client_frame: default_max_client_frame(),
|
max_client_frame: default_max_client_frame(),
|
||||||
|
desync_all_full: default_desync_all_full(),
|
||||||
|
hardswap: default_hardswap(),
|
||||||
fast_mode_min_tls_record: default_fast_mode_min_tls_record(),
|
fast_mode_min_tls_record: default_fast_mode_min_tls_record(),
|
||||||
|
update_every: Some(default_update_every_secs()),
|
||||||
|
me_pool_drain_ttl_secs: default_me_pool_drain_ttl_secs(),
|
||||||
|
me_pool_min_fresh_ratio: default_me_pool_min_fresh_ratio(),
|
||||||
|
me_reinit_drain_timeout_secs: default_me_reinit_drain_timeout_secs(),
|
||||||
proxy_secret_auto_reload_secs: default_proxy_secret_reload_secs(),
|
proxy_secret_auto_reload_secs: default_proxy_secret_reload_secs(),
|
||||||
proxy_config_auto_reload_secs: default_proxy_config_reload_secs(),
|
proxy_config_auto_reload_secs: default_proxy_config_reload_secs(),
|
||||||
ntp_check: default_ntp_check(),
|
ntp_check: default_ntp_check(),
|
||||||
@@ -321,6 +359,21 @@ impl Default for GeneralConfig {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
impl GeneralConfig {
|
||||||
|
/// Resolve the active updater interval for ME infrastructure refresh tasks.
|
||||||
|
/// `update_every` has priority, otherwise legacy proxy_*_auto_reload_secs are used.
|
||||||
|
pub fn effective_update_every_secs(&self) -> u64 {
|
||||||
|
self.update_every
|
||||||
|
.unwrap_or_else(|| self.proxy_secret_auto_reload_secs.min(self.proxy_config_auto_reload_secs))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Resolve force-close timeout for stale writers.
|
||||||
|
/// `me_reinit_drain_timeout_secs` remains backward-compatible alias.
|
||||||
|
pub fn effective_me_pool_force_close_secs(&self) -> u64 {
|
||||||
|
self.me_reinit_drain_timeout_secs
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
/// `[general.links]` — proxy link generation settings.
|
/// `[general.links]` — proxy link generation settings.
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
|
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
|
||||||
pub struct LinksConfig {
|
pub struct LinksConfig {
|
||||||
@@ -474,6 +527,12 @@ pub struct AntiCensorshipConfig {
|
|||||||
#[serde(default = "default_tls_new_session_tickets")]
|
#[serde(default = "default_tls_new_session_tickets")]
|
||||||
pub tls_new_session_tickets: u8,
|
pub tls_new_session_tickets: u8,
|
||||||
|
|
||||||
|
/// TTL in seconds for sending full certificate payload per client IP.
|
||||||
|
/// First client connection per (SNI domain, client IP) gets full cert payload.
|
||||||
|
/// Subsequent handshakes within TTL use compact cert metadata payload.
|
||||||
|
#[serde(default = "default_tls_full_cert_ttl_secs")]
|
||||||
|
pub tls_full_cert_ttl_secs: u64,
|
||||||
|
|
||||||
/// Enforce ALPN echo of client preference.
|
/// Enforce ALPN echo of client preference.
|
||||||
#[serde(default = "default_alpn_enforce")]
|
#[serde(default = "default_alpn_enforce")]
|
||||||
pub alpn_enforce: bool,
|
pub alpn_enforce: bool,
|
||||||
@@ -494,6 +553,7 @@ impl Default for AntiCensorshipConfig {
|
|||||||
server_hello_delay_min_ms: default_server_hello_delay_min_ms(),
|
server_hello_delay_min_ms: default_server_hello_delay_min_ms(),
|
||||||
server_hello_delay_max_ms: default_server_hello_delay_max_ms(),
|
server_hello_delay_max_ms: default_server_hello_delay_max_ms(),
|
||||||
tls_new_session_tickets: default_tls_new_session_tickets(),
|
tls_new_session_tickets: default_tls_new_session_tickets(),
|
||||||
|
tls_full_cert_ttl_secs: default_tls_full_cert_ttl_secs(),
|
||||||
alpn_enforce: default_alpn_enforce(),
|
alpn_enforce: default_alpn_enforce(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
30
src/main.rs
30
src/main.rs
@@ -362,6 +362,10 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).awai
|
|||||||
config.general.me_reconnect_backoff_base_ms,
|
config.general.me_reconnect_backoff_base_ms,
|
||||||
config.general.me_reconnect_backoff_cap_ms,
|
config.general.me_reconnect_backoff_cap_ms,
|
||||||
config.general.me_reconnect_fast_retry_count,
|
config.general.me_reconnect_fast_retry_count,
|
||||||
|
config.general.hardswap,
|
||||||
|
config.general.me_pool_drain_ttl_secs,
|
||||||
|
config.general.effective_me_pool_force_close_secs(),
|
||||||
|
config.general.me_pool_min_fresh_ratio,
|
||||||
);
|
);
|
||||||
|
|
||||||
let pool_size = config.general.middle_proxy_pool_size.max(1);
|
let pool_size = config.general.middle_proxy_pool_size.max(1);
|
||||||
@@ -392,18 +396,6 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).awai
|
|||||||
.await;
|
.await;
|
||||||
});
|
});
|
||||||
|
|
||||||
// Periodic updater: getProxyConfig + proxy-secret
|
|
||||||
let pool_clone2 = pool.clone();
|
|
||||||
let rng_clone2 = rng.clone();
|
|
||||||
tokio::spawn(async move {
|
|
||||||
crate::transport::middle_proxy::me_config_updater(
|
|
||||||
pool_clone2,
|
|
||||||
rng_clone2,
|
|
||||||
std::time::Duration::from_secs(12 * 3600),
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
});
|
|
||||||
|
|
||||||
Some(pool)
|
Some(pool)
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
@@ -702,6 +694,20 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).awai
|
|||||||
detected_ip_v6,
|
detected_ip_v6,
|
||||||
);
|
);
|
||||||
|
|
||||||
|
if let Some(ref pool) = me_pool {
|
||||||
|
let pool_clone = pool.clone();
|
||||||
|
let rng_clone = rng.clone();
|
||||||
|
let config_rx_clone = config_rx.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
crate::transport::middle_proxy::me_config_updater(
|
||||||
|
pool_clone,
|
||||||
|
rng_clone,
|
||||||
|
config_rx_clone,
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
let mut listeners = Vec::new();
|
let mut listeners = Vec::new();
|
||||||
|
|
||||||
for listener_conf in &config.server.listeners {
|
for listener_conf in &config.server.listeners {
|
||||||
|
|||||||
@@ -140,6 +140,65 @@ fn render_metrics(stats: &Stats) -> String {
|
|||||||
let _ = writeln!(out, "# TYPE telemt_secure_padding_invalid_total counter");
|
let _ = writeln!(out, "# TYPE telemt_secure_padding_invalid_total counter");
|
||||||
let _ = writeln!(out, "telemt_secure_padding_invalid_total {}", stats.get_secure_padding_invalid());
|
let _ = writeln!(out, "telemt_secure_padding_invalid_total {}", stats.get_secure_padding_invalid());
|
||||||
|
|
||||||
|
let _ = writeln!(out, "# HELP telemt_desync_total Total crypto-desync detections");
|
||||||
|
let _ = writeln!(out, "# TYPE telemt_desync_total counter");
|
||||||
|
let _ = writeln!(out, "telemt_desync_total {}", stats.get_desync_total());
|
||||||
|
|
||||||
|
let _ = writeln!(out, "# HELP telemt_desync_full_logged_total Full forensic desync logs emitted");
|
||||||
|
let _ = writeln!(out, "# TYPE telemt_desync_full_logged_total counter");
|
||||||
|
let _ = writeln!(out, "telemt_desync_full_logged_total {}", stats.get_desync_full_logged());
|
||||||
|
|
||||||
|
let _ = writeln!(out, "# HELP telemt_desync_suppressed_total Suppressed desync forensic events");
|
||||||
|
let _ = writeln!(out, "# TYPE telemt_desync_suppressed_total counter");
|
||||||
|
let _ = writeln!(out, "telemt_desync_suppressed_total {}", stats.get_desync_suppressed());
|
||||||
|
|
||||||
|
let _ = writeln!(out, "# HELP telemt_desync_frames_bucket_total Desync count by frames_ok bucket");
|
||||||
|
let _ = writeln!(out, "# TYPE telemt_desync_frames_bucket_total counter");
|
||||||
|
let _ = writeln!(
|
||||||
|
out,
|
||||||
|
"telemt_desync_frames_bucket_total{{bucket=\"0\"}} {}",
|
||||||
|
stats.get_desync_frames_bucket_0()
|
||||||
|
);
|
||||||
|
let _ = writeln!(
|
||||||
|
out,
|
||||||
|
"telemt_desync_frames_bucket_total{{bucket=\"1_2\"}} {}",
|
||||||
|
stats.get_desync_frames_bucket_1_2()
|
||||||
|
);
|
||||||
|
let _ = writeln!(
|
||||||
|
out,
|
||||||
|
"telemt_desync_frames_bucket_total{{bucket=\"3_10\"}} {}",
|
||||||
|
stats.get_desync_frames_bucket_3_10()
|
||||||
|
);
|
||||||
|
let _ = writeln!(
|
||||||
|
out,
|
||||||
|
"telemt_desync_frames_bucket_total{{bucket=\"gt_10\"}} {}",
|
||||||
|
stats.get_desync_frames_bucket_gt_10()
|
||||||
|
);
|
||||||
|
|
||||||
|
let _ = writeln!(out, "# HELP telemt_pool_swap_total Successful ME pool swaps");
|
||||||
|
let _ = writeln!(out, "# TYPE telemt_pool_swap_total counter");
|
||||||
|
let _ = writeln!(out, "telemt_pool_swap_total {}", stats.get_pool_swap_total());
|
||||||
|
|
||||||
|
let _ = writeln!(out, "# HELP telemt_pool_drain_active Active draining ME writers");
|
||||||
|
let _ = writeln!(out, "# TYPE telemt_pool_drain_active gauge");
|
||||||
|
let _ = writeln!(out, "telemt_pool_drain_active {}", stats.get_pool_drain_active());
|
||||||
|
|
||||||
|
let _ = writeln!(out, "# HELP telemt_pool_force_close_total Forced close events for draining writers");
|
||||||
|
let _ = writeln!(out, "# TYPE telemt_pool_force_close_total counter");
|
||||||
|
let _ = writeln!(
|
||||||
|
out,
|
||||||
|
"telemt_pool_force_close_total {}",
|
||||||
|
stats.get_pool_force_close_total()
|
||||||
|
);
|
||||||
|
|
||||||
|
let _ = writeln!(out, "# HELP telemt_pool_stale_pick_total Stale writer fallback picks for new binds");
|
||||||
|
let _ = writeln!(out, "# TYPE telemt_pool_stale_pick_total counter");
|
||||||
|
let _ = writeln!(
|
||||||
|
out,
|
||||||
|
"telemt_pool_stale_pick_total {}",
|
||||||
|
stats.get_pool_stale_pick_total()
|
||||||
|
);
|
||||||
|
|
||||||
let _ = writeln!(out, "# HELP telemt_user_connections_total Per-user total connections");
|
let _ = writeln!(out, "# HELP telemt_user_connections_total Per-user total connections");
|
||||||
let _ = writeln!(out, "# TYPE telemt_user_connections_total counter");
|
let _ = writeln!(out, "# TYPE telemt_user_connections_total counter");
|
||||||
let _ = writeln!(out, "# HELP telemt_user_connections_current Per-user active connections");
|
let _ = writeln!(out, "# HELP telemt_user_connections_current Per-user active connections");
|
||||||
|
|||||||
@@ -2,6 +2,7 @@
|
|||||||
|
|
||||||
use std::net::SocketAddr;
|
use std::net::SocketAddr;
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
use std::time::Duration;
|
||||||
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
|
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
|
||||||
use tracing::{debug, warn, trace, info};
|
use tracing::{debug, warn, trace, info};
|
||||||
use zeroize::Zeroize;
|
use zeroize::Zeroize;
|
||||||
@@ -108,11 +109,23 @@ where
|
|||||||
|
|
||||||
let cached = if config.censorship.tls_emulation {
|
let cached = if config.censorship.tls_emulation {
|
||||||
if let Some(cache) = tls_cache.as_ref() {
|
if let Some(cache) = tls_cache.as_ref() {
|
||||||
if let Some(sni) = tls::extract_sni_from_client_hello(handshake) {
|
let selected_domain = if let Some(sni) = tls::extract_sni_from_client_hello(handshake) {
|
||||||
Some(cache.get(&sni).await)
|
if cache.contains_domain(&sni).await {
|
||||||
|
sni
|
||||||
|
} else {
|
||||||
|
config.censorship.tls_domain.clone()
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
Some(cache.get(&config.censorship.tls_domain).await)
|
config.censorship.tls_domain.clone()
|
||||||
}
|
};
|
||||||
|
let cached_entry = cache.get(&selected_domain).await;
|
||||||
|
let use_full_cert_payload = cache
|
||||||
|
.take_full_cert_budget_for_ip(
|
||||||
|
peer.ip(),
|
||||||
|
Duration::from_secs(config.censorship.tls_full_cert_ttl_secs),
|
||||||
|
)
|
||||||
|
.await;
|
||||||
|
Some((cached_entry, use_full_cert_payload))
|
||||||
} else {
|
} else {
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
@@ -137,12 +150,13 @@ where
|
|||||||
None
|
None
|
||||||
};
|
};
|
||||||
|
|
||||||
let response = if let Some(cached_entry) = cached {
|
let response = if let Some((cached_entry, use_full_cert_payload)) = cached {
|
||||||
emulator::build_emulated_server_hello(
|
emulator::build_emulated_server_hello(
|
||||||
secret,
|
secret,
|
||||||
&validation.digest,
|
&validation.digest,
|
||||||
&validation.session_id,
|
&validation.session_id,
|
||||||
&cached_entry,
|
&cached_entry,
|
||||||
|
use_full_cert_payload,
|
||||||
rng,
|
rng,
|
||||||
selected_alpn.clone(),
|
selected_alpn.clone(),
|
||||||
config.censorship.tls_new_session_tickets,
|
config.censorship.tls_new_session_tickets,
|
||||||
|
|||||||
@@ -1,5 +1,10 @@
|
|||||||
use std::net::SocketAddr;
|
use std::collections::HashMap;
|
||||||
use std::sync::Arc;
|
use std::collections::hash_map::DefaultHasher;
|
||||||
|
use std::hash::{Hash, Hasher};
|
||||||
|
use std::net::{IpAddr, SocketAddr};
|
||||||
|
use std::sync::atomic::{AtomicU64, Ordering};
|
||||||
|
use std::sync::{Arc, Mutex, OnceLock};
|
||||||
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
|
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
|
||||||
use tokio::sync::{mpsc, oneshot};
|
use tokio::sync::{mpsc, oneshot};
|
||||||
@@ -19,6 +24,148 @@ enum C2MeCommand {
|
|||||||
Close,
|
Close,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const DESYNC_DEDUP_WINDOW: Duration = Duration::from_secs(60);
|
||||||
|
const DESYNC_ERROR_CLASS: &str = "frame_too_large_crypto_desync";
|
||||||
|
static DESYNC_DEDUP: OnceLock<Mutex<HashMap<u64, Instant>>> = OnceLock::new();
|
||||||
|
|
||||||
|
struct RelayForensicsState {
|
||||||
|
trace_id: u64,
|
||||||
|
conn_id: u64,
|
||||||
|
user: String,
|
||||||
|
peer: SocketAddr,
|
||||||
|
peer_hash: u64,
|
||||||
|
started_at: Instant,
|
||||||
|
bytes_c2me: u64,
|
||||||
|
bytes_me2c: Arc<AtomicU64>,
|
||||||
|
desync_all_full: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
fn hash_value<T: Hash>(value: &T) -> u64 {
|
||||||
|
let mut hasher = DefaultHasher::new();
|
||||||
|
value.hash(&mut hasher);
|
||||||
|
hasher.finish()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn hash_ip(ip: IpAddr) -> u64 {
|
||||||
|
hash_value(&ip)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn should_emit_full_desync(key: u64, all_full: bool, now: Instant) -> bool {
|
||||||
|
if all_full {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
let dedup = DESYNC_DEDUP.get_or_init(|| Mutex::new(HashMap::new()));
|
||||||
|
let mut guard = dedup.lock().expect("desync dedup mutex poisoned");
|
||||||
|
guard.retain(|_, seen_at| now.duration_since(*seen_at) < DESYNC_DEDUP_WINDOW);
|
||||||
|
|
||||||
|
match guard.get_mut(&key) {
|
||||||
|
Some(seen_at) => {
|
||||||
|
if now.duration_since(*seen_at) >= DESYNC_DEDUP_WINDOW {
|
||||||
|
*seen_at = now;
|
||||||
|
true
|
||||||
|
} else {
|
||||||
|
false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
guard.insert(key, now);
|
||||||
|
true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn report_desync_frame_too_large(
|
||||||
|
state: &RelayForensicsState,
|
||||||
|
proto_tag: ProtoTag,
|
||||||
|
frame_counter: u64,
|
||||||
|
max_frame: usize,
|
||||||
|
len: usize,
|
||||||
|
raw_len_bytes: Option<[u8; 4]>,
|
||||||
|
stats: &Stats,
|
||||||
|
) -> ProxyError {
|
||||||
|
let len_buf = raw_len_bytes.unwrap_or((len as u32).to_le_bytes());
|
||||||
|
let looks_like_tls = raw_len_bytes
|
||||||
|
.map(|b| b[0] == 0x16 && b[1] == 0x03)
|
||||||
|
.unwrap_or(false);
|
||||||
|
let looks_like_http = raw_len_bytes
|
||||||
|
.map(|b| matches!(b[0], b'G' | b'P' | b'H' | b'C' | b'D'))
|
||||||
|
.unwrap_or(false);
|
||||||
|
let now = Instant::now();
|
||||||
|
let dedup_key = hash_value(&(
|
||||||
|
state.user.as_str(),
|
||||||
|
state.peer_hash,
|
||||||
|
proto_tag,
|
||||||
|
DESYNC_ERROR_CLASS,
|
||||||
|
));
|
||||||
|
let emit_full = should_emit_full_desync(dedup_key, state.desync_all_full, now);
|
||||||
|
let duration_ms = state.started_at.elapsed().as_millis() as u64;
|
||||||
|
let bytes_me2c = state.bytes_me2c.load(Ordering::Relaxed);
|
||||||
|
|
||||||
|
stats.increment_desync_total();
|
||||||
|
stats.observe_desync_frames_ok(frame_counter);
|
||||||
|
if emit_full {
|
||||||
|
stats.increment_desync_full_logged();
|
||||||
|
warn!(
|
||||||
|
trace_id = format_args!("0x{:016x}", state.trace_id),
|
||||||
|
conn_id = state.conn_id,
|
||||||
|
user = %state.user,
|
||||||
|
peer_hash = format_args!("0x{:016x}", state.peer_hash),
|
||||||
|
proto = ?proto_tag,
|
||||||
|
mode = "middle_proxy",
|
||||||
|
is_tls = true,
|
||||||
|
duration_ms,
|
||||||
|
bytes_c2me = state.bytes_c2me,
|
||||||
|
bytes_me2c,
|
||||||
|
raw_len = len,
|
||||||
|
raw_len_hex = format_args!("0x{:08x}", len),
|
||||||
|
raw_bytes = format_args!(
|
||||||
|
"{:02x} {:02x} {:02x} {:02x}",
|
||||||
|
len_buf[0], len_buf[1], len_buf[2], len_buf[3]
|
||||||
|
),
|
||||||
|
max_frame,
|
||||||
|
tls_like = looks_like_tls,
|
||||||
|
http_like = looks_like_http,
|
||||||
|
frames_ok = frame_counter,
|
||||||
|
dedup_window_secs = DESYNC_DEDUP_WINDOW.as_secs(),
|
||||||
|
desync_all_full = state.desync_all_full,
|
||||||
|
full_reason = if state.desync_all_full { "desync_all_full" } else { "first_in_dedup_window" },
|
||||||
|
error_class = DESYNC_ERROR_CLASS,
|
||||||
|
"Frame too large — crypto desync forensics"
|
||||||
|
);
|
||||||
|
debug!(
|
||||||
|
trace_id = format_args!("0x{:016x}", state.trace_id),
|
||||||
|
conn_id = state.conn_id,
|
||||||
|
user = %state.user,
|
||||||
|
peer = %state.peer,
|
||||||
|
"Frame too large forensic peer detail"
|
||||||
|
);
|
||||||
|
} else {
|
||||||
|
stats.increment_desync_suppressed();
|
||||||
|
debug!(
|
||||||
|
trace_id = format_args!("0x{:016x}", state.trace_id),
|
||||||
|
conn_id = state.conn_id,
|
||||||
|
user = %state.user,
|
||||||
|
peer_hash = format_args!("0x{:016x}", state.peer_hash),
|
||||||
|
proto = ?proto_tag,
|
||||||
|
duration_ms,
|
||||||
|
bytes_c2me = state.bytes_c2me,
|
||||||
|
bytes_me2c,
|
||||||
|
raw_len = len,
|
||||||
|
frames_ok = frame_counter,
|
||||||
|
dedup_window_secs = DESYNC_DEDUP_WINDOW.as_secs(),
|
||||||
|
error_class = DESYNC_ERROR_CLASS,
|
||||||
|
"Frame too large — crypto desync forensic suppressed"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
ProxyError::Proxy(format!(
|
||||||
|
"Frame too large: {len} (max {max_frame}), frames_ok={frame_counter}, conn_id={}, trace_id=0x{:016x}",
|
||||||
|
state.conn_id,
|
||||||
|
state.trace_id
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
pub(crate) async fn handle_via_middle_proxy<R, W>(
|
pub(crate) async fn handle_via_middle_proxy<R, W>(
|
||||||
mut crypto_reader: CryptoReader<R>,
|
mut crypto_reader: CryptoReader<R>,
|
||||||
crypto_writer: CryptoWriter<W>,
|
crypto_writer: CryptoWriter<W>,
|
||||||
@@ -37,6 +184,7 @@ where
|
|||||||
let user = success.user.clone();
|
let user = success.user.clone();
|
||||||
let peer = success.peer;
|
let peer = success.peer;
|
||||||
let proto_tag = success.proto_tag;
|
let proto_tag = success.proto_tag;
|
||||||
|
let pool_generation = me_pool.current_generation();
|
||||||
|
|
||||||
info!(
|
info!(
|
||||||
user = %user,
|
user = %user,
|
||||||
@@ -44,19 +192,37 @@ where
|
|||||||
dc = success.dc_idx,
|
dc = success.dc_idx,
|
||||||
proto = ?proto_tag,
|
proto = ?proto_tag,
|
||||||
mode = "middle_proxy",
|
mode = "middle_proxy",
|
||||||
|
pool_generation,
|
||||||
"Routing via Middle-End"
|
"Routing via Middle-End"
|
||||||
);
|
);
|
||||||
|
|
||||||
let (conn_id, me_rx) = me_pool.registry().register().await;
|
let (conn_id, me_rx) = me_pool.registry().register().await;
|
||||||
|
let trace_id = conn_id;
|
||||||
|
let bytes_me2c = Arc::new(AtomicU64::new(0));
|
||||||
|
let mut forensics = RelayForensicsState {
|
||||||
|
trace_id,
|
||||||
|
conn_id,
|
||||||
|
user: user.clone(),
|
||||||
|
peer,
|
||||||
|
peer_hash: hash_ip(peer.ip()),
|
||||||
|
started_at: Instant::now(),
|
||||||
|
bytes_c2me: 0,
|
||||||
|
bytes_me2c: bytes_me2c.clone(),
|
||||||
|
desync_all_full: config.general.desync_all_full,
|
||||||
|
};
|
||||||
|
|
||||||
stats.increment_user_connects(&user);
|
stats.increment_user_connects(&user);
|
||||||
stats.increment_user_curr_connects(&user);
|
stats.increment_user_curr_connects(&user);
|
||||||
|
|
||||||
let proto_flags = proto_flags_for_tag(proto_tag, me_pool.has_proxy_tag());
|
let proto_flags = proto_flags_for_tag(proto_tag, me_pool.has_proxy_tag());
|
||||||
debug!(
|
debug!(
|
||||||
|
trace_id = format_args!("0x{:016x}", trace_id),
|
||||||
user = %user,
|
user = %user,
|
||||||
conn_id,
|
conn_id,
|
||||||
|
peer_hash = format_args!("0x{:016x}", forensics.peer_hash),
|
||||||
|
desync_all_full = forensics.desync_all_full,
|
||||||
proto_flags = format_args!("0x{:08x}", proto_flags),
|
proto_flags = format_args!("0x{:08x}", proto_flags),
|
||||||
|
pool_generation,
|
||||||
"ME relay started"
|
"ME relay started"
|
||||||
);
|
);
|
||||||
|
|
||||||
@@ -93,6 +259,7 @@ where
|
|||||||
let stats_clone = stats.clone();
|
let stats_clone = stats.clone();
|
||||||
let rng_clone = rng.clone();
|
let rng_clone = rng.clone();
|
||||||
let user_clone = user.clone();
|
let user_clone = user.clone();
|
||||||
|
let bytes_me2c_clone = bytes_me2c.clone();
|
||||||
let me_writer = tokio::spawn(async move {
|
let me_writer = tokio::spawn(async move {
|
||||||
let mut writer = crypto_writer;
|
let mut writer = crypto_writer;
|
||||||
let mut frame_buf = Vec::with_capacity(16 * 1024);
|
let mut frame_buf = Vec::with_capacity(16 * 1024);
|
||||||
@@ -102,6 +269,7 @@ where
|
|||||||
match msg {
|
match msg {
|
||||||
Some(MeResponse::Data { flags, data }) => {
|
Some(MeResponse::Data { flags, data }) => {
|
||||||
trace!(conn_id, bytes = data.len(), flags, "ME->C data");
|
trace!(conn_id, bytes = data.len(), flags, "ME->C data");
|
||||||
|
bytes_me2c_clone.fetch_add(data.len() as u64, Ordering::Relaxed);
|
||||||
stats_clone.add_user_octets_to(&user_clone, data.len() as u64);
|
stats_clone.add_user_octets_to(&user_clone, data.len() as u64);
|
||||||
write_client_payload(
|
write_client_payload(
|
||||||
&mut writer,
|
&mut writer,
|
||||||
@@ -118,6 +286,7 @@ where
|
|||||||
match next {
|
match next {
|
||||||
MeResponse::Data { flags, data } => {
|
MeResponse::Data { flags, data } => {
|
||||||
trace!(conn_id, bytes = data.len(), flags, "ME->C data (batched)");
|
trace!(conn_id, bytes = data.len(), flags, "ME->C data (batched)");
|
||||||
|
bytes_me2c_clone.fetch_add(data.len() as u64, Ordering::Relaxed);
|
||||||
stats_clone.add_user_octets_to(&user_clone, data.len() as u64);
|
stats_clone.add_user_octets_to(&user_clone, data.len() as u64);
|
||||||
write_client_payload(
|
write_client_payload(
|
||||||
&mut writer,
|
&mut writer,
|
||||||
@@ -173,12 +342,15 @@ where
|
|||||||
&mut crypto_reader,
|
&mut crypto_reader,
|
||||||
proto_tag,
|
proto_tag,
|
||||||
frame_limit,
|
frame_limit,
|
||||||
&user,
|
&forensics,
|
||||||
&mut frame_counter,
|
&mut frame_counter,
|
||||||
&stats,
|
&stats,
|
||||||
).await {
|
).await {
|
||||||
Ok(Some((payload, quickack))) => {
|
Ok(Some((payload, quickack))) => {
|
||||||
trace!(conn_id, bytes = payload.len(), "C->ME frame");
|
trace!(conn_id, bytes = payload.len(), "C->ME frame");
|
||||||
|
forensics.bytes_c2me = forensics
|
||||||
|
.bytes_c2me
|
||||||
|
.saturating_add(payload.len() as u64);
|
||||||
stats.add_user_octets_from(&user, payload.len() as u64);
|
stats.add_user_octets_from(&user, payload.len() as u64);
|
||||||
let mut flags = proto_flags;
|
let mut flags = proto_flags;
|
||||||
if quickack {
|
if quickack {
|
||||||
@@ -237,7 +409,16 @@ where
|
|||||||
(_, _, Err(e)) => Err(e),
|
(_, _, Err(e)) => Err(e),
|
||||||
};
|
};
|
||||||
|
|
||||||
debug!(user = %user, conn_id, "ME relay cleanup");
|
debug!(
|
||||||
|
user = %user,
|
||||||
|
conn_id,
|
||||||
|
trace_id = format_args!("0x{:016x}", trace_id),
|
||||||
|
duration_ms = forensics.started_at.elapsed().as_millis() as u64,
|
||||||
|
bytes_c2me = forensics.bytes_c2me,
|
||||||
|
bytes_me2c = forensics.bytes_me2c.load(Ordering::Relaxed),
|
||||||
|
frames_ok = frame_counter,
|
||||||
|
"ME relay cleanup"
|
||||||
|
);
|
||||||
me_pool.registry().unregister(conn_id).await;
|
me_pool.registry().unregister(conn_id).await;
|
||||||
stats.decrement_user_curr_connects(&user);
|
stats.decrement_user_curr_connects(&user);
|
||||||
result
|
result
|
||||||
@@ -247,7 +428,7 @@ async fn read_client_payload<R>(
|
|||||||
client_reader: &mut CryptoReader<R>,
|
client_reader: &mut CryptoReader<R>,
|
||||||
proto_tag: ProtoTag,
|
proto_tag: ProtoTag,
|
||||||
max_frame: usize,
|
max_frame: usize,
|
||||||
user: &str,
|
forensics: &RelayForensicsState,
|
||||||
frame_counter: &mut u64,
|
frame_counter: &mut u64,
|
||||||
stats: &Stats,
|
stats: &Stats,
|
||||||
) -> Result<Option<(Vec<u8>, bool)>>
|
) -> Result<Option<(Vec<u8>, bool)>>
|
||||||
@@ -302,7 +483,9 @@ where
|
|||||||
}
|
}
|
||||||
if len < 4 && proto_tag != ProtoTag::Abridged {
|
if len < 4 && proto_tag != ProtoTag::Abridged {
|
||||||
warn!(
|
warn!(
|
||||||
user = %user,
|
trace_id = format_args!("0x{:016x}", forensics.trace_id),
|
||||||
|
conn_id = forensics.conn_id,
|
||||||
|
user = %forensics.user,
|
||||||
len,
|
len,
|
||||||
proto = ?proto_tag,
|
proto = ?proto_tag,
|
||||||
"Frame too small — corrupt or probe"
|
"Frame too small — corrupt or probe"
|
||||||
@@ -311,31 +494,15 @@ where
|
|||||||
}
|
}
|
||||||
|
|
||||||
if len > max_frame {
|
if len > max_frame {
|
||||||
let len_buf = raw_len_bytes.unwrap_or((len as u32).to_le_bytes());
|
return Err(report_desync_frame_too_large(
|
||||||
let looks_like_tls = raw_len_bytes
|
forensics,
|
||||||
.map(|b| b[0] == 0x16 && b[1] == 0x03)
|
proto_tag,
|
||||||
.unwrap_or(false);
|
*frame_counter,
|
||||||
let looks_like_http = raw_len_bytes
|
max_frame,
|
||||||
.map(|b| matches!(b[0], b'G' | b'P' | b'H' | b'C' | b'D'))
|
len,
|
||||||
.unwrap_or(false);
|
raw_len_bytes,
|
||||||
warn!(
|
stats,
|
||||||
user = %user,
|
));
|
||||||
raw_len = len,
|
|
||||||
raw_len_hex = format_args!("0x{:08x}", len),
|
|
||||||
raw_bytes = format_args!(
|
|
||||||
"{:02x} {:02x} {:02x} {:02x}",
|
|
||||||
len_buf[0], len_buf[1], len_buf[2], len_buf[3]
|
|
||||||
),
|
|
||||||
proto = ?proto_tag,
|
|
||||||
tls_like = looks_like_tls,
|
|
||||||
http_like = looks_like_http,
|
|
||||||
frames_ok = *frame_counter,
|
|
||||||
"Frame too large — crypto desync forensics"
|
|
||||||
);
|
|
||||||
return Err(ProxyError::Proxy(format!(
|
|
||||||
"Frame too large: {len} (max {max_frame}), frames_ok={}",
|
|
||||||
*frame_counter
|
|
||||||
)));
|
|
||||||
}
|
}
|
||||||
|
|
||||||
let secure_payload_len = if proto_tag == ProtoTag::Secure {
|
let secure_payload_len = if proto_tag == ProtoTag::Secure {
|
||||||
|
|||||||
@@ -31,6 +31,17 @@ pub struct Stats {
|
|||||||
me_route_drop_channel_closed: AtomicU64,
|
me_route_drop_channel_closed: AtomicU64,
|
||||||
me_route_drop_queue_full: AtomicU64,
|
me_route_drop_queue_full: AtomicU64,
|
||||||
secure_padding_invalid: AtomicU64,
|
secure_padding_invalid: AtomicU64,
|
||||||
|
desync_total: AtomicU64,
|
||||||
|
desync_full_logged: AtomicU64,
|
||||||
|
desync_suppressed: AtomicU64,
|
||||||
|
desync_frames_bucket_0: AtomicU64,
|
||||||
|
desync_frames_bucket_1_2: AtomicU64,
|
||||||
|
desync_frames_bucket_3_10: AtomicU64,
|
||||||
|
desync_frames_bucket_gt_10: AtomicU64,
|
||||||
|
pool_swap_total: AtomicU64,
|
||||||
|
pool_drain_active: AtomicU64,
|
||||||
|
pool_force_close_total: AtomicU64,
|
||||||
|
pool_stale_pick_total: AtomicU64,
|
||||||
user_stats: DashMap<String, UserStats>,
|
user_stats: DashMap<String, UserStats>,
|
||||||
start_time: parking_lot::RwLock<Option<Instant>>,
|
start_time: parking_lot::RwLock<Option<Instant>>,
|
||||||
}
|
}
|
||||||
@@ -76,6 +87,60 @@ impl Stats {
|
|||||||
pub fn increment_secure_padding_invalid(&self) {
|
pub fn increment_secure_padding_invalid(&self) {
|
||||||
self.secure_padding_invalid.fetch_add(1, Ordering::Relaxed);
|
self.secure_padding_invalid.fetch_add(1, Ordering::Relaxed);
|
||||||
}
|
}
|
||||||
|
pub fn increment_desync_total(&self) {
|
||||||
|
self.desync_total.fetch_add(1, Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
pub fn increment_desync_full_logged(&self) {
|
||||||
|
self.desync_full_logged.fetch_add(1, Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
pub fn increment_desync_suppressed(&self) {
|
||||||
|
self.desync_suppressed.fetch_add(1, Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
pub fn observe_desync_frames_ok(&self, frames_ok: u64) {
|
||||||
|
match frames_ok {
|
||||||
|
0 => {
|
||||||
|
self.desync_frames_bucket_0.fetch_add(1, Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
1..=2 => {
|
||||||
|
self.desync_frames_bucket_1_2.fetch_add(1, Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
3..=10 => {
|
||||||
|
self.desync_frames_bucket_3_10.fetch_add(1, Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
_ => {
|
||||||
|
self.desync_frames_bucket_gt_10.fetch_add(1, Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub fn increment_pool_swap_total(&self) {
|
||||||
|
self.pool_swap_total.fetch_add(1, Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
pub fn increment_pool_drain_active(&self) {
|
||||||
|
self.pool_drain_active.fetch_add(1, Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
pub fn decrement_pool_drain_active(&self) {
|
||||||
|
let mut current = self.pool_drain_active.load(Ordering::Relaxed);
|
||||||
|
loop {
|
||||||
|
if current == 0 {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
match self.pool_drain_active.compare_exchange_weak(
|
||||||
|
current,
|
||||||
|
current - 1,
|
||||||
|
Ordering::Relaxed,
|
||||||
|
Ordering::Relaxed,
|
||||||
|
) {
|
||||||
|
Ok(_) => break,
|
||||||
|
Err(actual) => current = actual,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub fn increment_pool_force_close_total(&self) {
|
||||||
|
self.pool_force_close_total.fetch_add(1, Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
pub fn increment_pool_stale_pick_total(&self) {
|
||||||
|
self.pool_stale_pick_total.fetch_add(1, Ordering::Relaxed);
|
||||||
|
}
|
||||||
pub fn get_connects_all(&self) -> u64 { self.connects_all.load(Ordering::Relaxed) }
|
pub fn get_connects_all(&self) -> u64 { self.connects_all.load(Ordering::Relaxed) }
|
||||||
pub fn get_connects_bad(&self) -> u64 { self.connects_bad.load(Ordering::Relaxed) }
|
pub fn get_connects_bad(&self) -> u64 { self.connects_bad.load(Ordering::Relaxed) }
|
||||||
pub fn get_me_keepalive_sent(&self) -> u64 { self.me_keepalive_sent.load(Ordering::Relaxed) }
|
pub fn get_me_keepalive_sent(&self) -> u64 { self.me_keepalive_sent.load(Ordering::Relaxed) }
|
||||||
@@ -96,6 +161,39 @@ impl Stats {
|
|||||||
pub fn get_secure_padding_invalid(&self) -> u64 {
|
pub fn get_secure_padding_invalid(&self) -> u64 {
|
||||||
self.secure_padding_invalid.load(Ordering::Relaxed)
|
self.secure_padding_invalid.load(Ordering::Relaxed)
|
||||||
}
|
}
|
||||||
|
pub fn get_desync_total(&self) -> u64 {
|
||||||
|
self.desync_total.load(Ordering::Relaxed)
|
||||||
|
}
|
||||||
|
pub fn get_desync_full_logged(&self) -> u64 {
|
||||||
|
self.desync_full_logged.load(Ordering::Relaxed)
|
||||||
|
}
|
||||||
|
pub fn get_desync_suppressed(&self) -> u64 {
|
||||||
|
self.desync_suppressed.load(Ordering::Relaxed)
|
||||||
|
}
|
||||||
|
pub fn get_desync_frames_bucket_0(&self) -> u64 {
|
||||||
|
self.desync_frames_bucket_0.load(Ordering::Relaxed)
|
||||||
|
}
|
||||||
|
pub fn get_desync_frames_bucket_1_2(&self) -> u64 {
|
||||||
|
self.desync_frames_bucket_1_2.load(Ordering::Relaxed)
|
||||||
|
}
|
||||||
|
pub fn get_desync_frames_bucket_3_10(&self) -> u64 {
|
||||||
|
self.desync_frames_bucket_3_10.load(Ordering::Relaxed)
|
||||||
|
}
|
||||||
|
pub fn get_desync_frames_bucket_gt_10(&self) -> u64 {
|
||||||
|
self.desync_frames_bucket_gt_10.load(Ordering::Relaxed)
|
||||||
|
}
|
||||||
|
pub fn get_pool_swap_total(&self) -> u64 {
|
||||||
|
self.pool_swap_total.load(Ordering::Relaxed)
|
||||||
|
}
|
||||||
|
pub fn get_pool_drain_active(&self) -> u64 {
|
||||||
|
self.pool_drain_active.load(Ordering::Relaxed)
|
||||||
|
}
|
||||||
|
pub fn get_pool_force_close_total(&self) -> u64 {
|
||||||
|
self.pool_force_close_total.load(Ordering::Relaxed)
|
||||||
|
}
|
||||||
|
pub fn get_pool_stale_pick_total(&self) -> u64 {
|
||||||
|
self.pool_stale_pick_total.load(Ordering::Relaxed)
|
||||||
|
}
|
||||||
|
|
||||||
pub fn increment_user_connects(&self, user: &str) {
|
pub fn increment_user_connects(&self, user: &str) {
|
||||||
self.user_stats.entry(user.to_string()).or_default()
|
self.user_stats.entry(user.to_string()).or_default()
|
||||||
|
|||||||
@@ -1,7 +1,8 @@
|
|||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
|
use std::net::IpAddr;
|
||||||
use std::path::{Path, PathBuf};
|
use std::path::{Path, PathBuf};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::{SystemTime, Duration};
|
use std::time::{Duration, Instant, SystemTime};
|
||||||
|
|
||||||
use tokio::sync::RwLock;
|
use tokio::sync::RwLock;
|
||||||
use tokio::time::sleep;
|
use tokio::time::sleep;
|
||||||
@@ -14,6 +15,7 @@ use crate::tls_front::types::{CachedTlsData, ParsedServerHello, TlsFetchResult};
|
|||||||
pub struct TlsFrontCache {
|
pub struct TlsFrontCache {
|
||||||
memory: RwLock<HashMap<String, Arc<CachedTlsData>>>,
|
memory: RwLock<HashMap<String, Arc<CachedTlsData>>>,
|
||||||
default: Arc<CachedTlsData>,
|
default: Arc<CachedTlsData>,
|
||||||
|
full_cert_sent: RwLock<HashMap<IpAddr, Instant>>,
|
||||||
disk_path: PathBuf,
|
disk_path: PathBuf,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -31,6 +33,7 @@ impl TlsFrontCache {
|
|||||||
let default = Arc::new(CachedTlsData {
|
let default = Arc::new(CachedTlsData {
|
||||||
server_hello_template: default_template,
|
server_hello_template: default_template,
|
||||||
cert_info: None,
|
cert_info: None,
|
||||||
|
cert_payload: None,
|
||||||
app_data_records_sizes: vec![default_len],
|
app_data_records_sizes: vec![default_len],
|
||||||
total_app_data_len: default_len,
|
total_app_data_len: default_len,
|
||||||
fetched_at: SystemTime::now(),
|
fetched_at: SystemTime::now(),
|
||||||
@@ -45,6 +48,7 @@ impl TlsFrontCache {
|
|||||||
Self {
|
Self {
|
||||||
memory: RwLock::new(map),
|
memory: RwLock::new(map),
|
||||||
default,
|
default,
|
||||||
|
full_cert_sent: RwLock::new(HashMap::new()),
|
||||||
disk_path: disk_path.as_ref().to_path_buf(),
|
disk_path: disk_path.as_ref().to_path_buf(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -54,6 +58,45 @@ impl TlsFrontCache {
|
|||||||
guard.get(sni).cloned().unwrap_or_else(|| self.default.clone())
|
guard.get(sni).cloned().unwrap_or_else(|| self.default.clone())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub async fn contains_domain(&self, domain: &str) -> bool {
|
||||||
|
self.memory.read().await.contains_key(domain)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Returns true when full cert payload should be sent for client_ip
|
||||||
|
/// according to TTL policy.
|
||||||
|
pub async fn take_full_cert_budget_for_ip(
|
||||||
|
&self,
|
||||||
|
client_ip: IpAddr,
|
||||||
|
ttl: Duration,
|
||||||
|
) -> bool {
|
||||||
|
if ttl.is_zero() {
|
||||||
|
self.full_cert_sent
|
||||||
|
.write()
|
||||||
|
.await
|
||||||
|
.insert(client_ip, Instant::now());
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
let now = Instant::now();
|
||||||
|
let mut guard = self.full_cert_sent.write().await;
|
||||||
|
guard.retain(|_, seen_at| now.duration_since(*seen_at) < ttl);
|
||||||
|
|
||||||
|
match guard.get_mut(&client_ip) {
|
||||||
|
Some(seen_at) => {
|
||||||
|
if now.duration_since(*seen_at) >= ttl {
|
||||||
|
*seen_at = now;
|
||||||
|
true
|
||||||
|
} else {
|
||||||
|
false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None => {
|
||||||
|
guard.insert(client_ip, now);
|
||||||
|
true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pub async fn set(&self, domain: &str, data: CachedTlsData) {
|
pub async fn set(&self, domain: &str, data: CachedTlsData) {
|
||||||
let mut guard = self.memory.write().await;
|
let mut guard = self.memory.write().await;
|
||||||
guard.insert(domain.to_string(), Arc::new(data));
|
guard.insert(domain.to_string(), Arc::new(data));
|
||||||
@@ -142,6 +185,7 @@ impl TlsFrontCache {
|
|||||||
let data = CachedTlsData {
|
let data = CachedTlsData {
|
||||||
server_hello_template: fetched.server_hello_parsed,
|
server_hello_template: fetched.server_hello_parsed,
|
||||||
cert_info: fetched.cert_info,
|
cert_info: fetched.cert_info,
|
||||||
|
cert_payload: fetched.cert_payload,
|
||||||
app_data_records_sizes: fetched.app_data_records_sizes.clone(),
|
app_data_records_sizes: fetched.app_data_records_sizes.clone(),
|
||||||
total_app_data_len: fetched.total_app_data_len,
|
total_app_data_len: fetched.total_app_data_len,
|
||||||
fetched_at: SystemTime::now(),
|
fetched_at: SystemTime::now(),
|
||||||
@@ -161,3 +205,50 @@ impl TlsFrontCache {
|
|||||||
&self.disk_path
|
&self.disk_path
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::*;
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_take_full_cert_budget_for_ip_uses_ttl() {
|
||||||
|
let cache = TlsFrontCache::new(
|
||||||
|
&["example.com".to_string()],
|
||||||
|
1024,
|
||||||
|
"tlsfront-test-cache",
|
||||||
|
);
|
||||||
|
let ip: IpAddr = "127.0.0.1".parse().expect("ip");
|
||||||
|
let ttl = Duration::from_millis(80);
|
||||||
|
|
||||||
|
assert!(cache
|
||||||
|
.take_full_cert_budget_for_ip(ip, ttl)
|
||||||
|
.await);
|
||||||
|
assert!(!cache
|
||||||
|
.take_full_cert_budget_for_ip(ip, ttl)
|
||||||
|
.await);
|
||||||
|
|
||||||
|
tokio::time::sleep(Duration::from_millis(90)).await;
|
||||||
|
|
||||||
|
assert!(cache
|
||||||
|
.take_full_cert_budget_for_ip(ip, ttl)
|
||||||
|
.await);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[tokio::test]
|
||||||
|
async fn test_take_full_cert_budget_for_ip_zero_ttl_always_allows_full_payload() {
|
||||||
|
let cache = TlsFrontCache::new(
|
||||||
|
&["example.com".to_string()],
|
||||||
|
1024,
|
||||||
|
"tlsfront-test-cache",
|
||||||
|
);
|
||||||
|
let ip: IpAddr = "127.0.0.1".parse().expect("ip");
|
||||||
|
let ttl = Duration::ZERO;
|
||||||
|
|
||||||
|
assert!(cache
|
||||||
|
.take_full_cert_budget_for_ip(ip, ttl)
|
||||||
|
.await);
|
||||||
|
assert!(cache
|
||||||
|
.take_full_cert_budget_for_ip(ip, ttl)
|
||||||
|
.await);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -3,7 +3,7 @@ use crate::protocol::constants::{
|
|||||||
TLS_RECORD_APPLICATION, TLS_RECORD_CHANGE_CIPHER, TLS_RECORD_HANDSHAKE, TLS_VERSION,
|
TLS_RECORD_APPLICATION, TLS_RECORD_CHANGE_CIPHER, TLS_RECORD_HANDSHAKE, TLS_VERSION,
|
||||||
};
|
};
|
||||||
use crate::protocol::tls::{TLS_DIGEST_LEN, TLS_DIGEST_POS, gen_fake_x25519_key};
|
use crate::protocol::tls::{TLS_DIGEST_LEN, TLS_DIGEST_POS, gen_fake_x25519_key};
|
||||||
use crate::tls_front::types::CachedTlsData;
|
use crate::tls_front::types::{CachedTlsData, ParsedCertificateInfo};
|
||||||
|
|
||||||
const MIN_APP_DATA: usize = 64;
|
const MIN_APP_DATA: usize = 64;
|
||||||
const MAX_APP_DATA: usize = 16640; // RFC 8446 §5.2 allows up to 2^14 + 256
|
const MAX_APP_DATA: usize = 16640; // RFC 8446 §5.2 allows up to 2^14 + 256
|
||||||
@@ -27,12 +27,81 @@ fn jitter_and_clamp_sizes(sizes: &[usize], rng: &SecureRandom) -> Vec<usize> {
|
|||||||
.collect()
|
.collect()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn app_data_body_capacity(sizes: &[usize]) -> usize {
|
||||||
|
sizes.iter().map(|&size| size.saturating_sub(17)).sum()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn ensure_payload_capacity(mut sizes: Vec<usize>, payload_len: usize) -> Vec<usize> {
|
||||||
|
if payload_len == 0 {
|
||||||
|
return sizes;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut body_total = app_data_body_capacity(&sizes);
|
||||||
|
if body_total >= payload_len {
|
||||||
|
return sizes;
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(last) = sizes.last_mut() {
|
||||||
|
let free = MAX_APP_DATA.saturating_sub(*last);
|
||||||
|
let grow = free.min(payload_len - body_total);
|
||||||
|
*last += grow;
|
||||||
|
body_total += grow;
|
||||||
|
}
|
||||||
|
|
||||||
|
while body_total < payload_len {
|
||||||
|
let remaining = payload_len - body_total;
|
||||||
|
let chunk = (remaining + 17).min(MAX_APP_DATA).max(MIN_APP_DATA);
|
||||||
|
sizes.push(chunk);
|
||||||
|
body_total += chunk.saturating_sub(17);
|
||||||
|
}
|
||||||
|
|
||||||
|
sizes
|
||||||
|
}
|
||||||
|
|
||||||
|
fn build_compact_cert_info_payload(cert_info: &ParsedCertificateInfo) -> Option<Vec<u8>> {
|
||||||
|
let mut fields = Vec::new();
|
||||||
|
|
||||||
|
if let Some(subject) = cert_info.subject_cn.as_deref() {
|
||||||
|
fields.push(format!("CN={subject}"));
|
||||||
|
}
|
||||||
|
if let Some(issuer) = cert_info.issuer_cn.as_deref() {
|
||||||
|
fields.push(format!("ISSUER={issuer}"));
|
||||||
|
}
|
||||||
|
if let Some(not_before) = cert_info.not_before_unix {
|
||||||
|
fields.push(format!("NB={not_before}"));
|
||||||
|
}
|
||||||
|
if let Some(not_after) = cert_info.not_after_unix {
|
||||||
|
fields.push(format!("NA={not_after}"));
|
||||||
|
}
|
||||||
|
if !cert_info.san_names.is_empty() {
|
||||||
|
let san = cert_info
|
||||||
|
.san_names
|
||||||
|
.iter()
|
||||||
|
.take(8)
|
||||||
|
.map(String::as_str)
|
||||||
|
.collect::<Vec<_>>()
|
||||||
|
.join(",");
|
||||||
|
fields.push(format!("SAN={san}"));
|
||||||
|
}
|
||||||
|
|
||||||
|
if fields.is_empty() {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut payload = fields.join(";").into_bytes();
|
||||||
|
if payload.len() > 512 {
|
||||||
|
payload.truncate(512);
|
||||||
|
}
|
||||||
|
Some(payload)
|
||||||
|
}
|
||||||
|
|
||||||
/// Build a ServerHello + CCS + ApplicationData sequence using cached TLS metadata.
|
/// Build a ServerHello + CCS + ApplicationData sequence using cached TLS metadata.
|
||||||
pub fn build_emulated_server_hello(
|
pub fn build_emulated_server_hello(
|
||||||
secret: &[u8],
|
secret: &[u8],
|
||||||
client_digest: &[u8; TLS_DIGEST_LEN],
|
client_digest: &[u8; TLS_DIGEST_LEN],
|
||||||
session_id: &[u8],
|
session_id: &[u8],
|
||||||
cached: &CachedTlsData,
|
cached: &CachedTlsData,
|
||||||
|
use_full_cert_payload: bool,
|
||||||
rng: &SecureRandom,
|
rng: &SecureRandom,
|
||||||
alpn: Option<Vec<u8>>,
|
alpn: Option<Vec<u8>>,
|
||||||
new_session_tickets: u8,
|
new_session_tickets: u8,
|
||||||
@@ -109,21 +178,60 @@ pub fn build_emulated_server_hello(
|
|||||||
if sizes.is_empty() {
|
if sizes.is_empty() {
|
||||||
sizes.push(cached.total_app_data_len.max(1024));
|
sizes.push(cached.total_app_data_len.max(1024));
|
||||||
}
|
}
|
||||||
let sizes = jitter_and_clamp_sizes(&sizes, rng);
|
let mut sizes = jitter_and_clamp_sizes(&sizes, rng);
|
||||||
|
let compact_payload = cached
|
||||||
|
.cert_info
|
||||||
|
.as_ref()
|
||||||
|
.and_then(build_compact_cert_info_payload);
|
||||||
|
let selected_payload: Option<&[u8]> = if use_full_cert_payload {
|
||||||
|
cached
|
||||||
|
.cert_payload
|
||||||
|
.as_ref()
|
||||||
|
.map(|payload| payload.certificate_message.as_slice())
|
||||||
|
.filter(|payload| !payload.is_empty())
|
||||||
|
.or_else(|| compact_payload.as_deref())
|
||||||
|
} else {
|
||||||
|
compact_payload.as_deref()
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(payload) = selected_payload {
|
||||||
|
sizes = ensure_payload_capacity(sizes, payload.len());
|
||||||
|
}
|
||||||
|
|
||||||
let mut app_data = Vec::new();
|
let mut app_data = Vec::new();
|
||||||
|
let mut payload_offset = 0usize;
|
||||||
for size in sizes {
|
for size in sizes {
|
||||||
let mut rec = Vec::with_capacity(5 + size);
|
let mut rec = Vec::with_capacity(5 + size);
|
||||||
rec.push(TLS_RECORD_APPLICATION);
|
rec.push(TLS_RECORD_APPLICATION);
|
||||||
rec.extend_from_slice(&TLS_VERSION);
|
rec.extend_from_slice(&TLS_VERSION);
|
||||||
rec.extend_from_slice(&(size as u16).to_be_bytes());
|
rec.extend_from_slice(&(size as u16).to_be_bytes());
|
||||||
if size > 17 {
|
|
||||||
let body_len = size - 17;
|
if let Some(payload) = selected_payload {
|
||||||
rec.extend_from_slice(&rng.bytes(body_len));
|
if size > 17 {
|
||||||
rec.push(0x16); // inner content type marker (handshake)
|
let body_len = size - 17;
|
||||||
rec.extend_from_slice(&rng.bytes(16)); // AEAD-like tag
|
let remaining = payload.len().saturating_sub(payload_offset);
|
||||||
|
let copy_len = remaining.min(body_len);
|
||||||
|
if copy_len > 0 {
|
||||||
|
rec.extend_from_slice(&payload[payload_offset..payload_offset + copy_len]);
|
||||||
|
payload_offset += copy_len;
|
||||||
|
}
|
||||||
|
if body_len > copy_len {
|
||||||
|
rec.extend_from_slice(&rng.bytes(body_len - copy_len));
|
||||||
|
}
|
||||||
|
rec.push(0x16); // inner content type marker (handshake)
|
||||||
|
rec.extend_from_slice(&rng.bytes(16)); // AEAD-like tag
|
||||||
|
} else {
|
||||||
|
rec.extend_from_slice(&rng.bytes(size));
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
rec.extend_from_slice(&rng.bytes(size));
|
if size > 17 {
|
||||||
|
let body_len = size - 17;
|
||||||
|
rec.extend_from_slice(&rng.bytes(body_len));
|
||||||
|
rec.push(0x16); // inner content type marker (handshake)
|
||||||
|
rec.extend_from_slice(&rng.bytes(16)); // AEAD-like tag
|
||||||
|
} else {
|
||||||
|
rec.extend_from_slice(&rng.bytes(size));
|
||||||
|
}
|
||||||
}
|
}
|
||||||
app_data.extend_from_slice(&rec);
|
app_data.extend_from_slice(&rec);
|
||||||
}
|
}
|
||||||
@@ -158,3 +266,125 @@ pub fn build_emulated_server_hello(
|
|||||||
|
|
||||||
response
|
response
|
||||||
}
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use std::time::SystemTime;
|
||||||
|
|
||||||
|
use crate::tls_front::types::{CachedTlsData, ParsedServerHello, TlsCertPayload};
|
||||||
|
|
||||||
|
use super::build_emulated_server_hello;
|
||||||
|
use crate::crypto::SecureRandom;
|
||||||
|
use crate::protocol::constants::{
|
||||||
|
TLS_RECORD_APPLICATION, TLS_RECORD_CHANGE_CIPHER, TLS_RECORD_HANDSHAKE,
|
||||||
|
};
|
||||||
|
|
||||||
|
fn first_app_data_payload(response: &[u8]) -> &[u8] {
|
||||||
|
let hello_len = u16::from_be_bytes([response[3], response[4]]) as usize;
|
||||||
|
let ccs_start = 5 + hello_len;
|
||||||
|
let ccs_len = u16::from_be_bytes([response[ccs_start + 3], response[ccs_start + 4]]) as usize;
|
||||||
|
let app_start = ccs_start + 5 + ccs_len;
|
||||||
|
let app_len = u16::from_be_bytes([response[app_start + 3], response[app_start + 4]]) as usize;
|
||||||
|
&response[app_start + 5..app_start + 5 + app_len]
|
||||||
|
}
|
||||||
|
|
||||||
|
fn make_cached(cert_payload: Option<TlsCertPayload>) -> CachedTlsData {
|
||||||
|
CachedTlsData {
|
||||||
|
server_hello_template: ParsedServerHello {
|
||||||
|
version: [0x03, 0x03],
|
||||||
|
random: [0u8; 32],
|
||||||
|
session_id: Vec::new(),
|
||||||
|
cipher_suite: [0x13, 0x01],
|
||||||
|
compression: 0,
|
||||||
|
extensions: Vec::new(),
|
||||||
|
},
|
||||||
|
cert_info: None,
|
||||||
|
cert_payload,
|
||||||
|
app_data_records_sizes: vec![64],
|
||||||
|
total_app_data_len: 64,
|
||||||
|
fetched_at: SystemTime::now(),
|
||||||
|
domain: "example.com".to_string(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_build_emulated_server_hello_uses_cached_cert_payload() {
|
||||||
|
let cert_msg = vec![0x0b, 0x00, 0x00, 0x05, 0x00, 0xaa, 0xbb, 0xcc, 0xdd];
|
||||||
|
let cached = make_cached(Some(TlsCertPayload {
|
||||||
|
cert_chain_der: vec![vec![0x30, 0x01, 0x00]],
|
||||||
|
certificate_message: cert_msg.clone(),
|
||||||
|
}));
|
||||||
|
let rng = SecureRandom::new();
|
||||||
|
let response = build_emulated_server_hello(
|
||||||
|
b"secret",
|
||||||
|
&[0x11; 32],
|
||||||
|
&[0x22; 16],
|
||||||
|
&cached,
|
||||||
|
true,
|
||||||
|
&rng,
|
||||||
|
None,
|
||||||
|
0,
|
||||||
|
);
|
||||||
|
|
||||||
|
assert_eq!(response[0], TLS_RECORD_HANDSHAKE);
|
||||||
|
let hello_len = u16::from_be_bytes([response[3], response[4]]) as usize;
|
||||||
|
let ccs_start = 5 + hello_len;
|
||||||
|
assert_eq!(response[ccs_start], TLS_RECORD_CHANGE_CIPHER);
|
||||||
|
let app_start = ccs_start + 6;
|
||||||
|
assert_eq!(response[app_start], TLS_RECORD_APPLICATION);
|
||||||
|
|
||||||
|
let payload = first_app_data_payload(&response);
|
||||||
|
assert!(payload.starts_with(&cert_msg));
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_build_emulated_server_hello_random_fallback_when_no_cert_payload() {
|
||||||
|
let cached = make_cached(None);
|
||||||
|
let rng = SecureRandom::new();
|
||||||
|
let response = build_emulated_server_hello(
|
||||||
|
b"secret",
|
||||||
|
&[0x22; 32],
|
||||||
|
&[0x33; 16],
|
||||||
|
&cached,
|
||||||
|
true,
|
||||||
|
&rng,
|
||||||
|
None,
|
||||||
|
0,
|
||||||
|
);
|
||||||
|
|
||||||
|
let payload = first_app_data_payload(&response);
|
||||||
|
assert!(payload.len() >= 64);
|
||||||
|
assert_eq!(payload[payload.len() - 17], 0x16);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_build_emulated_server_hello_uses_compact_payload_after_first() {
|
||||||
|
let cert_msg = vec![0x0b, 0x00, 0x00, 0x05, 0x00, 0xaa, 0xbb, 0xcc, 0xdd];
|
||||||
|
let mut cached = make_cached(Some(TlsCertPayload {
|
||||||
|
cert_chain_der: vec![vec![0x30, 0x01, 0x00]],
|
||||||
|
certificate_message: cert_msg,
|
||||||
|
}));
|
||||||
|
cached.cert_info = Some(crate::tls_front::types::ParsedCertificateInfo {
|
||||||
|
not_after_unix: Some(1_900_000_000),
|
||||||
|
not_before_unix: Some(1_700_000_000),
|
||||||
|
issuer_cn: Some("Issuer".to_string()),
|
||||||
|
subject_cn: Some("example.com".to_string()),
|
||||||
|
san_names: vec!["example.com".to_string(), "www.example.com".to_string()],
|
||||||
|
});
|
||||||
|
|
||||||
|
let rng = SecureRandom::new();
|
||||||
|
let response = build_emulated_server_hello(
|
||||||
|
b"secret",
|
||||||
|
&[0x44; 32],
|
||||||
|
&[0x55; 16],
|
||||||
|
&cached,
|
||||||
|
false,
|
||||||
|
&rng,
|
||||||
|
None,
|
||||||
|
0,
|
||||||
|
);
|
||||||
|
|
||||||
|
let payload = first_app_data_payload(&response);
|
||||||
|
assert!(payload.starts_with(b"CN=example.com"));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,7 +1,7 @@
|
|||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
use anyhow::{Context, Result, anyhow};
|
use anyhow::{Result, anyhow};
|
||||||
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||||
use tokio::net::TcpStream;
|
use tokio::net::TcpStream;
|
||||||
use tokio::time::timeout;
|
use tokio::time::timeout;
|
||||||
@@ -19,7 +19,13 @@ use x509_parser::certificate::X509Certificate;
|
|||||||
|
|
||||||
use crate::crypto::SecureRandom;
|
use crate::crypto::SecureRandom;
|
||||||
use crate::protocol::constants::{TLS_RECORD_APPLICATION, TLS_RECORD_HANDSHAKE};
|
use crate::protocol::constants::{TLS_RECORD_APPLICATION, TLS_RECORD_HANDSHAKE};
|
||||||
use crate::tls_front::types::{ParsedServerHello, TlsExtension, TlsFetchResult, ParsedCertificateInfo};
|
use crate::tls_front::types::{
|
||||||
|
ParsedCertificateInfo,
|
||||||
|
ParsedServerHello,
|
||||||
|
TlsCertPayload,
|
||||||
|
TlsExtension,
|
||||||
|
TlsFetchResult,
|
||||||
|
};
|
||||||
|
|
||||||
/// No-op verifier: accept any certificate (we only need lengths and metadata).
|
/// No-op verifier: accept any certificate (we only need lengths and metadata).
|
||||||
#[derive(Debug)]
|
#[derive(Debug)]
|
||||||
@@ -315,6 +321,46 @@ fn parse_cert_info(certs: &[CertificateDer<'static>]) -> Option<ParsedCertificat
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn u24_bytes(value: usize) -> Option<[u8; 3]> {
|
||||||
|
if value > 0x00ff_ffff {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
Some([
|
||||||
|
((value >> 16) & 0xff) as u8,
|
||||||
|
((value >> 8) & 0xff) as u8,
|
||||||
|
(value & 0xff) as u8,
|
||||||
|
])
|
||||||
|
}
|
||||||
|
|
||||||
|
fn encode_tls13_certificate_message(cert_chain_der: &[Vec<u8>]) -> Option<Vec<u8>> {
|
||||||
|
if cert_chain_der.is_empty() {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut certificate_list = Vec::new();
|
||||||
|
for cert in cert_chain_der {
|
||||||
|
if cert.is_empty() {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
certificate_list.extend_from_slice(&u24_bytes(cert.len())?);
|
||||||
|
certificate_list.extend_from_slice(cert);
|
||||||
|
certificate_list.extend_from_slice(&0u16.to_be_bytes()); // cert_entry extensions
|
||||||
|
}
|
||||||
|
|
||||||
|
// Certificate = context_len(1) + certificate_list_len(3) + entries
|
||||||
|
let body_len = 1usize
|
||||||
|
.checked_add(3)?
|
||||||
|
.checked_add(certificate_list.len())?;
|
||||||
|
|
||||||
|
let mut message = Vec::with_capacity(4 + body_len);
|
||||||
|
message.push(0x0b); // HandshakeType::certificate
|
||||||
|
message.extend_from_slice(&u24_bytes(body_len)?);
|
||||||
|
message.push(0x00); // certificate_request_context length
|
||||||
|
message.extend_from_slice(&u24_bytes(certificate_list.len())?);
|
||||||
|
message.extend_from_slice(&certificate_list);
|
||||||
|
Some(message)
|
||||||
|
}
|
||||||
|
|
||||||
async fn fetch_via_raw_tls(
|
async fn fetch_via_raw_tls(
|
||||||
host: &str,
|
host: &str,
|
||||||
port: u16,
|
port: u16,
|
||||||
@@ -368,26 +414,18 @@ async fn fetch_via_raw_tls(
|
|||||||
},
|
},
|
||||||
total_app_data_len,
|
total_app_data_len,
|
||||||
cert_info: None,
|
cert_info: None,
|
||||||
|
cert_payload: None,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Fetch real TLS metadata for the given SNI: negotiated cipher and cert lengths.
|
async fn fetch_via_rustls(
|
||||||
pub async fn fetch_real_tls(
|
|
||||||
host: &str,
|
host: &str,
|
||||||
port: u16,
|
port: u16,
|
||||||
sni: &str,
|
sni: &str,
|
||||||
connect_timeout: Duration,
|
connect_timeout: Duration,
|
||||||
upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
|
upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
|
||||||
) -> Result<TlsFetchResult> {
|
) -> Result<TlsFetchResult> {
|
||||||
// Preferred path: raw TLS probe for accurate record sizing
|
// rustls handshake path for certificate and basic negotiated metadata.
|
||||||
match fetch_via_raw_tls(host, port, sni, connect_timeout).await {
|
|
||||||
Ok(res) => return Ok(res),
|
|
||||||
Err(e) => {
|
|
||||||
warn!(sni = %sni, error = %e, "Raw TLS fetch failed, falling back to rustls");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fallback: rustls handshake to at least get certificate sizes
|
|
||||||
let stream = if let Some(manager) = upstream {
|
let stream = if let Some(manager) = upstream {
|
||||||
// Resolve host to SocketAddr
|
// Resolve host to SocketAddr
|
||||||
if let Ok(mut addrs) = tokio::net::lookup_host((host, port)).await {
|
if let Ok(mut addrs) = tokio::net::lookup_host((host, port)).await {
|
||||||
@@ -429,8 +467,19 @@ pub async fn fetch_real_tls(
|
|||||||
.peer_certificates()
|
.peer_certificates()
|
||||||
.map(|slice| slice.to_vec())
|
.map(|slice| slice.to_vec())
|
||||||
.unwrap_or_default();
|
.unwrap_or_default();
|
||||||
|
let cert_chain_der: Vec<Vec<u8>> = certs.iter().map(|c| c.as_ref().to_vec()).collect();
|
||||||
|
let cert_payload = encode_tls13_certificate_message(&cert_chain_der).map(|certificate_message| {
|
||||||
|
TlsCertPayload {
|
||||||
|
cert_chain_der: cert_chain_der.clone(),
|
||||||
|
certificate_message,
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
let total_cert_len: usize = certs.iter().map(|c| c.len()).sum::<usize>().max(1024);
|
let total_cert_len = cert_payload
|
||||||
|
.as_ref()
|
||||||
|
.map(|payload| payload.certificate_message.len())
|
||||||
|
.unwrap_or_else(|| cert_chain_der.iter().map(Vec::len).sum::<usize>())
|
||||||
|
.max(1024);
|
||||||
let cert_info = parse_cert_info(&certs);
|
let cert_info = parse_cert_info(&certs);
|
||||||
|
|
||||||
// Heuristic: split across two records if large to mimic real servers a bit.
|
// Heuristic: split across two records if large to mimic real servers a bit.
|
||||||
@@ -453,6 +502,7 @@ pub async fn fetch_real_tls(
|
|||||||
sni = %sni,
|
sni = %sni,
|
||||||
len = total_cert_len,
|
len = total_cert_len,
|
||||||
cipher = format!("0x{:04x}", u16::from_be_bytes(cipher_suite)),
|
cipher = format!("0x{:04x}", u16::from_be_bytes(cipher_suite)),
|
||||||
|
has_cert_payload = cert_payload.is_some(),
|
||||||
"Fetched TLS metadata via rustls"
|
"Fetched TLS metadata via rustls"
|
||||||
);
|
);
|
||||||
|
|
||||||
@@ -461,5 +511,81 @@ pub async fn fetch_real_tls(
|
|||||||
app_data_records_sizes: app_data_records_sizes.clone(),
|
app_data_records_sizes: app_data_records_sizes.clone(),
|
||||||
total_app_data_len: app_data_records_sizes.iter().sum(),
|
total_app_data_len: app_data_records_sizes.iter().sum(),
|
||||||
cert_info,
|
cert_info,
|
||||||
|
cert_payload,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Fetch real TLS metadata for the given SNI.
|
||||||
|
///
|
||||||
|
/// Strategy:
|
||||||
|
/// 1) Probe raw TLS for realistic ServerHello and ApplicationData record sizes.
|
||||||
|
/// 2) Fetch certificate chain via rustls to build cert payload.
|
||||||
|
/// 3) Merge both when possible; otherwise auto-fallback to whichever succeeded.
|
||||||
|
pub async fn fetch_real_tls(
|
||||||
|
host: &str,
|
||||||
|
port: u16,
|
||||||
|
sni: &str,
|
||||||
|
connect_timeout: Duration,
|
||||||
|
upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
|
||||||
|
) -> Result<TlsFetchResult> {
|
||||||
|
let raw_result = match fetch_via_raw_tls(host, port, sni, connect_timeout).await {
|
||||||
|
Ok(res) => Some(res),
|
||||||
|
Err(e) => {
|
||||||
|
warn!(sni = %sni, error = %e, "Raw TLS fetch failed");
|
||||||
|
None
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
match fetch_via_rustls(host, port, sni, connect_timeout, upstream).await {
|
||||||
|
Ok(rustls_result) => {
|
||||||
|
if let Some(mut raw) = raw_result {
|
||||||
|
raw.cert_info = rustls_result.cert_info;
|
||||||
|
raw.cert_payload = rustls_result.cert_payload;
|
||||||
|
debug!(sni = %sni, "Fetched TLS metadata via raw probe + rustls cert chain");
|
||||||
|
Ok(raw)
|
||||||
|
} else {
|
||||||
|
Ok(rustls_result)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
if let Some(raw) = raw_result {
|
||||||
|
warn!(sni = %sni, error = %e, "Rustls cert fetch failed, using raw TLS metadata only");
|
||||||
|
Ok(raw)
|
||||||
|
} else {
|
||||||
|
Err(e)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[cfg(test)]
|
||||||
|
mod tests {
|
||||||
|
use super::encode_tls13_certificate_message;
|
||||||
|
|
||||||
|
fn read_u24(bytes: &[u8]) -> usize {
|
||||||
|
((bytes[0] as usize) << 16) | ((bytes[1] as usize) << 8) | (bytes[2] as usize)
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_encode_tls13_certificate_message_single_cert() {
|
||||||
|
let cert = vec![0x30, 0x03, 0x02, 0x01, 0x01];
|
||||||
|
let message = encode_tls13_certificate_message(&[cert.clone()]).expect("message");
|
||||||
|
|
||||||
|
assert_eq!(message[0], 0x0b);
|
||||||
|
assert_eq!(read_u24(&message[1..4]), message.len() - 4);
|
||||||
|
assert_eq!(message[4], 0x00);
|
||||||
|
|
||||||
|
let cert_list_len = read_u24(&message[5..8]);
|
||||||
|
assert_eq!(cert_list_len, cert.len() + 5);
|
||||||
|
|
||||||
|
let cert_len = read_u24(&message[8..11]);
|
||||||
|
assert_eq!(cert_len, cert.len());
|
||||||
|
assert_eq!(&message[11..11 + cert.len()], cert.as_slice());
|
||||||
|
assert_eq!(&message[11 + cert.len()..13 + cert.len()], &[0x00, 0x00]);
|
||||||
|
}
|
||||||
|
|
||||||
|
#[test]
|
||||||
|
fn test_encode_tls13_certificate_message_empty_chain() {
|
||||||
|
assert!(encode_tls13_certificate_message(&[]).is_none());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -29,11 +29,23 @@ pub struct ParsedCertificateInfo {
|
|||||||
pub san_names: Vec<String>,
|
pub san_names: Vec<String>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// TLS certificate payload captured from profiled upstream.
|
||||||
|
///
|
||||||
|
/// `certificate_message` stores an encoded TLS 1.3 Certificate handshake
|
||||||
|
/// message body that can be replayed as opaque ApplicationData bytes in FakeTLS.
|
||||||
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
pub struct TlsCertPayload {
|
||||||
|
pub cert_chain_der: Vec<Vec<u8>>,
|
||||||
|
pub certificate_message: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
/// Cached data per SNI used by the emulator.
|
/// Cached data per SNI used by the emulator.
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
pub struct CachedTlsData {
|
pub struct CachedTlsData {
|
||||||
pub server_hello_template: ParsedServerHello,
|
pub server_hello_template: ParsedServerHello,
|
||||||
pub cert_info: Option<ParsedCertificateInfo>,
|
pub cert_info: Option<ParsedCertificateInfo>,
|
||||||
|
#[serde(default)]
|
||||||
|
pub cert_payload: Option<TlsCertPayload>,
|
||||||
pub app_data_records_sizes: Vec<usize>,
|
pub app_data_records_sizes: Vec<usize>,
|
||||||
pub total_app_data_len: usize,
|
pub total_app_data_len: usize,
|
||||||
#[serde(default = "now_system_time", skip_serializing, skip_deserializing)]
|
#[serde(default = "now_system_time", skip_serializing, skip_deserializing)]
|
||||||
@@ -52,4 +64,5 @@ pub struct TlsFetchResult {
|
|||||||
pub app_data_records_sizes: Vec<usize>,
|
pub app_data_records_sizes: Vec<usize>,
|
||||||
pub total_app_data_len: usize,
|
pub total_app_data_len: usize,
|
||||||
pub cert_info: Option<ParsedCertificateInfo>,
|
pub cert_info: Option<ParsedCertificateInfo>,
|
||||||
|
pub cert_payload: Option<TlsCertPayload>,
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,8 +4,10 @@ use std::sync::Arc;
|
|||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
|
|
||||||
use httpdate;
|
use httpdate;
|
||||||
|
use tokio::sync::watch;
|
||||||
use tracing::{debug, info, warn};
|
use tracing::{debug, info, warn};
|
||||||
|
|
||||||
|
use crate::config::ProxyConfig;
|
||||||
use crate::error::Result;
|
use crate::error::Result;
|
||||||
|
|
||||||
use super::MePool;
|
use super::MePool;
|
||||||
@@ -128,49 +130,134 @@ pub async fn fetch_proxy_config(url: &str) -> Result<ProxyConfigData> {
|
|||||||
Ok(ProxyConfigData { map, default_dc })
|
Ok(ProxyConfigData { map, default_dc })
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn me_config_updater(pool: Arc<MePool>, rng: Arc<SecureRandom>, interval: Duration) {
|
async fn run_update_cycle(pool: &Arc<MePool>, rng: &Arc<SecureRandom>, cfg: &ProxyConfig) {
|
||||||
let mut tick = tokio::time::interval(interval);
|
pool.update_runtime_reinit_policy(
|
||||||
// skip immediate tick to avoid double-fetch right after startup
|
cfg.general.hardswap,
|
||||||
tick.tick().await;
|
cfg.general.me_pool_drain_ttl_secs,
|
||||||
|
cfg.general.effective_me_pool_force_close_secs(),
|
||||||
|
cfg.general.me_pool_min_fresh_ratio,
|
||||||
|
);
|
||||||
|
|
||||||
|
let mut maps_changed = false;
|
||||||
|
|
||||||
|
// Update proxy config v4
|
||||||
|
let cfg_v4 = retry_fetch("https://core.telegram.org/getProxyConfig").await;
|
||||||
|
if let Some(cfg_v4) = cfg_v4 {
|
||||||
|
let changed = pool.update_proxy_maps(cfg_v4.map.clone(), None).await;
|
||||||
|
if let Some(dc) = cfg_v4.default_dc {
|
||||||
|
pool.default_dc
|
||||||
|
.store(dc, std::sync::atomic::Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
if changed {
|
||||||
|
maps_changed = true;
|
||||||
|
info!("ME config updated (v4)");
|
||||||
|
} else {
|
||||||
|
debug!("ME config v4 unchanged");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Update proxy config v6 (optional)
|
||||||
|
let cfg_v6 = retry_fetch("https://core.telegram.org/getProxyConfigV6").await;
|
||||||
|
if let Some(cfg_v6) = cfg_v6 {
|
||||||
|
let changed = pool.update_proxy_maps(HashMap::new(), Some(cfg_v6.map)).await;
|
||||||
|
if changed {
|
||||||
|
maps_changed = true;
|
||||||
|
info!("ME config updated (v6)");
|
||||||
|
} else {
|
||||||
|
debug!("ME config v6 unchanged");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if maps_changed {
|
||||||
|
pool.zero_downtime_reinit_after_map_change(rng.as_ref())
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
|
||||||
|
pool.reset_stun_state();
|
||||||
|
|
||||||
|
// Update proxy-secret
|
||||||
|
match download_proxy_secret().await {
|
||||||
|
Ok(secret) => {
|
||||||
|
if pool.update_secret(secret).await {
|
||||||
|
info!("proxy-secret updated and pool reconnect scheduled");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => warn!(error = %e, "proxy-secret update failed"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn me_config_updater(
|
||||||
|
pool: Arc<MePool>,
|
||||||
|
rng: Arc<SecureRandom>,
|
||||||
|
mut config_rx: watch::Receiver<Arc<ProxyConfig>>,
|
||||||
|
) {
|
||||||
|
let mut update_every_secs = config_rx
|
||||||
|
.borrow()
|
||||||
|
.general
|
||||||
|
.effective_update_every_secs()
|
||||||
|
.max(1);
|
||||||
|
let mut update_every = Duration::from_secs(update_every_secs);
|
||||||
|
let mut next_tick = tokio::time::Instant::now() + update_every;
|
||||||
|
info!(update_every_secs, "ME config updater started");
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
tick.tick().await;
|
let sleep = tokio::time::sleep_until(next_tick);
|
||||||
|
tokio::pin!(sleep);
|
||||||
|
|
||||||
// Update proxy config v4
|
tokio::select! {
|
||||||
let cfg_v4 = retry_fetch("https://core.telegram.org/getProxyConfig").await;
|
_ = &mut sleep => {
|
||||||
if let Some(cfg) = cfg_v4 {
|
let cfg = config_rx.borrow().clone();
|
||||||
let changed = pool.update_proxy_maps(cfg.map.clone(), None).await;
|
run_update_cycle(&pool, &rng, cfg.as_ref()).await;
|
||||||
if let Some(dc) = cfg.default_dc {
|
let refreshed_secs = cfg.general.effective_update_every_secs().max(1);
|
||||||
pool.default_dc.store(dc, std::sync::atomic::Ordering::Relaxed);
|
if refreshed_secs != update_every_secs {
|
||||||
|
info!(
|
||||||
|
old_update_every_secs = update_every_secs,
|
||||||
|
new_update_every_secs = refreshed_secs,
|
||||||
|
"ME config updater interval changed"
|
||||||
|
);
|
||||||
|
update_every_secs = refreshed_secs;
|
||||||
|
update_every = Duration::from_secs(update_every_secs);
|
||||||
|
}
|
||||||
|
next_tick = tokio::time::Instant::now() + update_every;
|
||||||
}
|
}
|
||||||
if changed {
|
changed = config_rx.changed() => {
|
||||||
info!("ME config updated (v4), reconciling connections");
|
if changed.is_err() {
|
||||||
pool.reconcile_connections(&rng).await;
|
warn!("ME config updater stopped: config channel closed");
|
||||||
} else {
|
break;
|
||||||
debug!("ME config v4 unchanged");
|
}
|
||||||
}
|
let cfg = config_rx.borrow().clone();
|
||||||
}
|
pool.update_runtime_reinit_policy(
|
||||||
|
cfg.general.hardswap,
|
||||||
|
cfg.general.me_pool_drain_ttl_secs,
|
||||||
|
cfg.general.effective_me_pool_force_close_secs(),
|
||||||
|
cfg.general.me_pool_min_fresh_ratio,
|
||||||
|
);
|
||||||
|
let new_secs = cfg.general.effective_update_every_secs().max(1);
|
||||||
|
if new_secs == update_every_secs {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
// Update proxy config v6 (optional)
|
if new_secs < update_every_secs {
|
||||||
let cfg_v6 = retry_fetch("https://core.telegram.org/getProxyConfigV6").await;
|
info!(
|
||||||
if let Some(cfg_v6) = cfg_v6 {
|
old_update_every_secs = update_every_secs,
|
||||||
let changed = pool.update_proxy_maps(HashMap::new(), Some(cfg_v6.map)).await;
|
new_update_every_secs = new_secs,
|
||||||
if changed {
|
"ME config updater interval decreased, running immediate refresh"
|
||||||
info!("ME config updated (v6), reconciling connections");
|
);
|
||||||
pool.reconcile_connections(&rng).await;
|
update_every_secs = new_secs;
|
||||||
} else {
|
update_every = Duration::from_secs(update_every_secs);
|
||||||
debug!("ME config v6 unchanged");
|
run_update_cycle(&pool, &rng, cfg.as_ref()).await;
|
||||||
}
|
next_tick = tokio::time::Instant::now() + update_every;
|
||||||
}
|
} else {
|
||||||
pool.reset_stun_state();
|
info!(
|
||||||
|
old_update_every_secs = update_every_secs,
|
||||||
// Update proxy-secret
|
new_update_every_secs = new_secs,
|
||||||
match download_proxy_secret().await {
|
"ME config updater interval increased"
|
||||||
Ok(secret) => {
|
);
|
||||||
if pool.update_secret(secret).await {
|
update_every_secs = new_secs;
|
||||||
info!("proxy-secret updated and pool reconnect scheduled");
|
update_every = Duration::from_secs(update_every_secs);
|
||||||
|
next_tick = tokio::time::Instant::now() + update_every;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
Err(e) => warn!(error = %e, "proxy-secret update failed"),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -68,6 +68,7 @@ async fn check_family(
|
|||||||
.read()
|
.read()
|
||||||
.await
|
.await
|
||||||
.iter()
|
.iter()
|
||||||
|
.filter(|w| !w.draining.load(std::sync::atomic::Ordering::Relaxed))
|
||||||
.map(|w| w.addr)
|
.map(|w| w.addr)
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
|
|||||||
@@ -1,14 +1,14 @@
|
|||||||
use std::collections::HashMap;
|
use std::collections::{HashMap, HashSet};
|
||||||
use std::net::{IpAddr, Ipv6Addr, SocketAddr};
|
use std::net::{IpAddr, Ipv6Addr, SocketAddr};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::sync::atomic::{AtomicBool, AtomicI32, AtomicU64, AtomicUsize, Ordering};
|
use std::sync::atomic::{AtomicBool, AtomicI32, AtomicU32, AtomicU64, AtomicUsize, Ordering};
|
||||||
use bytes::BytesMut;
|
use bytes::BytesMut;
|
||||||
use rand::Rng;
|
use rand::Rng;
|
||||||
use rand::seq::SliceRandom;
|
use rand::seq::SliceRandom;
|
||||||
use tokio::sync::{Mutex, RwLock, mpsc, Notify};
|
use tokio::sync::{Mutex, RwLock, mpsc, Notify};
|
||||||
use tokio_util::sync::CancellationToken;
|
use tokio_util::sync::CancellationToken;
|
||||||
use tracing::{debug, info, warn};
|
use tracing::{debug, info, warn};
|
||||||
use std::time::{Duration, Instant};
|
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
|
||||||
|
|
||||||
use crate::crypto::SecureRandom;
|
use crate::crypto::SecureRandom;
|
||||||
use crate::error::{ProxyError, Result};
|
use crate::error::{ProxyError, Result};
|
||||||
@@ -27,10 +27,13 @@ const ME_ACTIVE_PING_JITTER_SECS: i64 = 5;
|
|||||||
pub struct MeWriter {
|
pub struct MeWriter {
|
||||||
pub id: u64,
|
pub id: u64,
|
||||||
pub addr: SocketAddr,
|
pub addr: SocketAddr,
|
||||||
|
pub generation: u64,
|
||||||
pub tx: mpsc::Sender<WriterCommand>,
|
pub tx: mpsc::Sender<WriterCommand>,
|
||||||
pub cancel: CancellationToken,
|
pub cancel: CancellationToken,
|
||||||
pub degraded: Arc<AtomicBool>,
|
pub degraded: Arc<AtomicBool>,
|
||||||
pub draining: Arc<AtomicBool>,
|
pub draining: Arc<AtomicBool>,
|
||||||
|
pub draining_started_at_epoch_secs: Arc<AtomicU64>,
|
||||||
|
pub allow_drain_fallback: Arc<AtomicBool>,
|
||||||
}
|
}
|
||||||
|
|
||||||
pub struct MePool {
|
pub struct MePool {
|
||||||
@@ -73,6 +76,11 @@ pub struct MePool {
|
|||||||
pub(super) writer_available: Arc<Notify>,
|
pub(super) writer_available: Arc<Notify>,
|
||||||
pub(super) conn_count: AtomicUsize,
|
pub(super) conn_count: AtomicUsize,
|
||||||
pub(super) stats: Arc<crate::stats::Stats>,
|
pub(super) stats: Arc<crate::stats::Stats>,
|
||||||
|
pub(super) generation: AtomicU64,
|
||||||
|
pub(super) hardswap: AtomicBool,
|
||||||
|
pub(super) me_pool_drain_ttl_secs: AtomicU64,
|
||||||
|
pub(super) me_pool_force_close_secs: AtomicU64,
|
||||||
|
pub(super) me_pool_min_fresh_ratio_permille: AtomicU32,
|
||||||
pool_size: usize,
|
pool_size: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -83,6 +91,22 @@ pub struct NatReflectionCache {
|
|||||||
}
|
}
|
||||||
|
|
||||||
impl MePool {
|
impl MePool {
|
||||||
|
fn ratio_to_permille(ratio: f32) -> u32 {
|
||||||
|
let clamped = ratio.clamp(0.0, 1.0);
|
||||||
|
(clamped * 1000.0).round() as u32
|
||||||
|
}
|
||||||
|
|
||||||
|
fn permille_to_ratio(permille: u32) -> f32 {
|
||||||
|
(permille.min(1000) as f32) / 1000.0
|
||||||
|
}
|
||||||
|
|
||||||
|
fn now_epoch_secs() -> u64 {
|
||||||
|
SystemTime::now()
|
||||||
|
.duration_since(UNIX_EPOCH)
|
||||||
|
.unwrap_or_default()
|
||||||
|
.as_secs()
|
||||||
|
}
|
||||||
|
|
||||||
pub fn new(
|
pub fn new(
|
||||||
proxy_tag: Option<Vec<u8>>,
|
proxy_tag: Option<Vec<u8>>,
|
||||||
proxy_secret: Vec<u8>,
|
proxy_secret: Vec<u8>,
|
||||||
@@ -110,6 +134,10 @@ impl MePool {
|
|||||||
me_reconnect_backoff_base_ms: u64,
|
me_reconnect_backoff_base_ms: u64,
|
||||||
me_reconnect_backoff_cap_ms: u64,
|
me_reconnect_backoff_cap_ms: u64,
|
||||||
me_reconnect_fast_retry_count: u32,
|
me_reconnect_fast_retry_count: u32,
|
||||||
|
hardswap: bool,
|
||||||
|
me_pool_drain_ttl_secs: u64,
|
||||||
|
me_pool_force_close_secs: u64,
|
||||||
|
me_pool_min_fresh_ratio: f32,
|
||||||
) -> Arc<Self> {
|
) -> Arc<Self> {
|
||||||
Arc::new(Self {
|
Arc::new(Self {
|
||||||
registry: Arc::new(ConnRegistry::new()),
|
registry: Arc::new(ConnRegistry::new()),
|
||||||
@@ -152,6 +180,11 @@ impl MePool {
|
|||||||
nat_reflection_cache: Arc::new(Mutex::new(NatReflectionCache::default())),
|
nat_reflection_cache: Arc::new(Mutex::new(NatReflectionCache::default())),
|
||||||
writer_available: Arc::new(Notify::new()),
|
writer_available: Arc::new(Notify::new()),
|
||||||
conn_count: AtomicUsize::new(0),
|
conn_count: AtomicUsize::new(0),
|
||||||
|
generation: AtomicU64::new(1),
|
||||||
|
hardswap: AtomicBool::new(hardswap),
|
||||||
|
me_pool_drain_ttl_secs: AtomicU64::new(me_pool_drain_ttl_secs),
|
||||||
|
me_pool_force_close_secs: AtomicU64::new(me_pool_force_close_secs),
|
||||||
|
me_pool_min_fresh_ratio_permille: AtomicU32::new(Self::ratio_to_permille(me_pool_min_fresh_ratio)),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -159,6 +192,25 @@ impl MePool {
|
|||||||
self.proxy_tag.is_some()
|
self.proxy_tag.is_some()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn current_generation(&self) -> u64 {
|
||||||
|
self.generation.load(Ordering::Relaxed)
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn update_runtime_reinit_policy(
|
||||||
|
&self,
|
||||||
|
hardswap: bool,
|
||||||
|
drain_ttl_secs: u64,
|
||||||
|
force_close_secs: u64,
|
||||||
|
min_fresh_ratio: f32,
|
||||||
|
) {
|
||||||
|
self.hardswap.store(hardswap, Ordering::Relaxed);
|
||||||
|
self.me_pool_drain_ttl_secs.store(drain_ttl_secs, Ordering::Relaxed);
|
||||||
|
self.me_pool_force_close_secs
|
||||||
|
.store(force_close_secs, Ordering::Relaxed);
|
||||||
|
self.me_pool_min_fresh_ratio_permille
|
||||||
|
.store(Self::ratio_to_permille(min_fresh_ratio), Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
|
||||||
pub fn reset_stun_state(&self) {
|
pub fn reset_stun_state(&self) {
|
||||||
self.nat_probe_attempts.store(0, Ordering::Relaxed);
|
self.nat_probe_attempts.store(0, Ordering::Relaxed);
|
||||||
self.nat_probe_disabled.store(false, Ordering::Relaxed);
|
self.nat_probe_disabled.store(false, Ordering::Relaxed);
|
||||||
@@ -177,8 +229,43 @@ impl MePool {
|
|||||||
self.writers.clone()
|
self.writers.clone()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn force_close_timeout(&self) -> Option<Duration> {
|
||||||
|
let secs = self.me_pool_force_close_secs.load(Ordering::Relaxed);
|
||||||
|
if secs == 0 {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
Some(Duration::from_secs(secs))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn coverage_ratio(
|
||||||
|
desired_by_dc: &HashMap<i32, HashSet<SocketAddr>>,
|
||||||
|
active_writer_addrs: &HashSet<SocketAddr>,
|
||||||
|
) -> (f32, Vec<i32>) {
|
||||||
|
if desired_by_dc.is_empty() {
|
||||||
|
return (1.0, Vec::new());
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut missing_dc = Vec::<i32>::new();
|
||||||
|
let mut covered = 0usize;
|
||||||
|
for (dc, endpoints) in desired_by_dc {
|
||||||
|
if endpoints.is_empty() {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
if endpoints.iter().any(|addr| active_writer_addrs.contains(addr)) {
|
||||||
|
covered += 1;
|
||||||
|
} else {
|
||||||
|
missing_dc.push(*dc);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
missing_dc.sort_unstable();
|
||||||
|
let total = desired_by_dc.len().max(1);
|
||||||
|
let ratio = (covered as f32) / (total as f32);
|
||||||
|
(ratio, missing_dc)
|
||||||
|
}
|
||||||
|
|
||||||
pub async fn reconcile_connections(self: &Arc<Self>, rng: &SecureRandom) {
|
pub async fn reconcile_connections(self: &Arc<Self>, rng: &SecureRandom) {
|
||||||
use std::collections::HashSet;
|
|
||||||
let writers = self.writers.read().await;
|
let writers = self.writers.read().await;
|
||||||
let current: HashSet<SocketAddr> = writers
|
let current: HashSet<SocketAddr> = writers
|
||||||
.iter()
|
.iter()
|
||||||
@@ -210,6 +297,181 @@ impl MePool {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
async fn desired_dc_endpoints(&self) -> HashMap<i32, HashSet<SocketAddr>> {
|
||||||
|
let mut out: HashMap<i32, HashSet<SocketAddr>> = HashMap::new();
|
||||||
|
|
||||||
|
if self.decision.ipv4_me {
|
||||||
|
let map_v4 = self.proxy_map_v4.read().await.clone();
|
||||||
|
for (dc, addrs) in map_v4 {
|
||||||
|
let entry = out.entry(dc.abs()).or_default();
|
||||||
|
for (ip, port) in addrs {
|
||||||
|
entry.insert(SocketAddr::new(ip, port));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if self.decision.ipv6_me {
|
||||||
|
let map_v6 = self.proxy_map_v6.read().await.clone();
|
||||||
|
for (dc, addrs) in map_v6 {
|
||||||
|
let entry = out.entry(dc.abs()).or_default();
|
||||||
|
for (ip, port) in addrs {
|
||||||
|
entry.insert(SocketAddr::new(ip, port));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
out
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn warmup_generation_for_all_dcs(
|
||||||
|
self: &Arc<Self>,
|
||||||
|
rng: &SecureRandom,
|
||||||
|
generation: u64,
|
||||||
|
desired_by_dc: &HashMap<i32, HashSet<SocketAddr>>,
|
||||||
|
) {
|
||||||
|
for endpoints in desired_by_dc.values() {
|
||||||
|
if endpoints.is_empty() {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
let has_fresh = {
|
||||||
|
let ws = self.writers.read().await;
|
||||||
|
ws.iter().any(|w| {
|
||||||
|
!w.draining.load(Ordering::Relaxed)
|
||||||
|
&& w.generation == generation
|
||||||
|
&& endpoints.contains(&w.addr)
|
||||||
|
})
|
||||||
|
};
|
||||||
|
|
||||||
|
if has_fresh {
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut shuffled: Vec<SocketAddr> = endpoints.iter().copied().collect();
|
||||||
|
shuffled.shuffle(&mut rand::rng());
|
||||||
|
for addr in shuffled {
|
||||||
|
if self.connect_one(addr, rng).await.is_ok() {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn zero_downtime_reinit_after_map_change(
|
||||||
|
self: &Arc<Self>,
|
||||||
|
rng: &SecureRandom,
|
||||||
|
) {
|
||||||
|
let desired_by_dc = self.desired_dc_endpoints().await;
|
||||||
|
if desired_by_dc.is_empty() {
|
||||||
|
warn!("ME endpoint map is empty after update; skipping stale writer drain");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
let previous_generation = self.current_generation();
|
||||||
|
let generation = self.generation.fetch_add(1, Ordering::Relaxed) + 1;
|
||||||
|
let hardswap = self.hardswap.load(Ordering::Relaxed);
|
||||||
|
|
||||||
|
if hardswap {
|
||||||
|
self.warmup_generation_for_all_dcs(rng, generation, &desired_by_dc)
|
||||||
|
.await;
|
||||||
|
} else {
|
||||||
|
self.reconcile_connections(rng).await;
|
||||||
|
}
|
||||||
|
|
||||||
|
let writers = self.writers.read().await;
|
||||||
|
let active_writer_addrs: HashSet<SocketAddr> = writers
|
||||||
|
.iter()
|
||||||
|
.filter(|w| !w.draining.load(Ordering::Relaxed))
|
||||||
|
.map(|w| w.addr)
|
||||||
|
.collect();
|
||||||
|
let min_ratio = Self::permille_to_ratio(
|
||||||
|
self.me_pool_min_fresh_ratio_permille
|
||||||
|
.load(Ordering::Relaxed),
|
||||||
|
);
|
||||||
|
let (coverage_ratio, missing_dc) = Self::coverage_ratio(&desired_by_dc, &active_writer_addrs);
|
||||||
|
if !hardswap && coverage_ratio < min_ratio {
|
||||||
|
warn!(
|
||||||
|
previous_generation,
|
||||||
|
generation,
|
||||||
|
coverage_ratio = format_args!("{coverage_ratio:.3}"),
|
||||||
|
min_ratio = format_args!("{min_ratio:.3}"),
|
||||||
|
missing_dc = ?missing_dc,
|
||||||
|
"ME reinit coverage below threshold; keeping stale writers"
|
||||||
|
);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
if hardswap {
|
||||||
|
let fresh_writer_addrs: HashSet<SocketAddr> = writers
|
||||||
|
.iter()
|
||||||
|
.filter(|w| !w.draining.load(Ordering::Relaxed))
|
||||||
|
.filter(|w| w.generation == generation)
|
||||||
|
.map(|w| w.addr)
|
||||||
|
.collect();
|
||||||
|
let (fresh_ratio, fresh_missing_dc) =
|
||||||
|
Self::coverage_ratio(&desired_by_dc, &fresh_writer_addrs);
|
||||||
|
if !fresh_missing_dc.is_empty() {
|
||||||
|
warn!(
|
||||||
|
previous_generation,
|
||||||
|
generation,
|
||||||
|
fresh_ratio = format_args!("{fresh_ratio:.3}"),
|
||||||
|
missing_dc = ?fresh_missing_dc,
|
||||||
|
"ME hardswap pending: fresh generation coverage incomplete"
|
||||||
|
);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
} else if !missing_dc.is_empty() {
|
||||||
|
warn!(
|
||||||
|
missing_dc = ?missing_dc,
|
||||||
|
// Keep stale writers alive when fresh coverage is incomplete.
|
||||||
|
"ME reinit coverage incomplete after map update; keeping stale writers"
|
||||||
|
);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
let desired_addrs: HashSet<SocketAddr> = desired_by_dc
|
||||||
|
.values()
|
||||||
|
.flat_map(|set| set.iter().copied())
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let stale_writer_ids: Vec<u64> = writers
|
||||||
|
.iter()
|
||||||
|
.filter(|w| !w.draining.load(Ordering::Relaxed))
|
||||||
|
.filter(|w| {
|
||||||
|
if hardswap {
|
||||||
|
w.generation < generation
|
||||||
|
} else {
|
||||||
|
!desired_addrs.contains(&w.addr)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
.map(|w| w.id)
|
||||||
|
.collect();
|
||||||
|
drop(writers);
|
||||||
|
|
||||||
|
if stale_writer_ids.is_empty() {
|
||||||
|
debug!("ME map update completed with no stale writers");
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
let drain_timeout = self.force_close_timeout();
|
||||||
|
let drain_timeout_secs = drain_timeout.map(|d| d.as_secs()).unwrap_or(0);
|
||||||
|
info!(
|
||||||
|
stale_writers = stale_writer_ids.len(),
|
||||||
|
previous_generation,
|
||||||
|
generation,
|
||||||
|
hardswap,
|
||||||
|
coverage_ratio = format_args!("{coverage_ratio:.3}"),
|
||||||
|
min_ratio = format_args!("{min_ratio:.3}"),
|
||||||
|
drain_timeout_secs,
|
||||||
|
"ME map update covered; draining stale writers"
|
||||||
|
);
|
||||||
|
self.stats.increment_pool_swap_total();
|
||||||
|
for writer_id in stale_writer_ids {
|
||||||
|
self.mark_writer_draining_with_timeout(writer_id, drain_timeout, !hardswap)
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
pub async fn update_proxy_maps(
|
pub async fn update_proxy_maps(
|
||||||
&self,
|
&self,
|
||||||
new_v4: HashMap<i32, Vec<(IpAddr, u16)>>,
|
new_v4: HashMap<i32, Vec<(IpAddr, u16)>>,
|
||||||
@@ -411,9 +673,12 @@ impl MePool {
|
|||||||
let hs = self.handshake_only(stream, addr, rng).await?;
|
let hs = self.handshake_only(stream, addr, rng).await?;
|
||||||
|
|
||||||
let writer_id = self.next_writer_id.fetch_add(1, Ordering::Relaxed);
|
let writer_id = self.next_writer_id.fetch_add(1, Ordering::Relaxed);
|
||||||
|
let generation = self.current_generation();
|
||||||
let cancel = CancellationToken::new();
|
let cancel = CancellationToken::new();
|
||||||
let degraded = Arc::new(AtomicBool::new(false));
|
let degraded = Arc::new(AtomicBool::new(false));
|
||||||
let draining = Arc::new(AtomicBool::new(false));
|
let draining = Arc::new(AtomicBool::new(false));
|
||||||
|
let draining_started_at_epoch_secs = Arc::new(AtomicU64::new(0));
|
||||||
|
let allow_drain_fallback = Arc::new(AtomicBool::new(false));
|
||||||
let (tx, mut rx) = mpsc::channel::<WriterCommand>(4096);
|
let (tx, mut rx) = mpsc::channel::<WriterCommand>(4096);
|
||||||
let mut rpc_writer = RpcWriter {
|
let mut rpc_writer = RpcWriter {
|
||||||
writer: hs.wr,
|
writer: hs.wr,
|
||||||
@@ -444,10 +709,13 @@ impl MePool {
|
|||||||
let writer = MeWriter {
|
let writer = MeWriter {
|
||||||
id: writer_id,
|
id: writer_id,
|
||||||
addr,
|
addr,
|
||||||
|
generation,
|
||||||
tx: tx.clone(),
|
tx: tx.clone(),
|
||||||
cancel: cancel.clone(),
|
cancel: cancel.clone(),
|
||||||
degraded: degraded.clone(),
|
degraded: degraded.clone(),
|
||||||
draining: draining.clone(),
|
draining: draining.clone(),
|
||||||
|
draining_started_at_epoch_secs: draining_started_at_epoch_secs.clone(),
|
||||||
|
allow_drain_fallback: allow_drain_fallback.clone(),
|
||||||
};
|
};
|
||||||
self.writers.write().await.push(writer.clone());
|
self.writers.write().await.push(writer.clone());
|
||||||
self.conn_count.fetch_add(1, Ordering::Relaxed);
|
self.conn_count.fetch_add(1, Ordering::Relaxed);
|
||||||
@@ -619,6 +887,9 @@ impl MePool {
|
|||||||
let mut ws = self.writers.write().await;
|
let mut ws = self.writers.write().await;
|
||||||
if let Some(pos) = ws.iter().position(|w| w.id == writer_id) {
|
if let Some(pos) = ws.iter().position(|w| w.id == writer_id) {
|
||||||
let w = ws.remove(pos);
|
let w = ws.remove(pos);
|
||||||
|
if w.draining.load(Ordering::Relaxed) {
|
||||||
|
self.stats.decrement_pool_drain_active();
|
||||||
|
}
|
||||||
w.cancel.cancel();
|
w.cancel.cancel();
|
||||||
close_tx = Some(w.tx.clone());
|
close_tx = Some(w.tx.clone());
|
||||||
self.conn_count.fetch_sub(1, Ordering::Relaxed);
|
self.conn_count.fetch_sub(1, Ordering::Relaxed);
|
||||||
@@ -631,23 +902,55 @@ impl MePool {
|
|||||||
self.registry.writer_lost(writer_id).await
|
self.registry.writer_lost(writer_id).await
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) async fn mark_writer_draining(self: &Arc<Self>, writer_id: u64) {
|
pub(crate) async fn mark_writer_draining_with_timeout(
|
||||||
{
|
self: &Arc<Self>,
|
||||||
|
writer_id: u64,
|
||||||
|
timeout: Option<Duration>,
|
||||||
|
allow_drain_fallback: bool,
|
||||||
|
) {
|
||||||
|
let timeout = timeout.filter(|d| !d.is_zero());
|
||||||
|
let found = {
|
||||||
let mut ws = self.writers.write().await;
|
let mut ws = self.writers.write().await;
|
||||||
if let Some(w) = ws.iter_mut().find(|w| w.id == writer_id) {
|
if let Some(w) = ws.iter_mut().find(|w| w.id == writer_id) {
|
||||||
|
let already_draining = w.draining.swap(true, Ordering::Relaxed);
|
||||||
|
w.allow_drain_fallback
|
||||||
|
.store(allow_drain_fallback, Ordering::Relaxed);
|
||||||
|
w.draining_started_at_epoch_secs
|
||||||
|
.store(Self::now_epoch_secs(), Ordering::Relaxed);
|
||||||
|
if !already_draining {
|
||||||
|
self.stats.increment_pool_drain_active();
|
||||||
|
}
|
||||||
w.draining.store(true, Ordering::Relaxed);
|
w.draining.store(true, Ordering::Relaxed);
|
||||||
|
true
|
||||||
|
} else {
|
||||||
|
false
|
||||||
}
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
if !found {
|
||||||
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
let timeout_secs = timeout.map(|d| d.as_secs()).unwrap_or(0);
|
||||||
|
debug!(
|
||||||
|
writer_id,
|
||||||
|
timeout_secs,
|
||||||
|
allow_drain_fallback,
|
||||||
|
"ME writer marked draining"
|
||||||
|
);
|
||||||
|
|
||||||
let pool = Arc::downgrade(self);
|
let pool = Arc::downgrade(self);
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
let deadline = Instant::now() + Duration::from_secs(300);
|
let deadline = timeout.map(|t| Instant::now() + t);
|
||||||
loop {
|
loop {
|
||||||
if let Some(p) = pool.upgrade() {
|
if let Some(p) = pool.upgrade() {
|
||||||
if Instant::now() >= deadline {
|
if let Some(deadline_at) = deadline {
|
||||||
warn!(writer_id, "Drain timeout, force-closing");
|
if Instant::now() >= deadline_at {
|
||||||
let _ = p.remove_writer_and_close_clients(writer_id).await;
|
warn!(writer_id, "Drain timeout, force-closing");
|
||||||
break;
|
p.stats.increment_pool_force_close_total();
|
||||||
|
let _ = p.remove_writer_and_close_clients(writer_id).await;
|
||||||
|
break;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if p.registry.is_writer_empty(writer_id).await {
|
if p.registry.is_writer_empty(writer_id).await {
|
||||||
let _ = p.remove_writer_only(writer_id).await;
|
let _ = p.remove_writer_only(writer_id).await;
|
||||||
@@ -661,6 +964,32 @@ impl MePool {
|
|||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub(crate) async fn mark_writer_draining(self: &Arc<Self>, writer_id: u64) {
|
||||||
|
self.mark_writer_draining_with_timeout(writer_id, Some(Duration::from_secs(300)), false)
|
||||||
|
.await;
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(super) fn writer_accepts_new_binding(&self, writer: &MeWriter) -> bool {
|
||||||
|
if !writer.draining.load(Ordering::Relaxed) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
if !writer.allow_drain_fallback.load(Ordering::Relaxed) {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
let ttl_secs = self.me_pool_drain_ttl_secs.load(Ordering::Relaxed);
|
||||||
|
if ttl_secs == 0 {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
let started = writer.draining_started_at_epoch_secs.load(Ordering::Relaxed);
|
||||||
|
if started == 0 {
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
Self::now_epoch_secs().saturating_sub(started) <= ttl_secs
|
||||||
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
fn hex_dump(data: &[u8]) -> String {
|
fn hex_dump(data: &[u8]) -> String {
|
||||||
|
|||||||
@@ -134,8 +134,8 @@ impl MePool {
|
|||||||
candidate_indices.sort_by_key(|idx| {
|
candidate_indices.sort_by_key(|idx| {
|
||||||
let w = &writers_snapshot[*idx];
|
let w = &writers_snapshot[*idx];
|
||||||
let degraded = w.degraded.load(Ordering::Relaxed);
|
let degraded = w.degraded.load(Ordering::Relaxed);
|
||||||
let draining = w.draining.load(Ordering::Relaxed);
|
let stale = (w.generation < self.current_generation()) as usize;
|
||||||
(draining as usize, degraded as usize)
|
(stale, degraded as usize)
|
||||||
});
|
});
|
||||||
|
|
||||||
let start = self.rr.fetch_add(1, Ordering::Relaxed) as usize % candidate_indices.len();
|
let start = self.rr.fetch_add(1, Ordering::Relaxed) as usize % candidate_indices.len();
|
||||||
@@ -143,13 +143,23 @@ impl MePool {
|
|||||||
for offset in 0..candidate_indices.len() {
|
for offset in 0..candidate_indices.len() {
|
||||||
let idx = candidate_indices[(start + offset) % candidate_indices.len()];
|
let idx = candidate_indices[(start + offset) % candidate_indices.len()];
|
||||||
let w = &writers_snapshot[idx];
|
let w = &writers_snapshot[idx];
|
||||||
if w.draining.load(Ordering::Relaxed) {
|
if !self.writer_accepts_new_binding(w) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
if w.tx.send(WriterCommand::Data(payload.clone())).await.is_ok() {
|
if w.tx.send(WriterCommand::Data(payload.clone())).await.is_ok() {
|
||||||
self.registry
|
self.registry
|
||||||
.bind_writer(conn_id, w.id, w.tx.clone(), meta.clone())
|
.bind_writer(conn_id, w.id, w.tx.clone(), meta.clone())
|
||||||
.await;
|
.await;
|
||||||
|
if w.generation < self.current_generation() {
|
||||||
|
self.stats.increment_pool_stale_pick_total();
|
||||||
|
debug!(
|
||||||
|
conn_id,
|
||||||
|
writer_id = w.id,
|
||||||
|
writer_generation = w.generation,
|
||||||
|
current_generation = self.current_generation(),
|
||||||
|
"Selected stale ME writer for fallback bind"
|
||||||
|
);
|
||||||
|
}
|
||||||
return Ok(());
|
return Ok(());
|
||||||
} else {
|
} else {
|
||||||
warn!(writer_id = w.id, "ME writer channel closed");
|
warn!(writer_id = w.id, "ME writer channel closed");
|
||||||
@@ -159,7 +169,7 @@ impl MePool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
let w = writers_snapshot[candidate_indices[start]].clone();
|
let w = writers_snapshot[candidate_indices[start]].clone();
|
||||||
if w.draining.load(Ordering::Relaxed) {
|
if !self.writer_accepts_new_binding(&w) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
match w.tx.send(WriterCommand::Data(payload.clone())).await {
|
match w.tx.send(WriterCommand::Data(payload.clone())).await {
|
||||||
@@ -167,6 +177,9 @@ impl MePool {
|
|||||||
self.registry
|
self.registry
|
||||||
.bind_writer(conn_id, w.id, w.tx.clone(), meta.clone())
|
.bind_writer(conn_id, w.id, w.tx.clone(), meta.clone())
|
||||||
.await;
|
.await;
|
||||||
|
if w.generation < self.current_generation() {
|
||||||
|
self.stats.increment_pool_stale_pick_total();
|
||||||
|
}
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
Err(_) => {
|
Err(_) => {
|
||||||
@@ -245,13 +258,13 @@ impl MePool {
|
|||||||
|
|
||||||
if preferred.is_empty() {
|
if preferred.is_empty() {
|
||||||
return (0..writers.len())
|
return (0..writers.len())
|
||||||
.filter(|i| !writers[*i].draining.load(Ordering::Relaxed))
|
.filter(|i| self.writer_accepts_new_binding(&writers[*i]))
|
||||||
.collect();
|
.collect();
|
||||||
}
|
}
|
||||||
|
|
||||||
let mut out = Vec::new();
|
let mut out = Vec::new();
|
||||||
for (idx, w) in writers.iter().enumerate() {
|
for (idx, w) in writers.iter().enumerate() {
|
||||||
if w.draining.load(Ordering::Relaxed) {
|
if !self.writer_accepts_new_binding(w) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
if preferred.iter().any(|p| *p == w.addr) {
|
if preferred.iter().any(|p| *p == w.addr) {
|
||||||
@@ -260,7 +273,7 @@ impl MePool {
|
|||||||
}
|
}
|
||||||
if out.is_empty() {
|
if out.is_empty() {
|
||||||
return (0..writers.len())
|
return (0..writers.len())
|
||||||
.filter(|i| !writers[*i].draining.load(Ordering::Relaxed))
|
.filter(|i| self.writer_accepts_new_binding(&writers[*i]))
|
||||||
.collect();
|
.collect();
|
||||||
}
|
}
|
||||||
out
|
out
|
||||||
|
|||||||
396
tools/tlsearch.py
Normal file
396
tools/tlsearch.py
Normal file
@@ -0,0 +1,396 @@
|
|||||||
|
#!/usr/bin/env python3
|
||||||
|
"""
|
||||||
|
TLS Profile Inspector
|
||||||
|
|
||||||
|
Usage:
|
||||||
|
python3 tools/tlsearch.py
|
||||||
|
python3 tools/tlsearch.py tlsfront
|
||||||
|
python3 tools/tlsearch.py tlsfront/petrovich.ru.json
|
||||||
|
python3 tools/tlsearch.py tlsfront --only-current
|
||||||
|
"""
|
||||||
|
|
||||||
|
from __future__ import annotations
|
||||||
|
|
||||||
|
import argparse
|
||||||
|
import datetime as dt
|
||||||
|
import json
|
||||||
|
from dataclasses import dataclass
|
||||||
|
from pathlib import Path
|
||||||
|
from typing import Any, Iterable
|
||||||
|
|
||||||
|
|
||||||
|
TLS_VERSIONS = {
|
||||||
|
0x0301: "TLS 1.0",
|
||||||
|
0x0302: "TLS 1.1",
|
||||||
|
0x0303: "TLS 1.2",
|
||||||
|
0x0304: "TLS 1.3",
|
||||||
|
}
|
||||||
|
|
||||||
|
EXT_NAMES = {
|
||||||
|
0: "server_name",
|
||||||
|
5: "status_request",
|
||||||
|
10: "supported_groups",
|
||||||
|
11: "ec_point_formats",
|
||||||
|
13: "signature_algorithms",
|
||||||
|
16: "alpn",
|
||||||
|
18: "signed_certificate_timestamp",
|
||||||
|
21: "padding",
|
||||||
|
23: "extended_master_secret",
|
||||||
|
35: "session_ticket",
|
||||||
|
43: "supported_versions",
|
||||||
|
45: "psk_key_exchange_modes",
|
||||||
|
51: "key_share",
|
||||||
|
}
|
||||||
|
|
||||||
|
CIPHER_NAMES = {
|
||||||
|
0x1301: "TLS_AES_128_GCM_SHA256",
|
||||||
|
0x1302: "TLS_AES_256_GCM_SHA384",
|
||||||
|
0x1303: "TLS_CHACHA20_POLY1305_SHA256",
|
||||||
|
0x1304: "TLS_AES_128_CCM_SHA256",
|
||||||
|
0x1305: "TLS_AES_128_CCM_8_SHA256",
|
||||||
|
0x009C: "TLS_RSA_WITH_AES_128_GCM_SHA256",
|
||||||
|
0x009D: "TLS_RSA_WITH_AES_256_GCM_SHA384",
|
||||||
|
0xC02F: "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256",
|
||||||
|
0xC030: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
|
||||||
|
0xCCA8: "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256",
|
||||||
|
0xCCA9: "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
|
||||||
|
}
|
||||||
|
|
||||||
|
NAMED_GROUPS = {
|
||||||
|
0x001D: "x25519",
|
||||||
|
0x0017: "secp256r1",
|
||||||
|
0x0018: "secp384r1",
|
||||||
|
0x0019: "secp521r1",
|
||||||
|
0x0100: "ffdhe2048",
|
||||||
|
0x0101: "ffdhe3072",
|
||||||
|
0x0102: "ffdhe4096",
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class ProfileRecognition:
|
||||||
|
schema: str
|
||||||
|
mode: str
|
||||||
|
has_cert_info: bool
|
||||||
|
has_full_cert_payload: bool
|
||||||
|
cert_message_len: int
|
||||||
|
cert_chain_count: int
|
||||||
|
cert_chain_total_len: int
|
||||||
|
issues: list[str]
|
||||||
|
|
||||||
|
|
||||||
|
def to_hex(data: Iterable[int]) -> str:
|
||||||
|
return "".join(f"{b:02x}" for b in data)
|
||||||
|
|
||||||
|
|
||||||
|
def read_u16be(data: list[int], off: int = 0) -> int:
|
||||||
|
return (data[off] << 8) | data[off + 1]
|
||||||
|
|
||||||
|
|
||||||
|
def normalize_u8_list(value: Any) -> list[int]:
|
||||||
|
if not isinstance(value, list):
|
||||||
|
return []
|
||||||
|
out: list[int] = []
|
||||||
|
for item in value:
|
||||||
|
if isinstance(item, int) and 0 <= item <= 0xFF:
|
||||||
|
out.append(item)
|
||||||
|
else:
|
||||||
|
return []
|
||||||
|
return out
|
||||||
|
|
||||||
|
|
||||||
|
def as_dict(value: Any) -> dict[str, Any]:
|
||||||
|
return value if isinstance(value, dict) else {}
|
||||||
|
|
||||||
|
|
||||||
|
def as_int(value: Any, default: int = 0) -> int:
|
||||||
|
return value if isinstance(value, int) else default
|
||||||
|
|
||||||
|
|
||||||
|
def decode_version_pair(v: list[int]) -> str:
|
||||||
|
if len(v) != 2:
|
||||||
|
return f"invalid({v})"
|
||||||
|
ver = read_u16be(v)
|
||||||
|
return f"0x{ver:04x} ({TLS_VERSIONS.get(ver, 'unknown')})"
|
||||||
|
|
||||||
|
|
||||||
|
def decode_cipher_suite(v: list[int]) -> str:
|
||||||
|
if len(v) != 2:
|
||||||
|
return f"invalid({v})"
|
||||||
|
cs = read_u16be(v)
|
||||||
|
name = CIPHER_NAMES.get(cs, "unknown")
|
||||||
|
return f"0x{cs:04x} ({name})"
|
||||||
|
|
||||||
|
|
||||||
|
def decode_supported_versions(data: list[int]) -> str:
|
||||||
|
if len(data) == 2:
|
||||||
|
ver = read_u16be(data)
|
||||||
|
return f"selected=0x{ver:04x} ({TLS_VERSIONS.get(ver, 'unknown')})"
|
||||||
|
if not data:
|
||||||
|
return "empty"
|
||||||
|
if len(data) < 3:
|
||||||
|
return f"raw={to_hex(data)}"
|
||||||
|
vec_len = data[0]
|
||||||
|
versions: list[str] = []
|
||||||
|
for i in range(1, min(1 + vec_len, len(data)), 2):
|
||||||
|
if i + 1 >= len(data):
|
||||||
|
break
|
||||||
|
ver = read_u16be(data, i)
|
||||||
|
versions.append(f"0x{ver:04x}({TLS_VERSIONS.get(ver, 'unknown')})")
|
||||||
|
return "offered=[" + ", ".join(versions) + "]"
|
||||||
|
|
||||||
|
|
||||||
|
def decode_key_share(data: list[int]) -> str:
|
||||||
|
if len(data) < 4:
|
||||||
|
return f"raw={to_hex(data)}"
|
||||||
|
group = read_u16be(data, 0)
|
||||||
|
key_len = read_u16be(data, 2)
|
||||||
|
key_hex = to_hex(data[4 : 4 + min(key_len, len(data) - 4)])
|
||||||
|
gname = NAMED_GROUPS.get(group, "unknown_group")
|
||||||
|
return f"group=0x{group:04x}({gname}), key_len={key_len}, key={key_hex}"
|
||||||
|
|
||||||
|
|
||||||
|
def decode_alpn(data: list[int]) -> str:
|
||||||
|
if len(data) < 3:
|
||||||
|
return f"raw={to_hex(data)}"
|
||||||
|
total = read_u16be(data, 0)
|
||||||
|
pos = 2
|
||||||
|
vals: list[str] = []
|
||||||
|
limit = min(len(data), 2 + total)
|
||||||
|
while pos < limit:
|
||||||
|
ln = data[pos]
|
||||||
|
pos += 1
|
||||||
|
if pos + ln > limit:
|
||||||
|
break
|
||||||
|
raw = bytes(data[pos : pos + ln])
|
||||||
|
pos += ln
|
||||||
|
try:
|
||||||
|
vals.append(raw.decode("ascii"))
|
||||||
|
except UnicodeDecodeError:
|
||||||
|
vals.append(raw.hex())
|
||||||
|
return "protocols=[" + ", ".join(vals) + "]"
|
||||||
|
|
||||||
|
|
||||||
|
def decode_extension(ext_type: int, data: list[int]) -> str:
|
||||||
|
if ext_type == 43:
|
||||||
|
return decode_supported_versions(data)
|
||||||
|
if ext_type == 51:
|
||||||
|
return decode_key_share(data)
|
||||||
|
if ext_type == 16:
|
||||||
|
return decode_alpn(data)
|
||||||
|
return f"raw={to_hex(data)}"
|
||||||
|
|
||||||
|
|
||||||
|
def ts_to_iso(ts: Any) -> str:
|
||||||
|
if not isinstance(ts, int):
|
||||||
|
return "-"
|
||||||
|
return dt.datetime.fromtimestamp(ts, tz=dt.timezone.utc).isoformat()
|
||||||
|
|
||||||
|
|
||||||
|
def recognize_profile(obj: dict[str, Any]) -> ProfileRecognition:
|
||||||
|
issues: list[str] = []
|
||||||
|
|
||||||
|
sh = as_dict(obj.get("server_hello_template"))
|
||||||
|
if not sh:
|
||||||
|
issues.append("missing server_hello_template")
|
||||||
|
|
||||||
|
version = normalize_u8_list(sh.get("version"))
|
||||||
|
if version and len(version) != 2:
|
||||||
|
issues.append("server_hello_template.version must have 2 bytes")
|
||||||
|
|
||||||
|
app_sizes = obj.get("app_data_records_sizes")
|
||||||
|
if not isinstance(app_sizes, list) or not app_sizes:
|
||||||
|
issues.append("missing app_data_records_sizes")
|
||||||
|
elif any((not isinstance(v, int) or v <= 0) for v in app_sizes):
|
||||||
|
issues.append("app_data_records_sizes contains invalid values")
|
||||||
|
|
||||||
|
if not isinstance(obj.get("total_app_data_len"), int):
|
||||||
|
issues.append("missing total_app_data_len")
|
||||||
|
|
||||||
|
cert_info = as_dict(obj.get("cert_info"))
|
||||||
|
has_cert_info = bool(
|
||||||
|
cert_info.get("subject_cn")
|
||||||
|
or cert_info.get("issuer_cn")
|
||||||
|
or cert_info.get("san_names")
|
||||||
|
or isinstance(cert_info.get("not_before_unix"), int)
|
||||||
|
or isinstance(cert_info.get("not_after_unix"), int)
|
||||||
|
)
|
||||||
|
|
||||||
|
cert_payload = as_dict(obj.get("cert_payload"))
|
||||||
|
cert_message_len = 0
|
||||||
|
cert_chain_count = 0
|
||||||
|
cert_chain_total_len = 0
|
||||||
|
has_full_cert_payload = False
|
||||||
|
|
||||||
|
if cert_payload:
|
||||||
|
cert_msg = normalize_u8_list(cert_payload.get("certificate_message"))
|
||||||
|
if not cert_msg:
|
||||||
|
issues.append("cert_payload.certificate_message is missing or invalid")
|
||||||
|
else:
|
||||||
|
cert_message_len = len(cert_msg)
|
||||||
|
|
||||||
|
chain_raw = cert_payload.get("cert_chain_der")
|
||||||
|
if not isinstance(chain_raw, list):
|
||||||
|
issues.append("cert_payload.cert_chain_der is missing or invalid")
|
||||||
|
else:
|
||||||
|
for entry in chain_raw:
|
||||||
|
cert = normalize_u8_list(entry)
|
||||||
|
if cert:
|
||||||
|
cert_chain_count += 1
|
||||||
|
cert_chain_total_len += len(cert)
|
||||||
|
else:
|
||||||
|
issues.append("cert_payload.cert_chain_der has invalid certificate entry")
|
||||||
|
break
|
||||||
|
|
||||||
|
has_full_cert_payload = cert_message_len > 0 and cert_chain_count > 0
|
||||||
|
elif obj.get("cert_payload") is not None:
|
||||||
|
issues.append("cert_payload is not an object")
|
||||||
|
|
||||||
|
if has_full_cert_payload:
|
||||||
|
schema = "current"
|
||||||
|
mode = "full-cert-payload"
|
||||||
|
elif has_cert_info:
|
||||||
|
schema = "current-compact"
|
||||||
|
mode = "compact-cert-info"
|
||||||
|
else:
|
||||||
|
schema = "legacy"
|
||||||
|
mode = "random-fallback"
|
||||||
|
|
||||||
|
if issues:
|
||||||
|
schema = f"{schema}+issues"
|
||||||
|
|
||||||
|
return ProfileRecognition(
|
||||||
|
schema=schema,
|
||||||
|
mode=mode,
|
||||||
|
has_cert_info=has_cert_info,
|
||||||
|
has_full_cert_payload=has_full_cert_payload,
|
||||||
|
cert_message_len=cert_message_len,
|
||||||
|
cert_chain_count=cert_chain_count,
|
||||||
|
cert_chain_total_len=cert_chain_total_len,
|
||||||
|
issues=issues,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def decode_profile(path: Path) -> tuple[str, ProfileRecognition]:
|
||||||
|
obj: dict[str, Any] = json.loads(path.read_text(encoding="utf-8"))
|
||||||
|
recognition = recognize_profile(obj)
|
||||||
|
|
||||||
|
sh = as_dict(obj.get("server_hello_template"))
|
||||||
|
version = normalize_u8_list(sh.get("version"))
|
||||||
|
cipher = normalize_u8_list(sh.get("cipher_suite"))
|
||||||
|
random_bytes = normalize_u8_list(sh.get("random"))
|
||||||
|
session_id = normalize_u8_list(sh.get("session_id"))
|
||||||
|
|
||||||
|
lines: list[str] = []
|
||||||
|
lines.append(f"[{path.name}]")
|
||||||
|
lines.append(f" domain: {obj.get('domain', '-')}")
|
||||||
|
lines.append(f" profile.schema: {recognition.schema}")
|
||||||
|
lines.append(f" profile.mode: {recognition.mode}")
|
||||||
|
lines.append(f" profile.has_full_cert_payload: {recognition.has_full_cert_payload}")
|
||||||
|
lines.append(f" profile.has_cert_info: {recognition.has_cert_info}")
|
||||||
|
if recognition.has_full_cert_payload:
|
||||||
|
lines.append(f" profile.cert_message_len: {recognition.cert_message_len}")
|
||||||
|
lines.append(f" profile.cert_chain_count: {recognition.cert_chain_count}")
|
||||||
|
lines.append(f" profile.cert_chain_total_len: {recognition.cert_chain_total_len}")
|
||||||
|
if recognition.issues:
|
||||||
|
lines.append(" profile.issues:")
|
||||||
|
for issue in recognition.issues:
|
||||||
|
lines.append(f" - {issue}")
|
||||||
|
|
||||||
|
lines.append(f" tls.version: {decode_version_pair(version)}")
|
||||||
|
lines.append(f" tls.cipher: {decode_cipher_suite(cipher)}")
|
||||||
|
lines.append(f" tls.compression: {sh.get('compression', '-')}")
|
||||||
|
lines.append(f" tls.random: {to_hex(random_bytes)}")
|
||||||
|
lines.append(f" tls.session_id_len: {len(session_id)}")
|
||||||
|
if session_id:
|
||||||
|
lines.append(f" tls.session_id: {to_hex(session_id)}")
|
||||||
|
|
||||||
|
app_sizes = obj.get("app_data_records_sizes", [])
|
||||||
|
if isinstance(app_sizes, list):
|
||||||
|
lines.append(" app_data_records_sizes: " + ", ".join(str(v) for v in app_sizes))
|
||||||
|
else:
|
||||||
|
lines.append(" app_data_records_sizes: -")
|
||||||
|
lines.append(f" total_app_data_len: {obj.get('total_app_data_len', '-')}")
|
||||||
|
|
||||||
|
cert = as_dict(obj.get("cert_info"))
|
||||||
|
if cert:
|
||||||
|
lines.append(" cert_info:")
|
||||||
|
lines.append(f" subject_cn: {cert.get('subject_cn') or '-'}")
|
||||||
|
lines.append(f" issuer_cn: {cert.get('issuer_cn') or '-'}")
|
||||||
|
lines.append(f" not_before: {ts_to_iso(cert.get('not_before_unix'))}")
|
||||||
|
lines.append(f" not_after: {ts_to_iso(cert.get('not_after_unix'))}")
|
||||||
|
sans = cert.get("san_names")
|
||||||
|
if isinstance(sans, list) and sans:
|
||||||
|
lines.append(" san_names: " + ", ".join(str(v) for v in sans))
|
||||||
|
else:
|
||||||
|
lines.append(" san_names: -")
|
||||||
|
else:
|
||||||
|
lines.append(" cert_info: -")
|
||||||
|
|
||||||
|
exts = sh.get("extensions", [])
|
||||||
|
if not isinstance(exts, list):
|
||||||
|
exts = []
|
||||||
|
lines.append(f" extensions[{len(exts)}]:")
|
||||||
|
for ext in exts:
|
||||||
|
ext_obj = as_dict(ext)
|
||||||
|
ext_type = as_int(ext_obj.get("ext_type"), -1)
|
||||||
|
data = normalize_u8_list(ext_obj.get("data"))
|
||||||
|
name = EXT_NAMES.get(ext_type, "unknown")
|
||||||
|
decoded = decode_extension(ext_type, data)
|
||||||
|
lines.append(f" - type={ext_type} ({name}), len={len(data)}: {decoded}")
|
||||||
|
|
||||||
|
lines.append("")
|
||||||
|
return ("\n".join(lines), recognition)
|
||||||
|
|
||||||
|
|
||||||
|
def collect_files(input_path: Path) -> list[Path]:
    """Resolve *input_path* to the list of JSON files to decode.

    A regular file is returned as a one-element list (regardless of its
    extension); otherwise the path is treated as a directory and every
    top-level ``*.json`` regular file inside it is returned in sorted order.
    """
    if not input_path.is_file():
        candidates = input_path.glob("*.json")
        return sorted(entry for entry in candidates if entry.is_file())
    return [input_path]
|
||||||
|
|
||||||
|
|
||||||
|
def main() -> int:
    """Decode TLS profile JSON files named on the command line.

    Returns 0 on success; 1 when the given path does not exist or holds no
    JSON files. Per-file decode failures are reported and skipped rather
    than aborting the run.
    """
    arg_parser = argparse.ArgumentParser(
        description="Decode TLS profile JSON files and recognize current schema."
    )
    arg_parser.add_argument(
        "path",
        nargs="?",
        default="tlsfront",
        help="Path to tlsfront directory or a single JSON file.",
    )
    arg_parser.add_argument(
        "--only-current",
        action="store_true",
        help="Show only profiles recognized as current/full-cert-payload.",
    )
    opts = arg_parser.parse_args()

    root = Path(opts.path)
    if not root.exists():
        print(f"Path not found: {root}")
        return 1

    candidates = collect_files(root)
    if not candidates:
        print(f"No JSON files found in: {root}")
        return 1

    shown = 0
    for profile_path in candidates:
        try:
            rendered, recognition = decode_profile(profile_path)
            # Filtering happens after decoding so schema recognition is
            # available; non-matching profiles are silently skipped.
            if opts.only_current and recognition.schema != "current":
                continue
            print(rendered, end="")
            shown += 1
        except Exception as e:  # noqa: BLE001
            print(f"[{profile_path.name}] decode error: {e}\n")

    if opts.only_current and shown == 0:
        print("No current profiles found.")
    return 0
|
||||||
|
|
||||||
|
|
||||||
|
# CLI entry point: exit the process with main()'s integer return code
# when the file is executed as a script.
if __name__ == "__main__":
    raise SystemExit(main())
|
||||||
276
tools/zbx_telemt_template.yaml
Normal file
276
tools/zbx_telemt_template.yaml
Normal file
@@ -0,0 +1,276 @@
|
|||||||
|
zabbix_export:
|
||||||
|
version: '7.0'
|
||||||
|
template_groups:
|
||||||
|
- uuid: 43d0fe04c7094000829b0d28c6e3470c
|
||||||
|
name: 'Custom Templates'
|
||||||
|
templates:
|
||||||
|
- uuid: f2a694213c3d49d88cc03bffb111429e
|
||||||
|
template: Telemt
|
||||||
|
name: Telemt
|
||||||
|
description: |
|
||||||
|
A simple template using Prometheus metrics.
|
||||||
|
Set the {$TELEMT_URL} macro with the metrics URL
|
||||||
|
groups:
|
||||||
|
- name: 'Custom Templates'
|
||||||
|
items:
|
||||||
|
- uuid: fb95391c7f894e3eb6984b92885813a2
|
||||||
|
name: 'Connections bad total'
|
||||||
|
type: DEPENDENT
|
||||||
|
key: telemt.conn_bad_total
|
||||||
|
delay: '0'
|
||||||
|
trends: '0'
|
||||||
|
preprocessing:
|
||||||
|
- type: PROMETHEUS_PATTERN
|
||||||
|
parameters:
|
||||||
|
- telemt_connections_bad_total
|
||||||
|
- value
|
||||||
|
- ''
|
||||||
|
master_item:
|
||||||
|
key: telemt.prom_metrics
|
||||||
|
tags:
|
||||||
|
- tag: Application
|
||||||
|
value: 'Server connections'
|
||||||
|
- uuid: f36c9632394a4af3853583857ca8dbf1
|
||||||
|
name: 'Connections total'
|
||||||
|
type: DEPENDENT
|
||||||
|
key: telemt.conn_total
|
||||||
|
delay: '0'
|
||||||
|
trends: '0'
|
||||||
|
preprocessing:
|
||||||
|
- type: PROMETHEUS_PATTERN
|
||||||
|
parameters:
|
||||||
|
- telemt_connections_total
|
||||||
|
- value
|
||||||
|
- ''
|
||||||
|
master_item:
|
||||||
|
key: telemt.prom_metrics
|
||||||
|
tags:
|
||||||
|
- tag: Application
|
||||||
|
value: 'Server connections'
|
||||||
|
- uuid: 1618272cf68e44509425f5fab029db7b
|
||||||
|
name: 'Handshake timeouts total'
|
||||||
|
type: DEPENDENT
|
||||||
|
key: telemt.handshake_timeouts_total
|
||||||
|
delay: '0'
|
||||||
|
trends: '0'
|
||||||
|
preprocessing:
|
||||||
|
- type: PROMETHEUS_PATTERN
|
||||||
|
parameters:
|
||||||
|
- telemt_handshake_timeouts_total
|
||||||
|
- value
|
||||||
|
- ''
|
||||||
|
master_item:
|
||||||
|
key: telemt.prom_metrics
|
||||||
|
tags:
|
||||||
|
- tag: Application
|
||||||
|
value: 'Server connections'
|
||||||
|
- uuid: fb95391c7f894e3eb6984b92885813d2
|
||||||
|
name: 'ME keepalive send failures'
|
||||||
|
type: DEPENDENT
|
||||||
|
key: telemt.me_keepalive_failed_total
|
||||||
|
delay: '0'
|
||||||
|
trends: '0'
|
||||||
|
preprocessing:
|
||||||
|
- type: PROMETHEUS_PATTERN
|
||||||
|
parameters:
|
||||||
|
- telemt_me_keepalive_failed_total
|
||||||
|
- value
|
||||||
|
- ''
|
||||||
|
master_item:
|
||||||
|
key: telemt.prom_metrics
|
||||||
|
tags:
|
||||||
|
- tag: Application
|
||||||
|
value: 'Middle-End connections'
|
||||||
|
- uuid: fb95391c7f894e3eb6984b92885813c2
|
||||||
|
name: 'ME keepalive frames sent'
|
||||||
|
type: DEPENDENT
|
||||||
|
key: telemt.me_keepalive_sent_total
|
||||||
|
delay: '0'
|
||||||
|
trends: '0'
|
||||||
|
preprocessing:
|
||||||
|
- type: PROMETHEUS_PATTERN
|
||||||
|
parameters:
|
||||||
|
- telemt_me_keepalive_sent_total
|
||||||
|
- value
|
||||||
|
- ''
|
||||||
|
master_item:
|
||||||
|
key: telemt.prom_metrics
|
||||||
|
tags:
|
||||||
|
- tag: Application
|
||||||
|
value: 'Middle-End connections'
|
||||||
|
- uuid: fb95391c7f894e3eb6984b92885811a2
|
||||||
|
name: 'ME reconnect attempts'
|
||||||
|
type: DEPENDENT
|
||||||
|
key: telemt.me_reconnect_attempts_total
|
||||||
|
delay: '0'
|
||||||
|
trends: '0'
|
||||||
|
preprocessing:
|
||||||
|
- type: PROMETHEUS_PATTERN
|
||||||
|
parameters:
|
||||||
|
- telemt_me_reconnect_attempts_total
|
||||||
|
- value
|
||||||
|
- ''
|
||||||
|
master_item:
|
||||||
|
key: telemt.prom_metrics
|
||||||
|
tags:
|
||||||
|
- tag: Application
|
||||||
|
value: 'Middle-End connections'
|
||||||
|
- uuid: fb95391c7f894e3eb6984b92885812a2
|
||||||
|
name: 'ME reconnect successes'
|
||||||
|
type: DEPENDENT
|
||||||
|
key: telemt.me_reconnect_success_total
|
||||||
|
delay: '0'
|
||||||
|
trends: '0'
|
||||||
|
preprocessing:
|
||||||
|
- type: PROMETHEUS_PATTERN
|
||||||
|
parameters:
|
||||||
|
- telemt_me_reconnect_success_total
|
||||||
|
- value
|
||||||
|
- ''
|
||||||
|
master_item:
|
||||||
|
key: telemt.prom_metrics
|
||||||
|
tags:
|
||||||
|
- tag: Application
|
||||||
|
value: 'Middle-End connections'
|
||||||
|
- uuid: 991b1858e3f94b3098ff0f84859efc41
|
||||||
|
name: 'Prometheus metrics'
|
||||||
|
type: HTTP_AGENT
|
||||||
|
key: telemt.prom_metrics
|
||||||
|
value_type: TEXT
|
||||||
|
trends: '0'
|
||||||
|
url: '{$TELEMT_URL}'
|
||||||
|
- uuid: fb95391c7f894e3eb6984b92885813b2
|
||||||
|
name: 'Telemt Uptime'
|
||||||
|
type: DEPENDENT
|
||||||
|
key: telemt.uptime
|
||||||
|
delay: '0'
|
||||||
|
trends: '0'
|
||||||
|
units: s
|
||||||
|
preprocessing:
|
||||||
|
- type: PROMETHEUS_PATTERN
|
||||||
|
parameters:
|
||||||
|
- telemt_uptime_seconds
|
||||||
|
- value
|
||||||
|
- ''
|
||||||
|
master_item:
|
||||||
|
key: telemt.prom_metrics
|
||||||
|
tags:
|
||||||
|
- tag: Application
|
||||||
|
value: 'Server connections'
|
||||||
|
discovery_rules:
|
||||||
|
- uuid: 22727585c14049fbb0863c15dd68634c
|
||||||
|
name: 'Get users'
|
||||||
|
type: DEPENDENT
|
||||||
|
key: telemt.users
|
||||||
|
delay: '0'
|
||||||
|
item_prototypes:
|
||||||
|
- uuid: 137e371a47714a21b5c0c89d535dd717
|
||||||
|
name: 'Active connections by {#TELEMT_USER}'
|
||||||
|
type: DEPENDENT
|
||||||
|
key: 'telemt.active_conn_[{#TELEMT_USER}]'
|
||||||
|
delay: '0'
|
||||||
|
preprocessing:
|
||||||
|
- type: PROMETHEUS_PATTERN
|
||||||
|
parameters:
|
||||||
|
- 'telemt_user_connections_current{user=~"{#TELEMT_USER}"}'
|
||||||
|
- value
|
||||||
|
- ''
|
||||||
|
master_item:
|
||||||
|
key: telemt.prom_metrics
|
||||||
|
tags:
|
||||||
|
- tag: Application
|
||||||
|
value: 'Users connections'
|
||||||
|
- uuid: 3ccce91ab5d54b4d972280c7b7bda910
|
||||||
|
name: 'Messages received from {#TELEMT_USER}'
|
||||||
|
type: DEPENDENT
|
||||||
|
key: 'telemt.msgs_from_[{#TELEMT_USER}]'
|
||||||
|
delay: '0'
|
||||||
|
preprocessing:
|
||||||
|
- type: PROMETHEUS_PATTERN
|
||||||
|
parameters:
|
||||||
|
- 'telemt_user_msgs_from_client{user=~"{#TELEMT_USER}"}'
|
||||||
|
- value
|
||||||
|
- ''
|
||||||
|
master_item:
|
||||||
|
key: telemt.prom_metrics
|
||||||
|
tags:
|
||||||
|
- tag: Application
|
||||||
|
value: 'Users connections'
|
||||||
|
- uuid: e539126215f2419bbfd0d8099aabe1cb
|
||||||
|
name: 'Messages sent to {#TELEMT_USER}'
|
||||||
|
type: DEPENDENT
|
||||||
|
key: 'telemt.msgs_to_[{#TELEMT_USER}]'
|
||||||
|
delay: '0'
|
||||||
|
preprocessing:
|
||||||
|
- type: PROMETHEUS_PATTERN
|
||||||
|
parameters:
|
||||||
|
- 'telemt_user_msgs_to_client{user=~"{#TELEMT_USER}"}'
|
||||||
|
- value
|
||||||
|
- ''
|
||||||
|
master_item:
|
||||||
|
key: telemt.prom_metrics
|
||||||
|
tags:
|
||||||
|
- tag: Application
|
||||||
|
value: 'Users connections'
|
||||||
|
- uuid: 810a8f6346a44ae7bd79a357dbfe2b3c
|
||||||
|
name: 'Bytes received from {#TELEMT_USER}'
|
||||||
|
type: DEPENDENT
|
||||||
|
key: 'telemt.octets_from_[{#TELEMT_USER}]'
|
||||||
|
delay: '0'
|
||||||
|
units: B
|
||||||
|
preprocessing:
|
||||||
|
- type: PROMETHEUS_PATTERN
|
||||||
|
parameters:
|
||||||
|
- 'telemt_user_octets_from_client{user=~"{#TELEMT_USER}"}'
|
||||||
|
- value
|
||||||
|
- ''
|
||||||
|
master_item:
|
||||||
|
key: telemt.prom_metrics
|
||||||
|
tags:
|
||||||
|
- tag: Application
|
||||||
|
value: 'Users connections'
|
||||||
|
- uuid: d0cc3b4d618b4f0d97f8127b51f872c8
|
||||||
|
name: 'Bytes sent to {#TELEMT_USER}'
|
||||||
|
type: DEPENDENT
|
||||||
|
key: 'telemt.octets_to_[{#TELEMT_USER}]'
|
||||||
|
delay: '0'
|
||||||
|
units: B
|
||||||
|
preprocessing:
|
||||||
|
- type: PROMETHEUS_PATTERN
|
||||||
|
parameters:
|
||||||
|
- 'telemt_user_octets_to_client{user=~"{#TELEMT_USER}"}'
|
||||||
|
- value
|
||||||
|
- ''
|
||||||
|
master_item:
|
||||||
|
key: telemt.prom_metrics
|
||||||
|
tags:
|
||||||
|
- tag: Application
|
||||||
|
value: 'Users connections'
|
||||||
|
- uuid: e9735aef967b4af28ed59f6c76ad493d
|
||||||
|
name: 'Total connections by {#TELEMT_USER}'
|
||||||
|
type: DEPENDENT
|
||||||
|
key: 'telemt.total_conn_[{#TELEMT_USER}]'
|
||||||
|
delay: '0'
|
||||||
|
preprocessing:
|
||||||
|
- type: PROMETHEUS_PATTERN
|
||||||
|
parameters:
|
||||||
|
- 'telemt_user_connections_total{user=~"{#TELEMT_USER}"}'
|
||||||
|
- value
|
||||||
|
- ''
|
||||||
|
master_item:
|
||||||
|
key: telemt.prom_metrics
|
||||||
|
tags:
|
||||||
|
- tag: Application
|
||||||
|
value: 'Users connections'
|
||||||
|
master_item:
|
||||||
|
key: telemt.prom_metrics
|
||||||
|
lld_macro_paths:
|
||||||
|
- lld_macro: '{#TELEMT_USER}'
|
||||||
|
path: '$.labels[''user'']'
|
||||||
|
preprocessing:
|
||||||
|
- type: PROMETHEUS_TO_JSON
|
||||||
|
parameters:
|
||||||
|
- ''
|
||||||
|
tags:
|
||||||
|
- tag: target
|
||||||
|
value: Telemt
|
||||||
Reference in New Issue
Block a user