mirror of https://github.com/telemt/telemt.git
Compare commits
210 Commits
| Author | SHA1 | Date |
|---|---|---|
|
|
23156a840d | |
|
|
cf9d4b2c61 | |
|
|
63cfc067f6 | |
|
|
5863b33b81 | |
|
|
7ce87749c0 | |
|
|
bc691539a1 | |
|
|
2162a63e3e | |
|
|
4a77335ba9 | |
|
|
ba29b66c4c | |
|
|
e8cf97095f | |
|
|
ee4264af50 | |
|
|
59c2476650 | |
|
|
89d6be267d | |
|
|
3b717c75da | |
|
|
3af7673342 | |
|
|
ad2057ad44 | |
|
|
f8cfd4f0bc | |
|
|
5cbcfb2a91 | |
|
|
aec2c23a0c | |
|
|
f5e63ab145 | |
|
|
12f99eebab | |
|
|
bc3ad02a20 | |
|
|
14674bd4e6 | |
|
|
a36c7b3f66 | |
|
|
d848e4a729 | |
|
|
8d865a980c | |
|
|
f829439e8f | |
|
|
a14f8b14d2 | |
|
|
31af2da4d5 | |
|
|
ac2b88d6ea | |
|
|
4a3ef62494 | |
|
|
6996d6e597 | |
|
|
b3f11624c9 | |
|
|
13dc1f70bf | |
|
|
b88457b9bc | |
|
|
d176766db2 | |
|
|
fa4e2000a8 | |
|
|
4d87a790cc | |
|
|
07fed8f871 | |
|
|
407d686d49 | |
|
|
eac5cc81fb | |
|
|
c51d16f403 | |
|
|
b5146bba94 | |
|
|
5ed525fa48 | |
|
|
9f7c1693ce | |
|
|
1524396e10 | |
|
|
e630ea0045 | |
|
|
4574e423c6 | |
|
|
5f5582865e | |
|
|
1f54e4a203 | |
|
|
defa37da05 | |
|
|
5fd058b6fd | |
|
|
977ee53b72 | |
|
|
5b11522620 | |
|
|
8fe6fcb7eb | |
|
|
486e439ae6 | |
|
|
444a20672d | |
|
|
8e7b27a16d | |
|
|
7f0057acd7 | |
|
|
7fe38f1b9f | |
|
|
c2f16a343a | |
|
|
9b64d2ee17 | |
|
|
6ea867ce36 | |
|
|
d673935b6d | |
|
|
363b5014f7 | |
|
|
bb6237151c | |
|
|
873618ce53 | |
|
|
a9f695623d | |
|
|
5c29870632 | |
|
|
f6704d7d65 | |
|
|
3d20002e56 | |
|
|
8fcd0fa950 | |
|
|
645e968778 | |
|
|
b46216d357 | |
|
|
8ac1a0017d | |
|
|
3df274caa6 | |
|
|
780546a680 | |
|
|
729ffa0fcd | |
|
|
e594d6f079 | |
|
|
ecd6a19246 | |
|
|
2df6b8704d | |
|
|
5f5a046710 | |
|
|
2dc81ad0e0 | |
|
|
d8d8534cf8 | |
|
|
6c850e4150 | |
|
|
b8cf596e7d | |
|
|
5bf56b6dd8 | |
|
|
65da1f91ec | |
|
|
f3e9d00132 | |
|
|
dee6e13fef | |
|
|
07d774a82a | |
|
|
618bc7e0b6 | |
|
|
d06ac222d6 | |
|
|
567453e0f8 | |
|
|
cba837745b | |
|
|
876c8f1612 | |
|
|
ac8ad864be | |
|
|
fe56dc7c1a | |
|
|
96ae01078c | |
|
|
3b9919fa4d | |
|
|
6c4a3b59f9 | |
|
|
01c3d0a707 | |
|
|
fbee4631d6 | |
|
|
d0b52ea299 | |
|
|
677195e587 | |
|
|
a383efcb21 | |
|
|
cb5753f77c | |
|
|
7a075b2ffe | |
|
|
7de822dd15 | |
|
|
1bbf4584a6 | |
|
|
70479c4094 | |
|
|
b94746a6e0 | |
|
|
ceae1564af | |
|
|
7ce5fc66db | |
|
|
41493462a1 | |
|
|
6ee4d4648c | |
|
|
97f6649584 | |
|
|
dc6b6d3f9d | |
|
|
1c3e0d4e46 | |
|
|
0b78583cf5 | |
|
|
28d318d724 | |
|
|
70c2f0f045 | |
|
|
b9b1271f14 | |
|
|
3c734bd811 | |
|
|
6391df0583 | |
|
|
6a781c8bc3 | |
|
|
138652af8e | |
|
|
59157d31a6 | |
|
|
8bab3f70e1 | |
|
|
41d786cc11 | |
|
|
c43de1bd2a | |
|
|
101efe45b7 | |
|
|
11df61c6ac | |
|
|
08684bcbd2 | |
|
|
744fb4425f | |
|
|
80cb1bc221 | |
|
|
8461556b02 | |
|
|
cfd516edf3 | |
|
|
803c2c0492 | |
|
|
b762bd029f | |
|
|
761679d306 | |
|
|
41668b153d | |
|
|
1d2f88ad29 | |
|
|
80917f5abc | |
|
|
dc61d300ab | |
|
|
ae16080de5 | |
|
|
b8ca1fc166 | |
|
|
f9986944df | |
|
|
cb877c2bc3 | |
|
|
4426082c17 | |
|
|
22097f8c7c | |
|
|
1450af60a0 | |
|
|
f1cc8d65f2 | |
|
|
ec7e808daf | |
|
|
e4b7e23e76 | |
|
|
8b92b80b4a | |
|
|
f7868aa00f | |
|
|
655a08fa5c | |
|
|
8bc432db49 | |
|
|
a40d6929e5 | |
|
|
8db566dbe9 | |
|
|
bb71de0230 | |
|
|
62a258f8e3 | |
|
|
c868eaae74 | |
|
|
8e1860f912 | |
|
|
814bef9d99 | |
|
|
3ceda15073 | |
|
|
a3a6ea2880 | |
|
|
24156b5067 | |
|
|
a1dfa5b11d | |
|
|
800356c751 | |
|
|
1546b012a6 | |
|
|
e6b77af931 | |
|
|
8cfaab9320 | |
|
|
2d69b9d0ae | |
|
|
41c2b4de65 | |
|
|
0a5e8a09fd | |
|
|
2f9fddfa6f | |
|
|
6f4356f72a | |
|
|
0c3c9009a9 | |
|
|
0475844701 | |
|
|
1abf9bd05c | |
|
|
6f17d4d231 | |
|
|
bf30e93284 | |
|
|
91be148b72 | |
|
|
e46d2cfc52 | |
|
|
d4cda6d546 | |
|
|
e35d69c61f | |
|
|
a353a94175 | |
|
|
b856250b2c | |
|
|
97d1476ded | |
|
|
cde14fc1bf | |
|
|
5723d50d0b | |
|
|
3eb384e02a | |
|
|
c960e0e245 | |
|
|
6fc188f0c4 | |
|
|
5c9fea5850 | |
|
|
3011a9ef6d | |
|
|
7b570be5b3 | |
|
|
0461bc65c6 | |
|
|
ead23608f0 | |
|
|
cf82b637d2 | |
|
|
2e8bfa1101 | |
|
|
d091b0b251 | |
|
|
56fc6c4896 | |
|
|
95685adba7 | |
|
|
909714af31 | |
|
|
dc2b4395bd | |
|
|
39875afbff | |
|
|
2ea7813ed4 |
|
|
@ -7,7 +7,16 @@ queries:
|
|||
- uses: security-and-quality
|
||||
- uses: ./.github/codeql/queries
|
||||
|
||||
paths-ignore:
|
||||
- "**/tests/**"
|
||||
- "**/test/**"
|
||||
- "**/*_test.rs"
|
||||
- "**/*/tests.rs"
|
||||
query-filters:
|
||||
- exclude:
|
||||
tags:
|
||||
- test
|
||||
|
||||
- exclude:
|
||||
id:
|
||||
- rust/unwrap-on-option
|
||||
|
|
|
|||
|
|
@ -0,0 +1,126 @@
|
|||
# Architecture Directives
|
||||
|
||||
> Companion to `Agents.md`. These are **activation directives**, not tutorials.
|
||||
> You already know these patterns — apply them. When making any structural or
|
||||
> design decision, run the relevant section below as a checklist.
|
||||
|
||||
---
|
||||
|
||||
## 1. Active Principles (always on)
|
||||
|
||||
Apply these on every non-trivial change. No exceptions.
|
||||
|
||||
- **SRP** — one reason to change per component. If you can't name the responsibility in one noun phrase, split it.
|
||||
- **OCP** — extend by adding, not by modifying. New variants/impls over patching existing logic.
|
||||
- **ISP** — traits stay minimal. More than ~5 methods is a split signal.
|
||||
- **DIP** — high-level modules depend on traits, not concrete types. Infrastructure implements domain traits; it does not own domain logic.
|
||||
- **DRY** — one authoritative source per piece of knowledge. Copies are bugs that haven't diverged yet.
|
||||
- **YAGNI** — generic parameters, extension hooks, and pluggable strategies require an *existing* concrete use case, not a hypothetical one.
|
||||
- **KISS** — two equivalent designs: choose the one with fewer concepts. Justify complexity; never assume it.
|
||||
|
||||
---
|
||||
|
||||
## 2. Layered Architecture
|
||||
|
||||
Dependencies point **inward only**: `Presentation → Application → Domain ← Infrastructure`.
|
||||
|
||||
- Domain layer: zero I/O. No network, no filesystem, no async runtime imports.
|
||||
- Infrastructure: implements domain traits at the boundary. Never leaks SDK/wire types inward.
|
||||
- Anti-Corruption Layer (ACL): all third-party and external-protocol types are translated here. If the external format changes, only the ACL changes.
|
||||
- Presentation: translates wire/HTTP representations to domain types and back. Nothing else.
|
||||
|
||||
---
|
||||
|
||||
## 3. Design Pattern Selection
|
||||
|
||||
Apply the right pattern. Do not invent a new abstraction when a named pattern fits.
|
||||
|
||||
| Situation | Pattern to apply |
|
||||
|---|---|
|
||||
| Struct with 3+ optional/dependent fields | **Builder** — `build()` returns `Result`, never panics |
|
||||
| Cross-cutting behavior (logging, retry, metrics) on a trait impl | **Decorator** — implements same trait, delegates all calls |
|
||||
| Subsystem with multiple internal components | **Façade** — single public entry point, internals are `pub(crate)` |
|
||||
| Swappable algorithm or policy | **Strategy** — trait injection; generics for compile-time, `dyn` for runtime |
|
||||
| Component notifying decoupled consumers | **Observer** — typed channels (`broadcast`, `watch`), not callback `Vec<Box<dyn Fn>>` |
|
||||
| Exclusive mutable state serving concurrent callers | **Actor** — `mpsc` command channel + `oneshot` reply; no lock needed on state |
|
||||
| Finite state with invalid transition prevention | **Typestate** — distinct types per state; invalid ops are compile errors |
|
||||
| Fixed process skeleton with overridable steps | **Template Method** — defaulted trait method calls required hooks |
|
||||
| Request pipeline with independent handlers | **Chain/Middleware** — generic compile-time chain for hot paths, `dyn` for runtime assembly |
|
||||
| Hiding a concrete type behind a trait | **Factory Function** — returns `Box<dyn Trait>` or `impl Trait` |
|
||||
|
||||
---
|
||||
|
||||
## 4. Data Modeling Rules
|
||||
|
||||
- **Make illegal states unrepresentable.** Type system enforces invariants; runtime validation is a second line, not the first.
|
||||
- **Newtype every primitive** that carries domain meaning. `SessionId(u64)` ≠ `UserId(u64)` — the compiler enforces it.
|
||||
- **Enums over booleans** for any parameter or field with two or more named states.
|
||||
- **Typed error enums** with named variants carrying full diagnostic context. `anyhow` is application-layer only; never in library code.
|
||||
- **Domain types carry no I/O concerns.** No `serde`, no codec, no DB derives on domain structs. Conversions via `From`/`TryFrom` at layer boundaries.
|
||||
|
||||
---
|
||||
|
||||
## 5. Concurrency Rules
|
||||
|
||||
- Prefer message-passing over shared memory. Shared state is a fallback.
|
||||
- All channels must be **bounded**. Document the bound's rationale inline.
|
||||
- Never hold a lock across an `await` unless atomicity explicitly requires it — document why.
|
||||
- Document lock acquisition order wherever two locks are taken together.
|
||||
- Every `async fn` is cancellation-safe unless explicitly documented otherwise. Mutate shared state *after* the `await` that may be cancelled, not before.
|
||||
- High-read/low-write state: use `arc-swap` or `watch` for lock-free reads.
|
||||
|
||||
---
|
||||
|
||||
## 6. Error Handling Rules
|
||||
|
||||
- Errors translated at every layer boundary — low-level errors never surface unmodified.
|
||||
- Add context at the propagation site: what operation failed and where.
|
||||
- No `unwrap()`/`expect()` in production paths without a comment proving `None`/`Err` is impossible.
|
||||
- Panics are only permitted in: tests, startup/init unrecoverable failure, and `unreachable!()` with an invariant comment.
|
||||
|
||||
---
|
||||
|
||||
## 7. API Design Rules
|
||||
|
||||
- **CQS**: functions that return data must not mutate; functions that mutate return only `Result`.
|
||||
- **Least surprise**: a function does exactly what its name implies. Side effects are documented.
|
||||
- **Idempotency**: `close()`, `shutdown()`, `unregister()` called twice must not panic or error.
|
||||
- **Fallibility at the type level**: failure → `Result<T, E>`. No sentinel values.
|
||||
- **Minimal public surface**: default to `pub(crate)`. Mark `pub` only deliberate API. Re-export through a single surface in `mod.rs`.
|
||||
|
||||
---
|
||||
|
||||
## 8. Performance Rules (hot paths)
|
||||
|
||||
- Annotate hot-path functions with `// HOT PATH: <throughput requirement>`.
|
||||
- Zero allocations per operation in hot paths after initialization. Preallocate in constructors, reuse buffers.
|
||||
- Pass `&[u8]` / `Bytes` slices — not `Vec<u8>`. Use `BytesMut` for reusable mutable buffers.
|
||||
- No `String` formatting in hot paths. No logging without a rate-limit or sampling gate.
|
||||
- Any allocation in a hot path gets a comment: `// ALLOC: <reason and size>`.
|
||||
|
||||
---
|
||||
|
||||
## 9. Testing Rules
|
||||
|
||||
- Bug fixes require a regression test that is **red before the fix, green after**. Name it after the bug.
|
||||
- Property tests for: codec round-trips, state machine invariants, cryptographic protocol correctness.
|
||||
- No shared mutable state between tests. Each test constructs its own environment.
|
||||
- Test doubles hierarchy (simplest first): Fake → Stub → Spy → Mock. Mocks couple to implementation, not behavior — use sparingly.
|
||||
|
||||
---
|
||||
|
||||
## 10. Pre-Change Checklist
|
||||
|
||||
Run this before proposing or implementing any structural decision:
|
||||
|
||||
- [ ] Responsibility nameable in one noun phrase?
|
||||
- [ ] Layer dependencies point inward only?
|
||||
- [ ] Invalid states unrepresentable in the type system?
|
||||
- [ ] State transitions gated through a single interface?
|
||||
- [ ] All channels bounded?
|
||||
- [ ] No locks held across `await` (or documented)?
|
||||
- [ ] Errors typed and translated at layer boundaries?
|
||||
- [ ] No panics in production paths without invariant proof?
|
||||
- [ ] Hot paths annotated and allocation-free?
|
||||
- [ ] Public surface minimal — only deliberate API marked `pub`?
|
||||
- [ ] Correct pattern chosen from Section 3 table?
|
||||
|
|
@ -0,0 +1,45 @@
|
|||
name: Build
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "*" ]
|
||||
pull_request:
|
||||
branches: [ "*" ]
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install latest stable Rust toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
|
||||
- name: Cache cargo registry & build artifacts
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-cargo-
|
||||
|
||||
- name: Build Release
|
||||
run: cargo build --release --verbose
|
||||
|
||||
- name: Upload binary artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: telemt
|
||||
path: target/release/telemt
|
||||
|
|
@ -0,0 +1,139 @@
|
|||
name: Check
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "*" ]
|
||||
pull_request:
|
||||
branches: [ "*" ]
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
concurrency:
|
||||
group: test-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
# ==========================
|
||||
# Formatting
|
||||
# ==========================
|
||||
fmt:
|
||||
name: Fmt
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
components: rustfmt
|
||||
|
||||
- run: cargo fmt -- --check
|
||||
|
||||
# ==========================
|
||||
# Tests
|
||||
# ==========================
|
||||
test:
|
||||
name: Test
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
actions: write
|
||||
checks: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/bin
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-cargo-nextest-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-cargo-nextest-
|
||||
${{ runner.os }}-cargo-
|
||||
|
||||
- name: Install cargo-nextest
|
||||
run: cargo install --locked cargo-nextest || true
|
||||
|
||||
- name: Run tests with nextest
|
||||
run: cargo nextest run -j "$(nproc)"
|
||||
|
||||
# ==========================
|
||||
# Clippy
|
||||
# ==========================
|
||||
clippy:
|
||||
name: Clippy
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
checks: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
components: clippy
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-cargo-clippy-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-cargo-clippy-
|
||||
${{ runner.os }}-cargo-
|
||||
|
||||
- name: Run clippy
|
||||
run: cargo clippy -j "$(nproc)" -- --cap-lints warn
|
||||
|
||||
# ==========================
|
||||
# Udeps
|
||||
# ==========================
|
||||
udeps:
|
||||
name: Udeps
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
components: rust-src
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/bin
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-cargo-udeps-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-cargo-udeps-
|
||||
${{ runner.os }}-cargo-
|
||||
|
||||
- name: Install cargo-udeps
|
||||
run: cargo install --locked cargo-udeps || true
|
||||
|
||||
- name: Run udeps
|
||||
run: cargo udeps -j "$(nproc)" || true
|
||||
|
|
@ -5,35 +5,87 @@ on:
|
|||
tags:
|
||||
- '[0-9]+.[0-9]+.[0-9]+'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tag:
|
||||
description: 'Release tag (example: 3.3.15)'
|
||||
required: true
|
||||
type: string
|
||||
|
||||
concurrency:
|
||||
group: release-${{ github.ref }}
|
||||
group: release-${{ github.ref_name }}-${{ github.event.inputs.tag || 'auto' }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
BINARY_NAME: telemt
|
||||
|
||||
jobs:
|
||||
# ==========================
|
||||
# GNU / glibc
|
||||
# ==========================
|
||||
build-gnu:
|
||||
name: GNU ${{ matrix.target }}
|
||||
prepare:
|
||||
name: Prepare
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
outputs:
|
||||
version: ${{ steps.vars.outputs.version }}
|
||||
prerelease: ${{ steps.vars.outputs.prerelease }}
|
||||
|
||||
steps:
|
||||
- name: Resolve version
|
||||
id: vars
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
if [ "${GITHUB_EVENT_NAME}" = "workflow_dispatch" ]; then
|
||||
VERSION="${{ github.event.inputs.tag }}"
|
||||
else
|
||||
VERSION="${GITHUB_REF#refs/tags/}"
|
||||
fi
|
||||
|
||||
VERSION="${VERSION#refs/tags/}"
|
||||
|
||||
if [ -z "${VERSION}" ]; then
|
||||
echo "Release version is empty" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "${VERSION}" == *-* ]]; then
|
||||
PRERELEASE=true
|
||||
else
|
||||
PRERELEASE=false
|
||||
fi
|
||||
|
||||
echo "version=${VERSION}" >> "${GITHUB_OUTPUT}"
|
||||
echo "prerelease=${PRERELEASE}" >> "${GITHUB_OUTPUT}"
|
||||
|
||||
# ==========================
|
||||
# GNU / glibc
|
||||
# ==========================
|
||||
build-gnu:
|
||||
name: GNU ${{ matrix.asset }}
|
||||
runs-on: ubuntu-latest
|
||||
needs: prepare
|
||||
|
||||
container:
|
||||
image: rust:slim-bookworm
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- target: x86_64-unknown-linux-gnu
|
||||
asset: telemt-x86_64-linux-gnu
|
||||
cpu: baseline
|
||||
|
||||
- target: x86_64-unknown-linux-gnu
|
||||
asset: telemt-x86_64-v3-linux-gnu
|
||||
cpu: v3
|
||||
|
||||
- target: aarch64-unknown-linux-gnu
|
||||
asset: telemt-aarch64-linux-gnu
|
||||
cpu: generic
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
|
@ -47,8 +99,8 @@ jobs:
|
|||
|
||||
- name: Install deps
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y \
|
||||
apt-get update
|
||||
apt-get install -y \
|
||||
build-essential \
|
||||
clang \
|
||||
lld \
|
||||
|
|
@ -59,53 +111,73 @@ jobs:
|
|||
- uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
/usr/local/cargo/registry
|
||||
/usr/local/cargo/git
|
||||
target
|
||||
key: gnu-${{ matrix.target }}-${{ hashFiles('**/Cargo.lock') }}
|
||||
key: gnu-${{ matrix.asset }}-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
gnu-${{ matrix.asset }}-
|
||||
gnu-
|
||||
|
||||
- name: Build
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
if [ "${{ matrix.target }}" = "aarch64-unknown-linux-gnu" ]; then
|
||||
export CC=aarch64-linux-gnu-gcc
|
||||
export CXX=aarch64-linux-gnu-g++
|
||||
export CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc
|
||||
export CXX_aarch64_unknown_linux_gnu=aarch64-linux-gnu-g++
|
||||
export RUSTFLAGS="-C linker=aarch64-linux-gnu-gcc"
|
||||
export RUSTFLAGS="-C linker=aarch64-linux-gnu-gcc -C lto=fat -C panic=abort"
|
||||
else
|
||||
export CC=clang
|
||||
export CXX=clang++
|
||||
export CC_x86_64_unknown_linux_gnu=clang
|
||||
export CXX_x86_64_unknown_linux_gnu=clang++
|
||||
export RUSTFLAGS="-C linker=clang -C link-arg=-fuse-ld=lld"
|
||||
|
||||
if [ "${{ matrix.cpu }}" = "v3" ]; then
|
||||
CPU_FLAGS="-C target-cpu=x86-64-v3"
|
||||
else
|
||||
CPU_FLAGS="-C target-cpu=x86-64"
|
||||
fi
|
||||
|
||||
cargo build --release --target ${{ matrix.target }}
|
||||
export RUSTFLAGS="-C linker=clang -C link-arg=-fuse-ld=lld -C lto=fat -C panic=abort ${CPU_FLAGS}"
|
||||
fi
|
||||
|
||||
cargo build --release --target ${{ matrix.target }} -j "$(nproc)"
|
||||
|
||||
- name: Package
|
||||
shell: bash
|
||||
run: |
|
||||
mkdir -p dist
|
||||
BIN=target/${{ matrix.target }}/release/${{ env.BINARY_NAME }}
|
||||
set -euo pipefail
|
||||
|
||||
cp "$BIN" dist/${{ env.BINARY_NAME }}-${{ matrix.target }}
|
||||
mkdir -p dist
|
||||
cp "target/${{ matrix.target }}/release/${{ env.BINARY_NAME }}" dist/telemt
|
||||
|
||||
if [ "${{ matrix.target }}" = "aarch64-unknown-linux-gnu" ]; then
|
||||
STRIP_BIN=aarch64-linux-gnu-strip
|
||||
else
|
||||
STRIP_BIN=strip
|
||||
fi
|
||||
|
||||
"${STRIP_BIN}" dist/telemt
|
||||
|
||||
cd dist
|
||||
tar -czf ${{ matrix.asset }}.tar.gz ${{ env.BINARY_NAME }}-${{ matrix.target }}
|
||||
sha256sum ${{ matrix.asset }}.tar.gz > ${{ matrix.asset }}.sha256
|
||||
tar -czf "${{ matrix.asset }}.tar.gz" \
|
||||
--owner=0 --group=0 --numeric-owner \
|
||||
telemt
|
||||
|
||||
sha256sum "${{ matrix.asset }}.tar.gz" > "${{ matrix.asset }}.tar.gz.sha256"
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.asset }}
|
||||
path: |
|
||||
dist/${{ matrix.asset }}.tar.gz
|
||||
dist/${{ matrix.asset }}.sha256
|
||||
path: dist/*
|
||||
|
||||
# ==========================
|
||||
# MUSL
|
||||
# ==========================
|
||||
# ==========================
|
||||
# MUSL
|
||||
# ==========================
|
||||
build-musl:
|
||||
name: MUSL ${{ matrix.target }}
|
||||
name: MUSL ${{ matrix.asset }}
|
||||
runs-on: ubuntu-latest
|
||||
needs: prepare
|
||||
|
||||
container:
|
||||
image: rust:slim-bookworm
|
||||
|
|
@ -116,8 +188,15 @@ jobs:
|
|||
include:
|
||||
- target: x86_64-unknown-linux-musl
|
||||
asset: telemt-x86_64-linux-musl
|
||||
cpu: baseline
|
||||
|
||||
- target: x86_64-unknown-linux-musl
|
||||
asset: telemt-x86_64-v3-linux-musl
|
||||
cpu: v3
|
||||
|
||||
- target: aarch64-unknown-linux-musl
|
||||
asset: telemt-aarch64-linux-musl
|
||||
cpu: generic
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
|
@ -138,30 +217,29 @@ jobs:
|
|||
|
||||
- name: Install aarch64 musl toolchain
|
||||
if: matrix.target == 'aarch64-unknown-linux-musl'
|
||||
shell: bash
|
||||
run: |
|
||||
set -e
|
||||
set -euo pipefail
|
||||
|
||||
TOOLCHAIN_DIR="$HOME/.musl-aarch64"
|
||||
ARCHIVE="aarch64-linux-musl-cross.tgz"
|
||||
URL="https://github.com/telemt/telemt/releases/download/toolchains/$ARCHIVE"
|
||||
URL="https://github.com/telemt/telemt/releases/download/toolchains/${ARCHIVE}"
|
||||
|
||||
if [ -x "$TOOLCHAIN_DIR/bin/aarch64-linux-musl-gcc" ]; then
|
||||
echo "✅ MUSL toolchain already installed"
|
||||
if [ -x "${TOOLCHAIN_DIR}/bin/aarch64-linux-musl-gcc" ]; then
|
||||
echo "MUSL toolchain cached"
|
||||
else
|
||||
echo "⬇️ Downloading musl toolchain from Telemt GitHub Releases..."
|
||||
|
||||
curl -fL \
|
||||
--retry 5 \
|
||||
--retry-delay 3 \
|
||||
--connect-timeout 10 \
|
||||
--max-time 120 \
|
||||
-o "$ARCHIVE" "$URL"
|
||||
-o "${ARCHIVE}" "${URL}"
|
||||
|
||||
mkdir -p "$TOOLCHAIN_DIR"
|
||||
tar -xzf "$ARCHIVE" --strip-components=1 -C "$TOOLCHAIN_DIR"
|
||||
mkdir -p "${TOOLCHAIN_DIR}"
|
||||
tar -xzf "${ARCHIVE}" --strip-components=1 -C "${TOOLCHAIN_DIR}"
|
||||
fi
|
||||
|
||||
echo "$TOOLCHAIN_DIR/bin" >> $GITHUB_PATH
|
||||
echo "${TOOLCHAIN_DIR}/bin" >> "${GITHUB_PATH}"
|
||||
|
||||
- name: Add rust target
|
||||
run: rustup target add ${{ matrix.target }}
|
||||
|
|
@ -172,96 +250,70 @@ jobs:
|
|||
/usr/local/cargo/registry
|
||||
/usr/local/cargo/git
|
||||
target
|
||||
key: musl-${{ matrix.target }}-${{ hashFiles('**/Cargo.lock') }}
|
||||
key: musl-${{ matrix.asset }}-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
musl-${{ matrix.asset }}-
|
||||
musl-
|
||||
|
||||
- name: Build
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
if [ "${{ matrix.target }}" = "aarch64-unknown-linux-musl" ]; then
|
||||
export CC=aarch64-linux-musl-gcc
|
||||
export CC_aarch64_unknown_linux_musl=aarch64-linux-musl-gcc
|
||||
export RUSTFLAGS="-C target-feature=+crt-static -C linker=aarch64-linux-musl-gcc"
|
||||
export RUSTFLAGS="-C target-feature=+crt-static -C linker=aarch64-linux-musl-gcc -C lto=fat -C panic=abort"
|
||||
else
|
||||
export CC=musl-gcc
|
||||
export CC_x86_64_unknown_linux_musl=musl-gcc
|
||||
export RUSTFLAGS="-C target-feature=+crt-static"
|
||||
|
||||
if [ "${{ matrix.cpu }}" = "v3" ]; then
|
||||
CPU_FLAGS="-C target-cpu=x86-64-v3"
|
||||
else
|
||||
CPU_FLAGS="-C target-cpu=x86-64"
|
||||
fi
|
||||
|
||||
cargo build --release --target ${{ matrix.target }}
|
||||
export RUSTFLAGS="-C target-feature=+crt-static -C lto=fat -C panic=abort ${CPU_FLAGS}"
|
||||
fi
|
||||
|
||||
cargo build --release --target ${{ matrix.target }} -j "$(nproc)"
|
||||
|
||||
- name: Package
|
||||
shell: bash
|
||||
run: |
|
||||
mkdir -p dist
|
||||
BIN=target/${{ matrix.target }}/release/${{ env.BINARY_NAME }}
|
||||
set -euo pipefail
|
||||
|
||||
cp "$BIN" dist/${{ env.BINARY_NAME }}-${{ matrix.target }}
|
||||
mkdir -p dist
|
||||
cp "target/${{ matrix.target }}/release/${{ env.BINARY_NAME }}" dist/telemt
|
||||
|
||||
if [ "${{ matrix.target }}" = "aarch64-unknown-linux-musl" ]; then
|
||||
STRIP_BIN=aarch64-linux-musl-strip
|
||||
else
|
||||
STRIP_BIN=strip
|
||||
fi
|
||||
|
||||
"${STRIP_BIN}" dist/telemt
|
||||
|
||||
cd dist
|
||||
tar -czf ${{ matrix.asset }}.tar.gz ${{ env.BINARY_NAME }}-${{ matrix.target }}
|
||||
sha256sum ${{ matrix.asset }}.tar.gz > ${{ matrix.asset }}.sha256
|
||||
tar -czf "${{ matrix.asset }}.tar.gz" \
|
||||
--owner=0 --group=0 --numeric-owner \
|
||||
telemt
|
||||
|
||||
sha256sum "${{ matrix.asset }}.tar.gz" > "${{ matrix.asset }}.tar.gz.sha256"
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.asset }}
|
||||
path: |
|
||||
dist/${{ matrix.asset }}.tar.gz
|
||||
dist/${{ matrix.asset }}.sha256
|
||||
path: dist/*
|
||||
|
||||
# ==========================
|
||||
# Docker
|
||||
# ==========================
|
||||
docker:
|
||||
name: Docker
|
||||
runs-on: ubuntu-latest
|
||||
needs: [build-gnu, build-musl]
|
||||
continue-on-error: true
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: artifacts
|
||||
|
||||
- name: Extract binaries
|
||||
run: |
|
||||
mkdir dist
|
||||
find artifacts -name "*.tar.gz" -exec tar -xzf {} -C dist \;
|
||||
|
||||
cp dist/telemt-x86_64-unknown-linux-musl dist/telemt || true
|
||||
|
||||
- uses: docker/setup-qemu-action@v3
|
||||
- uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Login to GHCR
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract version
|
||||
id: vars
|
||||
run: echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Build & Push
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: |
|
||||
ghcr.io/${{ github.repository }}:${{ steps.vars.outputs.VERSION }}
|
||||
ghcr.io/${{ github.repository }}:latest
|
||||
build-args: |
|
||||
BINARY=dist/telemt
|
||||
|
||||
# ==========================
|
||||
# Release
|
||||
# ==========================
|
||||
# ==========================
|
||||
# Release
|
||||
# ==========================
|
||||
release:
|
||||
name: Release
|
||||
runs-on: ubuntu-latest
|
||||
needs: [build-gnu, build-musl]
|
||||
needs: [prepare, build-gnu, build-musl]
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
|
@ -272,14 +324,98 @@ jobs:
|
|||
path: artifacts
|
||||
|
||||
- name: Flatten artifacts
|
||||
shell: bash
|
||||
run: |
|
||||
mkdir dist
|
||||
set -euo pipefail
|
||||
mkdir -p dist
|
||||
find artifacts -type f -exec cp {} dist/ \;
|
||||
|
||||
- name: Create Release
|
||||
- name: Create GitHub Release
|
||||
uses: softprops/action-gh-release@v2
|
||||
with:
|
||||
tag_name: ${{ needs.prepare.outputs.version }}
|
||||
target_commitish: ${{ github.sha }}
|
||||
files: dist/*
|
||||
generate_release_notes: true
|
||||
draft: false
|
||||
prerelease: ${{ contains(github.ref, '-rc') || contains(github.ref, '-beta') || contains(github.ref, '-alpha') }}
|
||||
prerelease: ${{ needs.prepare.outputs.prerelease == 'true' }}
|
||||
overwrite_files: true
|
||||
|
||||
# ==========================
|
||||
# Docker
|
||||
# ==========================
|
||||
docker:
|
||||
name: Docker
|
||||
runs-on: ubuntu-latest
|
||||
needs: [prepare, release]
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: docker/setup-qemu-action@v3
|
||||
|
||||
- uses: docker/setup-buildx-action@v3
|
||||
|
||||
- uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Probe release assets
|
||||
shell: bash
|
||||
env:
|
||||
VERSION: ${{ needs.prepare.outputs.version }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
for asset in \
|
||||
telemt-x86_64-linux-musl.tar.gz \
|
||||
telemt-x86_64-linux-musl.tar.gz.sha256 \
|
||||
telemt-aarch64-linux-musl.tar.gz \
|
||||
telemt-aarch64-linux-musl.tar.gz.sha256
|
||||
do
|
||||
curl -fsIL \
|
||||
--retry 10 \
|
||||
--retry-delay 3 \
|
||||
"https://github.com/${GITHUB_REPOSITORY}/releases/download/${VERSION}/${asset}" \
|
||||
> /dev/null
|
||||
done
|
||||
|
||||
- name: Compute image tags
|
||||
id: meta
|
||||
shell: bash
|
||||
env:
|
||||
VERSION: ${{ needs.prepare.outputs.version }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
IMAGE="$(echo "ghcr.io/${GITHUB_REPOSITORY}" | tr '[:upper:]' '[:lower:]')"
|
||||
TAGS="${IMAGE}:${VERSION}"
|
||||
|
||||
if [[ "${VERSION}" != *-* ]]; then
|
||||
TAGS="${TAGS}"$'\n'"${IMAGE}:latest"
|
||||
fi
|
||||
|
||||
{
|
||||
echo "tags<<EOF"
|
||||
printf '%s\n' "${TAGS}"
|
||||
echo "EOF"
|
||||
} >> "${GITHUB_OUTPUT}"
|
||||
|
||||
- name: Build & Push
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
pull: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
build-args: |
|
||||
TELEMT_REPOSITORY=${{ github.repository }}
|
||||
TELEMT_VERSION=${{ needs.prepare.outputs.version }}
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
|
|
|
|||
|
|
@ -1,66 +0,0 @@
|
|||
name: Rust
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "*" ]
|
||||
pull_request:
|
||||
branches: [ "*" ]
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
actions: write
|
||||
checks: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install latest stable Rust toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
components: rustfmt, clippy
|
||||
|
||||
- name: Cache cargo registry & build artifacts
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-cargo-
|
||||
|
||||
- name: Build Release
|
||||
run: cargo build --release --verbose
|
||||
|
||||
- name: Run tests
|
||||
run: cargo test --verbose
|
||||
|
||||
- name: Stress quota-lock suites (PR only)
|
||||
if: github.event_name == 'pull_request'
|
||||
env:
|
||||
RUST_TEST_THREADS: 16
|
||||
run: |
|
||||
set -euo pipefail
|
||||
for i in $(seq 1 12); do
|
||||
echo "[quota-lock-stress] iteration ${i}/12"
|
||||
cargo test quota_lock_ --bin telemt -- --nocapture --test-threads 16
|
||||
cargo test relay_quota_wake --bin telemt -- --nocapture --test-threads 16
|
||||
done
|
||||
|
||||
# clippy dont fail on warnings because of active development of telemt
|
||||
# and many warnings
|
||||
- name: Run clippy
|
||||
run: cargo clippy -- --cap-lints warn
|
||||
|
||||
- name: Check for unused dependencies
|
||||
run: cargo udeps || true
|
||||
|
|
@ -1,58 +0,0 @@
|
|||
# Architect Mode Rules for Telemt
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
```mermaid
|
||||
graph TB
|
||||
subgraph Entry
|
||||
Client[Clients] --> Listener[TCP/Unix Listener]
|
||||
end
|
||||
|
||||
subgraph Proxy Layer
|
||||
Listener --> ClientHandler[ClientHandler]
|
||||
ClientHandler --> Handshake[Handshake Validator]
|
||||
Handshake --> |Valid| Relay[Relay Layer]
|
||||
Handshake --> |Invalid| Masking[Masking/TLS Fronting]
|
||||
end
|
||||
|
||||
subgraph Transport
|
||||
Relay --> MiddleProxy[Middle-End Proxy Pool]
|
||||
Relay --> DirectRelay[Direct DC Relay]
|
||||
MiddleProxy --> TelegramDC[Telegram DCs]
|
||||
DirectRelay --> TelegramDC
|
||||
end
|
||||
```
|
||||
|
||||
## Module Dependencies
|
||||
- [`src/main.rs`](src/main.rs) - Entry point, spawns all async tasks
|
||||
- [`src/config/`](src/config/) - Configuration loading with auto-migration
|
||||
- [`src/error.rs`](src/error.rs) - Error types, must be used by all modules
|
||||
- [`src/crypto/`](src/crypto/) - AES, SHA, random number generation
|
||||
- [`src/protocol/`](src/protocol/) - MTProto constants, frame encoding, obfuscation
|
||||
- [`src/stream/`](src/stream/) - Stream wrappers, buffer pool, frame codecs
|
||||
- [`src/proxy/`](src/proxy/) - Client handling, handshake, relay logic
|
||||
- [`src/transport/`](src/transport/) - Upstream management, middle-proxy, SOCKS support
|
||||
- [`src/stats/`](src/stats/) - Statistics and replay protection
|
||||
- [`src/ip_tracker.rs`](src/ip_tracker.rs) - Per-user IP tracking
|
||||
|
||||
## Key Architectural Constraints
|
||||
|
||||
### Middle-End Proxy Mode
|
||||
- Requires public IP on interface OR 1:1 NAT with STUN probing
|
||||
- Uses separate `proxy-secret` from Telegram (NOT user secrets)
|
||||
- Falls back to direct mode automatically on STUN mismatch
|
||||
|
||||
### TLS Fronting
|
||||
- Invalid handshakes are transparently proxied to `mask_host`
|
||||
- This is critical for DPI evasion - do not change this behavior
|
||||
- `mask_unix_sock` and `mask_host` are mutually exclusive
|
||||
|
||||
### Stream Architecture
|
||||
- Buffer pool is shared globally via Arc - prevents allocation storms
|
||||
- Frame codecs implement tokio-util Encoder/Decoder traits
|
||||
- State machine in [`src/stream/state.rs`](src/stream/state.rs) manages stream transitions
|
||||
|
||||
### Configuration Migration
|
||||
- [`ProxyConfig::load()`](src/config/mod.rs:641) mutates config in-place
|
||||
- New fields must have sensible defaults
|
||||
- DC203 override is auto-injected for CDN/media support
|
||||
|
|
@ -1,23 +0,0 @@
|
|||
# Code Mode Rules for Telemt
|
||||
|
||||
## Error Handling
|
||||
- Always use [`ProxyError`](src/error.rs:168) from [`src/error.rs`](src/error.rs) for proxy operations
|
||||
- [`HandshakeResult<T,R,W>`](src/error.rs:292) returns streams on bad client - these MUST be returned for masking, never dropped
|
||||
- Use [`Recoverable`](src/error.rs:110) trait to check if errors are retryable
|
||||
|
||||
## Configuration Changes
|
||||
- [`ProxyConfig::load()`](src/config/mod.rs:641) auto-mutates config - new fields should have defaults
|
||||
- DC203 override is auto-injected if missing - do not remove this behavior
|
||||
- When adding config fields, add migration logic in [`ProxyConfig::load()`](src/config/mod.rs:641)
|
||||
|
||||
## Crypto Code
|
||||
- [`SecureRandom`](src/crypto/random.rs) from [`src/crypto/random.rs`](src/crypto/random.rs) must be used for all crypto operations
|
||||
- Never use `rand::thread_rng()` directly - use the shared `Arc<SecureRandom>`
|
||||
|
||||
## Stream Handling
|
||||
- Buffer pool [`BufferPool`](src/stream/buffer_pool.rs) is shared via Arc - always use it instead of allocating
|
||||
- Frame codecs in [`src/stream/frame_codec.rs`](src/stream/frame_codec.rs) implement tokio-util's Encoder/Decoder traits
|
||||
|
||||
## Testing
|
||||
- Tests are inline in modules using `#[cfg(test)]`
|
||||
- Use `cargo test --lib <module_name>` to run tests for specific modules
|
||||
|
|
@ -1,27 +0,0 @@
|
|||
# Debug Mode Rules for Telemt
|
||||
|
||||
## Logging
|
||||
- `RUST_LOG` environment variable takes absolute priority over all config log levels
|
||||
- Log levels: `trace`, `debug`, `info`, `warn`, `error`
|
||||
- Use `RUST_LOG=debug cargo run` for detailed operational logs
|
||||
- Use `RUST_LOG=trace cargo run` for full protocol-level debugging
|
||||
|
||||
## Middle-End Proxy Debugging
|
||||
- Set `ME_DIAG=1` environment variable for high-precision cryptography diagnostics
|
||||
- STUN probe results are logged at startup - check for mismatch between local and reflected IP
|
||||
- If Middle-End fails, check `proxy_secret_path` points to valid file from https://core.telegram.org/getProxySecret
|
||||
|
||||
## Connection Issues
|
||||
- DC connectivity is logged at startup with RTT measurements
|
||||
- If DC ping fails, check `dc_overrides` for custom addresses
|
||||
- Use `prefer_ipv6=false` in config if IPv6 is unreliable
|
||||
|
||||
## TLS Fronting Issues
|
||||
- Invalid handshakes are proxied to `mask_host` - check this host is reachable
|
||||
- `mask_unix_sock` and `mask_host` are mutually exclusive - only one can be set
|
||||
- If `mask_unix_sock` is set, socket must exist before connections arrive
|
||||
|
||||
## Common Errors
|
||||
- `ReplayAttack` - client replayed a handshake nonce, potential attack
|
||||
- `TimeSkew` - client clock is off, can disable with `ignore_time_skew=true`
|
||||
- `TgHandshakeTimeout` - upstream DC connection failed, check network
|
||||
|
|
@ -1,8 +1,8 @@
|
|||
# Code of Conduct
|
||||
|
||||
## 1. Purpose
|
||||
## Purpose
|
||||
|
||||
Telemt exists to solve technical problems.
|
||||
**Telemt exists to solve technical problems.**
|
||||
|
||||
Telemt is open to contributors who want to learn, improve and build meaningful systems together.
|
||||
|
||||
|
|
@ -18,27 +18,34 @@ Technology has consequences. Responsibility is inherent.
|
|||
|
||||
---
|
||||
|
||||
## 2. Principles
|
||||
## Principles
|
||||
|
||||
* **Technical over emotional**
|
||||
|
||||
Arguments are grounded in data, logs, reproducible cases, or clear reasoning.
|
||||
|
||||
* **Clarity over noise**
|
||||
|
||||
Communication is structured, concise, and relevant.
|
||||
|
||||
* **Openness with standards**
|
||||
|
||||
Participation is open. The work remains disciplined.
|
||||
|
||||
* **Independence of judgment**
|
||||
|
||||
Claims are evaluated on technical merit, not affiliation or posture.
|
||||
|
||||
* **Responsibility over capability**
|
||||
|
||||
Capability does not justify careless use.
|
||||
|
||||
* **Cooperation over friction**
|
||||
|
||||
Progress depends on coordination, mutual support, and honest review.
|
||||
|
||||
* **Good intent, rigorous method**
|
||||
|
||||
Assume good intent, but require rigor.
|
||||
|
||||
> **Aussagen gelten nach ihrer Begründung.**
|
||||
|
|
@ -47,7 +54,7 @@ Technology has consequences. Responsibility is inherent.
|
|||
|
||||
---
|
||||
|
||||
## 3. Expected Behavior
|
||||
## Expected Behavior
|
||||
|
||||
Participants are expected to:
|
||||
|
||||
|
|
@ -69,7 +76,7 @@ New contributors are welcome. They are expected to grow into these standards. Ex
|
|||
|
||||
---
|
||||
|
||||
## 4. Unacceptable Behavior
|
||||
## Unacceptable Behavior
|
||||
|
||||
The following is not allowed:
|
||||
|
||||
|
|
@ -89,7 +96,7 @@ Such discussions may be closed, removed, or redirected.
|
|||
|
||||
---
|
||||
|
||||
## 5. Security and Misuse
|
||||
## Security and Misuse
|
||||
|
||||
Telemt is intended for responsible use.
|
||||
|
||||
|
|
@ -109,15 +116,13 @@ Security is both technical and behavioral.
|
|||
|
||||
Telemt is open to contributors of different backgrounds, experience levels, and working styles.
|
||||
|
||||
Standards are public, legible, and applied to the work itself.
|
||||
|
||||
Questions are welcome. Careful disagreement is welcome. Honest correction is welcome.
|
||||
|
||||
Gatekeeping by obscurity, status signaling, or hostility is not.
|
||||
- Standards are public, legible, and applied to the work itself.
|
||||
- Questions are welcome. Careful disagreement is welcome. Honest correction is welcome.
|
||||
- Gatekeeping by obscurity, status signaling, or hostility is not.
|
||||
|
||||
---
|
||||
|
||||
## 7. Scope
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies to all official spaces:
|
||||
|
||||
|
|
@ -127,16 +132,19 @@ This Code of Conduct applies to all official spaces:
|
|||
|
||||
---
|
||||
|
||||
## 8. Maintainer Stewardship
|
||||
## Maintainer Stewardship
|
||||
|
||||
Maintainers are responsible for final decisions in matters of conduct, scope, and direction.
|
||||
|
||||
This responsibility is stewardship: preserving continuity, protecting signal, maintaining standards, and keeping Telemt workable for others.
|
||||
This responsibility is stewardship:
|
||||
- preserving continuity,
|
||||
- protecting signal,
|
||||
- maintaining standards,
|
||||
- keeping Telemt workable for others.
|
||||
|
||||
Judgment should be exercised with restraint, consistency, and institutional responsibility.
|
||||
|
||||
Not every decision requires extended debate.
|
||||
Not every intervention requires public explanation.
|
||||
- Not every decision requires extended debate.
|
||||
- Not every intervention requires public explanation.
|
||||
|
||||
All decisions are expected to serve the durability, clarity, and integrity of Telemt.
|
||||
|
||||
|
|
@ -146,7 +154,7 @@ All decisions are expected to serve the durability, clarity, and integrity of Te
|
|||
|
||||
---
|
||||
|
||||
## 9. Enforcement
|
||||
## Enforcement
|
||||
|
||||
Maintainers may act to preserve the integrity of Telemt, including by:
|
||||
|
||||
|
|
@ -156,44 +164,40 @@ Maintainers may act to preserve the integrity of Telemt, including by:
|
|||
* Restricting or banning participants
|
||||
|
||||
Actions are taken to maintain function, continuity, and signal quality.
|
||||
|
||||
Where possible, correction is preferred to exclusion.
|
||||
|
||||
Where necessary, exclusion is preferred to decay.
|
||||
- Where possible, correction is preferred to exclusion.
|
||||
- Where necessary, exclusion is preferred to decay.
|
||||
|
||||
---
|
||||
|
||||
## 10. Final
|
||||
## Final
|
||||
|
||||
Telemt is built on discipline, structure, and shared intent.
|
||||
- Signal over noise.
|
||||
- Facts over opinion.
|
||||
- Systems over rhetoric.
|
||||
|
||||
Signal over noise.
|
||||
Facts over opinion.
|
||||
Systems over rhetoric.
|
||||
- Work is collective.
|
||||
- Outcomes are shared.
|
||||
- Responsibility is distributed.
|
||||
|
||||
Work is collective.
|
||||
Outcomes are shared.
|
||||
Responsibility is distributed.
|
||||
|
||||
Precision is learned.
|
||||
Rigor is expected.
|
||||
Help is part of the work.
|
||||
- Precision is learned.
|
||||
- Rigor is expected.
|
||||
- Help is part of the work.
|
||||
|
||||
> **Ordnung ist Voraussetzung der Freiheit.**
|
||||
|
||||
If you contribute — contribute with care.
|
||||
If you speak — speak with substance.
|
||||
If you engage — engage constructively.
|
||||
- If you contribute — contribute with care.
|
||||
- If you speak — speak with substance.
|
||||
- If you engage — engage constructively.
|
||||
|
||||
---
|
||||
|
||||
## 11. After All
|
||||
## After All
|
||||
|
||||
Systems outlive intentions.
|
||||
|
||||
What is built will be used.
|
||||
What is released will propagate.
|
||||
What is maintained will define the future state.
|
||||
- What is built will be used.
|
||||
- What is released will propagate.
|
||||
- What is maintained will define the future state.
|
||||
|
||||
There is no neutral infrastructure, only infrastructure shaped well or poorly.
|
||||
|
||||
|
|
@ -201,8 +205,8 @@ There is no neutral infrastructure, only infrastructure shaped well or poorly.
|
|||
|
||||
> Every system carries responsibility.
|
||||
|
||||
Stability requires discipline.
|
||||
Freedom requires structure.
|
||||
Trust requires honesty.
|
||||
- Stability requires discipline.
|
||||
- Freedom requires structure.
|
||||
- Trust requires honesty.
|
||||
|
||||
In the end, the system reflects its contributors.
|
||||
In the end: the system reflects its contributors.
|
||||
|
|
|
|||
|
|
@ -1,19 +1,82 @@
|
|||
# Issues - Rules
|
||||
# Issues
|
||||
## Warnung
|
||||
Before opening Issue, if it is more question than problem or bug - ask about that [in our chat](https://t.me/telemtrs)
|
||||
|
||||
## What it is not
|
||||
- NOT Question and Answer
|
||||
- NOT Helpdesk
|
||||
|
||||
# Pull Requests - Rules
|
||||
***Each of your Issues triggers attempts to reproduce problems and analyze them, which are done manually by people***
|
||||
|
||||
---
|
||||
|
||||
# Pull Requests
|
||||
|
||||
## General
|
||||
- ONLY signed and verified commits
|
||||
- ONLY from your name
|
||||
- DO NOT commit with `codex` or `claude` as author/commiter
|
||||
- DO NOT commit with `codex`, `claude`, or other AI tools as author/committer
|
||||
- PREFER `flow` branch for development, not `main`
|
||||
|
||||
## AI
|
||||
We are not against modern tools, like AI, where you act as a principal or architect, but we consider it important:
|
||||
---
|
||||
|
||||
- you really understand what you're doing
|
||||
- you understand the relationships and dependencies of the components being modified
|
||||
- you understand the architecture of Telegram MTProto, MTProxy, Middle-End KDF at least generically
|
||||
- you DO NOT commit for the sake of commits, but to help the community, core-developers and ordinary users
|
||||
## Definition of Ready (MANDATORY)
|
||||
|
||||
A Pull Request WILL be ignored or closed if:
|
||||
|
||||
- it does NOT build
|
||||
- it does NOT pass tests
|
||||
- it does NOT follow formatting rules
|
||||
- it contains unrelated or excessive changes
|
||||
- the author cannot clearly explain the change
|
||||
|
||||
---
|
||||
|
||||
## Blessed Principles
|
||||
- PR must build
|
||||
- PR must pass tests
|
||||
- PR must be understood by author
|
||||
|
||||
---
|
||||
|
||||
## AI Usage Policy
|
||||
|
||||
AI tools (Claude, ChatGPT, Codex, DeepSeek, etc.) are allowed as **assistants**, NOT as decision-makers.
|
||||
|
||||
By submitting a PR, you confirm that:
|
||||
|
||||
- you fully understand the code you submit
|
||||
- you verified correctness manually
|
||||
- you reviewed architecture and dependencies
|
||||
- you take full responsibility for the change
|
||||
|
||||
AI-generated code is treated as **draft** and must be validated like any other external contribution.
|
||||
|
||||
PRs that look like unverified AI dumps WILL be closed
|
||||
|
||||
---
|
||||
|
||||
## Maintainer Policy
|
||||
|
||||
Maintainers reserve the right to:
|
||||
|
||||
- close PRs that do not meet basic quality requirements
|
||||
- request explanations before review
|
||||
- ignore low-effort contributions
|
||||
|
||||
Respect the reviewers time
|
||||
|
||||
---
|
||||
|
||||
## Enforcement
|
||||
|
||||
Pull Requests that violate project standards may be closed without review.
|
||||
|
||||
This includes (but is not limited to):
|
||||
|
||||
- non-building code
|
||||
- failing tests
|
||||
- unverified or low-effort changes
|
||||
- inability to explain the change
|
||||
|
||||
These actions follow the Code of Conduct and are intended to preserve signal, quality, and Telemt's integrity
|
||||
|
|
@ -183,9 +183,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "aws-lc-sys"
|
||||
version = "0.39.0"
|
||||
version = "0.39.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1fa7e52a4c5c547c741610a2c6f123f3881e409b714cd27e6798ef020c514f0a"
|
||||
checksum = "83a25cf98105baa966497416dbd42565ce3a8cf8dbfd59803ec9ad46f3126399"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"cmake",
|
||||
|
|
@ -234,16 +234,16 @@ checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af"
|
|||
|
||||
[[package]]
|
||||
name = "blake3"
|
||||
version = "1.8.3"
|
||||
version = "1.8.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2468ef7d57b3fb7e16b576e8377cdbde2320c60e1491e961d11da40fc4f02a2d"
|
||||
checksum = "4d2d5991425dfd0785aed03aedcf0b321d61975c9b5b3689c774a2610ae0b51e"
|
||||
dependencies = [
|
||||
"arrayref",
|
||||
"arrayvec",
|
||||
"cc",
|
||||
"cfg-if",
|
||||
"constant_time_eq",
|
||||
"cpufeatures 0.2.17",
|
||||
"cpufeatures 0.3.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
|
@ -299,9 +299,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.2.57"
|
||||
version = "1.2.58"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7a0dd1ca384932ff3641c8718a02769f1698e7563dc6974ffd03346116310423"
|
||||
checksum = "e1e928d4b69e3077709075a938a05ffbedfa53a84c8f766efbf8220bb1ff60e1"
|
||||
dependencies = [
|
||||
"find-msvc-tools",
|
||||
"jobserver",
|
||||
|
|
@ -441,9 +441,9 @@ checksum = "c8d4a3bb8b1e0c1050499d1815f5ab16d04f0959b233085fb31653fbfc9d98f9"
|
|||
|
||||
[[package]]
|
||||
name = "cmake"
|
||||
version = "0.1.57"
|
||||
version = "0.1.58"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d"
|
||||
checksum = "c0f78a02292a74a88ac736019ab962ece0bc380e3f977bf72e376c5d78ff0678"
|
||||
dependencies = [
|
||||
"cc",
|
||||
]
|
||||
|
|
@ -1191,9 +1191,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
|
|||
|
||||
[[package]]
|
||||
name = "hyper"
|
||||
version = "1.8.1"
|
||||
version = "1.9.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11"
|
||||
checksum = "6299f016b246a94207e63da54dbe807655bf9e00044f73ded42c3ac5305fbcca"
|
||||
dependencies = [
|
||||
"atomic-waker",
|
||||
"bytes",
|
||||
|
|
@ -1206,7 +1206,6 @@ dependencies = [
|
|||
"httpdate",
|
||||
"itoa",
|
||||
"pin-project-lite",
|
||||
"pin-utils",
|
||||
"smallvec",
|
||||
"tokio",
|
||||
"want",
|
||||
|
|
@ -1245,7 +1244,7 @@ dependencies = [
|
|||
"libc",
|
||||
"percent-encoding",
|
||||
"pin-project-lite",
|
||||
"socket2 0.6.3",
|
||||
"socket2",
|
||||
"tokio",
|
||||
"tower-service",
|
||||
"tracing",
|
||||
|
|
@ -1277,12 +1276,13 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "icu_collections"
|
||||
version = "2.1.1"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43"
|
||||
checksum = "2984d1cd16c883d7935b9e07e44071dca8d917fd52ecc02c04d5fa0b5a3f191c"
|
||||
dependencies = [
|
||||
"displaydoc",
|
||||
"potential_utf",
|
||||
"utf8_iter",
|
||||
"yoke",
|
||||
"zerofrom",
|
||||
"zerovec",
|
||||
|
|
@ -1290,9 +1290,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "icu_locale_core"
|
||||
version = "2.1.1"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6"
|
||||
checksum = "92219b62b3e2b4d88ac5119f8904c10f8f61bf7e95b640d25ba3075e6cac2c29"
|
||||
dependencies = [
|
||||
"displaydoc",
|
||||
"litemap",
|
||||
|
|
@ -1303,9 +1303,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "icu_normalizer"
|
||||
version = "2.1.1"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599"
|
||||
checksum = "c56e5ee99d6e3d33bd91c5d85458b6005a22140021cc324cea84dd0e72cff3b4"
|
||||
dependencies = [
|
||||
"icu_collections",
|
||||
"icu_normalizer_data",
|
||||
|
|
@ -1317,15 +1317,15 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "icu_normalizer_data"
|
||||
version = "2.1.1"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a"
|
||||
checksum = "da3be0ae77ea334f4da67c12f149704f19f81d1adf7c51cf482943e84a2bad38"
|
||||
|
||||
[[package]]
|
||||
name = "icu_properties"
|
||||
version = "2.1.2"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec"
|
||||
checksum = "bee3b67d0ea5c2cca5003417989af8996f8604e34fb9ddf96208a033901e70de"
|
||||
dependencies = [
|
||||
"icu_collections",
|
||||
"icu_locale_core",
|
||||
|
|
@ -1337,15 +1337,15 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "icu_properties_data"
|
||||
version = "2.1.2"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af"
|
||||
checksum = "8e2bbb201e0c04f7b4b3e14382af113e17ba4f63e2c9d2ee626b720cbce54a14"
|
||||
|
||||
[[package]]
|
||||
name = "icu_provider"
|
||||
version = "2.1.1"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614"
|
||||
checksum = "139c4cf31c8b5f33d7e199446eff9c1e02decfc2f0eec2c8d71f65befa45b421"
|
||||
dependencies = [
|
||||
"displaydoc",
|
||||
"icu_locale_core",
|
||||
|
|
@ -1427,14 +1427,15 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "ipconfig"
|
||||
version = "0.3.2"
|
||||
version = "0.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f"
|
||||
checksum = "4d40460c0ce33d6ce4b0630ad68ff63d6661961c48b6dba35e5a4d81cfb48222"
|
||||
dependencies = [
|
||||
"socket2 0.5.10",
|
||||
"socket2",
|
||||
"widestring",
|
||||
"windows-sys 0.48.0",
|
||||
"winreg",
|
||||
"windows-registry",
|
||||
"windows-result",
|
||||
"windows-sys 0.61.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
|
@ -1454,9 +1455,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "iri-string"
|
||||
version = "0.7.10"
|
||||
version = "0.7.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a"
|
||||
checksum = "25e659a4bb38e810ebc252e53b5814ff908a8c58c2a9ce2fae1bbec24cbf4e20"
|
||||
dependencies = [
|
||||
"memchr",
|
||||
"serde",
|
||||
|
|
@ -1486,7 +1487,7 @@ dependencies = [
|
|||
"cesu8",
|
||||
"cfg-if",
|
||||
"combine",
|
||||
"jni-sys",
|
||||
"jni-sys 0.3.1",
|
||||
"log",
|
||||
"thiserror 1.0.69",
|
||||
"walkdir",
|
||||
|
|
@ -1495,9 +1496,31 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "jni-sys"
|
||||
version = "0.3.0"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130"
|
||||
checksum = "41a652e1f9b6e0275df1f15b32661cf0d4b78d4d87ddec5e0c3c20f097433258"
|
||||
dependencies = [
|
||||
"jni-sys 0.4.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "jni-sys"
|
||||
version = "0.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c6377a88cb3910bee9b0fa88d4f42e1d2da8e79915598f65fb0c7ee14c878af2"
|
||||
dependencies = [
|
||||
"jni-sys-macros",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "jni-sys-macros"
|
||||
version = "0.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "38c0b942f458fe50cdac086d2f946512305e5631e720728f2a61aabcd47a6264"
|
||||
dependencies = [
|
||||
"quote",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "jobserver"
|
||||
|
|
@ -1511,10 +1534,12 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "js-sys"
|
||||
version = "0.3.91"
|
||||
version = "0.3.94"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b49715b7073f385ba4bc528e5747d02e66cb39c6146efb66b781f131f0fb399c"
|
||||
checksum = "2e04e2ef80ce82e13552136fabeef8a5ed1f985a96805761cbb9a2c34e7664d9"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"futures-util",
|
||||
"once_cell",
|
||||
"wasm-bindgen",
|
||||
]
|
||||
|
|
@ -1553,9 +1578,9 @@ checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2"
|
|||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.183"
|
||||
version = "0.2.184"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b5b646652bf6661599e1da8901b3b9522896f01e736bad5f723fe7a3a27f899d"
|
||||
checksum = "48f5d2a454e16a5ea0f4ced81bd44e4cfc7bd3a507b61887c99fd3538b28e4af"
|
||||
|
||||
[[package]]
|
||||
name = "linux-raw-sys"
|
||||
|
|
@ -1565,9 +1590,9 @@ checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53"
|
|||
|
||||
[[package]]
|
||||
name = "litemap"
|
||||
version = "0.8.1"
|
||||
version = "0.8.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77"
|
||||
checksum = "92daf443525c4cce67b150400bc2316076100ce0b3686209eb8cf3c31612e6f0"
|
||||
|
||||
[[package]]
|
||||
name = "lock_api"
|
||||
|
|
@ -1647,9 +1672,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
|
|||
|
||||
[[package]]
|
||||
name = "mio"
|
||||
version = "1.1.1"
|
||||
version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc"
|
||||
checksum = "50b7e5b27aa02a74bac8c3f23f448f8d87ff11f92d3aac1a6ed369ee08cc56c1"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"log",
|
||||
|
|
@ -1659,9 +1684,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "moka"
|
||||
version = "0.12.14"
|
||||
version = "0.12.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "85f8024e1c8e71c778968af91d43700ce1d11b219d127d79fb2934153b82b42b"
|
||||
checksum = "957228ad12042ee839f93c8f257b62b4c0ab5eaae1d4fa60de53b27c9d7c5046"
|
||||
dependencies = [
|
||||
"crossbeam-channel",
|
||||
"crossbeam-epoch",
|
||||
|
|
@ -1745,9 +1770,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "num-conv"
|
||||
version = "0.2.0"
|
||||
version = "0.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050"
|
||||
checksum = "c6673768db2d862beb9b39a78fdcb1a69439615d5794a1be50caa9bc92c81967"
|
||||
|
||||
[[package]]
|
||||
name = "num-integer"
|
||||
|
|
@ -1869,12 +1894,6 @@ version = "0.2.17"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a89322df9ebe1c1578d689c92318e070967d1042b512afbe49518723f4e6d5cd"
|
||||
|
||||
[[package]]
|
||||
name = "pin-utils"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
|
||||
|
||||
[[package]]
|
||||
name = "pkcs8"
|
||||
version = "0.10.2"
|
||||
|
|
@ -1944,9 +1963,9 @@ checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49"
|
|||
|
||||
[[package]]
|
||||
name = "potential_utf"
|
||||
version = "0.1.4"
|
||||
version = "0.1.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77"
|
||||
checksum = "0103b1cef7ec0cf76490e969665504990193874ea05c85ff9bab8b911d0a0564"
|
||||
dependencies = [
|
||||
"zerovec",
|
||||
]
|
||||
|
|
@ -1987,9 +2006,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "proptest"
|
||||
version = "1.10.0"
|
||||
version = "1.11.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "37566cb3fdacef14c0737f9546df7cfeadbfbc9fef10991038bf5015d0c80532"
|
||||
checksum = "4b45fcc2344c680f5025fe57779faef368840d0bd1f42f216291f0dc4ace4744"
|
||||
dependencies = [
|
||||
"bit-set",
|
||||
"bit-vec",
|
||||
|
|
@ -2023,7 +2042,7 @@ dependencies = [
|
|||
"quinn-udp",
|
||||
"rustc-hash",
|
||||
"rustls",
|
||||
"socket2 0.6.3",
|
||||
"socket2",
|
||||
"thiserror 2.0.18",
|
||||
"tokio",
|
||||
"tracing",
|
||||
|
|
@ -2061,7 +2080,7 @@ dependencies = [
|
|||
"cfg_aliases",
|
||||
"libc",
|
||||
"once_cell",
|
||||
"socket2 0.6.3",
|
||||
"socket2",
|
||||
"tracing",
|
||||
"windows-sys 0.60.2",
|
||||
]
|
||||
|
|
@ -2279,9 +2298,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "rustc-hash"
|
||||
version = "2.1.1"
|
||||
version = "2.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d"
|
||||
checksum = "94300abf3f1ae2e2b8ffb7b58043de3d399c73fa6f4b73826402a5c457614dbe"
|
||||
|
||||
[[package]]
|
||||
name = "rustc_version"
|
||||
|
|
@ -2533,9 +2552,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "serde_spanned"
|
||||
version = "1.0.4"
|
||||
version = "1.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f8bbf91e5a4d6315eee45e704372590b30e260ee83af6639d64557f51b067776"
|
||||
checksum = "6662b5879511e06e8999a8a235d848113e942c9124f211511b16466ee2995f26"
|
||||
dependencies = [
|
||||
"serde_core",
|
||||
]
|
||||
|
|
@ -2603,7 +2622,7 @@ dependencies = [
|
|||
"serde_json",
|
||||
"serde_urlencoded",
|
||||
"shadowsocks-crypto",
|
||||
"socket2 0.6.3",
|
||||
"socket2",
|
||||
"spin",
|
||||
"thiserror 2.0.18",
|
||||
"tokio",
|
||||
|
|
@ -2675,16 +2694,6 @@ version = "1.15.1"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03"
|
||||
|
||||
[[package]]
|
||||
name = "socket2"
|
||||
version = "0.5.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "socket2"
|
||||
version = "0.6.3"
|
||||
|
|
@ -2771,7 +2780,7 @@ checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417"
|
|||
|
||||
[[package]]
|
||||
name = "telemt"
|
||||
version = "3.3.29"
|
||||
version = "3.3.39"
|
||||
dependencies = [
|
||||
"aes",
|
||||
"anyhow",
|
||||
|
|
@ -2812,7 +2821,7 @@ dependencies = [
|
|||
"sha1",
|
||||
"sha2",
|
||||
"shadowsocks",
|
||||
"socket2 0.6.3",
|
||||
"socket2",
|
||||
"static_assertions",
|
||||
"subtle",
|
||||
"thiserror 2.0.18",
|
||||
|
|
@ -2822,6 +2831,7 @@ dependencies = [
|
|||
"tokio-util",
|
||||
"toml",
|
||||
"tracing",
|
||||
"tracing-appender",
|
||||
"tracing-subscriber",
|
||||
"url",
|
||||
"webpki-roots",
|
||||
|
|
@ -2925,9 +2935,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "tinystr"
|
||||
version = "0.8.2"
|
||||
version = "0.8.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869"
|
||||
checksum = "c8323304221c2a851516f22236c5722a72eaa19749016521d6dff0824447d96d"
|
||||
dependencies = [
|
||||
"displaydoc",
|
||||
"zerovec",
|
||||
|
|
@ -2970,7 +2980,7 @@ dependencies = [
|
|||
"parking_lot",
|
||||
"pin-project-lite",
|
||||
"signal-hook-registry",
|
||||
"socket2 0.6.3",
|
||||
"socket2",
|
||||
"tokio-macros",
|
||||
"tracing",
|
||||
"windows-sys 0.61.2",
|
||||
|
|
@ -3031,7 +3041,7 @@ dependencies = [
|
|||
"log",
|
||||
"once_cell",
|
||||
"pin-project",
|
||||
"socket2 0.6.3",
|
||||
"socket2",
|
||||
"tokio",
|
||||
"windows-sys 0.60.2",
|
||||
]
|
||||
|
|
@ -3055,9 +3065,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "toml"
|
||||
version = "1.0.7+spec-1.1.0"
|
||||
version = "1.1.2+spec-1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dd28d57d8a6f6e458bc0b8784f8fdcc4b99a437936056fa122cb234f18656a96"
|
||||
checksum = "81f3d15e84cbcd896376e6730314d59fb5a87f31e4b038454184435cd57defee"
|
||||
dependencies = [
|
||||
"indexmap",
|
||||
"serde_core",
|
||||
|
|
@ -3070,27 +3080,27 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "toml_datetime"
|
||||
version = "1.0.1+spec-1.1.0"
|
||||
version = "1.1.1+spec-1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9b320e741db58cac564e26c607d3cc1fdc4a88fd36c879568c07856ed83ff3e9"
|
||||
checksum = "3165f65f62e28e0115a00b2ebdd37eb6f3b641855f9d636d3cd4103767159ad7"
|
||||
dependencies = [
|
||||
"serde_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml_parser"
|
||||
version = "1.0.10+spec-1.1.0"
|
||||
version = "1.1.2+spec-1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7df25b4befd31c4816df190124375d5a20c6b6921e2cad937316de3fccd63420"
|
||||
checksum = "a2abe9b86193656635d2411dc43050282ca48aa31c2451210f4202550afb7526"
|
||||
dependencies = [
|
||||
"winnow",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml_writer"
|
||||
version = "1.0.7+spec-1.1.0"
|
||||
version = "1.1.1+spec-1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f17aaa1c6e3dc22b1da4b6bba97d066e354c7945cac2f7852d4e4e7ca7a6b56d"
|
||||
checksum = "756daf9b1013ebe47a8776667b466417e2d4c5679d441c26230efd9ef78692db"
|
||||
|
||||
[[package]]
|
||||
name = "tower"
|
||||
|
|
@ -3148,6 +3158,18 @@ dependencies = [
|
|||
"tracing-core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-appender"
|
||||
version = "0.2.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "786d480bce6247ab75f005b14ae1624ad978d3029d9113f0a22fa1ac773faeaf"
|
||||
dependencies = [
|
||||
"crossbeam-channel",
|
||||
"thiserror 2.0.18",
|
||||
"time",
|
||||
"tracing-subscriber",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-attributes"
|
||||
version = "0.1.31"
|
||||
|
|
@ -3275,9 +3297,9 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
|
|||
|
||||
[[package]]
|
||||
name = "uuid"
|
||||
version = "1.22.0"
|
||||
version = "1.23.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a68d3c8f01c0cfa54a75291d83601161799e4a89a39e0929f4b0354d88757a37"
|
||||
checksum = "5ac8b6f42ead25368cf5b098aeb3dc8a1a2c05a3eee8a9a1a68c640edbfc79d9"
|
||||
dependencies = [
|
||||
"getrandom 0.4.2",
|
||||
"js-sys",
|
||||
|
|
@ -3350,9 +3372,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "wasm-bindgen"
|
||||
version = "0.2.114"
|
||||
version = "0.2.117"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6532f9a5c1ece3798cb1c2cfdba640b9b3ba884f5db45973a6f442510a87d38e"
|
||||
checksum = "0551fc1bb415591e3372d0bc4780db7e587d84e2a7e79da121051c5c4b89d0b0"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"once_cell",
|
||||
|
|
@ -3363,23 +3385,19 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-futures"
|
||||
version = "0.4.64"
|
||||
version = "0.4.67"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e9c5522b3a28661442748e09d40924dfb9ca614b21c00d3fd135720e48b67db8"
|
||||
checksum = "03623de6905b7206edd0a75f69f747f134b7f0a2323392d664448bf2d3c5d87e"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"futures-util",
|
||||
"js-sys",
|
||||
"once_cell",
|
||||
"wasm-bindgen",
|
||||
"web-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-macro"
|
||||
version = "0.2.114"
|
||||
version = "0.2.117"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "18a2d50fcf105fb33bb15f00e7a77b772945a2ee45dcf454961fd843e74c18e6"
|
||||
checksum = "7fbdf9a35adf44786aecd5ff89b4563a90325f9da0923236f6104e603c7e86be"
|
||||
dependencies = [
|
||||
"quote",
|
||||
"wasm-bindgen-macro-support",
|
||||
|
|
@ -3387,9 +3405,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-macro-support"
|
||||
version = "0.2.114"
|
||||
version = "0.2.117"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "03ce4caeaac547cdf713d280eda22a730824dd11e6b8c3ca9e42247b25c631e3"
|
||||
checksum = "dca9693ef2bab6d4e6707234500350d8dad079eb508dca05530c85dc3a529ff2"
|
||||
dependencies = [
|
||||
"bumpalo",
|
||||
"proc-macro2",
|
||||
|
|
@ -3400,9 +3418,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-shared"
|
||||
version = "0.2.114"
|
||||
version = "0.2.117"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "75a326b8c223ee17883a4251907455a2431acc2791c98c26279376490c378c16"
|
||||
checksum = "39129a682a6d2d841b6c429d0c51e5cb0ed1a03829d8b3d1e69a011e62cb3d3b"
|
||||
dependencies = [
|
||||
"unicode-ident",
|
||||
]
|
||||
|
|
@ -3443,9 +3461,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "web-sys"
|
||||
version = "0.3.91"
|
||||
version = "0.3.94"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "854ba17bb104abfb26ba36da9729addc7ce7f06f5c0f90f3c391f8461cca21f9"
|
||||
checksum = "cd70027e39b12f0849461e08ffc50b9cd7688d942c1c8e3c7b22273236b4dd0a"
|
||||
dependencies = [
|
||||
"js-sys",
|
||||
"wasm-bindgen",
|
||||
|
|
@ -3557,6 +3575,17 @@ version = "0.2.1"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5"
|
||||
|
||||
[[package]]
|
||||
name = "windows-registry"
|
||||
version = "0.6.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "02752bf7fbdcce7f2a27a742f798510f3e5ad88dbe84871e5168e2120c3d5720"
|
||||
dependencies = [
|
||||
"windows-link",
|
||||
"windows-result",
|
||||
"windows-strings",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-result"
|
||||
version = "0.4.1"
|
||||
|
|
@ -3584,15 +3613,6 @@ dependencies = [
|
|||
"windows-targets 0.42.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-sys"
|
||||
version = "0.48.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
|
||||
dependencies = [
|
||||
"windows-targets 0.48.5",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-sys"
|
||||
version = "0.52.0"
|
||||
|
|
@ -3635,21 +3655,6 @@ dependencies = [
|
|||
"windows_x86_64_msvc 0.42.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-targets"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
|
||||
dependencies = [
|
||||
"windows_aarch64_gnullvm 0.48.5",
|
||||
"windows_aarch64_msvc 0.48.5",
|
||||
"windows_i686_gnu 0.48.5",
|
||||
"windows_i686_msvc 0.48.5",
|
||||
"windows_x86_64_gnu 0.48.5",
|
||||
"windows_x86_64_gnullvm 0.48.5",
|
||||
"windows_x86_64_msvc 0.48.5",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-targets"
|
||||
version = "0.52.6"
|
||||
|
|
@ -3689,12 +3694,6 @@ version = "0.42.2"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_gnullvm"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_gnullvm"
|
||||
version = "0.52.6"
|
||||
|
|
@ -3713,12 +3712,6 @@ version = "0.42.2"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_msvc"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_msvc"
|
||||
version = "0.52.6"
|
||||
|
|
@ -3737,12 +3730,6 @@ version = "0.42.2"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnu"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnu"
|
||||
version = "0.52.6"
|
||||
|
|
@ -3773,12 +3760,6 @@ version = "0.42.2"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_msvc"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_msvc"
|
||||
version = "0.52.6"
|
||||
|
|
@ -3797,12 +3778,6 @@ version = "0.42.2"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnu"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnu"
|
||||
version = "0.52.6"
|
||||
|
|
@ -3821,12 +3796,6 @@ version = "0.42.2"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnullvm"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnullvm"
|
||||
version = "0.52.6"
|
||||
|
|
@ -3845,12 +3814,6 @@ version = "0.42.2"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_msvc"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_msvc"
|
||||
version = "0.52.6"
|
||||
|
|
@ -3865,19 +3828,9 @@ checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650"
|
|||
|
||||
[[package]]
|
||||
name = "winnow"
|
||||
version = "1.0.0"
|
||||
version = "1.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a90e88e4667264a994d34e6d1ab2d26d398dcdca8b7f52bec8668957517fc7d8"
|
||||
|
||||
[[package]]
|
||||
name = "winreg"
|
||||
version = "0.50.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"windows-sys 0.48.0",
|
||||
]
|
||||
checksum = "09dac053f1cd375980747450bfc7250c264eaae0583872e845c0c7cd578872b5"
|
||||
|
||||
[[package]]
|
||||
name = "wit-bindgen"
|
||||
|
|
@ -4004,9 +3957,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "yoke"
|
||||
version = "0.8.1"
|
||||
version = "0.8.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954"
|
||||
checksum = "abe8c5fda708d9ca3df187cae8bfb9ceda00dd96231bed36e445a1a48e66f9ca"
|
||||
dependencies = [
|
||||
"stable_deref_trait",
|
||||
"yoke-derive",
|
||||
|
|
@ -4015,9 +3968,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "yoke-derive"
|
||||
version = "0.8.1"
|
||||
version = "0.8.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d"
|
||||
checksum = "de844c262c8848816172cef550288e7dc6c7b7814b4ee56b3e1553f275f1858e"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
|
|
@ -4027,18 +3980,18 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "zerocopy"
|
||||
version = "0.8.47"
|
||||
version = "0.8.48"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "efbb2a062be311f2ba113ce66f697a4dc589f85e78a4aea276200804cea0ed87"
|
||||
checksum = "eed437bf9d6692032087e337407a86f04cd8d6a16a37199ed57949d415bd68e9"
|
||||
dependencies = [
|
||||
"zerocopy-derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zerocopy-derive"
|
||||
version = "0.8.47"
|
||||
version = "0.8.48"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0e8bc7269b54418e7aeeef514aa68f8690b8c0489a06b0136e5f57c4c5ccab89"
|
||||
checksum = "70e3cd084b1788766f53af483dd21f93881ff30d7320490ec3ef7526d203bad4"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
|
|
@ -4047,18 +4000,18 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "zerofrom"
|
||||
version = "0.1.6"
|
||||
version = "0.1.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5"
|
||||
checksum = "69faa1f2a1ea75661980b013019ed6687ed0e83d069bc1114e2cc74c6c04c4df"
|
||||
dependencies = [
|
||||
"zerofrom-derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zerofrom-derive"
|
||||
version = "0.1.6"
|
||||
version = "0.1.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502"
|
||||
checksum = "11532158c46691caf0f2593ea8358fed6bbf68a0315e80aae9bd41fbade684a1"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
|
|
@ -4088,9 +4041,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "zerotrie"
|
||||
version = "0.2.3"
|
||||
version = "0.2.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851"
|
||||
checksum = "0f9152d31db0792fa83f70fb2f83148effb5c1f5b8c7686c3459e361d9bc20bf"
|
||||
dependencies = [
|
||||
"displaydoc",
|
||||
"yoke",
|
||||
|
|
@ -4099,9 +4052,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "zerovec"
|
||||
version = "0.11.5"
|
||||
version = "0.11.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002"
|
||||
checksum = "90f911cbc359ab6af17377d242225f4d75119aec87ea711a880987b18cd7b239"
|
||||
dependencies = [
|
||||
"yoke",
|
||||
"zerofrom",
|
||||
|
|
@ -4110,9 +4063,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "zerovec-derive"
|
||||
version = "0.11.2"
|
||||
version = "0.11.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3"
|
||||
checksum = "625dc425cab0dca6dc3c3319506e6593dcb08a9f387ea3b284dbd52a92c40555"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
|
|
|
|||
28
Cargo.toml
28
Cargo.toml
|
|
@ -1,8 +1,11 @@
|
|||
[package]
|
||||
name = "telemt"
|
||||
version = "3.3.29"
|
||||
version = "3.3.39"
|
||||
edition = "2024"
|
||||
|
||||
[features]
|
||||
redteam_offline_expected_fail = []
|
||||
|
||||
[dependencies]
|
||||
# C
|
||||
libc = "0.2"
|
||||
|
|
@ -27,7 +30,13 @@ static_assertions = "1.1"
|
|||
|
||||
# Network
|
||||
socket2 = { version = "0.6", features = ["all"] }
|
||||
nix = { version = "0.31", default-features = false, features = ["net", "fs"] }
|
||||
nix = { version = "0.31", default-features = false, features = [
|
||||
"net",
|
||||
"user",
|
||||
"process",
|
||||
"fs",
|
||||
"signal",
|
||||
] }
|
||||
shadowsocks = { version = "1.24", features = ["aead-cipher-2022"] }
|
||||
|
||||
# Serialization
|
||||
|
|
@ -41,6 +50,7 @@ bytes = "1.9"
|
|||
thiserror = "2.0"
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
tracing-appender = "0.2"
|
||||
parking_lot = "0.12"
|
||||
dashmap = "6.1"
|
||||
arc-swap = "1.7"
|
||||
|
|
@ -65,8 +75,14 @@ hyper = { version = "1", features = ["server", "http1"] }
|
|||
hyper-util = { version = "0.1", features = ["tokio", "server-auto"] }
|
||||
http-body-util = "0.1"
|
||||
httpdate = "1.0"
|
||||
tokio-rustls = { version = "0.26", default-features = false, features = ["tls12"] }
|
||||
rustls = { version = "0.23", default-features = false, features = ["std", "tls12", "ring"] }
|
||||
tokio-rustls = { version = "0.26", default-features = false, features = [
|
||||
"tls12",
|
||||
] }
|
||||
rustls = { version = "0.23", default-features = false, features = [
|
||||
"std",
|
||||
"tls12",
|
||||
"ring",
|
||||
] }
|
||||
webpki-roots = "1.0"
|
||||
|
||||
[dev-dependencies]
|
||||
|
|
@ -80,4 +96,6 @@ name = "crypto_bench"
|
|||
harness = false
|
||||
|
||||
[profile.release]
|
||||
lto = "thin"
|
||||
lto = "fat"
|
||||
codegen-units = 1
|
||||
|
||||
|
|
|
|||
115
Dockerfile
115
Dockerfile
|
|
@ -1,97 +1,98 @@
|
|||
# syntax=docker/dockerfile:1
|
||||
|
||||
# ==========================
|
||||
# Stage 1: Build
|
||||
# ==========================
|
||||
FROM rust:1.88-slim-bookworm AS builder
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
pkg-config \
|
||||
ca-certificates \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /build
|
||||
|
||||
# Depcache
|
||||
COPY Cargo.toml Cargo.lock* ./
|
||||
RUN mkdir src && echo 'fn main() {}' > src/main.rs && \
|
||||
cargo build --release 2>/dev/null || true && \
|
||||
rm -rf src
|
||||
|
||||
# Build
|
||||
COPY . .
|
||||
RUN cargo build --release && strip target/release/telemt
|
||||
ARG TELEMT_REPOSITORY=telemt/telemt
|
||||
ARG TELEMT_VERSION=latest
|
||||
|
||||
# ==========================
|
||||
# Stage 2: Compress (strip + UPX)
|
||||
# Minimal Image
|
||||
# ==========================
|
||||
FROM debian:12-slim AS minimal
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
upx \
|
||||
ARG TARGETARCH
|
||||
ARG TELEMT_REPOSITORY
|
||||
ARG TELEMT_VERSION
|
||||
|
||||
RUN set -eux; \
|
||||
apt-get update; \
|
||||
apt-get install -y --no-install-recommends \
|
||||
binutils \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
ca-certificates \
|
||||
curl \
|
||||
tar; \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
COPY --from=builder /build/target/release/telemt /telemt
|
||||
|
||||
RUN strip /telemt || true
|
||||
RUN upx --best --lzma /telemt || true
|
||||
RUN set -eux; \
|
||||
case "${TARGETARCH}" in \
|
||||
amd64) ASSET="telemt-x86_64-linux-musl.tar.gz" ;; \
|
||||
arm64) ASSET="telemt-aarch64-linux-musl.tar.gz" ;; \
|
||||
*) echo "Unsupported TARGETARCH: ${TARGETARCH}" >&2; exit 1 ;; \
|
||||
esac; \
|
||||
VERSION="${TELEMT_VERSION#refs/tags/}"; \
|
||||
if [ -z "${VERSION}" ] || [ "${VERSION}" = "latest" ]; then \
|
||||
BASE_URL="https://github.com/${TELEMT_REPOSITORY}/releases/latest/download"; \
|
||||
else \
|
||||
BASE_URL="https://github.com/${TELEMT_REPOSITORY}/releases/download/${VERSION}"; \
|
||||
fi; \
|
||||
curl -fL \
|
||||
--retry 5 \
|
||||
--retry-delay 3 \
|
||||
--connect-timeout 10 \
|
||||
--max-time 120 \
|
||||
-o "/tmp/${ASSET}" \
|
||||
"${BASE_URL}/${ASSET}"; \
|
||||
curl -fL \
|
||||
--retry 5 \
|
||||
--retry-delay 3 \
|
||||
--connect-timeout 10 \
|
||||
--max-time 120 \
|
||||
-o "/tmp/${ASSET}.sha256" \
|
||||
"${BASE_URL}/${ASSET}.sha256"; \
|
||||
cd /tmp; \
|
||||
sha256sum -c "${ASSET}.sha256"; \
|
||||
tar -xzf "${ASSET}" -C /tmp; \
|
||||
test -f /tmp/telemt; \
|
||||
install -m 0755 /tmp/telemt /telemt; \
|
||||
strip --strip-unneeded /telemt || true; \
|
||||
rm -f "/tmp/${ASSET}" "/tmp/${ASSET}.sha256" /tmp/telemt
|
||||
|
||||
# ==========================
|
||||
# Stage 3: Debug base
|
||||
# Debug Image
|
||||
# ==========================
|
||||
FROM debian:12-slim AS debug-base
|
||||
FROM debian:12-slim AS debug
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
RUN set -eux; \
|
||||
apt-get update; \
|
||||
apt-get install -y --no-install-recommends \
|
||||
ca-certificates \
|
||||
tzdata \
|
||||
curl \
|
||||
iproute2 \
|
||||
busybox \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# ==========================
|
||||
# Stage 4: Debug image
|
||||
# ==========================
|
||||
FROM debug-base AS debug
|
||||
busybox; \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY --from=minimal /telemt /app/telemt
|
||||
COPY config.toml /app/config.toml
|
||||
|
||||
USER root
|
||||
|
||||
EXPOSE 443
|
||||
EXPOSE 9090
|
||||
EXPOSE 9091
|
||||
EXPOSE 443 9090 9091
|
||||
|
||||
ENTRYPOINT ["/app/telemt"]
|
||||
CMD ["config.toml"]
|
||||
|
||||
# ==========================
|
||||
# Stage 5: Production (distroless)
|
||||
# Production Distroless on MUSL
|
||||
# ==========================
|
||||
FROM gcr.io/distroless/base-debian12 AS prod
|
||||
FROM gcr.io/distroless/static-debian12 AS prod
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY --from=minimal /telemt /app/telemt
|
||||
COPY config.toml /app/config.toml
|
||||
|
||||
# TLS + timezone + shell
|
||||
COPY --from=debug-base /etc/ssl/certs /etc/ssl/certs
|
||||
COPY --from=debug-base /usr/share/zoneinfo /usr/share/zoneinfo
|
||||
COPY --from=debug-base /bin/busybox /bin/busybox
|
||||
|
||||
RUN ["/bin/busybox", "--install", "-s", "/bin"]
|
||||
|
||||
# distroless user
|
||||
USER nonroot:nonroot
|
||||
|
||||
EXPOSE 443
|
||||
EXPOSE 9090
|
||||
EXPOSE 9091
|
||||
EXPOSE 443 9090 9091
|
||||
|
||||
ENTRYPOINT ["/app/telemt"]
|
||||
CMD ["config.toml"]
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
16
LICENSE
16
LICENSE
|
|
@ -1,4 +1,4 @@
|
|||
###### TELEMT Public License 3 ######
|
||||
######## TELEMT LICENSE 3.3 #########
|
||||
##### Copyright (c) 2026 Telemt #####
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
|
|
@ -14,11 +14,15 @@ are preserved and complied with.
|
|||
The canonical version of this License is the English version.
|
||||
Official translations are provided for informational purposes only
|
||||
and for convenience, and do not have legal force. In case of any
|
||||
discrepancy, the English version of this License shall prevail.
|
||||
Available versions:
|
||||
- English in Markdown: docs/LICENSE/LICENSE.md
|
||||
- German: docs/LICENSE/LICENSE.de.md
|
||||
- Russian: docs/LICENSE/LICENSE.ru.md
|
||||
discrepancy, the English version of this License shall prevail
|
||||
|
||||
/----------------------------------------------------------\
|
||||
| Language | Location |
|
||||
|-------------|--------------------------------------------|
|
||||
| English | docs/LICENSE/TELEMT-LICENSE.en.md |
|
||||
| German | docs/LICENSE/TELEMT-LICENSE.de.md |
|
||||
| Russian | docs/LICENSE/TELEMT-LICENSE.ru.md |
|
||||
\----------------------------------------------------------/
|
||||
|
||||
### License Versioning Policy
|
||||
|
||||
|
|
|
|||
238
README.md
238
README.md
|
|
@ -2,234 +2,60 @@
|
|||
|
||||
***Löst Probleme, bevor andere überhaupt wissen, dass sie existieren*** / ***It solves problems before others even realize they exist***
|
||||
|
||||
> [!NOTE]
|
||||
>
|
||||
> Fixed TLS ClientHello is now available in **Telegram Desktop** starting from version **6.7.2**: to work with EE-MTProxy, please update your client.
|
||||
>
|
||||
> Fixed TLS ClientHello is now available in **Telegram Android Client** starting from version **12.6.4**; **official release for iOS is "work in progress"**.
|
||||
|
||||
<p align="center">
|
||||
<a href="https://t.me/telemtrs">
|
||||
<img src="docs/assets/telegram_button.png" alt="Join us in Telegram" width="200" />
|
||||
</a>
|
||||
</p>
|
||||
|
||||
**Telemt** is a fast, secure, and feature-rich server written in Rust: it fully implements the official Telegram proxy algo and adds many production-ready improvements such as:
|
||||
- [ME Pool + Reader/Writer + Registry + Refill + Adaptive Floor + Trio-State + Generation Lifecycle](https://github.com/telemt/telemt/blob/main/docs/model/MODEL.en.md)
|
||||
- [Full-covered API w/ management](https://github.com/telemt/telemt/blob/main/docs/API.md)
|
||||
- Anti-Replay on Sliding Window
|
||||
- Prometheus-format Metrics
|
||||
- TLS-Fronting and TCP-Splicing for masking from "prying" eyes
|
||||
- [ME Pool + Reader/Writer + Registry + Refill + Adaptive Floor + Trio-State + Generation Lifecycle](https://github.com/telemt/telemt/blob/main/docs/model/MODEL.en.md);
|
||||
- [Full-covered API w/ management](https://github.com/telemt/telemt/blob/main/docs/API.md);
|
||||
- Anti-Replay on Sliding Window;
|
||||
- Prometheus-format Metrics;
|
||||
- TLS-Fronting and TCP-Splicing for masking from "prying" eyes.
|
||||
|
||||
[**Telemt Chat in Telegram**](https://t.me/telemtrs)
|
||||
|
||||
## NEWS and EMERGENCY
|
||||
### ✈️ Telemt 3 is released!
|
||||
<table>
|
||||
<tr>
|
||||
<td width="50%" valign="top">
|
||||
|
||||
### 🇷🇺 RU
|
||||
|
||||
#### О релизах
|
||||
|
||||
[3.3.27](https://github.com/telemt/telemt/releases/tag/3.3.27) даёт баланс стабильности и передового функционала, а так же последние исправления по безопасности и багам
|
||||
|
||||
Будем рады вашему фидбеку и предложениям по улучшению — особенно в части **API**, **статистики**, **UX**
|
||||
|
||||
---
|
||||
|
||||
Если у вас есть компетенции в:
|
||||
|
||||
- Асинхронных сетевых приложениях
|
||||
- Анализе трафика
|
||||
- Реверс-инжиниринге
|
||||
- Сетевых расследованиях
|
||||
|
||||
Мы открыты к архитектурным предложениям, идеям и pull requests
|
||||
</td>
|
||||
<td width="50%" valign="top">
|
||||
|
||||
### 🇬🇧 EN
|
||||
|
||||
#### About releases
|
||||
|
||||
[3.3.27](https://github.com/telemt/telemt/releases/tag/3.3.27) provides a balance of stability and advanced functionality, as well as the latest security and bug fixes
|
||||
|
||||
We are looking forward to your feedback and improvement proposals — especially regarding **API**, **statistics**, **UX**
|
||||
|
||||
---
|
||||
|
||||
If you have expertise in:
|
||||
|
||||
- Asynchronous network applications
|
||||
- Traffic analysis
|
||||
- Reverse engineering
|
||||
- Network forensics
|
||||
|
||||
We welcome ideas, architectural feedback, and pull requests.
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
# Features
|
||||
💥 The configuration structure has changed since version 1.1.0.0. change it in your environment!
|
||||

|
||||
|
||||
⚓ Our implementation of **TLS-fronting** is one of the most deeply debugged, focused, advanced and *almost* **"behaviorally consistent to real"**: we are confident we have it right - [see evidence on our validation and traces](#recognizability-for-dpi-and-crawler)
|
||||
|
||||
⚓ Our ***Middle-End Pool*** is fastest by design in standard scenarios, compared to other implementations of connecting to the Middle-End Proxy: non dramatically, but usual
|
||||
|
||||
- Full support for all official MTProto proxy modes:
|
||||
- Classic
|
||||
- Secure - with `dd` prefix
|
||||
- Fake TLS - with `ee` prefix + SNI fronting
|
||||
- Replay attack protection
|
||||
- Optional traffic masking: forward unrecognized connections to a real web server, e.g. GitHub 🤪
|
||||
- Configurable keepalives + timeouts + IPv6 and "Fast Mode"
|
||||
- Graceful shutdown on Ctrl+C
|
||||
- Extensive logging via `trace` and `debug` with `RUST_LOG` method
|
||||
- Classic;
|
||||
- Secure - with `dd` prefix;
|
||||
- Fake TLS - with `ee` prefix + SNI fronting;
|
||||
- Replay attack protection;
|
||||
- Optional traffic masking: forward unrecognized connections to a real web server, e.g. GitHub 🤪;
|
||||
- Configurable keepalives + timeouts + IPv6 and "Fast Mode";
|
||||
- Graceful shutdown on Ctrl+C;
|
||||
- Extensive logging via `trace` and `debug` with `RUST_LOG` method.
|
||||
|
||||
# GOTO
|
||||
- [Quick Start Guide](#quick-start-guide)
|
||||
- [FAQ](#faq)
|
||||
- [Recognizability for DPI and crawler](#recognizability-for-dpi-and-crawler)
|
||||
- [Client WITH secret-key accesses the MTProxy resource:](#client-with-secret-key-accesses-the-mtproxy-resource)
|
||||
- [Client WITHOUT secret-key gets transparent access to the specified resource:](#client-without-secret-key-gets-transparent-access-to-the-specified-resource)
|
||||
- [Telegram Calls via MTProxy](#telegram-calls-via-mtproxy)
|
||||
- [How does DPI see MTProxy TLS?](#how-does-dpi-see-mtproxy-tls)
|
||||
- [Whitelist on IP](#whitelist-on-ip)
|
||||
- [Too many open files](#too-many-open-files)
|
||||
- [Architecture](docs/Architecture)
|
||||
- [Quick Start Guide](#quick-start-guide)
|
||||
- [Config parameters](docs/Config_params)
|
||||
- [Build](#build)
|
||||
- [Why Rust?](#why-rust)
|
||||
- [Issues](#issues)
|
||||
- [Roadmap](#roadmap)
|
||||
|
||||
|
||||
## Quick Start Guide
|
||||
- [Quick Start Guide RU](docs/QUICK_START_GUIDE.ru.md)
|
||||
- [Quick Start Guide EN](docs/QUICK_START_GUIDE.en.md)
|
||||
- [Quick Start Guide RU](docs/Quick_start/QUICK_START_GUIDE.ru.md)
|
||||
- [Quick Start Guide EN](docs/Quick_start/QUICK_START_GUIDE.en.md)
|
||||
|
||||
## FAQ
|
||||
|
||||
- [FAQ RU](docs/FAQ.ru.md)
|
||||
- [FAQ EN](docs/FAQ.en.md)
|
||||
|
||||
### Recognizability for DPI and crawler
|
||||
Since version 1.1.0.0, we have debugged masking perfectly: for all clients without "presenting" a key,
|
||||
we transparently direct traffic to the target host!
|
||||
|
||||
- We consider this a breakthrough aspect, which has no stable analogues today
|
||||
- Based on this: if `telemt` configured correctly, **TLS mode is completely identical to real-life handshake + communication** with a specified host
|
||||
- Here is our evidence:
|
||||
- 212.220.88.77 - "dummy" host, running `telemt`
|
||||
- `petrovich.ru` - `tls` + `masking` host, in HEX: `706574726f766963682e7275`
|
||||
- **No MITM + No Fake Certificates/Crypto** = pure transparent *TCP Splice* to "best" upstream: MTProxy or tls/mask-host:
|
||||
- DPI see legitimate HTTPS to `tls_host`, including *valid chain-of-trust* and entropy
|
||||
- Crawlers completely satisfied receiving responses from `mask_host`
|
||||
#### Client WITH secret-key accesses the MTProxy resource:
|
||||
|
||||
<img width="360" height="439" alt="telemt" src="https://github.com/user-attachments/assets/39352afb-4a11-4ecc-9d91-9e8cfb20607d" />
|
||||
|
||||
#### Client WITHOUT secret-key gets transparent access to the specified resource:
|
||||
- with trusted certificate
|
||||
- with original handshake
|
||||
- with full request-response way
|
||||
- with low-latency overhead
|
||||
```bash
|
||||
root@debian:~/telemt# curl -v -I --resolve petrovich.ru:443:212.220.88.77 https://petrovich.ru/
|
||||
* Added petrovich.ru:443:212.220.88.77 to DNS cache
|
||||
* Hostname petrovich.ru was found in DNS cache
|
||||
* Trying 212.220.88.77:443...
|
||||
* Connected to petrovich.ru (212.220.88.77) port 443 (#0)
|
||||
* ALPN: offers h2,http/1.1
|
||||
* TLSv1.3 (OUT), TLS handshake, Client hello (1):
|
||||
* CAfile: /etc/ssl/certs/ca-certificates.crt
|
||||
* CApath: /etc/ssl/certs
|
||||
* TLSv1.3 (IN), TLS handshake, Server hello (2):
|
||||
* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8):
|
||||
* TLSv1.3 (IN), TLS handshake, Certificate (11):
|
||||
* TLSv1.3 (IN), TLS handshake, CERT verify (15):
|
||||
* TLSv1.3 (IN), TLS handshake, Finished (20):
|
||||
* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1):
|
||||
* TLSv1.3 (OUT), TLS handshake, Finished (20):
|
||||
* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384
|
||||
* ALPN: server did not agree on a protocol. Uses default.
|
||||
* Server certificate:
|
||||
* subject: C=RU; ST=Saint Petersburg; L=Saint Petersburg; O=STD Petrovich; CN=*.petrovich.ru
|
||||
* start date: Jan 28 11:21:01 2025 GMT
|
||||
* expire date: Mar 1 11:21:00 2026 GMT
|
||||
* subjectAltName: host "petrovich.ru" matched cert's "petrovich.ru"
|
||||
* issuer: C=BE; O=GlobalSign nv-sa; CN=GlobalSign RSA OV SSL CA 2018
|
||||
* SSL certificate verify ok.
|
||||
* using HTTP/1.x
|
||||
> HEAD / HTTP/1.1
|
||||
> Host: petrovich.ru
|
||||
> User-Agent: curl/7.88.1
|
||||
> Accept: */*
|
||||
>
|
||||
* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4):
|
||||
* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4):
|
||||
* old SSL session ID is stale, removing
|
||||
< HTTP/1.1 200 OK
|
||||
HTTP/1.1 200 OK
|
||||
< Server: Variti/0.9.3a
|
||||
Server: Variti/0.9.3a
|
||||
< Date: Thu, 01 Jan 2026 00:00:00 GMT
|
||||
Date: Thu, 01 Jan 2026 00:00:00 GMT
|
||||
< Access-Control-Allow-Origin: *
|
||||
Access-Control-Allow-Origin: *
|
||||
< Content-Type: text/html
|
||||
Content-Type: text/html
|
||||
< Cache-Control: no-store
|
||||
Cache-Control: no-store
|
||||
< Expires: Thu, 01 Jan 2026 00:00:00 GMT
|
||||
Expires: Thu, 01 Jan 2026 00:00:00 GMT
|
||||
< Pragma: no-cache
|
||||
Pragma: no-cache
|
||||
< Set-Cookie: ipp_uid=XXXXX/XXXXX/XXXXX==; Expires=Tue, 31 Dec 2040 23:59:59 GMT; Domain=.petrovich.ru; Path=/
|
||||
Set-Cookie: ipp_uid=XXXXX/XXXXX/XXXXX==; Expires=Tue, 31 Dec 2040 23:59:59 GMT; Domain=.petrovich.ru; Path=/
|
||||
< Content-Type: text/html
|
||||
Content-Type: text/html
|
||||
< Content-Length: 31253
|
||||
Content-Length: 31253
|
||||
< Connection: keep-alive
|
||||
Connection: keep-alive
|
||||
< Keep-Alive: timeout=60
|
||||
Keep-Alive: timeout=60
|
||||
|
||||
<
|
||||
* Connection #0 to host petrovich.ru left intact
|
||||
|
||||
```
|
||||
- We challenged ourselves, we kept trying and we didn't only *beat the air*: now, we have something to show you
|
||||
- Don't want to just take our word for it? That's great, and we respect that: you can build your own `telemt`, or download a build and check it right now
|
||||
### Telegram Calls via MTProxy
|
||||
- Telegram architecture **does NOT allow calls via MTProxy**, but only via SOCKS5, which cannot be obfuscated
|
||||
### How does DPI see MTProxy TLS?
|
||||
- DPI sees MTProxy in Fake TLS (ee) mode as TLS 1.3
|
||||
  - the SNI you specify is sent by both the client and the server;
|
||||
- ALPN is similar to HTTP 1.1/2;
|
||||
- high entropy, which is normal for AES-encrypted traffic;
|
||||
### Whitelist on IP
|
||||
- MTProxy cannot work when there is:
|
||||
- no IP connectivity to the target host: Russian Whitelist on Mobile Networks - "Белый список"
|
||||
- OR all TCP traffic is blocked
|
||||
- OR high entropy/encrypted traffic is blocked: content filters at universities and critical infrastructure
|
||||
- OR all TLS traffic is blocked
|
||||
- OR specified port is blocked: use 443 to make it "like real"
|
||||
- OR provided SNI is blocked: use "officially approved"/innocuous name
|
||||
- like most protocols on the Internet;
|
||||
- these situations are observed:
|
||||
- in China behind the Great Firewall
|
||||
- in Russia on mobile networks, less in wired networks
|
||||
- in Iran during "activity"
|
||||
### Too many open files
|
||||
- On a fresh Linux install the default open file limit is low; under load `telemt` may fail with `Accept error: Too many open files`
|
||||
- **Systemd**: add `LimitNOFILE=65536` to the `[Service]` section (already included in the example above)
|
||||
- **Docker**: add `--ulimit nofile=65536:65536` to your `docker run` command, or in `docker-compose.yml`:
|
||||
```yaml
|
||||
ulimits:
|
||||
nofile:
|
||||
soft: 65536
|
||||
hard: 65536
|
||||
```
|
||||
- **System-wide** (optional): add to `/etc/security/limits.conf`:
|
||||
```
|
||||
* soft nofile 1048576
|
||||
* hard nofile 1048576
|
||||
root soft nofile 1048576
|
||||
root hard nofile 1048576
|
||||
```
|
||||
|
||||
|
||||
## Build
|
||||
```bash
|
||||
# Cloning repo
|
||||
|
|
@ -252,7 +78,7 @@ telemt config.toml
|
|||
```
|
||||
|
||||
### OpenBSD
|
||||
- Build and service setup guide: [OpenBSD Guide (EN)](docs/OPENBSD.en.md)
|
||||
- Build and service setup guide: [OpenBSD Guide (EN)](docs/Quick_start/OPENBSD_QUICK_START_GUIDE.en.md)
|
||||
- Example rc.d script: [contrib/openbsd/telemt.rcd](contrib/openbsd/telemt.rcd)
|
||||
- Status: OpenBSD sandbox hardening with `pledge(2)` and `unveil(2)` is not implemented yet.
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,123 @@
|
|||
# Telemt — MTProxy на Rust + Tokio
|
||||
|
||||
***Решает проблемы раньше, чем другие узнают об их существовании***
|
||||
|
||||
> [!NOTE]
|
||||
>
|
||||
> Исправленный TLS ClientHello доступен в **Telegram Desktop** начиная с версии **6.7.2**: для работы с EE-MTProxy обновите клиент.
|
||||
>
|
||||
> Исправленный TLS ClientHello доступен в **Telegram Android** начиная с версии **12.6.4**; **официальный релиз для iOS находится в процессе разработки**.
|
||||
|
||||
<p align="center">
|
||||
<a href="https://t.me/telemtrs">
|
||||
<img src="docs/assets/telegram_button.png" alt="Мы в Telegram" width="200" />
|
||||
</a>
|
||||
</p>
|
||||
|
||||
**Telemt** — это быстрый, безопасный и функциональный сервер, написанный на Rust. Он полностью реализует официальный алгоритм прокси Telegram и добавляет множество улучшений для продакшена:
|
||||
|
||||
- [ME Pool + Reader/Writer + Registry + Refill + Adaptive Floor + Trio-State + жизненный цикл генераций](https://github.com/telemt/telemt/blob/main/docs/model/MODEL.en.md);
|
||||
- [Полноценный API с управлением](https://github.com/telemt/telemt/blob/main/docs/API.md);
|
||||
- Защита от повторных атак (Anti-Replay on Sliding Window);
|
||||
- Метрики в формате Prometheus;
|
||||
- TLS-fronting и TCP-splicing для маскировки от DPI.
|
||||
|
||||

|
||||
|
||||
## Особенности
|
||||
|
||||
⚓ Реализация **TLS-fronting** максимально приближена к поведению реального HTTPS-трафика.
|
||||
|
||||
⚓ ***Middle-End Pool*** оптимизирован для высокой производительности.
|
||||
|
||||
- Поддержка всех режимов MTProto proxy:
|
||||
- Classic;
|
||||
- Secure (префикс `dd`);
|
||||
- Fake TLS (префикс `ee` + SNI fronting);
|
||||
- Защита от replay-атак;
|
||||
- Маскировка трафика (перенаправление неизвестных подключений на реальные сайты);
|
||||
- Настраиваемые keepalive, таймауты, IPv6 и «быстрый режим»;
|
||||
- Корректное завершение работы (Ctrl+C);
|
||||
- Подробное логирование через `trace` и `debug`.
|
||||
|
||||
# Навигация
|
||||
- [FAQ](#faq)
|
||||
- [Архитектура](docs/Architecture)
|
||||
- [Быстрый старт](#quick-start-guide)
|
||||
- [Параметры конфигурационного файла](docs/Config_params)
|
||||
- [Сборка](#build)
|
||||
- [Почему Rust?](#why-rust)
|
||||
- [Известные проблемы](#issues)
|
||||
- [Планы](#roadmap)
|
||||
|
||||
## Быстрый старт
|
||||
- [Quick Start Guide RU](docs/Quick_start/QUICK_START_GUIDE.ru.md)
|
||||
- [Quick Start Guide EN](docs/Quick_start/QUICK_START_GUIDE.en.md)
|
||||
|
||||
## FAQ
|
||||
|
||||
- [FAQ RU](docs/FAQ.ru.md)
|
||||
- [FAQ EN](docs/FAQ.en.md)
|
||||
|
||||
## Сборка
|
||||
|
||||
```bash
|
||||
# Клонируйте репозиторий
|
||||
git clone https://github.com/telemt/telemt
|
||||
# Смените каталог на telemt
|
||||
cd telemt
|
||||
# Начните процесс сборки
|
||||
cargo build --release
|
||||
|
||||
# Устройства с небольшим объёмом оперативной памяти (1 ГБ, например NanoPi Neo3 / Raspberry Pi Zero 2):
|
||||
# используется параметр lto = "thin" для уменьшения пикового потребления памяти.
|
||||
# Если ваш пользовательский набор инструментов переопределяет профили, не используйте Fat LTO.
|
||||
|
||||
# Переместите бинарный файл в /bin
|
||||
mv ./target/release/telemt /bin
|
||||
# Сделайте файл исполняемым
|
||||
chmod +x /bin/telemt
|
||||
# Запустите!
|
||||
telemt config.toml
|
||||
```
|
||||
|
||||
### Устройства с малым объемом RAM
|
||||
Для устройств с ~1 ГБ RAM (например Raspberry Pi):
|
||||
- используется облегчённая оптимизация линковщика (thin LTO);
|
||||
- не рекомендуется включать fat LTO.
|
||||
|
||||
## OpenBSD
|
||||
|
||||
- Руководство по сборке и настройке на английском языке [OpenBSD Guide (EN)](docs/Quick_start/OPENBSD_QUICK_START_GUIDE.en.md);
|
||||
- Пример rc.d скрипта: [contrib/openbsd/telemt.rcd](contrib/openbsd/telemt.rcd);
|
||||
- Поддержка sandbox с `pledge(2)` и `unveil(2)` пока не реализована.
|
||||
|
||||
## Почему Rust?
|
||||
|
||||
- Надёжность для долгоживущих процессов;
|
||||
- Детерминированное управление ресурсами (RAII);
|
||||
- Отсутствие сборщика мусора;
|
||||
- Безопасность памяти;
|
||||
- Асинхронная архитектура Tokio.
|
||||
|
||||
## Известные проблемы
|
||||
|
||||
- ✅ [Поддержка SOCKS5 как upstream](https://github.com/telemt/telemt/issues/1) -> добавлен Upstream Management;
|
||||
- ✅ [Проблема зависания загрузки медиа на iOS](https://github.com/telemt/telemt/issues/2).
|
||||
|
||||
## Планы
|
||||
|
||||
- Публичный IP в ссылках;
|
||||
- Перезагрузка конфигурации на лету;
|
||||
- Привязка к устройству или IP для входящих и исходящих соединений;
|
||||
- Поддержка рекламных тегов по SNI / секретному ключу;
|
||||
- Улучшенная обработка ошибок;
|
||||
- Zero-copy оптимизации;
|
||||
- Проверка состояния дата-центров;
|
||||
- Отсутствие глобального изменяемого состояния;
|
||||
- Изоляция клиентов и справедливое распределение трафика;
|
||||
- «Политика секретов» — маршрутизация по SNI / секрету;
|
||||
- Балансировщик с несколькими источниками и отработка отказов;
|
||||
- Строгие FSM для handshake;
|
||||
- Улучшенная защита от replay-атак;
|
||||
- Веб-интерфейс: статистика, состояние работоспособности, задержка, пользовательский опыт...
|
||||
|
Before Width: | Height: | Size: 650 KiB After Width: | Height: | Size: 650 KiB |
|
Before Width: | Height: | Size: 838 KiB After Width: | Height: | Size: 838 KiB |
|
|
@ -1,417 +0,0 @@
|
|||
# Telemt Config Parameters Reference
|
||||
|
||||
This document lists all configuration keys accepted by `config.toml`.
|
||||
|
||||
> [!WARNING]
|
||||
>
|
||||
> The configuration parameters detailed in this document are intended for advanced users and fine-tuning purposes. Modifying these settings without a clear understanding of their function may lead to application instability or other unexpected behavior. Please proceed with caution and at your own risk.
|
||||
|
||||
## Top-level keys
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| include | `String` (special directive) | `null` | — | Includes another TOML file with `include = "relative/or/absolute/path.toml"`; includes are processed recursively before parsing. |
|
||||
| show_link | `"*" \| String[]` | `[]` (`ShowLink::None`) | — | Legacy top-level link visibility selector (`"*"` for all users or explicit usernames list). |
|
||||
| dc_overrides | `Map<String, String[]>` | `{}` | — | Overrides DC endpoints for non-standard DCs; key is DC id string, value is `ip:port` list. |
|
||||
| default_dc | `u8 \| null` | `null` (effective fallback: `2` in ME routing) | — | Default DC index used for unmapped non-standard DCs. |
|
||||
|
||||
## [general]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| data_path | `String \| null` | `null` | — | Optional runtime data directory path. |
|
||||
| prefer_ipv6 | `bool` | `false` | — | Prefer IPv6 where applicable in runtime logic. |
|
||||
| fast_mode | `bool` | `true` | — | Enables fast-path optimizations for traffic processing. |
|
||||
| use_middle_proxy | `bool` | `true` | none | Enables ME transport mode; if `false`, runtime falls back to direct DC routing. |
|
||||
| proxy_secret_path | `String \| null` | `"proxy-secret"` | Path may be `null`. | Path to Telegram infrastructure proxy-secret file used by ME handshake logic. |
|
||||
| proxy_config_v4_cache_path | `String \| null` | `"cache/proxy-config-v4.txt"` | — | Optional cache path for raw `getProxyConfig` (IPv4) snapshot. |
|
||||
| proxy_config_v6_cache_path | `String \| null` | `"cache/proxy-config-v6.txt"` | — | Optional cache path for raw `getProxyConfigV6` (IPv6) snapshot. |
|
||||
| ad_tag | `String \| null` | `null` | — | Global fallback ad tag (32 hex characters). |
|
||||
| middle_proxy_nat_ip | `IpAddr \| null` | `null` | Must be a valid IP when set. | Manual public NAT IP override used as ME address material when set. |
|
||||
| middle_proxy_nat_probe | `bool` | `true` | Auto-forced to `true` when `use_middle_proxy = true`. | Enables ME NAT probing; runtime may force it on when ME mode is active. |
|
||||
| middle_proxy_nat_stun | `String \| null` | `null` | Deprecated. Use `network.stun_servers`. | Deprecated legacy single STUN server for NAT probing. |
|
||||
| middle_proxy_nat_stun_servers | `String[]` | `[]` | Deprecated. Use `network.stun_servers`. | Deprecated legacy STUN list for NAT probing fallback. |
|
||||
| stun_nat_probe_concurrency | `usize` | `8` | Must be `> 0`. | Maximum number of parallel STUN probes during NAT/public endpoint discovery. |
|
||||
| middle_proxy_pool_size | `usize` | `8` | none | Target size of active ME writer pool. |
|
||||
| middle_proxy_warm_standby | `usize` | `16` | none | Reserved compatibility field in current runtime revision. |
|
||||
| me_init_retry_attempts | `u32` | `0` | `0..=1_000_000`. | Startup retries for ME pool initialization (`0` means unlimited). |
|
||||
| me2dc_fallback | `bool` | `true` | — | Allows fallback from ME mode to direct DC when ME startup fails. |
|
||||
| me_keepalive_enabled | `bool` | `true` | none | Enables periodic ME keepalive/ping traffic. |
|
||||
| me_keepalive_interval_secs | `u64` | `8` | none | Base ME keepalive interval in seconds. |
|
||||
| me_keepalive_jitter_secs | `u64` | `2` | none | Keepalive jitter in seconds to reduce synchronized bursts. |
|
||||
| me_keepalive_payload_random | `bool` | `true` | none | Randomizes keepalive payload bytes instead of fixed zero payload. |
|
||||
| rpc_proxy_req_every | `u64` | `0` | `0` or `10..=300`. | Interval for service `RPC_PROXY_REQ` activity signals (`0` disables). |
|
||||
| me_writer_cmd_channel_capacity | `usize` | `4096` | Must be `> 0`. | Capacity of per-writer command channel. |
|
||||
| me_route_channel_capacity | `usize` | `768` | Must be `> 0`. | Capacity of per-connection ME response route channel. |
|
||||
| me_c2me_channel_capacity | `usize` | `1024` | Must be `> 0`. | Capacity of per-client command queue (client reader -> ME sender). |
|
||||
| me_reader_route_data_wait_ms | `u64` | `2` | `0..=20`. | Bounded wait for routing ME DATA to per-connection queue (`0` = no wait). |
|
||||
| me_d2c_flush_batch_max_frames | `usize` | `32` | `1..=512`. | Max ME->client frames coalesced before flush. |
|
||||
| me_d2c_flush_batch_max_bytes | `usize` | `131072` | `4096..=2_097_152`. | Max ME->client payload bytes coalesced before flush. |
|
||||
| me_d2c_flush_batch_max_delay_us | `u64` | `500` | `0..=5000`. | Max microsecond wait for coalescing more ME->client frames (`0` disables timed coalescing). |
|
||||
| me_d2c_ack_flush_immediate | `bool` | `true` | — | Flushes client writer immediately after quick-ack write. |
|
||||
| direct_relay_copy_buf_c2s_bytes | `usize` | `65536` | `4096..=1_048_576`. | Copy buffer size for client->DC direction in direct relay. |
|
||||
| direct_relay_copy_buf_s2c_bytes | `usize` | `262144` | `8192..=2_097_152`. | Copy buffer size for DC->client direction in direct relay. |
|
||||
| crypto_pending_buffer | `usize` | `262144` | — | Max pending ciphertext buffer per client writer (bytes). |
|
||||
| max_client_frame | `usize` | `16777216` | — | Maximum allowed client MTProto frame size (bytes). |
|
||||
| desync_all_full | `bool` | `false` | — | Emits full crypto-desync forensic logs for every event. |
|
||||
| beobachten | `bool` | `true` | — | Enables per-IP forensic observation buckets. |
|
||||
| beobachten_minutes | `u64` | `10` | Must be `> 0`. | Retention window (minutes) for per-IP observation buckets. |
|
||||
| beobachten_flush_secs | `u64` | `15` | Must be `> 0`. | Snapshot flush interval (seconds) for observation output file. |
|
||||
| beobachten_file | `String` | `"cache/beobachten.txt"` | — | Observation snapshot output file path. |
|
||||
| hardswap | `bool` | `true` | none | Enables generation-based ME hardswap strategy. |
|
||||
| me_warmup_stagger_enabled | `bool` | `true` | none | Staggers extra ME warmup dials to avoid connection spikes. |
|
||||
| me_warmup_step_delay_ms | `u64` | `500` | none | Base delay in milliseconds between warmup dial steps. |
|
||||
| me_warmup_step_jitter_ms | `u64` | `300` | none | Additional random delay in milliseconds for warmup steps. |
|
||||
| me_reconnect_max_concurrent_per_dc | `u32` | `8` | none | Limits concurrent reconnect workers per DC during health recovery. |
|
||||
| me_reconnect_backoff_base_ms | `u64` | `500` | none | Initial reconnect backoff in milliseconds. |
|
||||
| me_reconnect_backoff_cap_ms | `u64` | `30000` | none | Maximum reconnect backoff cap in milliseconds. |
|
||||
| me_reconnect_fast_retry_count | `u32` | `16` | none | Immediate retry budget before long backoff behavior applies. |
|
||||
| me_single_endpoint_shadow_writers | `u8` | `2` | `0..=32`. | Additional reserve writers for one-endpoint DC groups. |
|
||||
| me_single_endpoint_outage_mode_enabled | `bool` | `true` | — | Enables aggressive outage recovery for one-endpoint DC groups. |
|
||||
| me_single_endpoint_outage_disable_quarantine | `bool` | `true` | — | Ignores endpoint quarantine in one-endpoint outage mode. |
|
||||
| me_single_endpoint_outage_backoff_min_ms | `u64` | `250` | Must be `> 0`; also `<= me_single_endpoint_outage_backoff_max_ms`. | Minimum reconnect backoff in outage mode (ms). |
|
||||
| me_single_endpoint_outage_backoff_max_ms | `u64` | `3000` | Must be `> 0`; also `>= me_single_endpoint_outage_backoff_min_ms`. | Maximum reconnect backoff in outage mode (ms). |
|
||||
| me_single_endpoint_shadow_rotate_every_secs | `u64` | `900` | — | Periodic shadow writer rotation interval (`0` disables). |
|
||||
| me_floor_mode | `"static" \| "adaptive"` | `"adaptive"` | — | Writer floor policy mode. |
|
||||
| me_adaptive_floor_idle_secs | `u64` | `90` | — | Idle time before adaptive floor may reduce one-endpoint target. |
|
||||
| me_adaptive_floor_min_writers_single_endpoint | `u8` | `1` | `1..=32`. | Minimum adaptive writer target for one-endpoint DC groups. |
|
||||
| me_adaptive_floor_min_writers_multi_endpoint | `u8` | `1` | `1..=32`. | Minimum adaptive writer target for multi-endpoint DC groups. |
|
||||
| me_adaptive_floor_recover_grace_secs | `u64` | `180` | — | Grace period to hold static floor after activity. |
|
||||
| me_adaptive_floor_writers_per_core_total | `u16` | `48` | Must be `> 0`. | Global writer budget per logical CPU core in adaptive mode. |
|
||||
| me_adaptive_floor_cpu_cores_override | `u16` | `0` | — | Manual CPU core count override (`0` uses auto-detection). |
|
||||
| me_adaptive_floor_max_extra_writers_single_per_core | `u16` | `1` | — | Per-core max extra writers above base floor for one-endpoint DCs. |
|
||||
| me_adaptive_floor_max_extra_writers_multi_per_core | `u16` | `2` | — | Per-core max extra writers above base floor for multi-endpoint DCs. |
|
||||
| me_adaptive_floor_max_active_writers_per_core | `u16` | `64` | Must be `> 0`. | Hard cap for active ME writers per logical CPU core. |
|
||||
| me_adaptive_floor_max_warm_writers_per_core | `u16` | `64` | Must be `> 0`. | Hard cap for warm ME writers per logical CPU core. |
|
||||
| me_adaptive_floor_max_active_writers_global | `u32` | `256` | Must be `> 0`. | Hard global cap for active ME writers. |
|
||||
| me_adaptive_floor_max_warm_writers_global | `u32` | `256` | Must be `> 0`. | Hard global cap for warm ME writers. |
|
||||
| upstream_connect_retry_attempts | `u32` | `2` | Must be `> 0`. | Connect attempts for selected upstream before error/fallback. |
|
||||
| upstream_connect_retry_backoff_ms | `u64` | `100` | — | Delay between upstream connect attempts (ms). |
|
||||
| upstream_connect_budget_ms | `u64` | `3000` | Must be `> 0`. | Total wall-clock budget for one upstream connect request (ms). |
|
||||
| upstream_unhealthy_fail_threshold | `u32` | `5` | Must be `> 0`. | Consecutive failed requests before upstream is marked unhealthy. |
|
||||
| upstream_connect_failfast_hard_errors | `bool` | `false` | — | Skips additional retries for hard non-transient connect errors. |
|
||||
| stun_iface_mismatch_ignore | `bool` | `false` | none | Reserved compatibility flag in current runtime revision. |
|
||||
| unknown_dc_log_path | `String \| null` | `"unknown-dc.txt"` | — | File path for unknown-DC request logging (`null` disables file path). |
|
||||
| unknown_dc_file_log_enabled | `bool` | `false` | — | Enables unknown-DC file logging. |
|
||||
| log_level | `"debug" \| "verbose" \| "normal" \| "silent"` | `"normal"` | — | Runtime logging verbosity. |
|
||||
| disable_colors | `bool` | `false` | — | Disables ANSI colors in logs. |
|
||||
| me_socks_kdf_policy | `"strict" \| "compat"` | `"strict"` | — | SOCKS-bound KDF fallback policy for ME handshake. |
|
||||
| me_route_backpressure_base_timeout_ms | `u64` | `25` | Must be `> 0`. | Base backpressure timeout for route-channel send (ms). |
|
||||
| me_route_backpressure_high_timeout_ms | `u64` | `120` | Must be `>= me_route_backpressure_base_timeout_ms`. | High backpressure timeout when queue occupancy exceeds watermark (ms). |
|
||||
| me_route_backpressure_high_watermark_pct | `u8` | `80` | `1..=100`. | Queue occupancy threshold (%) for high timeout mode. |
|
||||
| me_health_interval_ms_unhealthy | `u64` | `1000` | Must be `> 0`. | Health monitor interval while writer coverage is degraded (ms). |
|
||||
| me_health_interval_ms_healthy | `u64` | `3000` | Must be `> 0`. | Health monitor interval while writer coverage is healthy (ms). |
|
||||
| me_admission_poll_ms | `u64` | `1000` | Must be `> 0`. | Poll interval for conditional-admission checks (ms). |
|
||||
| me_warn_rate_limit_ms | `u64` | `5000` | Must be `> 0`. | Cooldown for repetitive ME warning logs (ms). |
|
||||
| me_route_no_writer_mode | `"async_recovery_failfast" \| "inline_recovery_legacy" \| "hybrid_async_persistent"` | `"hybrid_async_persistent"` | — | Route behavior when no writer is immediately available. |
|
||||
| me_route_no_writer_wait_ms | `u64` | `250` | `10..=5000`. | Max wait in async-recovery failfast mode (ms). |
|
||||
| me_route_inline_recovery_attempts | `u32` | `3` | Must be `> 0`. | Inline recovery attempts in legacy mode. |
|
||||
| me_route_inline_recovery_wait_ms | `u64` | `3000` | `10..=30000`. | Max inline recovery wait in legacy mode (ms). |
|
||||
| fast_mode_min_tls_record | `usize` | `0` | — | Minimum TLS record size when fast-mode coalescing is enabled (`0` disables). |
|
||||
| update_every | `u64 \| null` | `300` | If set: must be `> 0`; if `null`: legacy fallback path is used. | Unified refresh interval for ME config and proxy-secret updater tasks. |
|
||||
| me_reinit_every_secs | `u64` | `900` | Must be `> 0`. | Periodic interval for zero-downtime ME reinit cycle. |
|
||||
| me_hardswap_warmup_delay_min_ms | `u64` | `1000` | Must be `<= me_hardswap_warmup_delay_max_ms`. | Lower bound for hardswap warmup dial spacing. |
|
||||
| me_hardswap_warmup_delay_max_ms | `u64` | `2000` | Must be `> 0`. | Upper bound for hardswap warmup dial spacing. |
|
||||
| me_hardswap_warmup_extra_passes | `u8` | `3` | Must be within `[0, 10]`. | Additional warmup passes after the base pass in one hardswap cycle. |
|
||||
| me_hardswap_warmup_pass_backoff_base_ms | `u64` | `500` | Must be `> 0`. | Base backoff between extra hardswap warmup passes. |
|
||||
| me_config_stable_snapshots | `u8` | `2` | Must be `> 0`. | Number of identical ME config snapshots required before apply. |
|
||||
| me_config_apply_cooldown_secs | `u64` | `300` | none | Cooldown between applied ME endpoint-map updates. |
|
||||
| me_snapshot_require_http_2xx | `bool` | `true` | — | Requires 2xx HTTP responses for applying config snapshots. |
|
||||
| me_snapshot_reject_empty_map | `bool` | `true` | — | Rejects empty config snapshots. |
|
||||
| me_snapshot_min_proxy_for_lines | `u32` | `1` | Must be `> 0`. | Minimum parsed `proxy_for` rows required to accept snapshot. |
|
||||
| proxy_secret_stable_snapshots | `u8` | `2` | Must be `> 0`. | Number of identical proxy-secret snapshots required before rotation. |
|
||||
| proxy_secret_rotate_runtime | `bool` | `true` | none | Enables runtime proxy-secret rotation from updater snapshots. |
|
||||
| me_secret_atomic_snapshot | `bool` | `true` | — | Keeps selector and secret bytes from the same snapshot atomically. |
|
||||
| proxy_secret_len_max | `usize` | `256` | Must be within `[32, 4096]`. | Upper length limit for accepted proxy-secret bytes. |
|
||||
| me_pool_drain_ttl_secs | `u64` | `90` | none | Time window where stale writers remain fallback-eligible after map change. |
|
||||
| me_pool_drain_threshold | `u64` | `128` | — | Max draining stale writers before batch force-close (`0` disables threshold cleanup). |
|
||||
| me_pool_drain_soft_evict_enabled | `bool` | `true` | — | Enables gradual soft-eviction of stale writers during drain/reinit instead of immediate hard close. |
|
||||
| me_pool_drain_soft_evict_grace_secs | `u64` | `30` | `0..=3600`. | Grace period before stale writers become soft-evict candidates. |
|
||||
| me_pool_drain_soft_evict_per_writer | `u8` | `1` | `1..=16`. | Maximum stale routes soft-evicted per writer in one eviction pass. |
|
||||
| me_pool_drain_soft_evict_budget_per_core | `u16` | `8` | `1..=64`. | Per-core budget limiting aggregate soft-eviction work per pass. |
|
||||
| me_pool_drain_soft_evict_cooldown_ms | `u64` | `5000` | Must be `> 0`. | Cooldown between consecutive soft-eviction passes (ms). |
|
||||
| me_bind_stale_mode | `"never" \| "ttl" \| "always"` | `"ttl"` | — | Policy for new binds on stale draining writers. |
|
||||
| me_bind_stale_ttl_secs | `u64` | `90` | — | TTL for stale bind allowance when stale mode is `ttl`. |
|
||||
| me_pool_min_fresh_ratio | `f32` | `0.8` | Must be within `[0.0, 1.0]`. | Minimum fresh desired-DC coverage ratio before stale writers are drained. |
|
||||
| me_reinit_drain_timeout_secs | `u64` | `120` | `0` disables force-close; if `> 0` and `< me_pool_drain_ttl_secs`, runtime bumps it to TTL. | Force-close timeout for draining stale writers (`0` keeps indefinite draining). |
|
||||
| proxy_secret_auto_reload_secs | `u64` | `3600` | Deprecated. Use `general.update_every`. | Deprecated legacy secret reload interval (fallback when `update_every` is not set). |
|
||||
| proxy_config_auto_reload_secs | `u64` | `3600` | Deprecated. Use `general.update_every`. | Deprecated legacy config reload interval (fallback when `update_every` is not set). |
|
||||
| me_reinit_singleflight | `bool` | `true` | — | Serializes ME reinit cycles across trigger sources. |
|
||||
| me_reinit_trigger_channel | `usize` | `64` | Must be `> 0`. | Trigger queue capacity for reinit scheduler. |
|
||||
| me_reinit_coalesce_window_ms | `u64` | `200` | — | Trigger coalescing window before starting reinit (ms). |
|
||||
| me_deterministic_writer_sort | `bool` | `true` | — | Enables deterministic candidate sort for writer binding path. |
|
||||
| me_writer_pick_mode | `"sorted_rr" \| "p2c"` | `"p2c"` | — | Writer selection mode for route bind path. |
|
||||
| me_writer_pick_sample_size | `u8` | `3` | `2..=4`. | Number of candidates sampled by picker in `p2c` mode. |
|
||||
| ntp_check | `bool` | `true` | — | Enables NTP drift check at startup. |
|
||||
| ntp_servers | `String[]` | `["pool.ntp.org"]` | — | NTP servers used for drift check. |
|
||||
| auto_degradation_enabled | `bool` | `true` | none | Reserved compatibility flag in current runtime revision. |
|
||||
| degradation_min_unavailable_dc_groups | `u8` | `2` | none | Reserved compatibility threshold in current runtime revision. |
|
||||
|
||||
## [general.modes]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| classic | `bool` | `false` | — | Enables classic MTProxy mode. |
|
||||
| secure | `bool` | `false` | — | Enables secure mode. |
|
||||
| tls | `bool` | `true` | — | Enables TLS mode. |
|
||||
|
||||
## [general.links]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| show | `"*" \| String[]` | `"*"` | — | Selects users whose tg:// links are shown at startup. |
|
||||
| public_host | `String \| null` | `null` | — | Public hostname/IP override for generated tg:// links. |
|
||||
| public_port | `u16 \| null` | `null` | — | Public port override for generated tg:// links. |
|
||||
|
||||
## [general.telemetry]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| core_enabled | `bool` | `true` | — | Enables core hot-path telemetry counters. |
|
||||
| user_enabled | `bool` | `true` | — | Enables per-user telemetry counters. |
|
||||
| me_level | `"silent" \| "normal" \| "debug"` | `"normal"` | — | Middle-End telemetry verbosity level. |
|
||||
|
||||
## [network]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| ipv4 | `bool` | `true` | — | Enables IPv4 networking. |
|
||||
| ipv6 | `bool` | `false` | — | Enables IPv6 networking. |
|
||||
| prefer | `u8` | `4` | Must be `4` or `6`. | Preferred IP family for selection (`4` or `6`). |
|
||||
| multipath | `bool` | `false` | — | Enables multipath behavior where supported. |
|
||||
| stun_use | `bool` | `true` | none | Global STUN switch; when `false`, STUN probing path is disabled. |
|
||||
| stun_servers | `String[]` | Built-in STUN list (13 hosts) | Deduplicated; empty values are removed. | Primary STUN server list for NAT/public endpoint discovery. |
|
||||
| stun_tcp_fallback | `bool` | `true` | none | Enables TCP fallback for STUN when UDP path is blocked. |
|
||||
| http_ip_detect_urls | `String[]` | `["https://ifconfig.me/ip", "https://api.ipify.org"]` | none | HTTP fallback endpoints for public IP detection when STUN is unavailable. |
|
||||
| cache_public_ip_path | `String` | `"cache/public_ip.txt"` | — | File path for caching detected public IP. |
|
||||
| dns_overrides | `String[]` | `[]` | Must match `host:port:ip`; IPv6 must be bracketed. | Runtime DNS overrides in `host:port:ip` format. |
|
||||
|
||||
## [server]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| port | `u16` | `443` | — | Main proxy listen port. |
|
||||
| listen_addr_ipv4 | `String \| null` | `"0.0.0.0"` | — | IPv4 bind address for TCP listener. |
|
||||
| listen_addr_ipv6 | `String \| null` | `"::"` | — | IPv6 bind address for TCP listener. |
|
||||
| listen_unix_sock | `String \| null` | `null` | — | Unix socket path for listener. |
|
||||
| listen_unix_sock_perm | `String \| null` | `null` | — | Unix socket permissions in octal string (e.g., `"0666"`). |
|
||||
| listen_tcp | `bool \| null` | `null` (auto) | — | Explicit TCP listener enable/disable override. |
|
||||
| proxy_protocol | `bool` | `false` | — | Enables HAProxy PROXY protocol parsing on incoming client connections. |
|
||||
| proxy_protocol_header_timeout_ms | `u64` | `500` | Must be `> 0`. | Timeout for PROXY protocol header read/parse (ms). |
|
||||
| metrics_port | `u16 \| null` | `null` | — | Metrics endpoint port (enables metrics listener). |
|
||||
| metrics_listen | `String \| null` | `null` | — | Full metrics bind address (`IP:PORT`), overrides `metrics_port`. |
|
||||
| metrics_whitelist | `IpNetwork[]` | `["127.0.0.1/32", "::1/128"]` | — | CIDR whitelist for metrics endpoint access. |
|
||||
| max_connections | `u32` | `10000` | — | Max concurrent client connections (`0` = unlimited). |
|
||||
|
||||
## [server.api]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| enabled | `bool` | `true` | — | Enables control-plane REST API. |
|
||||
| listen | `String` | `"0.0.0.0:9091"` | Must be valid `IP:PORT`. | API bind address in `IP:PORT` format. |
|
||||
| whitelist | `IpNetwork[]` | `["127.0.0.0/8"]` | — | CIDR whitelist allowed to access API. |
|
||||
| auth_header | `String` | `""` | — | Exact expected `Authorization` header value (empty = disabled). |
|
||||
| request_body_limit_bytes | `usize` | `65536` | Must be `> 0`. | Maximum accepted HTTP request body size. |
|
||||
| minimal_runtime_enabled | `bool` | `true` | — | Enables minimal runtime snapshots endpoint logic. |
|
||||
| minimal_runtime_cache_ttl_ms | `u64` | `1000` | `0..=60000`. | Cache TTL for minimal runtime snapshots (ms; `0` disables cache). |
|
||||
| runtime_edge_enabled | `bool` | `false` | — | Enables runtime edge endpoints. |
|
||||
| runtime_edge_cache_ttl_ms | `u64` | `1000` | `0..=60000`. | Cache TTL for runtime edge aggregation payloads (ms). |
|
||||
| runtime_edge_top_n | `usize` | `10` | `1..=1000`. | Top-N size for edge connection leaderboard. |
|
||||
| runtime_edge_events_capacity | `usize` | `256` | `16..=4096`. | Ring-buffer capacity for runtime edge events. |
|
||||
| read_only | `bool` | `false` | — | Rejects mutating API endpoints when enabled. |
|
||||
|
||||
## [[server.listeners]]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| ip | `IpAddr` | — | — | Listener bind IP. |
|
||||
| announce | `String \| null` | — | — | Public IP/domain announced in proxy links (priority over `announce_ip`). |
|
||||
| announce_ip | `IpAddr \| null` | — | — | Deprecated legacy announce IP (migrated to `announce` if needed). |
|
||||
| proxy_protocol | `bool \| null` | `null` | — | Per-listener override for PROXY protocol enable flag. |
|
||||
| reuse_allow | `bool` | `false` | — | Enables `SO_REUSEPORT` for multi-instance bind sharing. |
|
||||
|
||||
## [timeouts]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| client_handshake | `u64` | `30` | — | Client handshake timeout. |
|
||||
| tg_connect | `u64` | `10` | — | Upstream Telegram connect timeout. |
|
||||
| client_keepalive | `u64` | `15` | — | Client keepalive timeout. |
|
||||
| client_ack | `u64` | `90` | — | Client ACK timeout. |
|
||||
| me_one_retry | `u8` | `12` | none | Fast reconnect attempts budget for single-endpoint DC scenarios. |
|
||||
| me_one_timeout_ms | `u64` | `1200` | none | Timeout in milliseconds for each quick single-endpoint reconnect attempt. |
|
||||
|
||||
## [censorship]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| tls_domain | `String` | `"petrovich.ru"` | — | Primary TLS domain used in fake TLS handshake profile. |
|
||||
| tls_domains | `String[]` | `[]` | — | Additional TLS domains for generating multiple links. |
|
||||
| mask | `bool` | `true` | — | Enables masking/fronting relay mode. |
|
||||
| mask_host | `String \| null` | `null` | — | Upstream mask host for TLS fronting relay. |
|
||||
| mask_port | `u16` | `443` | — | Upstream mask port for TLS fronting relay. |
|
||||
| mask_unix_sock | `String \| null` | `null` | — | Unix socket path for mask backend instead of TCP host/port. |
|
||||
| fake_cert_len | `usize` | `2048` | — | Length of synthetic certificate payload when emulation data is unavailable. |
|
||||
| tls_emulation | `bool` | `true` | — | Enables certificate/TLS behavior emulation from cached real fronts. |
|
||||
| tls_front_dir | `String` | `"tlsfront"` | — | Directory path for TLS front cache storage. |
|
||||
| server_hello_delay_min_ms | `u64` | `0` | — | Minimum server_hello delay for anti-fingerprint behavior (ms). |
|
||||
| server_hello_delay_max_ms | `u64` | `0` | — | Maximum server_hello delay for anti-fingerprint behavior (ms). |
|
||||
| tls_new_session_tickets | `u8` | `0` | — | Number of `NewSessionTicket` messages to emit after handshake. |
|
||||
| tls_full_cert_ttl_secs | `u64` | `90` | — | TTL for sending full cert payload per (domain, client IP) tuple. |
|
||||
| alpn_enforce | `bool` | `true` | — | Enforces ALPN echo behavior based on client preference. |
|
||||
| mask_proxy_protocol | `u8` | `0` | — | PROXY protocol mode for mask backend (`0` disabled, `1` v1, `2` v2). |
|
||||
| mask_shape_hardening | `bool` | `true` | — | Enables client->mask shape-channel hardening by applying controlled tail padding to bucket boundaries on mask relay shutdown. |
|
||||
| mask_shape_hardening_aggressive_mode | `bool` | `false` | Requires `mask_shape_hardening = true`. | Opt-in aggressive shaping profile: allows shaping on backend-silent non-EOF paths and switches above-cap blur to strictly positive random tail. |
|
||||
| mask_shape_bucket_floor_bytes | `usize` | `512` | Must be `> 0`; should be `<= mask_shape_bucket_cap_bytes`. | Minimum bucket size used by shape-channel hardening. |
|
||||
| mask_shape_bucket_cap_bytes | `usize` | `4096` | Must be `>= mask_shape_bucket_floor_bytes`. | Maximum bucket size used by shape-channel hardening; traffic above cap is not padded further. |
|
||||
| mask_shape_above_cap_blur | `bool` | `false` | Requires `mask_shape_hardening = true`; requires `mask_shape_above_cap_blur_max_bytes > 0`. | Adds bounded randomized tail bytes even when forwarded size already exceeds cap. |
|
||||
| mask_shape_above_cap_blur_max_bytes | `usize` | `512` | Must be `<= 1048576`; must be `> 0` when `mask_shape_above_cap_blur = true`. | Maximum randomized extra bytes appended above cap. |
|
||||
| mask_timing_normalization_enabled | `bool` | `false` | Requires `mask_timing_normalization_floor_ms > 0`; requires `ceiling >= floor`. | Enables timing envelope normalization on masking outcomes. |
|
||||
| mask_timing_normalization_floor_ms | `u64` | `0` | Must be `> 0` when timing normalization is enabled; must be `<= ceiling`. | Lower bound (ms) for masking outcome normalization target. |
|
||||
| mask_timing_normalization_ceiling_ms | `u64` | `0` | Must be `>= floor`; must be `<= 60000`. | Upper bound (ms) for masking outcome normalization target. |
|
||||
|
||||
### Shape-channel hardening notes (`[censorship]`)
|
||||
|
||||
These parameters are designed to reduce one specific fingerprint source during masking: the exact number of bytes sent from proxy to `mask_host` for invalid or probing traffic.
|
||||
|
||||
Without hardening, a censor can often correlate probe input length with backend-observed length very precisely (for example: `5 + body_sent` on early TLS reject paths). That creates a length-based classifier signal.
|
||||
|
||||
When `mask_shape_hardening = true`, Telemt pads the **client->mask** stream tail to a bucket boundary at relay shutdown:
|
||||
|
||||
- Total bytes sent to mask are first measured.
|
||||
- A bucket is selected using powers of two starting from `mask_shape_bucket_floor_bytes`.
|
||||
- Padding is added only if total bytes are below `mask_shape_bucket_cap_bytes`.
|
||||
- If bytes already exceed cap, no extra padding is added.
|
||||
|
||||
This means multiple nearby probe sizes collapse into the same backend-observed size class, making active classification harder.
|
||||
|
||||
What each parameter changes in practice:
|
||||
|
||||
- `mask_shape_hardening`
|
||||
Enables or disables this entire length-shaping stage on the fallback path.
|
||||
When `false`, backend-observed length stays close to the real forwarded probe length.
|
||||
When `true`, clean relay shutdown can append random padding bytes to move the total into a bucket.
|
||||
|
||||
- `mask_shape_bucket_floor_bytes`
|
||||
Sets the first bucket boundary used for small probes.
|
||||
Example: with floor `512`, a malformed probe that would otherwise forward `37` bytes can be expanded to `512` bytes on clean EOF.
|
||||
Larger floor values hide very small probes better, but increase egress cost.
|
||||
|
||||
- `mask_shape_bucket_cap_bytes`
|
||||
Sets the largest bucket Telemt will pad up to with bucket logic.
|
||||
Example: with cap `4096`, a forwarded total of `1800` bytes may be padded to `2048` or `4096` depending on the bucket ladder, but a total already above `4096` will not be bucket-padded further.
|
||||
Larger cap values increase the range over which size classes are collapsed, but also increase worst-case overhead.
|
||||
|
||||
- Clean EOF matters in conservative mode
|
||||
In the default profile, shape padding is intentionally conservative: it is applied on clean relay shutdown, not on every timeout/drip path.
|
||||
This avoids introducing new timeout-tail artifacts that some backends or tests interpret as a separate fingerprint.
|
||||
|
||||
Practical trade-offs:
|
||||
|
||||
- Better anti-fingerprinting on size/shape channel.
|
||||
- Slightly higher egress overhead for small probes due to padding.
|
||||
- Behavior is intentionally conservative and enabled by default.
|
||||
|
||||
Recommended starting profile:
|
||||
|
||||
- `mask_shape_hardening = true` (default)
|
||||
- `mask_shape_bucket_floor_bytes = 512`
|
||||
- `mask_shape_bucket_cap_bytes = 4096`
|
||||
|
||||
### Aggressive mode notes (`[censorship]`)
|
||||
|
||||
`mask_shape_hardening_aggressive_mode` is an opt-in profile for higher anti-classifier pressure.
|
||||
|
||||
- Default is `false` to preserve conservative timeout/no-tail behavior.
|
||||
- Requires `mask_shape_hardening = true`.
|
||||
- When enabled, backend-silent non-EOF masking paths may be shaped.
|
||||
- When enabled together with above-cap blur, the random extra tail uses `[1, max]` instead of `[0, max]`.
|
||||
|
||||
What changes when aggressive mode is enabled:
|
||||
|
||||
- Backend-silent timeout paths can be shaped
|
||||
In default mode, a client that keeps the socket half-open and times out will usually not receive shape padding on that path.
|
||||
In aggressive mode, Telemt may still shape that backend-silent session if no backend bytes were returned.
|
||||
This is specifically aimed at active probes that try to avoid EOF in order to preserve an exact backend-observed length.
|
||||
|
||||
- Above-cap blur always adds at least one byte
|
||||
In default mode, above-cap blur may choose `0`, so some oversized probes still land on their exact base forwarded length.
|
||||
In aggressive mode, that exact-base sample is removed by construction.
|
||||
|
||||
- Tradeoff
|
||||
Aggressive mode improves resistance to active length classifiers, but it is more opinionated and less conservative.
|
||||
If your deployment prioritizes strict compatibility with timeout/no-tail semantics, leave it disabled.
|
||||
If your threat model includes repeated active probing by a censor, this mode is the stronger profile.
|
||||
|
||||
Use this mode only when your threat model prioritizes classifier resistance over strict compatibility with conservative masking semantics.
|
||||
|
||||
### Above-cap blur notes (`[censorship]`)
|
||||
|
||||
`mask_shape_above_cap_blur` adds a second-stage blur for very large probes that are already above `mask_shape_bucket_cap_bytes`.
|
||||
|
||||
- A random tail in `[0, mask_shape_above_cap_blur_max_bytes]` is appended in default mode.
|
||||
- In aggressive mode, the random tail becomes strictly positive: `[1, mask_shape_above_cap_blur_max_bytes]`.
|
||||
- This reduces exact-size leakage above cap at bounded overhead.
|
||||
- Keep `mask_shape_above_cap_blur_max_bytes` conservative to avoid unnecessary egress growth.
|
||||
|
||||
Operational meaning:
|
||||
|
||||
- Without above-cap blur
|
||||
A probe that forwards `5005` bytes will still look like `5005` bytes to the backend if it is already above cap.
|
||||
|
||||
- With above-cap blur enabled
|
||||
That same probe may look like any value in a bounded window above its base length.
|
||||
Example with `mask_shape_above_cap_blur_max_bytes = 64`:
|
||||
backend-observed size becomes `5005..5069` in default mode, or `5006..5069` in aggressive mode.
|
||||
|
||||
- Choosing `mask_shape_above_cap_blur_max_bytes`
|
||||
Small values reduce cost but preserve more separability between far-apart oversized classes.
|
||||
Larger values blur oversized classes more aggressively, but add more egress overhead and more output variance.
|
||||
|
||||
### Timing normalization envelope notes (`[censorship]`)
|
||||
|
||||
`mask_timing_normalization_enabled` smooths timing differences between masking outcomes by applying a target duration envelope.
|
||||
|
||||
- A random target is selected in `[mask_timing_normalization_floor_ms, mask_timing_normalization_ceiling_ms]`.
|
||||
- Fast paths are delayed up to the selected target.
|
||||
- Slow paths are not forced to finish by the ceiling (the envelope is best-effort shaping, not truncation).
|
||||
|
||||
Recommended starting profile for timing shaping:
|
||||
|
||||
- `mask_timing_normalization_enabled = true`
|
||||
- `mask_timing_normalization_floor_ms = 180`
|
||||
- `mask_timing_normalization_ceiling_ms = 320`
|
||||
|
||||
If your backend or network is very bandwidth-constrained, reduce cap first. If probes are still too distinguishable in your environment, increase floor gradually.
|
||||
|
||||
## [access]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | TOML shape example | Description |
|
||||
|---|---|---|---|---|---|
|
||||
| users | `Map<String, String>` | `{"default": "000…000"}` | Secret must be 32 hex characters. | `[access.users]`<br>`user = "32-hex secret"`<br>`user2 = "32-hex secret"` | User credentials map used for client authentication. |
|
||||
| user_ad_tags | `Map<String, String>` | `{}` | Every value must be exactly 32 hex characters. | `[access.user_ad_tags]`<br>`user = "32-hex ad_tag"` | Per-user ad tags used as override over `general.ad_tag`. |
|
||||
| user_max_tcp_conns | `Map<String, usize>` | `{}` | — | `[access.user_max_tcp_conns]`<br>`user = 500` | Per-user maximum concurrent TCP connections. |
|
||||
| user_expirations | `Map<String, DateTime<Utc>>` | `{}` | Timestamp must be valid RFC3339/ISO-8601 datetime. | `[access.user_expirations]`<br>`user = "2026-12-31T23:59:59Z"` | Per-user account expiration timestamps. |
|
||||
| user_data_quota | `Map<String, u64>` | `{}` | — | `[access.user_data_quota]`<br>`user = 1073741824` | Per-user traffic quota in bytes. |
|
||||
| user_max_unique_ips | `Map<String, usize>` | `{}` | — | `[access.user_max_unique_ips]`<br>`user = 16` | Per-user unique source IP limits. |
|
||||
| user_max_unique_ips_global_each | `usize` | `0` | — | `user_max_unique_ips_global_each = 0` | Global fallback used when `[access.user_max_unique_ips]` has no per-user override. |
|
||||
| user_max_unique_ips_mode | `"active_window" \| "time_window" \| "combined"` | `"active_window"` | — | `user_max_unique_ips_mode = "active_window"` | Unique source IP limit accounting mode. |
|
||||
| user_max_unique_ips_window_secs | `u64` | `30` | Must be `> 0`. | `user_max_unique_ips_window_secs = 30` | Window size (seconds) used by unique-IP accounting modes that use time windows. |
|
||||
| replay_check_len | `usize` | `65536` | — | `replay_check_len = 65536` | Replay-protection storage length. |
|
||||
| replay_window_secs | `u64` | `1800` | — | `replay_window_secs = 1800` | Replay-protection window in seconds. |
|
||||
| ignore_time_skew | `bool` | `false` | — | `ignore_time_skew = false` | Disables client/server timestamp skew checks in replay validation when enabled. |
|
||||
|
||||
## [[upstreams]]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| type | `"direct" \| "socks4" \| "socks5"` | — | Required field. | Upstream transport type selector. |
|
||||
| weight | `u16` | `1` | none | Base weight used by weighted-random upstream selection. |
|
||||
| enabled | `bool` | `true` | none | Disabled entries are excluded from upstream selection at runtime. |
|
||||
| scopes | `String` | `""` | none | Comma-separated scope tags used for request-level upstream filtering. |
|
||||
| interface | `String \| null` | `null` | Optional; type-specific runtime rules apply. | Optional outbound interface/local bind hint (supported with type-specific rules). |
|
||||
| bind_addresses | `String[] \| null` | `null` | Applies to `type = "direct"`. | Optional explicit local source bind addresses for `type = "direct"`. |
|
||||
| address | `String` | — | Required for `type = "socks4"` and `type = "socks5"`. | SOCKS server endpoint (`host:port` or `ip:port`) for SOCKS upstream types. |
|
||||
| user_id | `String \| null` | `null` | Only for `type = "socks4"`. | SOCKS4 CONNECT user ID (`type = "socks4"` only). |
|
||||
| username | `String \| null` | `null` | Only for `type = "socks5"`. | SOCKS5 username (`type = "socks5"` only). |
|
||||
| password | `String \| null` | `null` | Only for `type = "socks5"`. | SOCKS5 password (`type = "socks5"` only). |
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
241
docs/FAQ.en.md
241
docs/FAQ.en.md
|
|
@ -1,107 +1,256 @@
|
|||
## How to set up "proxy sponsor" channel and statistics via @MTProxybot bot
|
||||
|
||||
1. Go to @MTProxybot bot.
|
||||
2. Enter the command `/newproxy`
|
||||
3. Send the server IP and port. For example: 1.2.3.4:443
|
||||
4. Open the config `nano /etc/telemt.toml`.
|
||||
5. Copy and send the user secret from the [access.users] section to the bot.
|
||||
6. Copy the tag received from the bot. For example 1234567890abcdef1234567890abcdef.
|
||||
## How to set up a "proxy sponsor" channel and statistics via the @MTProxybot
|
||||
1. Go to the @MTProxybot.
|
||||
2. Enter the `/newproxy` command.
|
||||
3. Send your server's IP address and port. For example: `1.2.3.4:443`.
|
||||
4. Open the configuration file: `nano /etc/telemt/telemt.toml`.
|
||||
5. Copy and send the user secret from the `[access.users]` section to the bot.
|
||||
6. Copy the tag provided by the bot. For example: `1234567890abcdef1234567890abcdef`.
|
||||
> [!WARNING]
|
||||
> The link provided by the bot will not work. Do not copy or use it!
|
||||
7. Uncomment the ad_tag parameter and enter the tag received from the bot.
|
||||
8. Uncomment/add the parameter `use_middle_proxy = true`.
|
||||
7. Uncomment the `ad_tag` parameter and enter the tag received from the bot.
|
||||
8. Uncomment or add the `use_middle_proxy = true` parameter.
|
||||
|
||||
Config example:
|
||||
Configuration example:
|
||||
```toml
|
||||
[general]
|
||||
ad_tag = "1234567890abcdef1234567890abcdef"
|
||||
use_middle_proxy = true
|
||||
```
|
||||
9. Save the config. Ctrl+S -> Ctrl+X.
|
||||
10. Restart telemt `systemctl restart telemt`.
|
||||
11. In the bot, send the command /myproxies and select the added server.
|
||||
9. Save the changes (in nano: Ctrl+S -> Ctrl+X).
|
||||
10. Restart the telemt service: `systemctl restart telemt`.
|
||||
11. Send the `/myproxies` command to the bot and select the added server.
|
||||
12. Click the "Set promotion" button.
|
||||
13. Send a **public link** to the channel. Private channels cannot be added!
|
||||
14. Wait approximately 1 hour for the information to update on Telegram servers.
|
||||
14. Wait for about 1 hour for the information to update on Telegram servers.
|
||||
> [!WARNING]
|
||||
> You will not see the "proxy sponsor" if you are already subscribed to the channel.
|
||||
> The sponsored channel will not be displayed to you if you are already subscribed to it.
|
||||
|
||||
**You can also set up different channels for different users.**
|
||||
**You can also configure different sponsored channels for different users:**
|
||||
```toml
|
||||
[access.user_ad_tags]
|
||||
hello = "ad_tag"
|
||||
hello2 = "ad_tag2"
|
||||
```
|
||||
## Recognizability for DPI and crawler
|
||||
|
||||
## How many people can use 1 link
|
||||
On April 1, 2026, we became aware of a method for detecting MTProxy Fake-TLS,
|
||||
based on the ECH extension and the ordering of cipher suites,
|
||||
as well as an overall unique JA3/JA4 fingerprint
|
||||
that does not occur in modern browsers:
|
||||
we have already submitted initial changes to the Telegram Desktop developers and are working on updates for other clients.
|
||||
|
||||
By default, 1 link can be used by any number of people.
|
||||
You can limit the number of IPs using the proxy.
|
||||
- We consider this a breakthrough aspect, which has no stable analogues today
|
||||
- Based on this: if `telemt` is configured correctly, **TLS mode is completely identical to a real-life handshake + communication** with the specified host
|
||||
- Here is our evidence:
|
||||
- 212.220.88.77 - "dummy" host, running `telemt`
|
||||
- `petrovich.ru` - `tls` + `masking` host, in HEX: `706574726f766963682e7275`
|
||||
- **No MITM + No Fake Certificates/Crypto** = pure transparent *TCP Splice* to "best" upstream: MTProxy or tls/mask-host:
|
||||
  - DPI sees legitimate HTTPS to `tls_host`, including a *valid chain of trust* and entropy
|
||||
  - Crawlers are completely satisfied, receiving responses from `mask_host`
|
||||
### Client WITH secret-key accesses the MTProxy resource:
|
||||
|
||||
<img width="360" height="439" alt="telemt" src="https://github.com/user-attachments/assets/39352afb-4a11-4ecc-9d91-9e8cfb20607d" />
|
||||
|
||||
### Client WITHOUT secret-key gets transparent access to the specified resource:
|
||||
- with trusted certificate
|
||||
- with original handshake
|
||||
- with the full request-response flow
|
||||
- with low-latency overhead
|
||||
```bash
|
||||
root@debian:~/telemt# curl -v -I --resolve petrovich.ru:443:212.220.88.77 https://petrovich.ru/
|
||||
* Added petrovich.ru:443:212.220.88.77 to DNS cache
|
||||
* Hostname petrovich.ru was found in DNS cache
|
||||
* Trying 212.220.88.77:443...
|
||||
* Connected to petrovich.ru (212.220.88.77) port 443 (#0)
|
||||
* ALPN: offers h2,http/1.1
|
||||
* TLSv1.3 (OUT), TLS handshake, Client hello (1):
|
||||
* CAfile: /etc/ssl/certs/ca-certificates.crt
|
||||
* CApath: /etc/ssl/certs
|
||||
* TLSv1.3 (IN), TLS handshake, Server hello (2):
|
||||
* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8):
|
||||
* TLSv1.3 (IN), TLS handshake, Certificate (11):
|
||||
* TLSv1.3 (IN), TLS handshake, CERT verify (15):
|
||||
* TLSv1.3 (IN), TLS handshake, Finished (20):
|
||||
* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1):
|
||||
* TLSv1.3 (OUT), TLS handshake, Finished (20):
|
||||
* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384
|
||||
* ALPN: server did not agree on a protocol. Uses default.
|
||||
* Server certificate:
|
||||
* subject: C=RU; ST=Saint Petersburg; L=Saint Petersburg; O=STD Petrovich; CN=*.petrovich.ru
|
||||
* start date: Jan 28 11:21:01 2025 GMT
|
||||
* expire date: Mar 1 11:21:00 2026 GMT
|
||||
* subjectAltName: host "petrovich.ru" matched cert's "petrovich.ru"
|
||||
* issuer: C=BE; O=GlobalSign nv-sa; CN=GlobalSign RSA OV SSL CA 2018
|
||||
* SSL certificate verify ok.
|
||||
* using HTTP/1.x
|
||||
> HEAD / HTTP/1.1
|
||||
> Host: petrovich.ru
|
||||
> User-Agent: curl/7.88.1
|
||||
> Accept: */*
|
||||
>
|
||||
* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4):
|
||||
* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4):
|
||||
* old SSL session ID is stale, removing
|
||||
< HTTP/1.1 200 OK
|
||||
HTTP/1.1 200 OK
|
||||
< Server: Variti/0.9.3a
|
||||
Server: Variti/0.9.3a
|
||||
< Date: Thu, 01 Jan 2026 00:00:00 GMT
|
||||
Date: Thu, 01 Jan 2026 00:00:00 GMT
|
||||
< Access-Control-Allow-Origin: *
|
||||
Access-Control-Allow-Origin: *
|
||||
< Content-Type: text/html
|
||||
Content-Type: text/html
|
||||
< Cache-Control: no-store
|
||||
Cache-Control: no-store
|
||||
< Expires: Thu, 01 Jan 2026 00:00:00 GMT
|
||||
Expires: Thu, 01 Jan 2026 00:00:00 GMT
|
||||
< Pragma: no-cache
|
||||
Pragma: no-cache
|
||||
< Set-Cookie: ipp_uid=XXXXX/XXXXX/XXXXX==; Expires=Tue, 31 Dec 2040 23:59:59 GMT; Domain=.petrovich.ru; Path=/
|
||||
Set-Cookie: ipp_uid=XXXXX/XXXXX/XXXXX==; Expires=Tue, 31 Dec 2040 23:59:59 GMT; Domain=.petrovich.ru; Path=/
|
||||
< Content-Type: text/html
|
||||
Content-Type: text/html
|
||||
< Content-Length: 31253
|
||||
Content-Length: 31253
|
||||
< Connection: keep-alive
|
||||
Connection: keep-alive
|
||||
< Keep-Alive: timeout=60
|
||||
Keep-Alive: timeout=60
|
||||
|
||||
<
|
||||
* Connection #0 to host petrovich.ru left intact
|
||||
|
||||
```
|
||||
- We challenged ourselves, we kept trying and we didn't only *beat the air*: now, we have something to show you
|
||||
- Don't want to just take our word for it? That's great, and we respect it: you can build `telemt` yourself or download a build and verify it right now
|
||||
|
||||
|
||||
## F.A.Q.
|
||||
|
||||
### Telegram Calls via MTProxy
|
||||
- Telegram architecture **does NOT allow calls via MTProxy**, but only via SOCKS5, which cannot be obfuscated
|
||||
|
||||
### How does DPI see MTProxy TLS?
|
||||
- DPI sees MTProxy in Fake TLS (ee) mode as TLS 1.3
|
||||
  - the SNI you specify is sent by both the client and the server;
|
||||
- ALPN is similar to HTTP 1.1/2;
|
||||
- high entropy, which is normal for AES-encrypted traffic;
|
||||
|
||||
### Whitelist on IP
|
||||
- MTProxy cannot work when there is:
|
||||
- no IP connectivity to the target host: Russian Whitelist on Mobile Networks - "Белый список"
|
||||
- OR all TCP traffic is blocked
|
||||
- OR high entropy/encrypted traffic is blocked: content filters at universities and critical infrastructure
|
||||
- OR all TLS traffic is blocked
|
||||
- OR specified port is blocked: use 443 to make it "like real"
|
||||
- OR provided SNI is blocked: use "officially approved"/innocuous name
|
||||
- like most protocols on the Internet;
|
||||
- these situations are observed:
|
||||
- in China behind the Great Firewall
|
||||
- in Russia on mobile networks, less in wired networks
|
||||
- in Iran during "activity"
|
||||
|
||||
### Why do you need a middle proxy (ME)
|
||||
https://github.com/telemt/telemt/discussions/167
|
||||
|
||||
### How many people can use one link
|
||||
By default, an unlimited number of people can use a single link.
|
||||
However, you can limit the number of unique IP addresses for each user:
|
||||
```toml
|
||||
[access.user_max_unique_ips]
|
||||
hello = 1
|
||||
```
|
||||
This parameter limits how many unique IPs can use 1 link simultaneously. If one user disconnects, a second user can connect. Also, multiple users can sit behind the same IP.
|
||||
This parameter sets the maximum number of unique IP addresses from which a single link can be used simultaneously. If the first user disconnects, a second one can connect. At the same time, multiple users can connect from a single IP address simultaneously (for example, devices on the same Wi-Fi network).
|
||||
|
||||
## How to create multiple different links
|
||||
|
||||
1. Generate the required number of secrets `openssl rand -hex 16`
|
||||
2. Open the config `nano /etc/telemt.toml`
|
||||
3. Add new users.
|
||||
### How to create multiple different links
|
||||
1. Generate the required number of secrets using the command: `openssl rand -hex 16`.
|
||||
2. Open the configuration file: `nano /etc/telemt/telemt.toml`.
|
||||
3. Add new users to the `[access.users]` section:
|
||||
```toml
|
||||
[access.users]
|
||||
user1 = "00000000000000000000000000000001"
|
||||
user2 = "00000000000000000000000000000002"
|
||||
user3 = "00000000000000000000000000000003"
|
||||
```
|
||||
4. Save the config. Ctrl+S -> Ctrl+X. You don't need to restart telemt.
|
||||
5. Get the links via
|
||||
4. Save the configuration (Ctrl+S -> Ctrl+X). There is no need to restart the telemt service.
|
||||
5. Get the ready-to-use links using the command:
|
||||
```bash
|
||||
curl -s http://127.0.0.1:9091/v1/users | jq
|
||||
```
|
||||
|
||||
## How to view metrics
|
||||
### "Unknown TLS SNI" error
|
||||
Usually, this error occurs if you have changed the `tls_domain` parameter, but users continue to connect using old links with the previous domain.
|
||||
|
||||
1. Open the config `nano /etc/telemt.toml`
|
||||
2. Add the following parameters
|
||||
If you need to allow connections with any domains (ignoring SNI mismatches), add the following parameters:
|
||||
```toml
|
||||
[censorship]
|
||||
unknown_sni_action = "mask"
|
||||
```
|
||||
|
||||
### How to view metrics
|
||||
|
||||
1. Open the configuration file: `nano /etc/telemt/telemt.toml`.
|
||||
2. Add the following parameters:
|
||||
```toml
|
||||
[server]
|
||||
metrics_port = 9090
|
||||
metrics_whitelist = ["127.0.0.1/32", "::1/128", "0.0.0.0/0"]
|
||||
```
|
||||
3. Save the config. Ctrl+S -> Ctrl+X.
|
||||
4. Metrics are available at SERVER_IP:9090/metrics.
|
||||
3. Save the changes (Ctrl+S -> Ctrl+X).
|
||||
4. After that, metrics will be available at: `SERVER_IP:9090/metrics`.
|
||||
> [!WARNING]
|
||||
> "0.0.0.0/0" in metrics_whitelist opens access from any IP. Replace with your own IP. For example "1.2.3.4"
|
||||
> The value `"0.0.0.0/0"` in `metrics_whitelist` opens access to metrics from any IP address. It is recommended to replace it with your personal IP, for example: `"1.2.3.4/32"`.
|
||||
|
||||
### Too many open files
|
||||
- On a fresh Linux install the default open file limit is low; under load `telemt` may fail with `Accept error: Too many open files`
|
||||
- **Systemd**: add `LimitNOFILE=65536` to the `[Service]` section (already included in the example above)
|
||||
- **Docker**: add `--ulimit nofile=65536:65536` to your `docker run` command, or in `docker-compose.yml`:
|
||||
```yaml
|
||||
ulimits:
|
||||
nofile:
|
||||
soft: 65536
|
||||
hard: 65536
|
||||
```
|
||||
- **System-wide** (optional): add to `/etc/security/limits.conf`:
|
||||
```
|
||||
* soft nofile 1048576
|
||||
* hard nofile 1048576
|
||||
root soft nofile 1048576
|
||||
root hard nofile 1048576
|
||||
```
|
||||
|
||||
|
||||
## Additional parameters
|
||||
|
||||
### Domain in link instead of IP
|
||||
To specify a domain in the links, add to the `[general.links]` section of the config file.
|
||||
### Domain in the link instead of IP
|
||||
To display a domain instead of an IP address in the connection links, add the following lines to the configuration file:
|
||||
```toml
|
||||
[general.links]
|
||||
public_host = "proxy.example.com"
|
||||
```
|
||||
|
||||
### Server connection limit
|
||||
Limits the total number of open connections to the server:
|
||||
### Total server connection limit
|
||||
This parameter limits the total number of active connections to the server:
|
||||
```toml
|
||||
[server]
|
||||
max_connections = 10000 # 0 - unlimited, 10000 - default
|
||||
```
|
||||
|
||||
### Upstream Manager
|
||||
To specify an upstream, add to the `[[upstreams]]` section of the config.toml file:
|
||||
#### Binding to IP
|
||||
To configure outbound connections (upstreams), add the corresponding parameters to the `[[upstreams]]` section of the configuration file:
|
||||
|
||||
#### Binding to an outbound IP address
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "direct"
|
||||
weight = 1
|
||||
enabled = true
|
||||
interface = "192.168.1.100" # Change to your outgoing IP
|
||||
interface = "192.168.1.100" # Replace with your outbound IP
|
||||
```
|
||||
#### SOCKS4/5 as Upstream
|
||||
- Without authentication:
|
||||
|
||||
#### Using SOCKS4/5 as an Upstream
|
||||
- Without authorization:
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "socks5" # Specify SOCKS4 or SOCKS5
|
||||
|
|
@ -110,7 +259,7 @@ weight = 1 # Set Weight for Scenarios
|
|||
enabled = true
|
||||
```
|
||||
|
||||
- With authentication:
|
||||
- With authorization:
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "socks5" # Specify SOCKS4 or SOCKS5
|
||||
|
|
@ -121,8 +270,8 @@ weight = 1 # Set Weight for Scenarios
|
|||
enabled = true
|
||||
```
|
||||
|
||||
#### Shadowsocks as Upstream
|
||||
Requires `use_middle_proxy = false`.
|
||||
#### Using Shadowsocks as an Upstream
|
||||
For this method to work, the `use_middle_proxy = false` parameter must be set.
|
||||
|
||||
```toml
|
||||
[general]
|
||||
|
|
|
|||
249
docs/FAQ.ru.md
249
docs/FAQ.ru.md
|
|
@ -1,128 +1,277 @@
|
|||
## Как настроить канал "спонсор прокси" и статистику через бота @MTProxybot
|
||||
|
||||
1. Зайти в бота @MTProxybot.
|
||||
2. Ввести команду `/newproxy`
|
||||
3. Отправить IP и порт сервера. Например: 1.2.3.4:443
|
||||
4. Открыть конфиг `nano /etc/telemt.toml`.
|
||||
5. Скопировать и отправить боту секрет пользователя из раздела [access.users].
|
||||
6. Скопировать полученный tag у бота. Например 1234567890abcdef1234567890abcdef.
|
||||
1. Зайдите в бота @MTProxybot.
|
||||
2. Введите команду `/newproxy`.
|
||||
3. Отправьте IP-адрес и порт сервера. Например: `1.2.3.4:443`.
|
||||
4. Откройте файл конфигурации: `nano /etc/telemt/telemt.toml`.
|
||||
5. Скопируйте и отправьте боту секрет пользователя из раздела `[access.users]`.
|
||||
6. Скопируйте тег (tag), который выдаст бот. Например: `1234567890abcdef1234567890abcdef`.
|
||||
> [!WARNING]
|
||||
> Ссылка, которую выдает бот, не будет работать. Не копируйте и не используйте её!
|
||||
7. Раскомментировать параметр ad_tag и вписать tag, полученный у бота.
|
||||
8. Раскомментировать/добавить параметр use_middle_proxy = true.
|
||||
> Ссылка, которую выдает бот, работать не будет. Не копируйте и не используйте её!
|
||||
7. Раскомментируйте параметр `ad_tag` и впишите тег, полученный от бота.
|
||||
8. Раскомментируйте или добавьте параметр `use_middle_proxy = true`.
|
||||
|
||||
Пример конфига:
|
||||
Пример конфигурации:
|
||||
```toml
|
||||
[general]
|
||||
ad_tag = "1234567890abcdef1234567890abcdef"
|
||||
use_middle_proxy = true
|
||||
```
|
||||
9. Сохранить конфиг. Ctrl+S -> Ctrl+X.
|
||||
10. Перезапустить telemt `systemctl restart telemt`.
|
||||
11. В боте отправить команду /myproxies и выбрать добавленный сервер.
|
||||
12. Нажать кнопку "Set promotion".
|
||||
13. Отправить **публичную ссылку** на канал. Приватный канал добавить нельзя!
|
||||
14. Подождать примерно 1 час, пока информация обновится на серверах Telegram.
|
||||
9. Сохраните изменения (в nano: Ctrl+S -> Ctrl+X).
|
||||
10. Перезапустите службу telemt: `systemctl restart telemt`.
|
||||
11. В боте отправьте команду `/myproxies` и выберите добавленный сервер.
|
||||
12. Нажмите кнопку «Set promotion».
|
||||
13. Отправьте **публичную ссылку** на канал. Приватные каналы добавлять нельзя!
|
||||
14. Подождите примерно 1 час, пока информация обновится на серверах Telegram.
|
||||
> [!WARNING]
|
||||
> У вас не будет отображаться "спонсор прокси" если вы уже подписаны на канал.
|
||||
> Спонсорский канал не будет у вас отображаться, если вы уже на него подписаны.
|
||||
|
||||
**Также вы можете настроить разные каналы для разных пользователей.**
|
||||
**Вы также можете настроить разные спонсорские каналы для разных пользователей:**
|
||||
```toml
|
||||
[access.user_ad_tags]
|
||||
hello = "ad_tag"
|
||||
hello2 = "ad_tag2"
|
||||
```
|
||||
## Распознаваемость для DPI и сканеров
|
||||
|
||||
## Сколько человек может пользоваться 1 ссылкой
|
||||
1 апреля 2026 года нам стало известно о методе обнаружения MTProxy Fake-TLS, основанном на расширении ECH и порядке набора шифров,
|
||||
а также об общем уникальном отпечатке JA3/JA4, который не встречается в современных браузерах: мы уже отправили первоначальные изменения разработчикам Telegram Desktop и работаем над обновлениями для других клиентов.
|
||||
|
||||
По умолчанию 1 ссылкой может пользоваться сколько угодно человек.
|
||||
Вы можете ограничить число IP, использующих прокси.
|
||||
- Мы считаем это прорывом, которому на сегодняшний день нет стабильных аналогов;
|
||||
- Исходя из этого: если `telemt` настроен правильно, **режим TLS полностью идентичен реальному «рукопожатию» + обмену данными** с указанным хостом;
|
||||
- Вот наши доказательства:
|
||||
- 212.220.88.77 — «фиктивный» хост, на котором запущен `telemt`;
|
||||
- `petrovich.ru` — хост с `tls` + `masking`, в HEX: `706574726f766963682e7275`;
|
||||
- **Без MITM + без поддельных сертификатов/шифрования** = чистое прозрачное *TCP Splice* к «лучшему» исходному серверу: MTProxy или tls/mask-host:
|
||||
- DPI видит легитимный HTTPS к `tls_host`, включая *достоверную цепочку доверия* и энтропию;
|
||||
- Краулеры полностью удовлетворены получением ответов от `mask_host`.
|
||||
|
||||
### Клиент С секретным ключом получает доступ к ресурсу MTProxy:
|
||||
|
||||
<img width="360" height="439" alt="telemt" src="https://github.com/user-attachments/assets/39352afb-4a11-4ecc-9d91-9e8cfb20607d" />
|
||||
|
||||
### Клиент БЕЗ секретного ключа получает прозрачный доступ к указанному ресурсу:
|
||||
- с доверенным сертификатом;
|
||||
- с исходным «рукопожатием»;
|
||||
- с полным циклом запрос-ответ;
|
||||
- с низкой задержкой.
|
||||
|
||||
```bash
|
||||
root@debian:~/telemt# curl -v -I --resolve petrovich.ru:443:212.220.88.77 https://petrovich.ru/
|
||||
* Added petrovich.ru:443:212.220.88.77 to DNS cache
|
||||
* Hostname petrovich.ru was found in DNS cache
|
||||
* Trying 212.220.88.77:443...
|
||||
* Connected to petrovich.ru (212.220.88.77) port 443 (#0)
|
||||
* ALPN: offers h2,http/1.1
|
||||
* TLSv1.3 (OUT), TLS handshake, Client hello (1):
|
||||
* CAfile: /etc/ssl/certs/ca-certificates.crt
|
||||
* CApath: /etc/ssl/certs
|
||||
* TLSv1.3 (IN), TLS handshake, Server hello (2):
|
||||
* TLSv1.3 (IN), TLS handshake, Encrypted Extensions (8):
|
||||
* TLSv1.3 (IN), TLS handshake, Certificate (11):
|
||||
* TLSv1.3 (IN), TLS handshake, CERT verify (15):
|
||||
* TLSv1.3 (IN), TLS handshake, Finished (20):
|
||||
* TLSv1.3 (OUT), TLS change cipher, Change cipher spec (1):
|
||||
* TLSv1.3 (OUT), TLS handshake, Finished (20):
|
||||
* SSL connection using TLSv1.3 / TLS_AES_256_GCM_SHA384
|
||||
* ALPN: server did not agree on a protocol. Uses default.
|
||||
* Server certificate:
|
||||
* subject: C=RU; ST=Saint Petersburg; L=Saint Petersburg; O=STD Petrovich; CN=*.petrovich.ru
|
||||
* start date: Jan 28 11:21:01 2025 GMT
|
||||
* expire date: Mar 1 11:21:00 2026 GMT
|
||||
* subjectAltName: host "petrovich.ru" matched cert's "petrovich.ru"
|
||||
* issuer: C=BE; O=GlobalSign nv-sa; CN=GlobalSign RSA OV SSL CA 2018
|
||||
* SSL certificate verify ok.
|
||||
* using HTTP/1.x
|
||||
> HEAD / HTTP/1.1
|
||||
> Host: petrovich.ru
|
||||
> User-Agent: curl/7.88.1
|
||||
> Accept: */*
|
||||
>
|
||||
* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4):
|
||||
* TLSv1.3 (IN), TLS handshake, Newsession Ticket (4):
|
||||
* old SSL session ID is stale, removing
|
||||
< HTTP/1.1 200 OK
|
||||
HTTP/1.1 200 OK
|
||||
< Server: Variti/0.9.3a
|
||||
Server: Variti/0.9.3a
|
||||
< Date: Thu, 01 Jan 2026 00:00:00 GMT
|
||||
Date: Thu, 01 Jan 2026 00:00:00 GMT
|
||||
< Access-Control-Allow-Origin: *
|
||||
Access-Control-Allow-Origin: *
|
||||
< Content-Type: text/html
|
||||
Content-Type: text/html
|
||||
< Cache-Control: no-store
|
||||
Cache-Control: no-store
|
||||
< Expires: Thu, 01 Jan 2026 00:00:00 GMT
|
||||
Expires: Thu, 01 Jan 2026 00:00:00 GMT
|
||||
< Pragma: no-cache
|
||||
Pragma: no-cache
|
||||
< Set-Cookie: ipp_uid=XXXXX/XXXXX/XXXXX==; Expires=Tue, 31 Dec 2040 23:59:59 GMT; Domain=.petrovich.ru; Path=/
|
||||
Set-Cookie: ipp_uid=XXXXX/XXXXX/XXXXX==; Expires=Tue, 31 Dec 2040 23:59:59 GMT; Domain=.petrovich.ru; Path=/
|
||||
< Content-Type: text/html
|
||||
Content-Type: text/html
|
||||
< Content-Length: 31253
|
||||
Content-Length: 31253
|
||||
< Connection: keep-alive
|
||||
Connection: keep-alive
|
||||
< Keep-Alive: timeout=60
|
||||
Keep-Alive: timeout=60
|
||||
|
||||
<
|
||||
* Connection #0 to host petrovich.ru left intact
|
||||
|
||||
```
|
||||
- Мы поставили перед собой задачу, не сдавались и не просто «бились в пустоту»: теперь у нас есть что вам показать.
|
||||
- Не верите нам на слово? — Это прекрасно, и мы уважаем ваше решение: вы можете собрать свой собственный `telemt` или скачать готовую сборку и проверить её прямо сейчас.
|
||||
|
||||
### Звонки в Telegram через MTProxy
|
||||
- Архитектура Telegram **НЕ поддерживает звонки через MTProxy**, а только через SOCKS5, который невозможно замаскировать
|
||||
|
||||
### Как DPI распознает TLS-соединение MTProxy?
|
||||
- DPI распознает MTProxy в режиме Fake TLS (ee) как TLS 1.3
|
||||
- указанный вами SNI отправляется как клиентом, так и сервером;
|
||||
- ALPN аналогичен HTTP 1.1/2;
|
||||
- высокая энтропия, что нормально для трафика, зашифрованного AES;
|
||||
|
||||
### Белый список по IP
|
||||
- MTProxy не может работать, если:
|
||||
- отсутствует IP-связь с целевым хостом: российский белый список в мобильных сетях — «Белый список»;
|
||||
- ИЛИ весь TCP-трафик заблокирован;
|
||||
- ИЛИ трафик с высокой энтропией/зашифрованный трафик заблокирован: контент-фильтры в университетах и критически важной инфраструктуре;
|
||||
- ИЛИ весь TLS-трафик заблокирован;
|
||||
- ИЛИ заблокирован указанный порт: используйте 443, чтобы сделать его «как настоящий»;
|
||||
- ИЛИ заблокирован предоставленный SNI: используйте «официально одобренное»/безобидное имя;
|
||||
- как и большинство протоколов в Интернете;
|
||||
- такие ситуации наблюдаются:
|
||||
- в Китае за Великим файрволом;
|
||||
- в России в мобильных сетях, реже в проводных сетях;
|
||||
- в Иране во время «активности».
|
||||
|
||||
|
||||
## Зачем нужен middle proxy (ME)
|
||||
https://github.com/telemt/telemt/discussions/167
|
||||
|
||||
|
||||
## Что такое dd и ee в контексте MTProxy?
|
||||
|
||||
Это два разных режима работы прокси. Понять, какой режим используется, можно взглянув на начало секрета — там будет dd или ee, вот пример:
|
||||
tg://proxy?server=s1.dimasssss.space&port=443&secret=eebe3007e927acd147dde12bee8b1a7c9364726976652e676f6f676c652e636f6d
|
||||
|
||||
dd — режим с мусорным трафиком, обфускацией данных, похожий на shadowsocks. У такого трафика есть заметный паттерн, который DPI умеют распознавать и впоследствии блокировать. Использовать этот режим на текущий момент не рекомендуется.
|
||||
|
||||
ee — режим маскировки под существующий домен (FakeTLS), словно вы сёрфите в интернете через браузер. На текущий момент не попадает под блокировку.
|
||||
|
||||
### Где эти режимы настраиваются?
|
||||
|
||||
```toml
|
||||
В конфиге telemt.toml в разделе [general.modes]:
|
||||
classic = false # классический режим, давно стал бесполезным
|
||||
secure = false # переменная dd-режима
|
||||
tls = true # переменная ee-режима
|
||||
```
|
||||
|
||||
## Сколько человек может пользоваться одной ссылкой
|
||||
|
||||
По умолчанию одной ссылкой может пользоваться неограниченное число людей.
|
||||
Однако вы можете ограничить количество уникальных IP-адресов для каждого пользователя:
|
||||
```toml
|
||||
[access.user_max_unique_ips]
|
||||
hello = 1
|
||||
```
|
||||
Этот параметр ограничивает, сколько уникальных IP может использовать 1 ссылку одновременно. Если один пользователь отключится, второй сможет подключиться. Также с одного IP может сидеть несколько пользователей.
|
||||
Этот параметр задает максимальное количество уникальных IP-адресов, с которых можно одновременно использовать одну ссылку. Если первый пользователь отключится, второй сможет подключиться. При этом с одного IP-адреса могут подключаться несколько пользователей одновременно (например, устройства в одной Wi-Fi сети).
|
||||
|
||||
## Как сделать несколько разных ссылок
|
||||
## Как создать несколько разных ссылок
|
||||
|
||||
1. Сгенерируйте нужное число секретов `openssl rand -hex 16`
|
||||
2. Открыть конфиг `nano /etc/telemt.toml`
|
||||
3. Добавить новых пользователей.
|
||||
1. Сгенерируйте необходимое количество секретов с помощью команды: `openssl rand -hex 16`.
|
||||
2. Откройте файл конфигурации: `nano /etc/telemt/telemt.toml`.
|
||||
3. Добавьте новых пользователей в секцию `[access.users]`:
|
||||
```toml
|
||||
[access.users]
|
||||
user1 = "00000000000000000000000000000001"
|
||||
user2 = "00000000000000000000000000000002"
|
||||
user3 = "00000000000000000000000000000003"
|
||||
```
|
||||
4. Сохранить конфиг. Ctrl+S -> Ctrl+X. Перезапускать telemt не нужно.
|
||||
5. Получить ссылки через
|
||||
4. Сохраните конфигурацию (Ctrl+S -> Ctrl+X). Перезапускать службу telemt не нужно.
|
||||
5. Получите готовые ссылки с помощью команды:
|
||||
```bash
|
||||
curl -s http://127.0.0.1:9091/v1/users | jq
|
||||
```
|
||||
|
||||
## Ошибка "Unknown TLS SNI"
|
||||
Обычно эта ошибка возникает, если вы изменили параметр `tls_domain`, но пользователи продолжают подключаться по старым ссылкам с прежним доменом.
|
||||
|
||||
Если необходимо разрешить подключение с любыми доменами (игнорируя несовпадения SNI), добавьте следующие параметры:
|
||||
```toml
|
||||
[censorship]
|
||||
unknown_sni_action = "mask"
|
||||
```
|
||||
|
||||
## Как посмотреть метрики
|
||||
|
||||
1. Открыть конфиг `nano /etc/telemt.toml`
|
||||
2. Добавить следующие параметры
|
||||
1. Откройте файл конфигурации: `nano /etc/telemt/telemt.toml`.
|
||||
2. Добавьте следующие параметры:
|
||||
```toml
|
||||
[server]
|
||||
metrics_port = 9090
|
||||
metrics_whitelist = ["127.0.0.1/32", "::1/128", "0.0.0.0/0"]
|
||||
```
|
||||
3. Сохранить конфиг. Ctrl+S -> Ctrl+X.
|
||||
4. Метрики доступны по адресу SERVER_IP:9090/metrics.
|
||||
3. Сохраните изменения (Ctrl+S -> Ctrl+X).
|
||||
4. После этого метрики будут доступны по адресу: `SERVER_IP:9090/metrics`.
|
||||
> [!WARNING]
|
||||
> "0.0.0.0/0" в metrics_whitelist открывает доступ с любого IP. Замените на свой ip. Например "1.2.3.4"
|
||||
> Значение `"0.0.0.0/0"` в `metrics_whitelist` открывает доступ к метрикам с любого IP-адреса. Рекомендуется заменить его на ваш личный IP, например: `"1.2.3.4/32"`.
|
||||
|
||||
## Дополнительные параметры
|
||||
|
||||
### Домен в ссылке вместо IP
|
||||
Чтобы указать домен в ссылках, добавьте в секцию `[general.links]` файла config.
|
||||
Чтобы в ссылках для подключения отображался домен вместо IP-адреса, добавьте следующие строки в файл конфигурации:
|
||||
```toml
|
||||
[general.links]
|
||||
public_host = "proxy.example.com"
|
||||
```
|
||||
|
||||
### Общий лимит подключений к серверу
|
||||
Ограничивает общее число открытых подключений к серверу:
|
||||
Этот параметр ограничивает общее количество активных подключений к серверу:
|
||||
```toml
|
||||
[server]
|
||||
max_connections = 10000 # 0 - unlimited, 10000 - default
|
||||
max_connections = 10000 # 0 - без ограничений, 10000 - по умолчанию
|
||||
```
|
||||
|
||||
### Upstream Manager
|
||||
Чтобы указать апстрим, добавьте в секцию `[[upstreams]]` файла config.toml:
|
||||
#### Привязка к IP
|
||||
Для настройки исходящих подключений (Upstreams) добавьте соответствующие параметры в секцию `[[upstreams]]` файла конфигурации:
|
||||
|
||||
#### Привязка к исходящему IP-адресу
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "direct"
|
||||
weight = 1
|
||||
enabled = true
|
||||
interface = "192.168.1.100" # Change to your outgoing IP
|
||||
interface = "192.168.1.100" # Замените на ваш исходящий IP
|
||||
```
|
||||
#### SOCKS4/5 как Upstream
|
||||
|
||||
#### Использование SOCKS4/5 в качестве Upstream
|
||||
- Без авторизации:
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "socks5" # Specify SOCKS4 or SOCKS5
|
||||
address = "1.2.3.4:1234" # SOCKS-server Address
|
||||
weight = 1 # Set Weight for Scenarios
|
||||
type = "socks5" # выбор типа SOCKS4 или SOCKS5
|
||||
address = "1.2.3.4:1234" # адрес сервера SOCKS
|
||||
weight = 1 # вес
|
||||
enabled = true
|
||||
```
|
||||
|
||||
- С авторизацией:
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "socks5" # Specify SOCKS4 or SOCKS5
|
||||
address = "1.2.3.4:1234" # SOCKS-server Address
|
||||
username = "user" # Username for Auth on SOCKS-server
|
||||
password = "pass" # Password for Auth on SOCKS-server
|
||||
weight = 1 # Set Weight for Scenarios
|
||||
type = "socks5" # выбор типа SOCKS4 или SOCKS5
|
||||
address = "1.2.3.4:1234" # адрес сервера SOCKS
|
||||
username = "user" # имя пользователя
|
||||
password = "pass" # пароль
|
||||
weight = 1 # вес
|
||||
enabled = true
|
||||
```
|
||||
|
||||
#### Shadowsocks как Upstream
|
||||
Требует `use_middle_proxy = false`.
|
||||
#### Использование Shadowsocks в качестве Upstream
|
||||
Для работы этого метода требуется установить параметр `use_middle_proxy = false`.
|
||||
|
||||
```toml
|
||||
[general]
|
||||
|
|
|
|||
|
|
@ -1,92 +0,0 @@
|
|||
# Öffentliche TELEMT-Lizenz 3
|
||||
|
||||
***Alle Rechte vorbehalten (c) 2026 Telemt***
|
||||
|
||||
Hiermit wird jeder Person, die eine Kopie dieser Software und der dazugehörigen Dokumentation (nachfolgend "Software") erhält, unentgeltlich die Erlaubnis erteilt, die Software ohne Einschränkungen zu nutzen, einschließlich des Rechts, die Software zu verwenden, zu vervielfältigen, zu ändern, abgeleitete Werke zu erstellen, zu verbinden, zu veröffentlichen, zu verbreiten, zu unterlizenzieren und/oder Kopien der Software zu verkaufen sowie diese Rechte auch denjenigen einzuräumen, denen die Software zur Verfügung gestellt wird, vorausgesetzt, dass sämtliche Urheberrechtshinweise sowie die Bedingungen und Bestimmungen dieser Lizenz eingehalten werden.
|
||||
|
||||
### Begriffsbestimmungen
|
||||
|
||||
Für die Zwecke dieser Lizenz gelten die folgenden Definitionen:
|
||||
|
||||
**"Software" (Software)** — die Telemt-Software einschließlich Quellcode, Dokumentation und sämtlicher zugehöriger Dateien, die unter den Bedingungen dieser Lizenz verbreitet werden.
|
||||
|
||||
**"Contributor" (Contributor)** — jede natürliche oder juristische Person, die Code, Patches, Dokumentation oder andere Materialien eingereicht hat, die von den Maintainers des Projekts angenommen und in die Software aufgenommen wurden.
|
||||
|
||||
**"Beitrag" (Contribution)** — jedes urheberrechtlich geschützte Werk, das bewusst zur Aufnahme in die Software eingereicht wurde.
|
||||
|
||||
**"Modifizierte Version" (Modified Version)** — jede Version der Software, die gegenüber der ursprünglichen Software geändert, angepasst, erweitert oder anderweitig modifiziert wurde.
|
||||
|
||||
**"Maintainers" (Maintainers)** — natürliche oder juristische Personen, die für das offizielle Telemt-Projekt und dessen offizielle Veröffentlichungen verantwortlich sind.
|
||||
|
||||
### 1 Urheberrechtshinweis (Attribution)
|
||||
|
||||
Bei der Weitergabe der Software, sowohl in Form des Quellcodes als auch in binärer Form, MÜSSEN folgende Elemente erhalten bleiben:
|
||||
|
||||
- der oben genannte Urheberrechtshinweis;
|
||||
- der vollständige Text dieser Lizenz;
|
||||
- sämtliche bestehenden Hinweise auf Urheberschaft.
|
||||
|
||||
### 2 Hinweis auf Modifikationen
|
||||
|
||||
Wenn Änderungen an der Software vorgenommen werden, MUSS die Person, die diese Änderungen vorgenommen hat, eindeutig darauf hinweisen, dass die Software modifiziert wurde, und eine kurze Beschreibung der vorgenommenen Änderungen beifügen.
|
||||
|
||||
Modifizierte Versionen der Software DÜRFEN NICHT als die originale Version von Telemt dargestellt werden.
|
||||
|
||||
### 3 Marken und Bezeichnungen
|
||||
|
||||
Diese Lizenz GEWÄHRT KEINE Rechte zur Nutzung der Bezeichnung **"Telemt"**, des Telemt-Logos oder sonstiger Marken, Kennzeichen oder Branding-Elemente von Telemt.
|
||||
|
||||
Weiterverbreitete oder modifizierte Versionen der Software DÜRFEN die Bezeichnung Telemt nicht in einer Weise verwenden, die bei Nutzern den Eindruck eines offiziellen Ursprungs oder einer Billigung durch das Telemt-Projekt erwecken könnte, sofern hierfür keine ausdrückliche Genehmigung der Maintainers vorliegt.
|
||||
|
||||
Die Verwendung der Bezeichnung **Telemt** zur Beschreibung einer modifizierten Version der Software ist nur zulässig, wenn diese Version eindeutig als modifiziert oder inoffiziell gekennzeichnet ist.
|
||||
|
||||
Jegliche Verbreitung, die Nutzer vernünftigerweise darüber täuschen könnte, dass es sich um eine offizielle Veröffentlichung von Telemt handelt, ist untersagt.
|
||||
|
||||
### 4 Transparenz bei der Verbreitung von Binärversionen
|
||||
|
||||
Im Falle der Verbreitung kompilierter Binärversionen der Software wird der Verbreiter HIERMIT ERMUTIGT (encouraged), soweit dies vernünftigerweise möglich ist, Zugang zum entsprechenden Quellcode sowie zu den Build-Anweisungen bereitzustellen.
|
||||
|
||||
Diese Praxis trägt zur Transparenz bei und ermöglicht es Empfängern, die Integrität und Reproduzierbarkeit der verbreiteten Builds zu überprüfen.
|
||||
|
||||
## 5 Gewährung einer Patentlizenz und Beendigung von Rechten
|
||||
|
||||
Jeder Contributor gewährt den Empfängern der Software eine unbefristete, weltweite, nicht-exklusive, unentgeltliche, lizenzgebührenfreie und unwiderrufliche Patentlizenz für:
|
||||
|
||||
- die Herstellung,
|
||||
- die Beauftragung der Herstellung,
|
||||
- die Nutzung,
|
||||
- das Anbieten zum Verkauf,
|
||||
- den Verkauf,
|
||||
- den Import,
|
||||
- sowie jede sonstige Verbreitung der Software.
|
||||
|
||||
Diese Patentlizenz erstreckt sich ausschließlich auf solche Patentansprüche, die notwendigerweise durch den jeweiligen Beitrag des Contributors allein oder in Kombination mit der Software verletzt würden.
|
||||
|
||||
Leitet eine Person ein Patentverfahren ein oder beteiligt sich daran, einschließlich Gegenklagen oder Kreuzklagen, mit der Behauptung, dass die Software oder ein darin enthaltener Beitrag ein Patent verletzt, **erlöschen sämtliche durch diese Lizenz gewährten Rechte für diese Person unmittelbar mit Einreichung der Klage**.
|
||||
|
||||
Darüber hinaus erlöschen alle durch diese Lizenz gewährten Rechte **automatisch**, wenn eine Person ein gerichtliches Verfahren einleitet, in dem behauptet wird, dass die Software selbst ein Patent oder andere Rechte des geistigen Eigentums verletzt.
|
||||
|
||||
### 6 Beteiligung und Beiträge zur Entwicklung
|
||||
|
||||
Sofern ein Contributor nicht ausdrücklich etwas anderes erklärt, gilt jeder Beitrag, der bewusst zur Aufnahme in die Software eingereicht wird, als unter den Bedingungen dieser Lizenz lizenziert.
|
||||
|
||||
Durch die Einreichung eines Beitrags gewährt der Contributor den Maintainers des Telemt-Projekts sowie allen Empfängern der Software die in dieser Lizenz beschriebenen Rechte in Bezug auf diesen Beitrag.
|
||||
|
||||
### 7 Urheberhinweis bei Netzwerk- und Servicenutzung
|
||||
|
||||
Wird die Software zur Bereitstellung eines öffentlich zugänglichen Netzwerkdienstes verwendet, MUSS der Betreiber dieses Dienstes einen Hinweis auf die Urheberschaft von Telemt an mindestens einer der folgenden Stellen anbringen:
|
||||
|
||||
* in der Servicedokumentation;
|
||||
* in der Dienstbeschreibung;
|
||||
* auf einer Seite "Über" oder einer vergleichbaren Informationsseite;
|
||||
* in anderen für Nutzer zugänglichen Materialien, die in angemessenem Zusammenhang mit dem Dienst stehen.
|
||||
|
||||
Ein solcher Hinweis DARF NICHT den Eindruck erwecken, dass der Dienst vom Telemt-Projekt oder dessen Maintainers unterstützt oder offiziell gebilligt wird.
|
||||
|
||||
### 8 Haftungsausschluss und salvatorische Klausel
|
||||
|
||||
DIE SOFTWARE WIRD "WIE BESEHEN" BEREITGESTELLT, OHNE JEGLICHE AUSDRÜCKLICHE ODER STILLSCHWEIGENDE GEWÄHRLEISTUNG, EINSCHLIESSLICH, ABER NICHT BESCHRÄNKT AUF GEWÄHRLEISTUNGEN DER MARKTGÄNGIGKEIT, DER EIGNUNG FÜR EINEN BESTIMMTEN ZWECK UND DER NICHTVERLETZUNG VON RECHTEN.
|
||||
|
||||
IN KEINEM FALL HAFTEN DIE AUTOREN ODER RECHTEINHABER FÜR IRGENDWELCHE ANSPRÜCHE, SCHÄDEN ODER SONSTIGE HAFTUNG, DIE AUS VERTRAG, UNERLAUBTER HANDLUNG ODER AUF ANDERE WEISE AUS DER SOFTWARE ODER DER NUTZUNG DER SOFTWARE ENTSTEHEN.
|
||||
|
||||
SOLLTE EINE BESTIMMUNG DIESER LIZENZ ALS UNWIRKSAM ODER NICHT DURCHSETZBAR ANGESEHEN WERDEN, IST DIESE BESTIMMUNG SO AUSZULEGEN, DASS SIE DEM URSPRÜNGLICHEN WILLEN DER PARTEIEN MÖGLICHST NAHEKOMMT; DIE ÜBRIGEN BESTIMMUNGEN BLEIBEN DAVON UNBERÜHRT UND IN VOLLER WIRKUNG.
|
||||
|
|
@ -1,143 +0,0 @@
|
|||
###### TELEMT Public License 3 ######
|
||||
##### Copyright (c) 2026 Telemt #####
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this Software and associated documentation files (the "Software"),
|
||||
to use, reproduce, modify, prepare derivative works of, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to permit
|
||||
persons to whom the Software is furnished to do so, provided that all
|
||||
copyright notices, license terms, and conditions set forth in this License
|
||||
are preserved and complied with.
|
||||
|
||||
### Official Translations
|
||||
|
||||
The canonical version of this License is the English version.
|
||||
|
||||
Official translations are provided for informational purposes only
|
||||
and for convenience, and do not have legal force. In case of any
|
||||
discrepancy, the English version of this License shall prevail.
|
||||
|
||||
Available versions:
|
||||
- English in Markdown: docs/LICENSE/LICENSE.md
|
||||
- German: docs/LICENSE/LICENSE.de.md
|
||||
- Russian: docs/LICENSE/LICENSE.ru.md
|
||||
|
||||
### Definitions
|
||||
|
||||
For the purposes of this License:
|
||||
|
||||
"Software" means the Telemt software, including source code, documentation,
|
||||
and any associated files distributed under this License.
|
||||
|
||||
"Contributor" means any person or entity that submits code, patches,
|
||||
documentation, or other contributions to the Software that are accepted
|
||||
into the Software by the maintainers.
|
||||
|
||||
"Contribution" means any work of authorship intentionally submitted
|
||||
to the Software for inclusion in the Software.
|
||||
|
||||
"Modified Version" means any version of the Software that has been
|
||||
changed, adapted, extended, or otherwise modified from the original
|
||||
Software.
|
||||
|
||||
"Maintainers" means the individuals or entities responsible for
|
||||
the official Telemt project and its releases.
|
||||
|
||||
#### 1 Attribution
|
||||
|
||||
Redistributions of the Software, in source or binary form, MUST RETAIN the
|
||||
above copyright notice, this license text, and any existing attribution
|
||||
notices.
|
||||
|
||||
#### 2 Modification Notice
|
||||
|
||||
If you modify the Software, you MUST clearly state that the Software has been
|
||||
modified and include a brief description of the changes made.
|
||||
|
||||
Modified versions MUST NOT be presented as the original Telemt.
|
||||
|
||||
#### 3 Trademark and Branding
|
||||
|
||||
This license DOES NOT grant permission to use the name "Telemt",
|
||||
the Telemt logo, or any Telemt trademarks or branding.
|
||||
|
||||
Redistributed or modified versions of the Software MAY NOT use the Telemt
|
||||
name in a way that suggests endorsement or official origin without explicit
|
||||
permission from the Telemt maintainers.
|
||||
|
||||
Use of the name "Telemt" to describe a modified version of the Software
|
||||
is permitted only if the modified version is clearly identified as a
|
||||
modified or unofficial version.
|
||||
|
||||
Any distribution that could reasonably confuse users into believing that
|
||||
the software is an official Telemt release is prohibited.
|
||||
|
||||
#### 4 Binary Distribution Transparency
|
||||
|
||||
If you distribute compiled binaries of the Software,
|
||||
you are ENCOURAGED to provide access to the corresponding
|
||||
source code and build instructions where reasonably possible.
|
||||
|
||||
This helps preserve transparency and allows recipients to verify the
|
||||
integrity and reproducibility of distributed builds.
|
||||
|
||||
#### 5 Patent Grant and Defensive Termination Clause
|
||||
|
||||
Each contributor grants you a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Software.
|
||||
|
||||
This patent license applies only to those patent claims necessarily
|
||||
infringed by the contributor’s contribution alone or by combination of
|
||||
their contribution with the Software.
|
||||
|
||||
If you initiate or participate in any patent litigation, including
|
||||
cross-claims or counterclaims, alleging that the Software or any
|
||||
contribution incorporated within the Software constitutes patent
|
||||
infringement, then **all rights granted to you under this license shall
|
||||
terminate immediately** as of the date such litigation is filed.
|
||||
|
||||
Additionally, if you initiate legal action alleging that the
|
||||
Software itself infringes your patent or other intellectual
|
||||
property rights, then all rights granted to you under this
|
||||
license SHALL TERMINATE automatically.
|
||||
|
||||
#### 6 Contributions
|
||||
|
||||
Unless you explicitly state otherwise, any Contribution intentionally
|
||||
submitted for inclusion in the Software shall be licensed under the terms
|
||||
of this License.
|
||||
|
||||
By submitting a Contribution, you grant the Telemt maintainers and all
|
||||
recipients of the Software the rights described in this License with
|
||||
respect to that Contribution.
|
||||
|
||||
#### 7 Network Use Attribution
|
||||
|
||||
If the Software is used to provide a publicly accessible network service,
|
||||
the operator of such service MUST provide attribution to Telemt in at least
|
||||
one of the following locations:
|
||||
|
||||
- service documentation
|
||||
- service description
|
||||
- an "About" or similar informational page
|
||||
- other user-visible materials reasonably associated with the service
|
||||
|
||||
Such attribution MUST NOT imply endorsement by the Telemt project or its
|
||||
maintainers.
|
||||
|
||||
#### 8 Disclaimer of Warranty and Severability Clause
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||||
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
USE OR OTHER DEALINGS IN THE SOFTWARE
|
||||
|
||||
IF ANY PROVISION OF THIS LICENSE IS HELD TO BE INVALID OR UNENFORCEABLE,
|
||||
SUCH PROVISION SHALL BE INTERPRETED TO REFLECT THE ORIGINAL INTENT
|
||||
OF THE PARTIES AS CLOSELY AS POSSIBLE, AND THE REMAINING PROVISIONS
|
||||
SHALL REMAIN IN FULL FORCE AND EFFECT
|
||||
|
|
@ -1,90 +0,0 @@
|
|||
# Публичная лицензия TELEMT 3
|
||||
|
||||
***Все права защищены (c) 2026 Telemt***
|
||||
|
||||
Настоящим любому лицу, получившему копию данного программного обеспечения и сопутствующей документации (далее — "Программное обеспечение"), безвозмездно предоставляется разрешение использовать Программное обеспечение без ограничений, включая право использовать, воспроизводить, изменять, создавать производные произведения, объединять, публиковать, распространять, сублицензировать и (или) продавать копии Программного обеспечения, а также предоставлять такие права лицам, которым предоставляется Программное обеспечение, при условии соблюдения всех уведомлений об авторских правах, условий и положений настоящей Лицензии.
|
||||
|
||||
### Определения
|
||||
|
||||
Для целей настоящей Лицензии применяются следующие определения:
|
||||
|
||||
**"Программное обеспечение" (Software)** — программное обеспечение Telemt, включая исходный код, документацию и любые связанные файлы, распространяемые на условиях настоящей Лицензии.
|
||||
|
||||
**"Контрибьютор" (Contributor)** — любое физическое или юридическое лицо, направившее код, исправления (патчи), документацию или иные материалы, которые были приняты мейнтейнерами проекта и включены в состав Программного обеспечения.
|
||||
|
||||
**"Вклад" (Contribution)** — любое произведение авторского права, намеренно представленное для включения в состав Программного обеспечения.
|
||||
|
||||
**"Модифицированная версия" (Modified Version)** — любая версия Программного обеспечения, которая была изменена, адаптирована, расширена или иным образом модифицирована по сравнению с исходным Программным обеспечением.
|
||||
|
||||
**"Мейнтейнеры" (Maintainers)** — физические или юридические лица, ответственные за официальный проект Telemt и его официальные релизы.
|
||||
|
||||
### 1 Указание авторства
|
||||
|
||||
При распространении Программного обеспечения, как в форме исходного кода, так и в бинарной форме, ДОЛЖНЫ СОХРАНЯТЬСЯ:
|
||||
|
||||
- указанное выше уведомление об авторских правах;
|
||||
- текст настоящей Лицензии;
|
||||
- любые существующие уведомления об авторстве.
|
||||
|
||||
### 2 Уведомление о модификации
|
||||
|
||||
В случае внесения изменений в Программное обеспечение лицо, осуществившее такие изменения, ОБЯЗАНО явно указать, что Программное обеспечение было модифицировано, а также включить краткое описание внесённых изменений.
|
||||
|
||||
Модифицированные версии Программного обеспечения НЕ ДОЛЖНЫ представляться как оригинальная версия Telemt.
|
||||
|
||||
### 3 Товарные знаки и обозначения
|
||||
|
||||
Настоящая Лицензия НЕ ПРЕДОСТАВЛЯЕТ права использовать наименование **"Telemt"**, логотип Telemt, а также любые товарные знаки, фирменные обозначения или элементы бренда Telemt.
|
||||
|
||||
Распространяемые или модифицированные версии Программного обеспечения НЕ ДОЛЖНЫ использовать наименование Telemt таким образом, который может создавать у пользователей впечатление официального происхождения либо одобрения со стороны проекта Telemt без явного разрешения мейнтейнеров проекта.
|
||||
|
||||
Использование наименования **Telemt** для описания модифицированной версии Программного обеспечения допускается только при условии, что такая версия ясно обозначена как модифицированная или неофициальная.
|
||||
|
||||
Запрещается любое распространение, которое может разумно вводить пользователей в заблуждение относительно того, что программное обеспечение является официальным релизом Telemt.
|
||||
|
||||
### 4 Прозрачность распространения бинарных версий
|
||||
|
||||
В случае распространения скомпилированных бинарных версий Программного обеспечения распространитель НАСТОЯЩИМ ПОБУЖДАЕТСЯ предоставлять доступ к соответствующему исходному коду и инструкциям по сборке, если это разумно возможно.
|
||||
|
||||
Такая практика способствует прозрачности распространения и позволяет получателям проверять целостность и воспроизводимость распространяемых сборок.
|
||||
|
||||
### 5 Предоставление патентной лицензии и прекращение прав
|
||||
|
||||
Каждый контрибьютор предоставляет получателям Программного обеспечения бессрочную, всемирную, неисключительную, безвозмездную, не требующую выплаты роялти и безотзывную патентную лицензию на:
|
||||
|
||||
- изготовление,
|
||||
- поручение изготовления,
|
||||
- использование,
|
||||
- предложение к продаже,
|
||||
- продажу,
|
||||
- импорт,
|
||||
- и иное распространение Программного обеспечения.
|
||||
|
||||
Такая патентная лицензия распространяется исключительно на те патентные требования, которые неизбежно нарушаются соответствующим вкладом контрибьютора как таковым либо его сочетанием с Программным обеспечением.
|
||||
|
||||
Если лицо инициирует либо участвует в каком-либо судебном разбирательстве по патентному спору, включая встречные или перекрёстные иски, утверждая, что Программное обеспечение либо любой вклад, включённый в него, нарушает патент, **все права, предоставленные такому лицу настоящей Лицензией, немедленно прекращаются** с даты подачи соответствующего иска.
|
||||
|
||||
Кроме того, если лицо инициирует судебное разбирательство, утверждая, что само Программное обеспечение нарушает его патентные либо иные права интеллектуальной собственности, все права, предоставленные настоящей Лицензией, **автоматически прекращаются**.
|
||||
|
||||
### 6 Участие и вклад в разработку
|
||||
|
||||
Если контрибьютор явно не указал иное, любой Вклад, намеренно представленный для включения в Программное обеспечение, считается лицензированным на условиях настоящей Лицензии.
|
||||
Путём предоставления Вклада контрибьютор предоставляет мейнтейнерам проекта Telemt и всем получателям Программного обеспечения права, предусмотренные настоящей Лицензией, в отношении такого Вклада.
|
||||
|
||||
### 7 Указание авторства при сетевом и сервисном использовании
|
||||
|
||||
В случае использования Программного обеспечения для предоставления публично доступного сетевого сервиса оператор такого сервиса ОБЯЗАН обеспечить указание авторства Telemt как минимум в одном из следующих мест:
|
||||
- документация сервиса;
|
||||
- описание сервиса;
|
||||
- страница "О программе" или аналогичная информационная страница;
|
||||
- иные материалы, доступные пользователям и разумно связанные с данным сервисом.
|
||||
|
||||
Такое указание авторства НЕ ДОЛЖНО создавать впечатление одобрения или официальной поддержки со стороны проекта Telemt либо его мейнтейнеров.
|
||||
|
||||
### 8 Отказ от гарантий и делимость положений
|
||||
|
||||
ПРОГРАММНОЕ ОБЕСПЕЧЕНИЕ ПРЕДОСТАВЛЯЕТСЯ "КАК ЕСТЬ", БЕЗ КАКИХ-ЛИБО ГАРАНТИЙ, ЯВНЫХ ИЛИ ПОДРАЗУМЕВАЕМЫХ, ВКЛЮЧАЯ, НО НЕ ОГРАНИЧИВАЯСЬ ГАРАНТИЯМИ КОММЕРЧЕСКОЙ ПРИГОДНОСТИ, ПРИГОДНОСТИ ДЛЯ КОНКРЕТНОЙ ЦЕЛИ И НЕНАРУШЕНИЯ ПРАВ.
|
||||
|
||||
НИ ПРИ КАКИХ ОБСТОЯТЕЛЬСТВАХ АВТОРЫ ИЛИ ПРАВООБЛАДАТЕЛИ НЕ НЕСУТ ОТВЕТСТВЕННОСТИ ПО КАКИМ-ЛИБО ТРЕБОВАНИЯМ, УБЫТКАМ ИЛИ ИНОЙ ОТВЕТСТВЕННОСТИ, ВОЗНИКАЮЩЕЙ В РЕЗУЛЬТАТЕ ДОГОВОРА, ДЕЛИКТА ИЛИ ИНЫМ ОБРАЗОМ, СВЯЗАННЫМ С ПРОГРАММНЫМ ОБЕСПЕЧЕНИЕМ ИЛИ ЕГО ИСПОЛЬЗОВАНИЕМ.
|
||||
|
||||
В СЛУЧАЕ ЕСЛИ КАКОЕ-ЛИБО ПОЛОЖЕНИЕ НАСТОЯЩЕЙ ЛИЦЕНЗИИ ПРИЗНАЁТСЯ НЕДЕЙСТВИТЕЛЬНЫМ ИЛИ НЕПРИМЕНИМЫМ, ТАКОЕ ПОЛОЖЕНИЕ ПОДЛЕЖИТ ТОЛКОВАНИЮ МАКСИМАЛЬНО БЛИЗКО К ИСХОДНОМУ НАМЕРЕНИЮ СТОРОН, ПРИ ЭТОМ ОСТАЛЬНЫЕ ПОЛОЖЕНИЯ СОХРАНЯЮТ ПОЛНУЮ ЮРИДИЧЕСКУЮ СИЛУ.
|
||||
|
|
@ -0,0 +1,120 @@
|
|||
# TELEMT License 3.3
|
||||
|
||||
***Copyright (c) 2026 Telemt***
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of this Software and associated documentation files (the "Software"), to use, reproduce, modify, prepare derivative works of, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, provided that all copyright notices, license terms, and conditions set forth in this License are preserved and complied with.
|
||||
|
||||
### Official Translations
|
||||
|
||||
The canonical version of this License is the English version.
|
||||
Official translations are provided for informational purposes only and for convenience, and do not have legal force. In case of any discrepancy, the English version of this License shall prevail.
|
||||
|
||||
| Language | Location |
|
||||
|-------------|----------|
|
||||
| English | [docs/LICENSE/TELEMT-LICENSE.en.md](https://github.com/telemt/telemt/tree/main/docs/LICENSE/TELEMT-LICENSE.en.md)|
|
||||
| German | [docs/LICENSE/TELEMT-LICENSE.de.md](https://github.com/telemt/telemt/tree/main/docs/LICENSE/TELEMT-LICENSE.de.md)|
|
||||
| Russian | [docs/LICENSE/TELEMT-LICENSE.ru.md](https://github.com/telemt/telemt/tree/main/docs/LICENSE/TELEMT-LICENSE.ru.md)|
|
||||
|
||||
### License Versioning Policy
|
||||
|
||||
This License is version 3.3 of the TELEMT License.
|
||||
Each version of the Software is licensed under the License that accompanies its corresponding source code distribution.
|
||||
|
||||
Future versions of the Software may be distributed under a different version of the TELEMT Public License or under a different license, as determined by the Telemt maintainers.
|
||||
|
||||
Any such change of license applies only to the versions of the Software distributed with the new license and SHALL NOT retroactively affect any previously released versions of the Software.
|
||||
|
||||
Recipients of the Software are granted rights only under the License provided with the version of the Software they received.
|
||||
|
||||
Redistributions of the Software, including Modified Versions, MUST preserve the copyright notices, license text, and conditions of this License for all portions of the Software derived from Telemt.
|
||||
|
||||
Additional terms or licenses may be applied to modifications or additional code added by a redistributor, provided that such terms do not restrict or alter the rights granted under this License for the original Telemt Software.
|
||||
|
||||
Nothing in this section limits the rights granted under this License for versions of the Software already released.
|
||||
|
||||
### Definitions
|
||||
|
||||
For the purposes of this License:
|
||||
|
||||
**"Software"** means the Telemt software, including source code, documentation, and any associated files distributed under this License.
|
||||
|
||||
**"Contributor"** means any person or entity that submits code, patches, documentation, or other contributions to the Software that are accepted into the Software by the maintainers.
|
||||
|
||||
**"Contribution"** means any work of authorship intentionally submitted to the Software for inclusion in the Software.
|
||||
|
||||
**"Modified Version"** means any version of the Software that has been changed, adapted, extended, or otherwise modified from the original Software.
|
||||
|
||||
**"Maintainers"** means the individuals or entities responsible for the official Telemt project and its releases.
|
||||
|
||||
### 1 Attribution
|
||||
|
||||
Redistributions of the Software, in source or binary form, MUST RETAIN:
|
||||
|
||||
- the above copyright notice;
|
||||
- this license text;
|
||||
- any existing attribution notices.
|
||||
|
||||
### 2 Modification Notice
|
||||
|
||||
If you modify the Software, you MUST clearly state that the Software has been modified and include a brief description of the changes made.
|
||||
|
||||
Modified versions MUST NOT be presented as the original Telemt.
|
||||
|
||||
### 3 Trademark and Branding
|
||||
|
||||
This license DOES NOT grant permission to use the name "Telemt", the Telemt logo, or any Telemt trademarks or branding.
|
||||
|
||||
Redistributed or modified versions of the Software MAY NOT use the Telemt name in a way that suggests endorsement or official origin without explicit permission from the Telemt maintainers.
|
||||
|
||||
Use of the name "Telemt" to describe a modified version of the Software is permitted only if the modified version is clearly identified as a modified or unofficial version.
|
||||
|
||||
Any distribution that could reasonably confuse users into believing that the software is an official Telemt release is prohibited.
|
||||
|
||||
### 4 Binary Distribution Transparency
|
||||
|
||||
If you distribute compiled binaries of the Software, you are ENCOURAGED to provide access to the corresponding source code and build instructions where reasonably possible.
|
||||
|
||||
This helps preserve transparency and allows recipients to verify the integrity and reproducibility of distributed builds.
|
||||
|
||||
### 5 Patent Grant and Defensive Termination Clause
|
||||
|
||||
Each contributor grants you a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable patent license to:
|
||||
|
||||
- make,
|
||||
- have made,
|
||||
- use,
|
||||
- offer to sell,
|
||||
- sell,
|
||||
- import,
|
||||
- and otherwise transfer the Software.
|
||||
|
||||
This patent license applies only to those patent claims necessarily infringed by the contributor’s contribution alone or by combination of their contribution with the Software.
|
||||
|
||||
If you initiate or participate in any patent litigation, including cross-claims or counterclaims, alleging that the Software or any contribution incorporated within the Software constitutes patent infringement, then **all rights granted to you under this license shall terminate immediately** as of the date such litigation is filed.
|
||||
|
||||
Additionally, if you initiate legal action alleging that the Software itself infringes your patent or other intellectual property rights, then all rights granted to you under this license SHALL TERMINATE automatically.
|
||||
|
||||
### 6 Contributions
|
||||
|
||||
Unless you explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Software shall be licensed under the terms of this License.
|
||||
|
||||
By submitting a Contribution, you grant the Telemt maintainers and all recipients of the Software the rights described in this License with respect to that Contribution.
|
||||
|
||||
### 7 Network Use Attribution
|
||||
|
||||
If the Software is used to provide a publicly accessible network service, the operator of such service SHOULD provide attribution to Telemt in at least one of the following locations:
|
||||
|
||||
- service documentation;
|
||||
- service description;
|
||||
- an "About" or similar informational page;
|
||||
- other user-visible materials reasonably associated with the service.
|
||||
|
||||
Such attribution MUST NOT imply endorsement by the Telemt project or its maintainers.
|
||||
|
||||
### 8 Disclaimer of Warranty and Severability Clause
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
IF ANY PROVISION OF THIS LICENSE IS HELD TO BE INVALID OR UNENFORCEABLE, SUCH PROVISION SHALL BE INTERPRETED TO REFLECT THE ORIGINAL INTENT OF THE PARTIES AS CLOSELY AS POSSIBLE, AND THE REMAINING PROVISIONS SHALL REMAIN IN FULL FORCE AND EFFECT.
|
||||
|
|
@ -0,0 +1,120 @@
|
|||
# TELEMT Лицензия 3.3
|
||||
|
||||
***Copyright (c) 2026 Telemt***
|
||||
|
||||
Настоящим безвозмездно предоставляется разрешение любому лицу, получившему копию данного программного обеспечения и сопутствующей документации (далее — "Программное обеспечение"), использовать, воспроизводить, изменять, создавать производные произведения, объединять, публиковать, распространять, сублицензировать и/или продавать копии Программного обеспечения, а также разрешать лицам, которым предоставляется Программное обеспечение, осуществлять указанные действия при условии соблюдения и сохранения всех уведомлений об авторском праве, условий и положений настоящей Лицензии.
|
||||
|
||||
### Официальные переводы
|
||||
|
||||
Канонической версией настоящей Лицензии является версия на английском языке.
|
||||
Официальные переводы предоставляются исключительно в информационных целях и для удобства и не имеют юридической силы. В случае любых расхождений приоритет имеет английская версия.
|
||||
|
||||
| Язык | Расположение |
|
||||
|------------|--------------|
|
||||
| Русский | [docs/LICENSE/TELEMT-LICENSE.ru.md](https://github.com/telemt/telemt/tree/main/docs/LICENSE/TELEMT-LICENSE.ru.md)|
|
||||
| Английский | [docs/LICENSE/TELEMT-LICENSE.en.md](https://github.com/telemt/telemt/tree/main/docs/LICENSE/TELEMT-LICENSE.en.md)|
|
||||
| Немецкий | [docs/LICENSE/TELEMT-LICENSE.de.md](https://github.com/telemt/telemt/tree/main/docs/LICENSE/TELEMT-LICENSE.de.md)|
|
||||
|
||||
### Политика версионирования лицензии
|
||||
|
||||
Настоящая Лицензия является версией 3.3 Лицензии TELEMT.
|
||||
Каждая версия Программного обеспечения лицензируется в соответствии с Лицензией, сопровождающей соответствующее распространение исходного кода.
|
||||
|
||||
Будущие версии Программного обеспечения могут распространяться в соответствии с иной версией Лицензии TELEMT Public License либо под иной лицензией, определяемой мейнтейнерами Telemt.
|
||||
|
||||
Любое такое изменение лицензии применяется исключительно к версиям Программного обеспечения, распространяемым с новой лицензией, и НЕ распространяется ретроактивно на ранее выпущенные версии Программного обеспечения.
|
||||
|
||||
Получатели Программного обеспечения приобретают права исключительно в соответствии с Лицензией, предоставленной вместе с полученной ими версией Программного обеспечения.
|
||||
|
||||
При распространении Программного обеспечения, включая Модифицированные версии, ОБЯЗАТЕЛЬНО сохранение уведомлений об авторском праве, текста лицензии и условий настоящей Лицензии в отношении всех частей Программного обеспечения, производных от Telemt.
|
||||
|
||||
Дополнительные условия или лицензии могут применяться к модификациям или дополнительному коду, добавленному распространителем, при условии, что такие условия не ограничивают и не изменяют права, предоставленные настоящей Лицензией в отношении оригинального Программного обеспечения Telemt.
|
||||
|
||||
Ничто в настоящем разделе не ограничивает права, предоставленные настоящей Лицензией в отношении уже выпущенных версий Программного обеспечения.
|
||||
|
||||
### Определения
|
||||
|
||||
Для целей настоящей Лицензии:
|
||||
|
||||
**"Программное обеспечение"** означает программное обеспечение Telemt, включая исходный код, документацию и любые сопутствующие файлы, распространяемые в соответствии с настоящей Лицензией.
|
||||
|
||||
**"Контрибьютор"** означает любое физическое или юридическое лицо, которое предоставляет код, исправления, документацию или иные материалы в качестве вклада в Программное обеспечение, принятые мейнтейнерами для включения в Программное обеспечение.
|
||||
|
||||
**"Вклад"** означает любое произведение, сознательно представленное для включения в Программное обеспечение.
|
||||
|
||||
**"Модифицированная версия"** означает любую версию Программного обеспечения, которая была изменена, адаптирована, расширена или иным образом модифицирована по сравнению с оригинальным Программным обеспечением.
|
||||
|
||||
**"Мейнтейнеры"** означает физических или юридических лиц, ответственных за официальный проект Telemt и его релизы.
|
||||
|
||||
### 1. Атрибуция
|
||||
|
||||
При распространении Программного обеспечения, как в виде исходного кода, так и в бинарной форме, ОБЯЗАТЕЛЬНО СОХРАНЕНИЕ:
|
||||
|
||||
- указанного выше уведомления об авторском праве;
|
||||
- текста настоящей Лицензии;
|
||||
- всех существующих уведомлений об атрибуции.
|
||||
|
||||
### 2. Уведомление о модификациях
|
||||
|
||||
В случае внесения изменений в Программное обеспечение вы ОБЯЗАНЫ явно указать факт модификации Программного обеспечения и включить краткое описание внесённых изменений.
|
||||
|
||||
Модифицированные версии НЕ ДОЛЖНЫ представляться как оригинальное Программное обеспечение Telemt.
|
||||
|
||||
### 3. Товарные знаки и брендинг
|
||||
|
||||
Настоящая Лицензия НЕ предоставляет право на использование наименования "Telemt", логотипа Telemt или любых товарных знаков и элементов брендинга Telemt.
|
||||
|
||||
Распространяемые или модифицированные версии Программного обеспечения НЕ МОГУТ использовать наименование Telemt таким образом, который может создавать впечатление одобрения или официального происхождения без явного разрешения мейнтейнеров Telemt.
|
||||
|
||||
Использование наименования "Telemt" для описания модифицированной версии Программного обеспечения допускается только при условии, что такая версия чётко обозначена как модифицированная или неофициальная.
|
||||
|
||||
Запрещается любое распространение, способное разумно ввести пользователей в заблуждение относительно того, что программное обеспечение является официальным релизом Telemt.
|
||||
|
||||
### 4. Прозрачность распространения бинарных файлов
|
||||
|
||||
В случае распространения скомпилированных бинарных файлов Программного обеспечения рекомендуется (ENCOURAGED) предоставлять доступ к соответствующему исходному коду и инструкциям по сборке, если это разумно возможно.
|
||||
|
||||
Это способствует обеспечению прозрачности и позволяет получателям проверять целостность и воспроизводимость распространяемых сборок.
|
||||
|
||||
### 5. Патентная лицензия и условие защитного прекращения
|
||||
|
||||
Каждый контрибьютор предоставляет вам бессрочную, всемирную, неисключительную, безвозмездную, без лицензионных отчислений, безотзывную патентную лицензию на:
|
||||
|
||||
- изготовление,
|
||||
- поручение изготовления,
|
||||
- использование,
|
||||
- предложение к продаже,
|
||||
- продажу,
|
||||
- импорт,
|
||||
- а также иные формы передачи Программного обеспечения.
|
||||
|
||||
Данная патентная лицензия распространяется исключительно на те патентные притязания, которые неизбежно нарушаются вкладом контрибьютора отдельно либо в сочетании его вклада с Программным обеспечением.
|
||||
|
||||
Если вы инициируете или участвуете в любом патентном судебном разбирательстве, включая встречные иски или требования, утверждая, что Программное обеспечение или любой Вклад, включённый в Программное обеспечение, нарушает патент, то **все предоставленные вам настоящей Лицензией права немедленно прекращаются** с даты подачи такого иска.
|
||||
|
||||
Дополнительно, если вы инициируете судебное разбирательство, утверждая, что само Программное обеспечение нарушает ваш патент или иные права интеллектуальной собственности, все права, предоставленные вам настоящей Лицензией, ПРЕКРАЩАЮТСЯ автоматически.
|
||||
|
||||
### 6. Вклады
|
||||
|
||||
Если вы прямо не указали иное, любой Вклад, сознательно представленный для включения в Программное обеспечение, лицензируется на условиях настоящей Лицензии.
|
||||
|
||||
Предоставляя Вклад, вы предоставляете мейнтейнерам Telemt и всем получателям Программного обеспечения права, предусмотренные настоящей Лицензией, в отношении такого Вклада.
|
||||
|
||||
### 7. Атрибуция при сетевом использовании
|
||||
|
||||
Если Программное обеспечение используется для предоставления общедоступного сетевого сервиса, оператору такого сервиса СЛЕДУЕТ (SHOULD) обеспечить указание атрибуции Telemt как минимум в одном из следующих мест:
|
||||
|
||||
- документация сервиса;
|
||||
- описание сервиса;
|
||||
- раздел "О программе" или аналогичная информационная страница;
|
||||
- иные материалы, доступные пользователю и разумно связанные с сервисом.
|
||||
|
||||
Такая атрибуция НЕ ДОЛЖНА подразумевать одобрение со стороны проекта Telemt или его мейнтейнеров.
|
||||
|
||||
### 8. Отказ от гарантий и оговорка о делимости
|
||||
|
||||
ПРОГРАММНОЕ ОБЕСПЕЧЕНИЕ ПРЕДОСТАВЛЯЕТСЯ "КАК ЕСТЬ", БЕЗ КАКИХ-ЛИБО ГАРАНТИЙ, ЯВНЫХ ИЛИ ПОДРАЗУМЕВАЕМЫХ, ВКЛЮЧАЯ, В ЧАСТНОСТИ, ГАРАНТИИ ТОВАРНОЙ ПРИГОДНОСТИ, СООТВЕТСТВИЯ ОПРЕДЕЛЁННОЙ ЦЕЛИ И ОТСУТСТВИЯ НАРУШЕНИЙ ПРАВ.
|
||||
|
||||
НИ ПРИ КАКИХ ОБСТОЯТЕЛЬСТВАХ АВТОРЫ ИЛИ ПРАВООБЛАДАТЕЛИ НЕ НЕСУТ ОТВЕТСТВЕННОСТИ ПО КАКИМ-ЛИБО ТРЕБОВАНИЯМ, УБЫТКАМ ИЛИ ИНОЙ ОТВЕТСТВЕННОСТИ, ВОЗНИКАЮЩИМ В РАМКАХ ДОГОВОРА, ДЕЛИКТА ИЛИ ИНЫМ ОБРАЗОМ, ИЗ, В СВЯЗИ С ИЛИ В РЕЗУЛЬТАТЕ ИСПОЛЬЗОВАНИЯ ПРОГРАММНОГО ОБЕСПЕЧЕНИЯ ИЛИ ИНЫХ ДЕЙСТВИЙ С НИМ.
|
||||
|
||||
ЕСЛИ ЛЮБОЕ ПОЛОЖЕНИЕ НАСТОЯЩЕЙ ЛИЦЕНЗИИ ПРИЗНАЁТСЯ НЕДЕЙСТВИТЕЛЬНЫМ ИЛИ НЕПРИМЕНИМЫМ, ТАКОЕ ПОЛОЖЕНИЕ ПОДЛЕЖИТ ТОЛКОВАНИЮ МАКСИМАЛЬНО БЛИЗКО К ИСХОДНОМУ НАМЕРЕНИЮ СТОРОН, А ОСТАЛЬНЫЕ ПОЛОЖЕНИЯ СОХРАНЯЮТ ПОЛНУЮ СИЛУ И ДЕЙСТВИЕ.
|
||||
|
|
@ -27,12 +27,12 @@ chmod +x /bin/telemt
|
|||
|
||||
**0. Check port and generate secrets**
|
||||
|
||||
The port you have selected for use should be MISSING from the list, when:
|
||||
The port you have selected for use should not be in the list:
|
||||
```bash
|
||||
netstat -lnp
|
||||
```
|
||||
|
||||
Generate 16 bytes/32 characters HEX with OpenSSL or another way:
|
||||
Generate 16 bytes/32 characters in HEX format with OpenSSL or another way:
|
||||
```bash
|
||||
openssl rand -hex 16
|
||||
```
|
||||
|
|
@ -50,7 +50,7 @@ Save the obtained result somewhere. You will need it later!
|
|||
|
||||
**1. Place your config to /etc/telemt/telemt.toml**
|
||||
|
||||
Create config directory:
|
||||
Create the config directory:
|
||||
```bash
|
||||
mkdir /etc/telemt
|
||||
```
|
||||
|
|
@ -59,7 +59,7 @@ Open nano
|
|||
```bash
|
||||
nano /etc/telemt/telemt.toml
|
||||
```
|
||||
paste your config
|
||||
Insert your configuration:
|
||||
|
||||
```toml
|
||||
# === General Settings ===
|
||||
|
|
@ -94,7 +94,8 @@ then Ctrl+S -> Ctrl+X to save
|
|||
|
||||
> [!WARNING]
|
||||
> Replace the value of the hello parameter with the value you obtained in step 0.
|
||||
> Replace the value of the tls_domain parameter with another website.
|
||||
> Additionally, change the value of the tls_domain parameter to a different website.
|
||||
> Changing the tls_domain parameter will break all links that use the old domain!
|
||||
|
||||
---
|
||||
|
||||
|
|
@ -105,14 +106,14 @@ useradd -d /opt/telemt -m -r -U telemt
|
|||
chown -R telemt:telemt /etc/telemt
|
||||
```
|
||||
|
||||
**3. Create service on /etc/systemd/system/telemt.service**
|
||||
**3. Create service in /etc/systemd/system/telemt.service**
|
||||
|
||||
Open nano
|
||||
```bash
|
||||
nano /etc/systemd/system/telemt.service
|
||||
```
|
||||
|
||||
paste this Systemd Module
|
||||
Insert this Systemd module:
|
||||
```bash
|
||||
[Unit]
|
||||
Description=Telemt
|
||||
|
|
@ -127,8 +128,8 @@ WorkingDirectory=/opt/telemt
|
|||
ExecStart=/bin/telemt /etc/telemt/telemt.toml
|
||||
Restart=on-failure
|
||||
LimitNOFILE=65536
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
|
||||
AmbientCapabilities=CAP_NET_ADMIN CAP_NET_BIND_SERVICE
|
||||
CapabilityBoundingSet=CAP_NET_ADMIN CAP_NET_BIND_SERVICE
|
||||
NoNewPrivileges=true
|
||||
|
||||
[Install]
|
||||
|
|
@ -147,13 +148,16 @@ systemctl daemon-reload
|
|||
|
||||
**6.** For automatic startup at system boot, enter `systemctl enable telemt`
|
||||
|
||||
**7.** To get the link(s), enter
|
||||
**7.** To get the link(s), enter:
|
||||
```bash
|
||||
curl -s http://127.0.0.1:9091/v1/users | jq
|
||||
curl -s http://127.0.0.1:9091/v1/users | jq -r '.data[] | "[\(.username)]", (.links.classic[]? | "classic: \(.)"), (.links.secure[]? | "secure: \(.)"), (.links.tls[]? | "tls: \(.)"), ""'
|
||||
```
|
||||
|
||||
> Any number of people can use one link.
|
||||
|
||||
> [!WARNING]
|
||||
> Only the command from step 7 can provide a working link. Do not try to create it yourself or copy it from anywhere if you are not sure what you are doing!
|
||||
|
||||
---
|
||||
|
||||
# Telemt via Docker Compose
|
||||
|
|
@ -95,6 +95,7 @@ hello = "00000000000000000000000000000000"
|
|||
> [!WARNING]
|
||||
> Замените значение параметра hello на значение, которое вы получили в пункте 0.
|
||||
> Так же замените значение параметра tls_domain на другой сайт.
|
||||
> Изменение параметра tls_domain сделает нерабочими все ссылки, использующие старый домен!
|
||||
|
||||
---
|
||||
|
||||
|
|
@ -127,8 +128,8 @@ WorkingDirectory=/opt/telemt
|
|||
ExecStart=/bin/telemt /etc/telemt/telemt.toml
|
||||
Restart=on-failure
|
||||
LimitNOFILE=65536
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
|
||||
AmbientCapabilities=CAP_NET_ADMIN CAP_NET_BIND_SERVICE
|
||||
CapabilityBoundingSet=CAP_NET_ADMIN CAP_NET_BIND_SERVICE
|
||||
NoNewPrivileges=true
|
||||
|
||||
[Install]
|
||||
|
|
@ -149,7 +150,7 @@ systemctl daemon-reload
|
|||
|
||||
**7.** Для получения ссылки/ссылок введите
|
||||
```bash
|
||||
curl -s http://127.0.0.1:9091/v1/users | jq
|
||||
curl -s http://127.0.0.1:9091/v1/users | jq -r '.data[] | "[\(.username)]", (.links.classic[]? | "classic: \(.)"), (.links.secure[]? | "secure: \(.)"), (.links.tls[]? | "tls: \(.)"), ""'
|
||||
```
|
||||
> Одной ссылкой может пользоваться сколько угодно человек.
|
||||
|
||||
|
|
@ -0,0 +1,287 @@
|
|||
<img src="https://gist.githubusercontent.com/avbor/1f8a128e628f47249aae6e058a57610b/raw/19013276c035e91058e0a9799ab145f8e70e3ff5/scheme.svg">
|
||||
|
||||
## Concept
|
||||
- **Server A** (_conditionally Russian Federation_):\
|
||||
Entry point, receives Telegram proxy user traffic via **HAProxy** (port `443`)\
|
||||
and sends it to the tunnel to Server **B**.\
|
||||
Internal IP in the tunnel — `10.10.10.2`\
|
||||
Port for HAProxy clients — `443/tcp`
|
||||
- **Server B** (_conditionally Netherlands_):\
|
||||
Exit point, runs **telemt** and accepts client connections through Server **A**.\
|
||||
The server must have unrestricted access to Telegram servers.\
|
||||
Internal IP in the tunnel — `10.10.10.1`\
|
||||
AmneziaWG port — `8443/udp`\
|
||||
Port for telemt clients — `443/tcp`
|
||||
|
||||
---
|
||||
|
||||
## Step 1. Setting up the AmneziaWG tunnel (A <-> B)
|
||||
[AmneziaWG](https://github.com/amnezia-vpn/amneziawg-linux-kernel-module) must be installed on all servers.\
|
||||
All following commands are given for **Ubuntu 24.04**.\
|
||||
For RHEL-based distributions, installation instructions are available at the link above.
|
||||
|
||||
### Installing AmneziaWG (Servers A and B)
|
||||
The following steps must be performed on each server:
|
||||
|
||||
#### 1. Adding the AmneziaWG repository and installing required packages:
|
||||
```bash
|
||||
sudo apt install -y software-properties-common python3-launchpadlib gnupg2 linux-headers-$(uname -r) && \
|
||||
sudo add-apt-repository ppa:amnezia/ppa && \
|
||||
sudo apt-get install -y amneziawg
|
||||
```
|
||||
|
||||
#### 2. Generating a unique key pair:
|
||||
```bash
|
||||
cd /etc/amnezia/amneziawg && \
|
||||
awg genkey | tee private.key | awg pubkey > public.key
|
||||
```
|
||||
|
||||
As a result, you will get two files in the `/etc/amnezia/amneziawg` folder:\
|
||||
`private.key` - private, and\
|
||||
`public.key` - public server keys
|
||||
|
||||
#### 3. Configuring network interfaces:
|
||||
Obfuscation parameters `S1`, `S2`, `H1`, `H2`, `H3`, `H4` must be strictly identical on both servers.\
|
||||
Parameters `Jc`, `Jmin` and `Jmax` can differ.\
|
||||
Parameters `I1-I5` ([Custom Protocol Signature](https://docs.amnezia.org/documentation/amnezia-wg/)) must be specified on the client side (Server **A**).
|
||||
|
||||
Recommendations for choosing values:
|
||||
|
||||
```text
|
||||
Jc — 1 ≤ Jc ≤ 128; from 4 to 12 inclusive
|
||||
Jmin — Jmax > Jmin < 1280*; recommended 8
|
||||
Jmax — Jmin < Jmax ≤ 1280*; recommended 80
|
||||
S1 — S1 ≤ 1132* (1280* - 148 = 1132); S1 + 56 ≠ S2;
|
||||
recommended range from 15 to 150 inclusive
|
||||
S2 — S2 ≤ 1188* (1280* - 92 = 1188);
|
||||
recommended range from 15 to 150 inclusive
|
||||
H1/H2/H3/H4 — must be unique and differ from each other;
|
||||
recommended range from 5 to 2147483647 inclusive
|
||||
|
||||
* It is assumed that the Internet connection has an MTU of 1280.
|
||||
```
|
||||
|
||||
> [!IMPORTANT]
|
||||
> It is recommended to use your own, unique values.\
|
||||
> You can use the [generator](https://htmlpreview.github.io/?https://gist.githubusercontent.com/avbor/955782b5c37b06240b243aa375baeac5/raw/13f5517ca473b47c412b9a99407066de973732bd/awg-gen.html) to select parameters.
|
||||
|
||||
#### Server B Configuration (Netherlands):
|
||||
|
||||
Create the interface configuration file (`awg0`)
|
||||
```bash
|
||||
nano /etc/amnezia/amneziawg/awg0.conf
|
||||
```
|
||||
|
||||
File content
|
||||
```ini
|
||||
[Interface]
|
||||
Address = 10.10.10.1/24
|
||||
ListenPort = 8443
|
||||
PrivateKey = <PRIVATE_KEY_SERVER_B>
|
||||
SaveConfig = true
|
||||
Jc = 4
|
||||
Jmin = 8
|
||||
Jmax = 80
|
||||
S1 = 29
|
||||
S2 = 15
|
||||
S3 = 18
|
||||
S4 = 0
|
||||
H1 = 2087563914
|
||||
H2 = 188817757
|
||||
H3 = 101784570
|
||||
H4 = 432174303
|
||||
|
||||
[Peer]
|
||||
PublicKey = <PUBLIC_KEY_SERVER_A>
|
||||
AllowedIPs = 10.10.10.2/32
|
||||
```
|
||||
`ListenPort` - the port on which the server will wait for connections, you can choose any free one.\
|
||||
`<PRIVATE_KEY_SERVER_B>` - the content of the `private.key` file from Server **B**.\
|
||||
`<PUBLIC_KEY_SERVER_A>` - the content of the `public.key` file from Server **A**.
|
||||
|
||||
Open the port on the firewall (if enabled):
|
||||
```bash
|
||||
sudo ufw allow from <PUBLIC_IP_SERVER_A> to any port 8443 proto udp
|
||||
```
|
||||
|
||||
`<PUBLIC_IP_SERVER_A>` - the external IP address of Server **A**.
|
||||
|
||||
#### Server A Configuration (Russian Federation):
|
||||
Create the interface configuration file (awg0)
|
||||
|
||||
```bash
|
||||
nano /etc/amnezia/amneziawg/awg0.conf
|
||||
```
|
||||
|
||||
File content
|
||||
```ini
|
||||
[Interface]
|
||||
Address = 10.10.10.2/24
|
||||
PrivateKey = <PRIVATE_KEY_SERVER_A>
|
||||
Jc = 4
|
||||
Jmin = 8
|
||||
Jmax = 80
|
||||
S1 = 29
|
||||
S2 = 15
|
||||
S3 = 18
|
||||
S4 = 0
|
||||
H1 = 2087563914
|
||||
H2 = 188817757
|
||||
H3 = 101784570
|
||||
H4 = 432174303
|
||||
I1 = <b 0xc10000000108981eba846e21f74e00>
|
||||
I2 = <b 0xc20000000108981eba846e21f74e00>
|
||||
I3 = <b 0xc30000000108981eba846e21f74e00>
|
||||
I4 = <b 0x43981eba846e21f74e>
|
||||
I5 = <b 0x43981eba846e21f74e>
|
||||
|
||||
[Peer]
|
||||
PublicKey = <PUBLIC_KEY_SERVER_B>
|
||||
Endpoint = <PUBLIC_IP_SERVER_B>:8443
|
||||
AllowedIPs = 10.10.10.1/32
|
||||
PersistentKeepalive = 25
|
||||
```
|
||||
|
||||
`<PRIVATE_KEY_SERVER_A>` - the content of the `private.key` file from Server **A**.\
|
||||
`<PUBLIC_KEY_SERVER_B>` - the content of the `public.key` file from Server **B**.\
|
||||
`<PUBLIC_IP_SERVER_B>` - the public IP address of Server **B**.
|
||||
|
||||
Enable the tunnel on both servers:
|
||||
```bash
|
||||
sudo systemctl enable --now awg-quick@awg0
|
||||
```
|
||||
|
||||
Make sure Server B is accessible from Server A through the tunnel.
|
||||
```bash
|
||||
ping 10.10.10.1
|
||||
PING 10.10.10.1 (10.10.10.1) 56(84) bytes of data.
|
||||
64 bytes from 10.10.10.1: icmp_seq=1 ttl=64 time=35.1 ms
|
||||
64 bytes from 10.10.10.1: icmp_seq=2 ttl=64 time=35.0 ms
|
||||
64 bytes from 10.10.10.1: icmp_seq=3 ttl=64 time=35.1 ms
|
||||
^C
|
||||
```
|
||||
---
|
||||
|
||||
## Step 2. Installing telemt on Server B (conditionally Netherlands)
|
||||
Installation and configuration are described [here](https://github.com/telemt/telemt/blob/main/docs/QUICK_START_GUIDE.ru.md) or [here](https://gitlab.com/An0nX/telemt-docker#-quick-start-docker-compose).\
|
||||
It is assumed that telemt expects connections on port `443/tcp`.
|
||||
|
||||
In the telemt config, you must enable the `Proxy` protocol and restrict connections to it only through the tunnel.
|
||||
```toml
|
||||
[server]
|
||||
port = 443
|
||||
listen_addr_ipv4 = "10.10.10.1"
|
||||
proxy_protocol = true
|
||||
```
|
||||
|
||||
Also, for correct link generation, specify the FQDN or IP address and port of Server `A`
|
||||
```toml
|
||||
[general.links]
|
||||
show = "*"
|
||||
public_host = "<FQDN_OR_IP_SERVER_A>"
|
||||
public_port = 443
|
||||
```
|
||||
|
||||
Open the port on the firewall (if enabled):
|
||||
```bash
|
||||
sudo ufw allow from 10.10.10.2 to any port 443 proto tcp
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Step 3. Configuring HAProxy on Server A (Russian Federation)
|
||||
Since the version in the standard Ubuntu repository is relatively old, it makes sense to use the official Docker image.\
|
||||
[Instructions](https://docs.docker.com/engine/install/ubuntu/) for installing Docker on Ubuntu.
|
||||
|
||||
> [!WARNING]
|
||||
> By default, regular users do not have rights to use ports < 1024.
|
||||
> Attempts to run HAProxy on port 443 can lead to errors:
|
||||
> ```
|
||||
> [ALERT] (8) : Binding [/usr/local/etc/haproxy/haproxy.cfg:17] for frontend tcp_in_443:
|
||||
> protocol tcpv4: cannot bind socket (Permission denied) for [0.0.0.0:443].
|
||||
> ```
|
||||
> There are two simple ways to bypass this restriction, choose one:
|
||||
> 1. At the OS level, change the net.ipv4.ip_unprivileged_port_start setting to allow users to use all ports:
|
||||
> ```
|
||||
> echo "net.ipv4.ip_unprivileged_port_start = 0" | sudo tee -a /etc/sysctl.conf && sudo sysctl -p
|
||||
> ```
|
||||
> or
|
||||
>
|
||||
> 2. Run HAProxy as root:
|
||||
> Uncomment the `user: "root"` parameter in docker-compose.yaml.
|
||||
|
||||
#### Create a folder for HAProxy:
|
||||
```bash
|
||||
mkdir -p /opt/docker-compose/haproxy && cd $_
|
||||
```
|
||||
|
||||
#### Create the docker-compose.yaml file
|
||||
`nano docker-compose.yaml`
|
||||
|
||||
File content
|
||||
```yaml
|
||||
services:
|
||||
haproxy:
|
||||
image: haproxy:latest
|
||||
container_name: haproxy
|
||||
restart: unless-stopped
|
||||
# user: "root"
|
||||
network_mode: "host"
|
||||
volumes:
|
||||
- ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "1m"
|
||||
max-file: "1"
|
||||
```
|
||||
|
||||
#### Create the haproxy.cfg config file
|
||||
Accept connections on port 443/tcp and send them through the tunnel to Server `B` 10.10.10.1:443
|
||||
|
||||
`nano haproxy.cfg`
|
||||
|
||||
File content
|
||||
|
||||
```haproxy
|
||||
global
|
||||
log stdout format raw local0
|
||||
maxconn 10000
|
||||
|
||||
defaults
|
||||
log global
|
||||
mode tcp
|
||||
option tcplog
|
||||
option clitcpka
|
||||
option srvtcpka
|
||||
timeout connect 5s
|
||||
timeout client 2h
|
||||
timeout server 2h
|
||||
timeout check 5s
|
||||
|
||||
frontend tcp_in_443
|
||||
bind *:443
|
||||
maxconn 8000
|
||||
option tcp-smart-accept
|
||||
default_backend telemt_nodes
|
||||
|
||||
backend telemt_nodes
|
||||
option tcp-smart-connect
|
||||
    server server_b 10.10.10.1:443 check inter 5s rise 2 fall 3 send-proxy-v2
|
||||
|
||||
|
||||
```
|
||||
> [!WARNING]
|
||||
> **The file must end with an empty line, otherwise HAProxy will not start!**
|
||||
|
||||
#### Allow port 443/tcp in the firewall (if enabled)
|
||||
```bash
|
||||
sudo ufw allow 443/tcp
|
||||
```
|
||||
|
||||
#### Start the HAProxy container
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
If everything is configured correctly, you can now try connecting Telegram clients using links from the telemt log/api.
|
||||
|
|
@ -0,0 +1,291 @@
|
|||
<img src="https://gist.githubusercontent.com/avbor/1f8a128e628f47249aae6e058a57610b/raw/19013276c035e91058e0a9799ab145f8e70e3ff5/scheme.svg">
|
||||
|
||||
## Концепция
|
||||
- **Сервер A** (_РФ_):\
|
||||
Точка входа, принимает трафик пользователей Telegram-прокси через **HAProxy** (порт `443`)\
|
||||
и отправляет в туннель на Сервер **B**.\
|
||||
Внутренний IP в туннеле — `10.10.10.2`\
|
||||
Порт для клиентов HAProxy — `443/tcp`
|
||||
- **Сервер B** (_условно Нидерланды_):\
|
||||
Точка выхода, на нем работает **telemt** и принимает подключения клиентов через Сервер **A**.\
|
||||
На сервере должен быть неограниченный доступ до серверов Telegram.\
|
||||
Внутренний IP в туннеле — `10.10.10.1`\
|
||||
Порт AmneziaWG — `8443/udp`\
|
||||
Порт для клиентов telemt — `443/tcp`
|
||||
|
||||
---
|
||||
|
||||
## Шаг 1. Настройка туннеля AmneziaWG (A <-> B)
|
||||
|
||||
На всех серверах необходимо установить [amneziawg](https://github.com/amnezia-vpn/amneziawg-linux-kernel-module).\
|
||||
Далее все команды даны для **Ubuntu 24.04**.\
|
||||
Для RHEL-based дистрибутивов инструкция по установке есть по ссылке выше.
|
||||
|
||||
### Установка AmneziaWG (Сервера A и B)
|
||||
На каждом из серверов необходимо выполнить следующие шаги:
|
||||
|
||||
#### 1. Добавление репозитория AmneziaWG и установка необходимых пакетов:
|
||||
```bash
|
||||
sudo apt install -y software-properties-common python3-launchpadlib gnupg2 linux-headers-$(uname -r) && \
|
||||
sudo add-apt-repository ppa:amnezia/ppa && \
|
||||
sudo apt-get install -y amneziawg
|
||||
```
|
||||
|
||||
#### 2. Генерация уникальной пары ключей:
|
||||
```bash
|
||||
cd /etc/amnezia/amneziawg && \
|
||||
awg genkey | tee private.key | awg pubkey > public.key
|
||||
```
|
||||
В результате вы получите в папке `/etc/amnezia/amneziawg` два файла:\
|
||||
`private.key` - приватный и\
|
||||
`public.key` - публичный ключи сервера
|
||||
|
||||
#### 3. Настройка сетевых интерфейсов:
|
||||
|
||||
Параметры обфускации `S1`, `S2`, `H1`, `H2`, `H3`, `H4` должны быть строго идентичными на обоих серверах.\
|
||||
Параметры `Jc`, `Jmin` и `Jmax` могут отличаться.\
|
||||
Параметры `I1-I5` ([Custom Protocol Signature](https://docs.amnezia.org/documentation/amnezia-wg/)) нужно указывать на стороне _клиента_ (Сервер **А**).
|
||||
|
||||
Рекомендации по выбору значений:
|
||||
```text
|
||||
Jc — 1 ≤ Jc ≤ 128; от 4 до 12 включительно
|
||||
Jmin — Jmax > Jmin < 1280*; рекомендовано 8
|
||||
Jmax — Jmin < Jmax ≤ 1280*; рекомендовано 80
|
||||
S1 — S1 ≤ 1132* (1280* - 148 = 1132); S1 + 56 ≠ S2;
|
||||
рекомендованный диапазон от 15 до 150 включительно
|
||||
S2 — S2 ≤ 1188* (1280* - 92 = 1188);
|
||||
рекомендованный диапазон от 15 до 150 включительно
|
||||
H1/H2/H3/H4 — должны быть уникальны и отличаться друг от друга;
|
||||
рекомендованный диапазон от 5 до 2147483647 включительно
|
||||
|
||||
* Предполагается, что подключение к Интернету имеет MTU 1280.
|
||||
```
|
||||
> [!IMPORTANT]
|
||||
> Рекомендуется использовать собственные, уникальные значения.\
|
||||
> Для выбора параметров можете воспользоваться [генератором](https://htmlpreview.github.io/?https://gist.githubusercontent.com/avbor/955782b5c37b06240b243aa375baeac5/raw/13f5517ca473b47c412b9a99407066de973732bd/awg-gen.html).
|
||||
|
||||
#### Конфигурация Сервера B (_Нидерланды_):
|
||||
|
||||
Создаем файл конфигурации интерфейса (`awg0`)
|
||||
```bash
|
||||
nano /etc/amnezia/amneziawg/awg0.conf
|
||||
```
|
||||
|
||||
Содержимое файла
|
||||
```ini
|
||||
[Interface]
|
||||
Address = 10.10.10.1/24
|
||||
ListenPort = 8443
|
||||
PrivateKey = <PRIVATE_KEY_SERVER_B>
|
||||
SaveConfig = true
|
||||
Jc = 4
|
||||
Jmin = 8
|
||||
Jmax = 80
|
||||
S1 = 29
|
||||
S2 = 15
|
||||
S3 = 18
|
||||
S4 = 0
|
||||
H1 = 2087563914
|
||||
H2 = 188817757
|
||||
H3 = 101784570
|
||||
H4 = 432174303
|
||||
|
||||
[Peer]
|
||||
PublicKey = <PUBLIC_KEY_SERVER_A>
|
||||
AllowedIPs = 10.10.10.2/32
|
||||
```
|
||||
|
||||
`ListenPort` - порт, на котором сервер будет ждать подключения, можете выбрать любой свободный.\
|
||||
`<PRIVATE_KEY_SERVER_B>` - содержимое файла `private.key` с сервера **B**.\
|
||||
`<PUBLIC_KEY_SERVER_A>` - содержимое файла `public.key` с сервера **A**.
|
||||
|
||||
Открываем порт на фаерволе (если включен):
|
||||
```bash
|
||||
sudo ufw allow from <PUBLIC_IP_SERVER_A> to any port 8443 proto udp
|
||||
```
|
||||
|
||||
`<PUBLIC_IP_SERVER_A>` - внешний IP адрес Сервера **A**.
|
||||
|
||||
#### Конфигурация Сервера A (_РФ_):
|
||||
|
||||
Создаем файл конфигурации интерфейса (`awg0`)
|
||||
```bash
|
||||
nano /etc/amnezia/amneziawg/awg0.conf
|
||||
```
|
||||
|
||||
Содержимое файла
|
||||
```ini
|
||||
[Interface]
|
||||
Address = 10.10.10.2/24
|
||||
PrivateKey = <PRIVATE_KEY_SERVER_A>
|
||||
Jc = 4
|
||||
Jmin = 8
|
||||
Jmax = 80
|
||||
S1 = 29
|
||||
S2 = 15
|
||||
S3 = 18
|
||||
S4 = 0
|
||||
H1 = 2087563914
|
||||
H2 = 188817757
|
||||
H3 = 101784570
|
||||
H4 = 432174303
|
||||
I1 = <b 0xc10000000108981eba846e21f74e00>
|
||||
I2 = <b 0xc20000000108981eba846e21f74e00>
|
||||
I3 = <b 0xc30000000108981eba846e21f74e00>
|
||||
I4 = <b 0x43981eba846e21f74e>
|
||||
I5 = <b 0x43981eba846e21f74e>
|
||||
|
||||
[Peer]
|
||||
PublicKey = <PUBLIC_KEY_SERVER_B>
|
||||
Endpoint = <PUBLIC_IP_SERVER_B>:8443
|
||||
AllowedIPs = 10.10.10.1/32
|
||||
PersistentKeepalive = 25
|
||||
```
|
||||
|
||||
`<PRIVATE_KEY_SERVER_A>` - содержимое файла `private.key` с сервера **A**.\
|
||||
`<PUBLIC_KEY_SERVER_B>` - содержимое файла `public.key` с сервера **B**.\
|
||||
`<PUBLIC_IP_SERVER_B>` - публичный IP адрес сервера **B**.
|
||||
|
||||
#### Включаем туннель на обоих серверах:
|
||||
```bash
|
||||
sudo systemctl enable --now awg-quick@awg0
|
||||
```
|
||||
|
||||
Убедитесь, что с Сервера `A` доступен Сервер `B` через туннель.
|
||||
```bash
|
||||
ping 10.10.10.1
|
||||
PING 10.10.10.1 (10.10.10.1) 56(84) bytes of data.
|
||||
64 bytes from 10.10.10.1: icmp_seq=1 ttl=64 time=35.1 ms
|
||||
64 bytes from 10.10.10.1: icmp_seq=2 ttl=64 time=35.0 ms
|
||||
64 bytes from 10.10.10.1: icmp_seq=3 ttl=64 time=35.1 ms
|
||||
^C
|
||||
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Шаг 2. Установка telemt на Сервере B (_условно Нидерланды_)
|
||||
|
||||
Установка и настройка описаны [здесь](https://github.com/telemt/telemt/blob/main/docs/QUICK_START_GUIDE.ru.md) или [здесь](https://gitlab.com/An0nX/telemt-docker#-quick-start-docker-compose).\
|
||||
Подразумевается, что telemt ожидает подключения на порту `443/tcp`.
|
||||
|
||||
В конфиге telemt необходимо включить протокол `Proxy` и ограничить подключения к нему только через туннель.
|
||||
|
||||
```toml
|
||||
[server]
|
||||
port = 443
|
||||
listen_addr_ipv4 = "10.10.10.1"
|
||||
proxy_protocol = true
|
||||
```
|
||||
|
||||
А также, для правильной генерации ссылок, указать FQDN или IP адрес и порт Сервера `A`
|
||||
|
||||
```toml
|
||||
[general.links]
|
||||
show = "*"
|
||||
public_host = "<FQDN_OR_IP_SERVER_A>"
|
||||
public_port = 443
|
||||
```
|
||||
|
||||
Открываем порт на фаерволе (если включен):
|
||||
```bash
|
||||
sudo ufw allow from 10.10.10.2 to any port 443 proto tcp
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Шаг 3. Настройка HAProxy на Сервере A (_РФ_)
|
||||
|
||||
Т.к. в стандартном репозитории Ubuntu версия относительно старая, имеет смысл воспользоваться официальным образом Docker.\
|
||||
[Инструкция](https://docs.docker.com/engine/install/ubuntu/) по установке Docker на Ubuntu.
|
||||
|
||||
> [!WARNING]
|
||||
> По умолчанию у обычных пользователей нет прав на использование портов < 1024.\
|
||||
> Попытки запустить HAProxy на 443 порту могут приводить к ошибкам:
|
||||
> ```
|
||||
> [ALERT] (8) : Binding [/usr/local/etc/haproxy/haproxy.cfg:17] for frontend tcp_in_443:
|
||||
> protocol tcpv4: cannot bind socket (Permission denied) for [0.0.0.0:443].
|
||||
> ```
|
||||
> Есть два простых способа обойти это ограничение, выберите что-то одно:
|
||||
> 1. На уровне ОС изменить настройку net.ipv4.ip_unprivileged_port_start, разрешив пользователям использовать все порты:
|
||||
> ```
|
||||
> echo "net.ipv4.ip_unprivileged_port_start = 0" | sudo tee -a /etc/sysctl.conf && sudo sysctl -p
|
||||
> ```
|
||||
> или
|
||||
>
|
||||
> 2. Запустить HAProxy под root:\
|
||||
> Раскомментируйте в docker-compose.yaml параметр `user: "root"`.
|
||||
|
||||
#### Создаем папку для HAProxy:
|
||||
```bash
|
||||
mkdir -p /opt/docker-compose/haproxy && cd $_
|
||||
```
|
||||
#### Создаем файл docker-compose.yaml
|
||||
|
||||
`nano docker-compose.yaml`
|
||||
|
||||
Содержимое файла
|
||||
```yaml
|
||||
services:
|
||||
haproxy:
|
||||
image: haproxy:latest
|
||||
container_name: haproxy
|
||||
restart: unless-stopped
|
||||
# user: "root"
|
||||
network_mode: "host"
|
||||
volumes:
|
||||
- ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "1m"
|
||||
max-file: "1"
|
||||
```
|
||||
#### Создаем файл конфига haproxy.cfg
|
||||
Принимаем подключения на порту 443/tcp и отправляем их через туннель на Сервер `B` 10.10.10.1:443
|
||||
|
||||
`nano haproxy.cfg`
|
||||
|
||||
Содержимое файла
|
||||
```haproxy
|
||||
global
|
||||
log stdout format raw local0
|
||||
maxconn 10000
|
||||
|
||||
defaults
|
||||
log global
|
||||
mode tcp
|
||||
option tcplog
|
||||
option clitcpka
|
||||
option srvtcpka
|
||||
timeout connect 5s
|
||||
timeout client 2h
|
||||
timeout server 2h
|
||||
timeout check 5s
|
||||
|
||||
frontend tcp_in_443
|
||||
bind *:443
|
||||
maxconn 8000
|
||||
option tcp-smart-accept
|
||||
default_backend telemt_nodes
|
||||
|
||||
backend telemt_nodes
|
||||
option tcp-smart-connect
|
||||
    server server_b 10.10.10.1:443 check inter 5s rise 2 fall 3 send-proxy-v2
|
||||
|
||||
|
||||
```
|
||||
>[!WARNING]
|
||||
>**Файл должен заканчиваться пустой строкой, иначе HAProxy не запустится!**
|
||||
|
||||
#### Разрешаем порт 443/tcp в фаерволе (если включен)
|
||||
```bash
|
||||
sudo ufw allow 443/tcp
|
||||
```
|
||||
|
||||
#### Запускаем контейнер HAProxy
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
Если все настроено верно, то теперь можно пробовать подключить клиентов Telegram с использованием ссылок из лога/api telemt.
|
||||
Binary file not shown.
|
After Width: | Height: | Size: 16 KiB |
Binary file not shown.
|
After Width: | Height: | Size: 161 KiB |
269
install.sh
269
install.sh
|
|
@ -8,18 +8,62 @@ CONFIG_DIR="${CONFIG_DIR:-/etc/telemt}"
|
|||
CONFIG_FILE="${CONFIG_FILE:-${CONFIG_DIR}/telemt.toml}"
|
||||
WORK_DIR="${WORK_DIR:-/opt/telemt}"
|
||||
TLS_DOMAIN="${TLS_DOMAIN:-petrovich.ru}"
|
||||
SERVER_PORT="${SERVER_PORT:-443}"
|
||||
USER_SECRET=""
|
||||
AD_TAG=""
|
||||
SERVICE_NAME="telemt"
|
||||
TEMP_DIR=""
|
||||
SUDO=""
|
||||
CONFIG_PARENT_DIR=""
|
||||
SERVICE_START_FAILED=0
|
||||
|
||||
PORT_PROVIDED=0
|
||||
SECRET_PROVIDED=0
|
||||
AD_TAG_PROVIDED=0
|
||||
DOMAIN_PROVIDED=0
|
||||
|
||||
ACTION="install"
|
||||
TARGET_VERSION="${VERSION:-latest}"
|
||||
|
||||
while [ $# -gt 0 ]; do
|
||||
case "$1" in
|
||||
-h|--help) ACTION="help"; shift ;;
|
||||
-d|--domain)
|
||||
if [ "$#" -lt 2 ] || [ -z "$2" ]; then
|
||||
printf '[ERROR] %s requires a domain argument.\n' "$1" >&2
|
||||
exit 1
|
||||
fi
|
||||
TLS_DOMAIN="$2"; DOMAIN_PROVIDED=1; shift 2 ;;
|
||||
-p|--port)
|
||||
if [ "$#" -lt 2 ] || [ -z "$2" ]; then
|
||||
printf '[ERROR] %s requires a port argument.\n' "$1" >&2; exit 1
|
||||
fi
|
||||
case "$2" in
|
||||
*[!0-9]*) printf '[ERROR] Port must be a valid number.\n' >&2; exit 1 ;;
|
||||
esac
|
||||
port_num="$(printf '%s\n' "$2" | sed 's/^0*//')"
|
||||
[ -z "$port_num" ] && port_num="0"
|
||||
if [ "${#port_num}" -gt 5 ] || [ "$port_num" -lt 1 ] || [ "$port_num" -gt 65535 ]; then
|
||||
printf '[ERROR] Port must be between 1 and 65535.\n' >&2; exit 1
|
||||
fi
|
||||
SERVER_PORT="$port_num"; PORT_PROVIDED=1; shift 2 ;;
|
||||
-s|--secret)
|
||||
if [ "$#" -lt 2 ] || [ -z "$2" ]; then
|
||||
printf '[ERROR] %s requires a secret argument.\n' "$1" >&2; exit 1
|
||||
fi
|
||||
case "$2" in
|
||||
*[!0-9a-fA-F]*)
|
||||
printf '[ERROR] Secret must contain only hex characters.\n' >&2; exit 1 ;;
|
||||
esac
|
||||
if [ "${#2}" -ne 32 ]; then
|
||||
printf '[ERROR] Secret must be exactly 32 chars.\n' >&2; exit 1
|
||||
fi
|
||||
USER_SECRET="$2"; SECRET_PROVIDED=1; shift 2 ;;
|
||||
-a|--ad-tag|--ad_tag)
|
||||
if [ "$#" -lt 2 ] || [ -z "$2" ]; then
|
||||
printf '[ERROR] %s requires an ad_tag argument.\n' "$1" >&2; exit 1
|
||||
fi
|
||||
AD_TAG="$2"; AD_TAG_PROVIDED=1; shift 2 ;;
|
||||
uninstall|--uninstall)
|
||||
if [ "$ACTION" != "purge" ]; then ACTION="uninstall"; fi
|
||||
shift ;;
|
||||
|
|
@ -52,11 +96,17 @@ cleanup() {
|
|||
trap cleanup EXIT INT TERM
|
||||
|
||||
show_help() {
|
||||
say "Usage: $0 [ <version> | install | uninstall | purge | --help ]"
|
||||
say "Usage: $0 [ <version> | install | uninstall | purge ] [ options ]"
|
||||
say " <version> Install specific version (e.g. 3.3.15, default: latest)"
|
||||
say " install Install the latest version"
|
||||
say " uninstall Remove the binary and service (keeps config and user)"
|
||||
say " uninstall Remove the binary and service"
|
||||
say " purge Remove everything including configuration, data, and user"
|
||||
say ""
|
||||
say "Options:"
|
||||
say " -d, --domain Set TLS domain (default: petrovich.ru)"
|
||||
say " -p, --port Set server port (default: 443)"
|
||||
say " -s, --secret Set specific user secret (32 hex characters)"
|
||||
say " -a, --ad-tag Set ad_tag"
|
||||
exit 0
|
||||
}
|
||||
|
||||
|
|
@ -112,6 +162,14 @@ get_svc_mgr() {
|
|||
else echo "none"; fi
|
||||
}
|
||||
|
||||
is_config_exists() {
|
||||
if [ -n "$SUDO" ]; then
|
||||
$SUDO sh -c '[ -f "$1" ]' _ "$CONFIG_FILE"
|
||||
else
|
||||
[ -f "$CONFIG_FILE" ]
|
||||
fi
|
||||
}
|
||||
|
||||
verify_common() {
|
||||
[ -n "$BIN_NAME" ] || die "BIN_NAME cannot be empty."
|
||||
[ -n "$INSTALL_DIR" ] || die "INSTALL_DIR cannot be empty."
|
||||
|
|
@ -119,7 +177,7 @@ verify_common() {
|
|||
[ -n "$CONFIG_FILE" ] || die "CONFIG_FILE cannot be empty."
|
||||
|
||||
case "${INSTALL_DIR}${CONFIG_DIR}${WORK_DIR}${CONFIG_FILE}" in
|
||||
*[!a-zA-Z0-9_./-]*) die "Invalid characters in paths. Only alphanumeric, _, ., -, and / allowed." ;;
|
||||
*[!a-zA-Z0-9_./-]*) die "Invalid characters in paths." ;;
|
||||
esac
|
||||
|
||||
case "$TARGET_VERSION" in *[!a-zA-Z0-9_.-]*) die "Invalid characters in version." ;; esac
|
||||
|
|
@ -137,11 +195,11 @@ verify_common() {
|
|||
if [ "$(id -u)" -eq 0 ]; then
|
||||
SUDO=""
|
||||
else
|
||||
command -v sudo >/dev/null 2>&1 || die "This script requires root or sudo. Neither found."
|
||||
command -v sudo >/dev/null 2>&1 || die "This script requires root or sudo."
|
||||
SUDO="sudo"
|
||||
if ! sudo -n true 2>/dev/null; then
|
||||
if ! [ -t 0 ]; then
|
||||
die "sudo requires a password, but no TTY detected. Aborting to prevent hang."
|
||||
die "sudo requires a password, but no TTY detected."
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
|
@ -154,21 +212,7 @@ verify_common() {
|
|||
die "Safety check failed: CONFIG_FILE '$CONFIG_FILE' is a directory."
|
||||
fi
|
||||
|
||||
for path in "$CONFIG_DIR" "$CONFIG_PARENT_DIR" "$WORK_DIR"; do
|
||||
check_path="$(get_realpath "$path")"
|
||||
case "$check_path" in
|
||||
/|/bin|/sbin|/usr|/usr/bin|/usr/sbin|/usr/local|/usr/local/bin|/usr/local/sbin|/usr/local/etc|/usr/local/share|/etc|/var|/var/lib|/var/log|/var/run|/home|/root|/tmp|/lib|/lib64|/opt|/run|/boot|/dev|/sys|/proc)
|
||||
die "Safety check failed: '$path' (resolved to '$check_path') is a critical system directory." ;;
|
||||
esac
|
||||
done
|
||||
|
||||
check_install_dir="$(get_realpath "$INSTALL_DIR")"
|
||||
case "$check_install_dir" in
|
||||
/|/etc|/var|/home|/root|/tmp|/usr|/usr/local|/opt|/boot|/dev|/sys|/proc|/run)
|
||||
die "Safety check failed: INSTALL_DIR '$INSTALL_DIR' is a critical system directory." ;;
|
||||
esac
|
||||
|
||||
for cmd in id uname grep find rm chown chmod mv mktemp mkdir tr dd sed ps head sleep cat tar gzip rmdir; do
|
||||
for cmd in id uname awk grep find rm chown chmod mv mktemp mkdir tr dd sed ps head sleep cat tar gzip; do
|
||||
command -v "$cmd" >/dev/null 2>&1 || die "Required command not found: $cmd"
|
||||
done
|
||||
}
|
||||
|
|
@ -177,14 +221,41 @@ verify_install_deps() {
|
|||
command -v curl >/dev/null 2>&1 || command -v wget >/dev/null 2>&1 || die "Neither curl nor wget is installed."
|
||||
command -v cp >/dev/null 2>&1 || command -v install >/dev/null 2>&1 || die "Need cp or install"
|
||||
|
||||
if ! command -v setcap >/dev/null 2>&1; then
|
||||
if ! command -v setcap >/dev/null 2>&1 || ! command -v conntrack >/dev/null 2>&1; then
|
||||
if command -v apk >/dev/null 2>&1; then
|
||||
$SUDO apk add --no-cache libcap-utils >/dev/null 2>&1 || $SUDO apk add --no-cache libcap >/dev/null 2>&1 || true
|
||||
$SUDO apk add --no-cache libcap-utils libcap conntrack-tools >/dev/null 2>&1 || true
|
||||
elif command -v apt-get >/dev/null 2>&1; then
|
||||
$SUDO apt-get update -q >/dev/null 2>&1 || true
|
||||
$SUDO apt-get install -y -q libcap2-bin >/dev/null 2>&1 || true
|
||||
elif command -v dnf >/dev/null 2>&1; then $SUDO dnf install -y -q libcap >/dev/null 2>&1 || true
|
||||
elif command -v yum >/dev/null 2>&1; then $SUDO yum install -y -q libcap >/dev/null 2>&1 || true
|
||||
$SUDO env DEBIAN_FRONTEND=noninteractive apt-get install -y -q libcap2-bin conntrack >/dev/null 2>&1 || {
|
||||
$SUDO env DEBIAN_FRONTEND=noninteractive apt-get update -q >/dev/null 2>&1 || true
|
||||
$SUDO env DEBIAN_FRONTEND=noninteractive apt-get install -y -q libcap2-bin conntrack >/dev/null 2>&1 || true
|
||||
}
|
||||
elif command -v dnf >/dev/null 2>&1; then $SUDO dnf install -y -q libcap conntrack-tools >/dev/null 2>&1 || true
|
||||
elif command -v yum >/dev/null 2>&1; then $SUDO yum install -y -q libcap conntrack-tools >/dev/null 2>&1 || true
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
check_port_availability() {
|
||||
port_info=""
|
||||
|
||||
if command -v ss >/dev/null 2>&1; then
|
||||
port_info=$($SUDO ss -tulnp 2>/dev/null | grep -E ":${SERVER_PORT}([[:space:]]|$)" || true)
|
||||
elif command -v netstat >/dev/null 2>&1; then
|
||||
port_info=$($SUDO netstat -tulnp 2>/dev/null | grep -E ":${SERVER_PORT}([[:space:]]|$)" || true)
|
||||
elif command -v lsof >/dev/null 2>&1; then
|
||||
port_info=$($SUDO lsof -i :${SERVER_PORT} 2>/dev/null | grep LISTEN || true)
|
||||
else
|
||||
say "[WARNING] Network diagnostic tools (ss, netstat, lsof) not found. Skipping port check."
|
||||
return 0
|
||||
fi
|
||||
|
||||
if [ -n "$port_info" ]; then
|
||||
if printf '%s\n' "$port_info" | grep -q "${BIN_NAME}"; then
|
||||
say " -> Port ${SERVER_PORT} is in use by ${BIN_NAME}. Ignoring as it will be restarted."
|
||||
else
|
||||
say "[ERROR] Port ${SERVER_PORT} is already in use by another process:"
|
||||
printf ' %s\n' "$port_info"
|
||||
die "Please free the port ${SERVER_PORT} or change it and try again."
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
|
@ -192,7 +263,13 @@ verify_install_deps() {
|
|||
detect_arch() {
|
||||
sys_arch="$(uname -m)"
|
||||
case "$sys_arch" in
|
||||
x86_64|amd64) echo "x86_64" ;;
|
||||
x86_64|amd64)
|
||||
if [ -r /proc/cpuinfo ] && grep -q "avx2" /proc/cpuinfo 2>/dev/null && grep -q "bmi2" /proc/cpuinfo 2>/dev/null; then
|
||||
echo "x86_64-v3"
|
||||
else
|
||||
echo "x86_64"
|
||||
fi
|
||||
;;
|
||||
aarch64|arm64) echo "aarch64" ;;
|
||||
*) die "Unsupported architecture: $sys_arch" ;;
|
||||
esac
|
||||
|
|
@ -238,7 +315,7 @@ setup_dirs() {
|
|||
$SUDO mkdir -p "$WORK_DIR" "$CONFIG_DIR" "$CONFIG_PARENT_DIR" || die "Failed to create directories"
|
||||
|
||||
$SUDO chown telemt:telemt "$WORK_DIR" && $SUDO chmod 750 "$WORK_DIR"
|
||||
$SUDO chown root:telemt "$CONFIG_DIR" && $SUDO chmod 750 "$CONFIG_DIR"
|
||||
$SUDO chown telemt:telemt "$CONFIG_DIR" && $SUDO chmod 750 "$CONFIG_DIR"
|
||||
|
||||
if [ "$CONFIG_PARENT_DIR" != "$CONFIG_DIR" ] && [ "$CONFIG_PARENT_DIR" != "." ] && [ "$CONFIG_PARENT_DIR" != "/" ]; then
|
||||
$SUDO chown root:telemt "$CONFIG_PARENT_DIR" && $SUDO chmod 750 "$CONFIG_PARENT_DIR"
|
||||
|
|
@ -261,17 +338,19 @@ install_binary() {
|
|||
fi
|
||||
|
||||
$SUDO mkdir -p "$INSTALL_DIR" || die "Failed to create install directory"
|
||||
|
||||
$SUDO rm -f "$bin_dst" 2>/dev/null || true
|
||||
|
||||
if command -v install >/dev/null 2>&1; then
|
||||
$SUDO install -m 0755 "$bin_src" "$bin_dst" || die "Failed to install binary"
|
||||
else
|
||||
$SUDO rm -f "$bin_dst" 2>/dev/null || true
|
||||
$SUDO cp "$bin_src" "$bin_dst" && $SUDO chmod 0755 "$bin_dst" || die "Failed to copy binary"
|
||||
fi
|
||||
|
||||
$SUDO sh -c '[ -x "$1" ]' _ "$bin_dst" || die "Binary not executable: $bin_dst"
|
||||
|
||||
if command -v setcap >/dev/null 2>&1; then
|
||||
$SUDO setcap cap_net_bind_service=+ep "$bin_dst" 2>/dev/null || true
|
||||
$SUDO setcap cap_net_bind_service,cap_net_admin=+ep "$bin_dst" 2>/dev/null || true
|
||||
fi
|
||||
}
|
||||
|
||||
|
|
@ -287,11 +366,20 @@ generate_secret() {
|
|||
}
|
||||
|
||||
generate_config_content() {
|
||||
conf_secret="$1"
|
||||
conf_tag="$2"
|
||||
escaped_tls_domain="$(printf '%s\n' "$TLS_DOMAIN" | tr -d '[:cntrl:]' | sed 's/\\/\\\\/g; s/"/\\"/g')"
|
||||
|
||||
cat <<EOF
|
||||
[general]
|
||||
use_middle_proxy = false
|
||||
use_middle_proxy = true
|
||||
EOF
|
||||
|
||||
if [ -n "$conf_tag" ]; then
|
||||
echo "ad_tag = \"${conf_tag}\""
|
||||
fi
|
||||
|
||||
cat <<EOF
|
||||
|
||||
[general.modes]
|
||||
classic = false
|
||||
|
|
@ -299,7 +387,7 @@ secure = false
|
|||
tls = true
|
||||
|
||||
[server]
|
||||
port = 443
|
||||
port = ${SERVER_PORT}
|
||||
|
||||
[server.api]
|
||||
enabled = true
|
||||
|
|
@ -310,28 +398,73 @@ whitelist = ["127.0.0.1/32"]
|
|||
tls_domain = "${escaped_tls_domain}"
|
||||
|
||||
[access.users]
|
||||
hello = "$1"
|
||||
hello = "${conf_secret}"
|
||||
EOF
|
||||
}
|
||||
|
||||
install_config() {
|
||||
if [ -n "$SUDO" ]; then
|
||||
if $SUDO sh -c '[ -f "$1" ]' _ "$CONFIG_FILE"; then
|
||||
say " -> Config already exists at $CONFIG_FILE. Skipping creation."
|
||||
return 0
|
||||
fi
|
||||
elif [ -f "$CONFIG_FILE" ]; then
|
||||
say " -> Config already exists at $CONFIG_FILE. Skipping creation."
|
||||
if is_config_exists; then
|
||||
say " -> Config already exists at $CONFIG_FILE. Updating parameters..."
|
||||
|
||||
tmp_conf="${TEMP_DIR}/config.tmp"
|
||||
$SUDO cat "$CONFIG_FILE" > "$tmp_conf"
|
||||
|
||||
escaped_domain="$(printf '%s\n' "$TLS_DOMAIN" | tr -d '[:cntrl:]' | sed 's/\\/\\\\/g; s/"/\\"/g')"
|
||||
|
||||
export AWK_PORT="$SERVER_PORT"
|
||||
export AWK_SECRET="$USER_SECRET"
|
||||
export AWK_DOMAIN="$escaped_domain"
|
||||
export AWK_AD_TAG="$AD_TAG"
|
||||
export AWK_FLAG_P="$PORT_PROVIDED"
|
||||
export AWK_FLAG_S="$SECRET_PROVIDED"
|
||||
export AWK_FLAG_D="$DOMAIN_PROVIDED"
|
||||
export AWK_FLAG_A="$AD_TAG_PROVIDED"
|
||||
|
||||
awk '
|
||||
BEGIN { ad_tag_handled = 0 }
|
||||
|
||||
ENVIRON["AWK_FLAG_P"] == "1" && /^[ \t]*port[ \t]*=/ { print "port = " ENVIRON["AWK_PORT"]; next }
|
||||
ENVIRON["AWK_FLAG_S"] == "1" && /^[ \t]*hello[ \t]*=/ { print "hello = \"" ENVIRON["AWK_SECRET"] "\""; next }
|
||||
ENVIRON["AWK_FLAG_D"] == "1" && /^[ \t]*tls_domain[ \t]*=/ { print "tls_domain = \"" ENVIRON["AWK_DOMAIN"] "\""; next }
|
||||
|
||||
ENVIRON["AWK_FLAG_A"] == "1" && /^[ \t]*ad_tag[ \t]*=/ {
|
||||
if (!ad_tag_handled) {
|
||||
print "ad_tag = \"" ENVIRON["AWK_AD_TAG"] "\"";
|
||||
ad_tag_handled = 1;
|
||||
}
|
||||
next
|
||||
}
|
||||
ENVIRON["AWK_FLAG_A"] == "1" && /^\[general\]/ {
|
||||
print;
|
||||
if (!ad_tag_handled) {
|
||||
print "ad_tag = \"" ENVIRON["AWK_AD_TAG"] "\"";
|
||||
ad_tag_handled = 1;
|
||||
}
|
||||
next
|
||||
}
|
||||
|
||||
{ print }
|
||||
' "$tmp_conf" > "${tmp_conf}.new" && mv "${tmp_conf}.new" "$tmp_conf"
|
||||
|
||||
[ "$PORT_PROVIDED" -eq 1 ] && say " -> Updated port: $SERVER_PORT"
|
||||
[ "$SECRET_PROVIDED" -eq 1 ] && say " -> Updated secret for user 'hello'"
|
||||
[ "$DOMAIN_PROVIDED" -eq 1 ] && say " -> Updated tls_domain: $TLS_DOMAIN"
|
||||
[ "$AD_TAG_PROVIDED" -eq 1 ] && say " -> Updated ad_tag"
|
||||
|
||||
write_root "$CONFIG_FILE" < "$tmp_conf"
|
||||
rm -f "$tmp_conf"
|
||||
return 0
|
||||
fi
|
||||
|
||||
toml_secret="$(generate_secret)" || die "Failed to generate secret."
|
||||
if [ -z "$USER_SECRET" ]; then
|
||||
USER_SECRET="$(generate_secret)" || die "Failed to generate secret."
|
||||
fi
|
||||
|
||||
generate_config_content "$toml_secret" | write_root "$CONFIG_FILE" || die "Failed to install config"
|
||||
generate_config_content "$USER_SECRET" "$AD_TAG" | write_root "$CONFIG_FILE" || die "Failed to install config"
|
||||
$SUDO chown root:telemt "$CONFIG_FILE" && $SUDO chmod 640 "$CONFIG_FILE"
|
||||
|
||||
say " -> Config created successfully."
|
||||
say " -> Generated secret for default user 'hello': $toml_secret"
|
||||
say " -> Configured secret for user 'hello': $USER_SECRET"
|
||||
}
|
||||
|
||||
generate_systemd_content() {
|
||||
|
|
@ -348,9 +481,10 @@ Group=telemt
|
|||
WorkingDirectory=$WORK_DIR
|
||||
ExecStart="${INSTALL_DIR}/${BIN_NAME}" "${CONFIG_FILE}"
|
||||
Restart=on-failure
|
||||
RestartSec=5
|
||||
LimitNOFILE=65536
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE CAP_NET_ADMIN
|
||||
CapabilityBoundingSet=CAP_NET_BIND_SERVICE CAP_NET_ADMIN
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
|
|
@ -415,7 +549,8 @@ kill_user_procs() {
|
|||
if command -v pgrep >/dev/null 2>&1; then
|
||||
pids="$(pgrep -u telemt 2>/dev/null || true)"
|
||||
else
|
||||
pids="$(ps -u telemt -o pid= 2>/dev/null || true)"
|
||||
pids="$(ps -ef 2>/dev/null | awk '$1=="telemt"{print $2}' || true)"
|
||||
[ -z "$pids" ] && pids="$(ps 2>/dev/null | awk '$2=="telemt"{print $1}' || true)"
|
||||
fi
|
||||
|
||||
if [ -n "$pids" ]; then
|
||||
|
|
@ -457,11 +592,12 @@ uninstall() {
|
|||
say ">>> Stage 5: Purging configuration, data, and user"
|
||||
$SUDO rm -rf "$CONFIG_DIR" "$WORK_DIR"
|
||||
$SUDO rm -f "$CONFIG_FILE"
|
||||
if [ "$CONFIG_PARENT_DIR" != "$CONFIG_DIR" ] && [ "$CONFIG_PARENT_DIR" != "." ] && [ "$CONFIG_PARENT_DIR" != "/" ]; then
|
||||
$SUDO rmdir "$CONFIG_PARENT_DIR" 2>/dev/null || true
|
||||
fi
|
||||
sleep 1
|
||||
$SUDO userdel telemt 2>/dev/null || $SUDO deluser telemt 2>/dev/null || true
|
||||
|
||||
if check_os_entity group telemt; then
|
||||
$SUDO groupdel telemt 2>/dev/null || $SUDO delgroup telemt 2>/dev/null || true
|
||||
fi
|
||||
else
|
||||
say "Note: Configuration and user kept. Run with 'purge' to remove completely."
|
||||
fi
|
||||
|
|
@ -479,7 +615,17 @@ case "$ACTION" in
|
|||
say "Starting installation of $BIN_NAME (Version: $TARGET_VERSION)"
|
||||
|
||||
say ">>> Stage 1: Verifying environment and dependencies"
|
||||
verify_common; verify_install_deps
|
||||
verify_common
|
||||
verify_install_deps
|
||||
|
||||
if is_config_exists && [ "$PORT_PROVIDED" -eq 0 ]; then
|
||||
ext_port="$($SUDO awk -F'=' '/^[ \t]*port[ \t]*=/ {gsub(/[^0-9]/, "", $2); print $2; exit}' "$CONFIG_FILE" 2>/dev/null || true)"
|
||||
if [ -n "$ext_port" ]; then
|
||||
SERVER_PORT="$ext_port"
|
||||
fi
|
||||
fi
|
||||
|
||||
check_port_availability
|
||||
|
||||
if [ "$TARGET_VERSION" != "latest" ]; then
|
||||
TARGET_VERSION="${TARGET_VERSION#v}"
|
||||
|
|
@ -500,7 +646,21 @@ case "$ACTION" in
|
|||
die "Temp directory is invalid or was not created"
|
||||
fi
|
||||
|
||||
if ! fetch_file "$DL_URL" "${TEMP_DIR}/${FILE_NAME}"; then
|
||||
if [ "$ARCH" = "x86_64-v3" ]; then
|
||||
say " -> x86_64-v3 build not found, falling back to standard x86_64..."
|
||||
ARCH="x86_64"
|
||||
FILE_NAME="${BIN_NAME}-${ARCH}-linux-${LIBC}.tar.gz"
|
||||
if [ "$TARGET_VERSION" = "latest" ]; then
|
||||
DL_URL="https://github.com/${REPO}/releases/latest/download/${FILE_NAME}"
|
||||
else
|
||||
DL_URL="https://github.com/${REPO}/releases/download/${TARGET_VERSION}/${FILE_NAME}"
|
||||
fi
|
||||
fetch_file "$DL_URL" "${TEMP_DIR}/${FILE_NAME}" || die "Download failed"
|
||||
else
|
||||
die "Download failed"
|
||||
fi
|
||||
fi
|
||||
|
||||
say ">>> Stage 3: Extracting archive"
|
||||
if ! gzip -dc "${TEMP_DIR}/${FILE_NAME}" | tar -xf - -C "$TEMP_DIR" 2>/dev/null; then
|
||||
|
|
@ -516,7 +676,7 @@ case "$ACTION" in
|
|||
say ">>> Stage 5: Installing binary"
|
||||
install_binary "$EXTRACTED_BIN" "${INSTALL_DIR}/${BIN_NAME}"
|
||||
|
||||
say ">>> Stage 6: Generating configuration"
|
||||
say ">>> Stage 6: Generating/Updating configuration"
|
||||
install_config
|
||||
|
||||
say ">>> Stage 7: Installing and starting service"
|
||||
|
|
@ -543,11 +703,14 @@ case "$ACTION" in
|
|||
printf ' rc-service %s status\n\n' "$SERVICE_NAME"
|
||||
fi
|
||||
|
||||
API_LISTEN="$($SUDO awk -F'"' '/^[ \t]*listen[ \t]*=/ {print $2; exit}' "$CONFIG_FILE" 2>/dev/null || true)"
|
||||
API_LISTEN="${API_LISTEN:-127.0.0.1:9091}"
|
||||
|
||||
printf 'To get your user connection links (for Telegram), run:\n'
|
||||
if command -v jq >/dev/null 2>&1; then
|
||||
printf ' curl -s http://127.0.0.1:9091/v1/users | jq -r '\''.data[] | "User: \\(.username)\\n\\(.links.tls[0] // empty)\\n"'\''\n'
|
||||
printf ' curl -s http://%s/v1/users | jq -r '\''.data[]? | "User: \\(.username)\\n\\(.links.tls[0] // empty)\\n"'\''\n' "$API_LISTEN"
|
||||
else
|
||||
printf ' curl -s http://127.0.0.1:9091/v1/users\n'
|
||||
printf ' curl -s http://%s/v1/users\n' "$API_LISTEN"
|
||||
printf ' (Tip: Install '\''jq'\'' for a much cleaner output)\n'
|
||||
fi
|
||||
|
||||
|
|
|
|||
|
|
@ -37,11 +37,12 @@ mod runtime_watch;
|
|||
mod runtime_zero;
|
||||
mod users;
|
||||
|
||||
use config_store::{current_revision, parse_if_match};
|
||||
use config_store::{current_revision, load_config_from_disk, parse_if_match};
|
||||
use events::ApiEventStore;
|
||||
use http_utils::{error_response, read_json, read_optional_json, success_response};
|
||||
use model::{
|
||||
ApiFailure, CreateUserRequest, HealthData, PatchUserRequest, RotateSecretRequest, SummaryData,
|
||||
ApiFailure, CreateUserRequest, DeleteUserResponse, HealthData, PatchUserRequest,
|
||||
RotateSecretRequest, SummaryData, UserActiveIps,
|
||||
};
|
||||
use runtime_edge::{
|
||||
EdgeConnectionsCacheEntry, build_runtime_connections_summary_data,
|
||||
|
|
@ -362,15 +363,33 @@ async fn handle(
|
|||
);
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/stats/users/active-ips") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let usernames: Vec<_> = cfg.access.users.keys().cloned().collect();
|
||||
let active_ips_map = shared.ip_tracker.get_active_ips_for_users(&usernames).await;
|
||||
let mut data: Vec<UserActiveIps> = active_ips_map
|
||||
.into_iter()
|
||||
.filter(|(_, ips)| !ips.is_empty())
|
||||
.map(|(username, active_ips)| UserActiveIps {
|
||||
username,
|
||||
active_ips,
|
||||
})
|
||||
.collect();
|
||||
data.sort_by(|a, b| a.username.cmp(&b.username));
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/stats/users") | ("GET", "/v1/users") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let disk_cfg = load_config_from_disk(&shared.config_path).await?;
|
||||
let runtime_cfg = config_rx.borrow().clone();
|
||||
let (detected_ip_v4, detected_ip_v6) = shared.detected_link_ips();
|
||||
let users = users_from_config(
|
||||
&cfg,
|
||||
&disk_cfg,
|
||||
&shared.stats,
|
||||
&shared.ip_tracker,
|
||||
detected_ip_v4,
|
||||
detected_ip_v6,
|
||||
Some(runtime_cfg.as_ref()),
|
||||
)
|
||||
.await;
|
||||
Ok(success_response(StatusCode::OK, users, revision))
|
||||
|
|
@ -389,7 +408,7 @@ async fn handle(
|
|||
let expected_revision = parse_if_match(req.headers());
|
||||
let body = read_json::<CreateUserRequest>(req.into_body(), body_limit).await?;
|
||||
let result = create_user(body, expected_revision, &shared).await;
|
||||
let (data, revision) = match result {
|
||||
let (mut data, revision) = match result {
|
||||
Ok(ok) => ok,
|
||||
Err(error) => {
|
||||
shared
|
||||
|
|
@ -398,11 +417,18 @@ async fn handle(
|
|||
return Err(error);
|
||||
}
|
||||
};
|
||||
let runtime_cfg = config_rx.borrow().clone();
|
||||
data.user.in_runtime = runtime_cfg.access.users.contains_key(&data.user.username);
|
||||
shared.runtime_events.record(
|
||||
"api.user.create.ok",
|
||||
format!("username={}", data.user.username),
|
||||
);
|
||||
Ok(success_response(StatusCode::CREATED, data, revision))
|
||||
let status = if data.user.in_runtime {
|
||||
StatusCode::CREATED
|
||||
} else {
|
||||
StatusCode::ACCEPTED
|
||||
};
|
||||
Ok(success_response(status, data, revision))
|
||||
}
|
||||
_ => {
|
||||
if let Some(user) = path.strip_prefix("/v1/users/")
|
||||
|
|
@ -411,13 +437,16 @@ async fn handle(
|
|||
{
|
||||
if method == Method::GET {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let disk_cfg = load_config_from_disk(&shared.config_path).await?;
|
||||
let runtime_cfg = config_rx.borrow().clone();
|
||||
let (detected_ip_v4, detected_ip_v6) = shared.detected_link_ips();
|
||||
let users = users_from_config(
|
||||
&cfg,
|
||||
&disk_cfg,
|
||||
&shared.stats,
|
||||
&shared.ip_tracker,
|
||||
detected_ip_v4,
|
||||
detected_ip_v6,
|
||||
Some(runtime_cfg.as_ref()),
|
||||
)
|
||||
.await;
|
||||
if let Some(user_info) =
|
||||
|
|
@ -445,7 +474,7 @@ async fn handle(
|
|||
let body =
|
||||
read_json::<PatchUserRequest>(req.into_body(), body_limit).await?;
|
||||
let result = patch_user(user, body, expected_revision, &shared).await;
|
||||
let (data, revision) = match result {
|
||||
let (mut data, revision) = match result {
|
||||
Ok(ok) => ok,
|
||||
Err(error) => {
|
||||
shared.runtime_events.record(
|
||||
|
|
@ -455,10 +484,17 @@ async fn handle(
|
|||
return Err(error);
|
||||
}
|
||||
};
|
||||
let runtime_cfg = config_rx.borrow().clone();
|
||||
data.in_runtime = runtime_cfg.access.users.contains_key(&data.username);
|
||||
shared
|
||||
.runtime_events
|
||||
.record("api.user.patch.ok", format!("username={}", data.username));
|
||||
return Ok(success_response(StatusCode::OK, data, revision));
|
||||
let status = if data.in_runtime {
|
||||
StatusCode::OK
|
||||
} else {
|
||||
StatusCode::ACCEPTED
|
||||
};
|
||||
return Ok(success_response(status, data, revision));
|
||||
}
|
||||
if method == Method::DELETE {
|
||||
if api_cfg.read_only {
|
||||
|
|
@ -486,7 +522,18 @@ async fn handle(
|
|||
shared
|
||||
.runtime_events
|
||||
.record("api.user.delete.ok", format!("username={}", deleted_user));
|
||||
return Ok(success_response(StatusCode::OK, deleted_user, revision));
|
||||
let runtime_cfg = config_rx.borrow().clone();
|
||||
let in_runtime = runtime_cfg.access.users.contains_key(&deleted_user);
|
||||
let response = DeleteUserResponse {
|
||||
username: deleted_user,
|
||||
in_runtime,
|
||||
};
|
||||
let status = if response.in_runtime {
|
||||
StatusCode::ACCEPTED
|
||||
} else {
|
||||
StatusCode::OK
|
||||
};
|
||||
return Ok(success_response(status, response, revision));
|
||||
}
|
||||
if method == Method::POST
|
||||
&& let Some(base_user) = user.strip_suffix("/rotate-secret")
|
||||
|
|
@ -514,7 +561,7 @@ async fn handle(
|
|||
&shared,
|
||||
)
|
||||
.await;
|
||||
let (data, revision) = match result {
|
||||
let (mut data, revision) = match result {
|
||||
Ok(ok) => ok,
|
||||
Err(error) => {
|
||||
shared.runtime_events.record(
|
||||
|
|
@ -524,11 +571,19 @@ async fn handle(
|
|||
return Err(error);
|
||||
}
|
||||
};
|
||||
let runtime_cfg = config_rx.borrow().clone();
|
||||
data.user.in_runtime =
|
||||
runtime_cfg.access.users.contains_key(&data.user.username);
|
||||
shared.runtime_events.record(
|
||||
"api.user.rotate_secret.ok",
|
||||
format!("username={}", base_user),
|
||||
);
|
||||
return Ok(success_response(StatusCode::OK, data, revision));
|
||||
let status = if data.user.in_runtime {
|
||||
StatusCode::OK
|
||||
} else {
|
||||
StatusCode::ACCEPTED
|
||||
};
|
||||
return Ok(success_response(status, data, revision));
|
||||
}
|
||||
if method == Method::POST {
|
||||
return Ok(error_response(
|
||||
|
|
|
|||
|
|
@ -81,10 +81,21 @@ pub(super) struct ZeroCoreData {
|
|||
pub(super) connections_total: u64,
|
||||
pub(super) connections_bad_total: u64,
|
||||
pub(super) handshake_timeouts_total: u64,
|
||||
pub(super) accept_permit_timeout_total: u64,
|
||||
pub(super) configured_users: usize,
|
||||
pub(super) telemetry_core_enabled: bool,
|
||||
pub(super) telemetry_user_enabled: bool,
|
||||
pub(super) telemetry_me_level: String,
|
||||
pub(super) conntrack_control_enabled: bool,
|
||||
pub(super) conntrack_control_available: bool,
|
||||
pub(super) conntrack_pressure_active: bool,
|
||||
pub(super) conntrack_event_queue_depth: u64,
|
||||
pub(super) conntrack_rule_apply_ok: bool,
|
||||
pub(super) conntrack_delete_attempt_total: u64,
|
||||
pub(super) conntrack_delete_success_total: u64,
|
||||
pub(super) conntrack_delete_not_found_total: u64,
|
||||
pub(super) conntrack_delete_error_total: u64,
|
||||
pub(super) conntrack_close_event_drop_total: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Clone)]
|
||||
|
|
@ -174,6 +185,24 @@ pub(super) struct ZeroMiddleProxyData {
|
|||
pub(super) route_drop_queue_full_total: u64,
|
||||
pub(super) route_drop_queue_full_base_total: u64,
|
||||
pub(super) route_drop_queue_full_high_total: u64,
|
||||
pub(super) d2c_batches_total: u64,
|
||||
pub(super) d2c_batch_frames_total: u64,
|
||||
pub(super) d2c_batch_bytes_total: u64,
|
||||
pub(super) d2c_flush_reason_queue_drain_total: u64,
|
||||
pub(super) d2c_flush_reason_batch_frames_total: u64,
|
||||
pub(super) d2c_flush_reason_batch_bytes_total: u64,
|
||||
pub(super) d2c_flush_reason_max_delay_total: u64,
|
||||
pub(super) d2c_flush_reason_ack_immediate_total: u64,
|
||||
pub(super) d2c_flush_reason_close_total: u64,
|
||||
pub(super) d2c_data_frames_total: u64,
|
||||
pub(super) d2c_ack_frames_total: u64,
|
||||
pub(super) d2c_payload_bytes_total: u64,
|
||||
pub(super) d2c_write_mode_coalesced_total: u64,
|
||||
pub(super) d2c_write_mode_split_total: u64,
|
||||
pub(super) d2c_quota_reject_pre_write_total: u64,
|
||||
pub(super) d2c_quota_reject_post_write_total: u64,
|
||||
pub(super) d2c_frame_buf_shrink_total: u64,
|
||||
pub(super) d2c_frame_buf_shrink_bytes_total: u64,
|
||||
pub(super) socks_kdf_strict_reject_total: u64,
|
||||
pub(super) socks_kdf_compat_fallback_total: u64,
|
||||
pub(super) endpoint_quarantine_total: u64,
|
||||
|
|
@ -410,6 +439,7 @@ pub(super) struct UserLinks {
|
|||
#[derive(Serialize)]
|
||||
pub(super) struct UserInfo {
|
||||
pub(super) username: String,
|
||||
pub(super) in_runtime: bool,
|
||||
pub(super) user_ad_tag: Option<String>,
|
||||
pub(super) max_tcp_conns: Option<usize>,
|
||||
pub(super) expiration_rfc3339: Option<String>,
|
||||
|
|
@ -424,12 +454,24 @@ pub(super) struct UserInfo {
|
|||
pub(super) links: UserLinks,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct UserActiveIps {
|
||||
pub(super) username: String,
|
||||
pub(super) active_ips: Vec<IpAddr>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct CreateUserResponse {
|
||||
pub(super) user: UserInfo,
|
||||
pub(super) secret: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct DeleteUserResponse {
|
||||
pub(super) username: String,
|
||||
pub(super) in_runtime: bool,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub(super) struct CreateUserRequest {
|
||||
pub(super) username: String,
|
||||
|
|
|
|||
|
|
@ -39,10 +39,21 @@ pub(super) fn build_zero_all_data(stats: &Stats, configured_users: usize) -> Zer
|
|||
connections_total: stats.get_connects_all(),
|
||||
connections_bad_total: stats.get_connects_bad(),
|
||||
handshake_timeouts_total: stats.get_handshake_timeouts(),
|
||||
accept_permit_timeout_total: stats.get_accept_permit_timeout_total(),
|
||||
configured_users,
|
||||
telemetry_core_enabled: telemetry.core_enabled,
|
||||
telemetry_user_enabled: telemetry.user_enabled,
|
||||
telemetry_me_level: telemetry.me_level.to_string(),
|
||||
conntrack_control_enabled: stats.get_conntrack_control_enabled(),
|
||||
conntrack_control_available: stats.get_conntrack_control_available(),
|
||||
conntrack_pressure_active: stats.get_conntrack_pressure_active(),
|
||||
conntrack_event_queue_depth: stats.get_conntrack_event_queue_depth(),
|
||||
conntrack_rule_apply_ok: stats.get_conntrack_rule_apply_ok(),
|
||||
conntrack_delete_attempt_total: stats.get_conntrack_delete_attempt_total(),
|
||||
conntrack_delete_success_total: stats.get_conntrack_delete_success_total(),
|
||||
conntrack_delete_not_found_total: stats.get_conntrack_delete_not_found_total(),
|
||||
conntrack_delete_error_total: stats.get_conntrack_delete_error_total(),
|
||||
conntrack_close_event_drop_total: stats.get_conntrack_close_event_drop_total(),
|
||||
},
|
||||
upstream: build_zero_upstream_data(stats),
|
||||
middle_proxy: ZeroMiddleProxyData {
|
||||
|
|
@ -68,6 +79,25 @@ pub(super) fn build_zero_all_data(stats: &Stats, configured_users: usize) -> Zer
|
|||
route_drop_queue_full_total: stats.get_me_route_drop_queue_full(),
|
||||
route_drop_queue_full_base_total: stats.get_me_route_drop_queue_full_base(),
|
||||
route_drop_queue_full_high_total: stats.get_me_route_drop_queue_full_high(),
|
||||
d2c_batches_total: stats.get_me_d2c_batches_total(),
|
||||
d2c_batch_frames_total: stats.get_me_d2c_batch_frames_total(),
|
||||
d2c_batch_bytes_total: stats.get_me_d2c_batch_bytes_total(),
|
||||
d2c_flush_reason_queue_drain_total: stats.get_me_d2c_flush_reason_queue_drain_total(),
|
||||
d2c_flush_reason_batch_frames_total: stats.get_me_d2c_flush_reason_batch_frames_total(),
|
||||
d2c_flush_reason_batch_bytes_total: stats.get_me_d2c_flush_reason_batch_bytes_total(),
|
||||
d2c_flush_reason_max_delay_total: stats.get_me_d2c_flush_reason_max_delay_total(),
|
||||
d2c_flush_reason_ack_immediate_total: stats
|
||||
.get_me_d2c_flush_reason_ack_immediate_total(),
|
||||
d2c_flush_reason_close_total: stats.get_me_d2c_flush_reason_close_total(),
|
||||
d2c_data_frames_total: stats.get_me_d2c_data_frames_total(),
|
||||
d2c_ack_frames_total: stats.get_me_d2c_ack_frames_total(),
|
||||
d2c_payload_bytes_total: stats.get_me_d2c_payload_bytes_total(),
|
||||
d2c_write_mode_coalesced_total: stats.get_me_d2c_write_mode_coalesced_total(),
|
||||
d2c_write_mode_split_total: stats.get_me_d2c_write_mode_split_total(),
|
||||
d2c_quota_reject_pre_write_total: stats.get_me_d2c_quota_reject_pre_write_total(),
|
||||
d2c_quota_reject_post_write_total: stats.get_me_d2c_quota_reject_post_write_total(),
|
||||
d2c_frame_buf_shrink_total: stats.get_me_d2c_frame_buf_shrink_total(),
|
||||
d2c_frame_buf_shrink_bytes_total: stats.get_me_d2c_frame_buf_shrink_bytes_total(),
|
||||
socks_kdf_strict_reject_total: stats.get_me_socks_kdf_strict_reject(),
|
||||
socks_kdf_compat_fallback_total: stats.get_me_socks_kdf_compat_fallback(),
|
||||
endpoint_quarantine_total: stats.get_me_endpoint_quarantine_total(),
|
||||
|
|
|
|||
|
|
@ -35,11 +35,14 @@ pub(super) struct RuntimeGatesData {
|
|||
pub(super) conditional_cast_enabled: bool,
|
||||
pub(super) me_runtime_ready: bool,
|
||||
pub(super) me2dc_fallback_enabled: bool,
|
||||
pub(super) me2dc_fast_enabled: bool,
|
||||
pub(super) use_middle_proxy: bool,
|
||||
pub(super) route_mode: &'static str,
|
||||
pub(super) reroute_active: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) reroute_to_direct_at_epoch_secs: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) reroute_reason: Option<&'static str>,
|
||||
pub(super) startup_status: &'static str,
|
||||
pub(super) startup_stage: String,
|
||||
pub(super) startup_progress_pct: f64,
|
||||
|
|
@ -47,6 +50,7 @@ pub(super) struct RuntimeGatesData {
|
|||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct EffectiveTimeoutLimits {
|
||||
pub(super) client_first_byte_idle_secs: u64,
|
||||
pub(super) client_handshake_secs: u64,
|
||||
pub(super) tg_connect_secs: u64,
|
||||
pub(super) client_keepalive_secs: u64,
|
||||
|
|
@ -86,6 +90,7 @@ pub(super) struct EffectiveMiddleProxyLimits {
|
|||
pub(super) writer_pick_mode: &'static str,
|
||||
pub(super) writer_pick_sample_size: u8,
|
||||
pub(super) me2dc_fallback: bool,
|
||||
pub(super) me2dc_fast: bool,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
|
|
@ -95,6 +100,11 @@ pub(super) struct EffectiveUserIpPolicyLimits {
|
|||
pub(super) window_secs: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct EffectiveUserTcpPolicyLimits {
|
||||
pub(super) global_each: usize,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct EffectiveLimitsData {
|
||||
pub(super) update_every_secs: u64,
|
||||
|
|
@ -104,6 +114,7 @@ pub(super) struct EffectiveLimitsData {
|
|||
pub(super) upstream: EffectiveUpstreamLimits,
|
||||
pub(super) middle_proxy: EffectiveMiddleProxyLimits,
|
||||
pub(super) user_ip_policy: EffectiveUserIpPolicyLimits,
|
||||
pub(super) user_tcp_policy: EffectiveUserTcpPolicyLimits,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
|
|
@ -169,6 +180,8 @@ pub(super) async fn build_runtime_gates_data(
|
|||
let startup_summary = build_runtime_startup_summary(shared).await;
|
||||
let route_state = shared.route_runtime.snapshot();
|
||||
let route_mode = route_state.mode.as_str();
|
||||
let fast_fallback_enabled =
|
||||
cfg.general.use_middle_proxy && cfg.general.me2dc_fallback && cfg.general.me2dc_fast;
|
||||
let reroute_active = cfg.general.use_middle_proxy
|
||||
&& cfg.general.me2dc_fallback
|
||||
&& matches!(route_state.mode, RelayRouteMode::Direct);
|
||||
|
|
@ -177,6 +190,15 @@ pub(super) async fn build_runtime_gates_data(
|
|||
} else {
|
||||
None
|
||||
};
|
||||
let reroute_reason = if reroute_active {
|
||||
if fast_fallback_enabled {
|
||||
Some("fast_not_ready_fallback")
|
||||
} else {
|
||||
Some("strict_grace_fallback")
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let me_runtime_ready = if !cfg.general.use_middle_proxy {
|
||||
true
|
||||
} else {
|
||||
|
|
@ -194,10 +216,12 @@ pub(super) async fn build_runtime_gates_data(
|
|||
conditional_cast_enabled: cfg.general.use_middle_proxy,
|
||||
me_runtime_ready,
|
||||
me2dc_fallback_enabled: cfg.general.me2dc_fallback,
|
||||
me2dc_fast_enabled: fast_fallback_enabled,
|
||||
use_middle_proxy: cfg.general.use_middle_proxy,
|
||||
route_mode,
|
||||
reroute_active,
|
||||
reroute_to_direct_at_epoch_secs,
|
||||
reroute_reason,
|
||||
startup_status: startup_summary.status,
|
||||
startup_stage: startup_summary.stage,
|
||||
startup_progress_pct: startup_summary.progress_pct,
|
||||
|
|
@ -210,8 +234,9 @@ pub(super) fn build_limits_effective_data(cfg: &ProxyConfig) -> EffectiveLimitsD
|
|||
me_reinit_every_secs: cfg.general.effective_me_reinit_every_secs(),
|
||||
me_pool_force_close_secs: cfg.general.effective_me_pool_force_close_secs(),
|
||||
timeouts: EffectiveTimeoutLimits {
|
||||
client_first_byte_idle_secs: cfg.timeouts.client_first_byte_idle_secs,
|
||||
client_handshake_secs: cfg.timeouts.client_handshake,
|
||||
tg_connect_secs: cfg.timeouts.tg_connect,
|
||||
tg_connect_secs: cfg.general.tg_connect,
|
||||
client_keepalive_secs: cfg.timeouts.client_keepalive,
|
||||
client_ack_secs: cfg.timeouts.client_ack,
|
||||
me_one_retry: cfg.timeouts.me_one_retry,
|
||||
|
|
@ -263,12 +288,16 @@ pub(super) fn build_limits_effective_data(cfg: &ProxyConfig) -> EffectiveLimitsD
|
|||
writer_pick_mode: me_writer_pick_mode_label(cfg.general.me_writer_pick_mode),
|
||||
writer_pick_sample_size: cfg.general.me_writer_pick_sample_size,
|
||||
me2dc_fallback: cfg.general.me2dc_fallback,
|
||||
me2dc_fast: cfg.general.me2dc_fast,
|
||||
},
|
||||
user_ip_policy: EffectiveUserIpPolicyLimits {
|
||||
global_each: cfg.access.user_max_unique_ips_global_each,
|
||||
mode: user_max_unique_ips_mode_label(cfg.access.user_max_unique_ips_mode),
|
||||
window_secs: cfg.access.user_max_unique_ips_window_secs,
|
||||
},
|
||||
user_tcp_policy: EffectiveUserTcpPolicyLimits {
|
||||
global_each: cfg.access.user_max_tcp_conns_global_each,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
117
src/api/users.rs
117
src/api/users.rs
|
|
@ -136,6 +136,7 @@ pub(super) async fn create_user(
|
|||
&shared.ip_tracker,
|
||||
detected_ip_v4,
|
||||
detected_ip_v6,
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
let user = users
|
||||
|
|
@ -143,8 +144,16 @@ pub(super) async fn create_user(
|
|||
.find(|entry| entry.username == body.username)
|
||||
.unwrap_or(UserInfo {
|
||||
username: body.username.clone(),
|
||||
in_runtime: false,
|
||||
user_ad_tag: None,
|
||||
max_tcp_conns: None,
|
||||
max_tcp_conns: cfg
|
||||
.access
|
||||
.user_max_tcp_conns
|
||||
.get(&body.username)
|
||||
.copied()
|
||||
.filter(|limit| *limit > 0)
|
||||
.or((cfg.access.user_max_tcp_conns_global_each > 0)
|
||||
.then_some(cfg.access.user_max_tcp_conns_global_each)),
|
||||
expiration_rfc3339: None,
|
||||
data_quota_bytes: None,
|
||||
max_unique_ips: updated_limit,
|
||||
|
|
@ -236,6 +245,7 @@ pub(super) async fn patch_user(
|
|||
&shared.ip_tracker,
|
||||
detected_ip_v4,
|
||||
detected_ip_v6,
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
let user_info = users
|
||||
|
|
@ -293,6 +303,7 @@ pub(super) async fn rotate_secret(
|
|||
&shared.ip_tracker,
|
||||
detected_ip_v4,
|
||||
detected_ip_v6,
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
let user_info = users
|
||||
|
|
@ -365,6 +376,7 @@ pub(super) async fn users_from_config(
|
|||
ip_tracker: &UserIpTracker,
|
||||
startup_detected_ip_v4: Option<IpAddr>,
|
||||
startup_detected_ip_v6: Option<IpAddr>,
|
||||
runtime_cfg: Option<&ProxyConfig>,
|
||||
) -> Vec<UserInfo> {
|
||||
let mut names = cfg.access.users.keys().cloned().collect::<Vec<_>>();
|
||||
names.sort();
|
||||
|
|
@ -394,8 +406,18 @@ pub(super) async fn users_from_config(
|
|||
tls: Vec::new(),
|
||||
});
|
||||
users.push(UserInfo {
|
||||
in_runtime: runtime_cfg
|
||||
.map(|runtime| runtime.access.users.contains_key(&username))
|
||||
.unwrap_or(false),
|
||||
user_ad_tag: cfg.access.user_ad_tags.get(&username).cloned(),
|
||||
max_tcp_conns: cfg.access.user_max_tcp_conns.get(&username).copied(),
|
||||
max_tcp_conns: cfg
|
||||
.access
|
||||
.user_max_tcp_conns
|
||||
.get(&username)
|
||||
.copied()
|
||||
.filter(|limit| *limit > 0)
|
||||
.or((cfg.access.user_max_tcp_conns_global_each > 0)
|
||||
.then_some(cfg.access.user_max_tcp_conns_global_each)),
|
||||
expiration_rfc3339: cfg
|
||||
.access
|
||||
.user_expirations
|
||||
|
|
@ -572,3 +594,94 @@ fn resolve_tls_domains(cfg: &ProxyConfig) -> Vec<&str> {
|
|||
}
|
||||
domains
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::ip_tracker::UserIpTracker;
|
||||
use crate::stats::Stats;
|
||||
|
||||
#[tokio::test]
|
||||
async fn users_from_config_reports_effective_tcp_limit_with_global_fallback() {
|
||||
let mut cfg = ProxyConfig::default();
|
||||
cfg.access.users.insert(
|
||||
"alice".to_string(),
|
||||
"0123456789abcdef0123456789abcdef".to_string(),
|
||||
);
|
||||
cfg.access.user_max_tcp_conns_global_each = 7;
|
||||
|
||||
let stats = Stats::new();
|
||||
let tracker = UserIpTracker::new();
|
||||
|
||||
let users = users_from_config(&cfg, &stats, &tracker, None, None, None).await;
|
||||
let alice = users
|
||||
.iter()
|
||||
.find(|entry| entry.username == "alice")
|
||||
.expect("alice must be present");
|
||||
assert!(!alice.in_runtime);
|
||||
assert_eq!(alice.max_tcp_conns, Some(7));
|
||||
|
||||
cfg.access.user_max_tcp_conns.insert("alice".to_string(), 5);
|
||||
let users = users_from_config(&cfg, &stats, &tracker, None, None, None).await;
|
||||
let alice = users
|
||||
.iter()
|
||||
.find(|entry| entry.username == "alice")
|
||||
.expect("alice must be present");
|
||||
assert!(!alice.in_runtime);
|
||||
assert_eq!(alice.max_tcp_conns, Some(5));
|
||||
|
||||
cfg.access.user_max_tcp_conns.insert("alice".to_string(), 0);
|
||||
let users = users_from_config(&cfg, &stats, &tracker, None, None, None).await;
|
||||
let alice = users
|
||||
.iter()
|
||||
.find(|entry| entry.username == "alice")
|
||||
.expect("alice must be present");
|
||||
assert!(!alice.in_runtime);
|
||||
assert_eq!(alice.max_tcp_conns, Some(7));
|
||||
|
||||
cfg.access.user_max_tcp_conns_global_each = 0;
|
||||
let users = users_from_config(&cfg, &stats, &tracker, None, None, None).await;
|
||||
let alice = users
|
||||
.iter()
|
||||
.find(|entry| entry.username == "alice")
|
||||
.expect("alice must be present");
|
||||
assert!(!alice.in_runtime);
|
||||
assert_eq!(alice.max_tcp_conns, None);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn users_from_config_marks_runtime_membership_when_snapshot_is_provided() {
|
||||
let mut disk_cfg = ProxyConfig::default();
|
||||
disk_cfg.access.users.insert(
|
||||
"alice".to_string(),
|
||||
"0123456789abcdef0123456789abcdef".to_string(),
|
||||
);
|
||||
disk_cfg.access.users.insert(
|
||||
"bob".to_string(),
|
||||
"fedcba9876543210fedcba9876543210".to_string(),
|
||||
);
|
||||
|
||||
let mut runtime_cfg = ProxyConfig::default();
|
||||
runtime_cfg.access.users.insert(
|
||||
"alice".to_string(),
|
||||
"0123456789abcdef0123456789abcdef".to_string(),
|
||||
);
|
||||
|
||||
let stats = Stats::new();
|
||||
let tracker = UserIpTracker::new();
|
||||
let users =
|
||||
users_from_config(&disk_cfg, &stats, &tracker, None, None, Some(&runtime_cfg)).await;
|
||||
|
||||
let alice = users
|
||||
.iter()
|
||||
.find(|entry| entry.username == "alice")
|
||||
.expect("alice must be present");
|
||||
let bob = users
|
||||
.iter()
|
||||
.find(|entry| entry.username == "bob")
|
||||
.expect("bob must be present");
|
||||
|
||||
assert!(alice.in_runtime);
|
||||
assert!(!bob.in_runtime);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
450
src/cli.rs
450
src/cli.rs
|
|
@ -1,11 +1,270 @@
|
|||
//! CLI commands: --init (fire-and-forget setup)
|
||||
//! CLI commands: --init (fire-and-forget setup), daemon options, subcommands
|
||||
//!
|
||||
//! Subcommands:
|
||||
//! - `start [OPTIONS] [config.toml]` - Start the daemon
|
||||
//! - `stop [--pid-file PATH]` - Stop a running daemon
|
||||
//! - `reload [--pid-file PATH]` - Reload configuration (SIGHUP)
|
||||
//! - `status [--pid-file PATH]` - Check daemon status
|
||||
//! - `run [OPTIONS] [config.toml]` - Run in foreground (default behavior)
|
||||
|
||||
use rand::RngExt;
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::Command;
|
||||
|
||||
#[cfg(unix)]
|
||||
use crate::daemon::{self, DEFAULT_PID_FILE, DaemonOptions};
|
||||
|
||||
/// CLI subcommand to execute.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum Subcommand {
|
||||
/// Run the proxy (default, or explicit `run` subcommand).
|
||||
Run,
|
||||
/// Start as daemon (`start` subcommand).
|
||||
Start,
|
||||
/// Stop a running daemon (`stop` subcommand).
|
||||
Stop,
|
||||
/// Reload configuration (`reload` subcommand).
|
||||
Reload,
|
||||
/// Check daemon status (`status` subcommand).
|
||||
Status,
|
||||
/// Fire-and-forget setup (`--init`).
|
||||
Init,
|
||||
}
|
||||
|
||||
/// Parsed subcommand with its options.
|
||||
#[derive(Debug)]
|
||||
pub struct ParsedCommand {
|
||||
pub subcommand: Subcommand,
|
||||
pub pid_file: PathBuf,
|
||||
pub config_path: String,
|
||||
#[cfg(unix)]
|
||||
pub daemon_opts: DaemonOptions,
|
||||
pub init_opts: Option<InitOptions>,
|
||||
}
|
||||
|
||||
impl Default for ParsedCommand {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
subcommand: Subcommand::Run,
|
||||
#[cfg(unix)]
|
||||
pid_file: PathBuf::from(DEFAULT_PID_FILE),
|
||||
#[cfg(not(unix))]
|
||||
pid_file: PathBuf::from("/var/run/telemt.pid"),
|
||||
config_path: "config.toml".to_string(),
|
||||
#[cfg(unix)]
|
||||
daemon_opts: DaemonOptions::default(),
|
||||
init_opts: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse CLI arguments into a command structure.
|
||||
pub fn parse_command(args: &[String]) -> ParsedCommand {
|
||||
let mut cmd = ParsedCommand::default();
|
||||
|
||||
// Check for --init first (legacy form)
|
||||
if args.iter().any(|a| a == "--init") {
|
||||
cmd.subcommand = Subcommand::Init;
|
||||
cmd.init_opts = parse_init_args(args);
|
||||
return cmd;
|
||||
}
|
||||
|
||||
// Check for subcommand as first argument
|
||||
if let Some(first) = args.first() {
|
||||
match first.as_str() {
|
||||
"start" => {
|
||||
cmd.subcommand = Subcommand::Start;
|
||||
#[cfg(unix)]
|
||||
{
|
||||
cmd.daemon_opts = parse_daemon_args(args);
|
||||
// Force daemonize for start command
|
||||
cmd.daemon_opts.daemonize = true;
|
||||
}
|
||||
}
|
||||
"stop" => {
|
||||
cmd.subcommand = Subcommand::Stop;
|
||||
}
|
||||
"reload" => {
|
||||
cmd.subcommand = Subcommand::Reload;
|
||||
}
|
||||
"status" => {
|
||||
cmd.subcommand = Subcommand::Status;
|
||||
}
|
||||
"run" => {
|
||||
cmd.subcommand = Subcommand::Run;
|
||||
#[cfg(unix)]
|
||||
{
|
||||
cmd.daemon_opts = parse_daemon_args(args);
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
// No subcommand, default to Run
|
||||
#[cfg(unix)]
|
||||
{
|
||||
cmd.daemon_opts = parse_daemon_args(args);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Parse remaining options
|
||||
let mut i = 0;
|
||||
while i < args.len() {
|
||||
match args[i].as_str() {
|
||||
// Skip subcommand names
|
||||
"start" | "stop" | "reload" | "status" | "run" => {}
|
||||
// PID file option (for stop/reload/status)
|
||||
"--pid-file" => {
|
||||
i += 1;
|
||||
if i < args.len() {
|
||||
cmd.pid_file = PathBuf::from(&args[i]);
|
||||
#[cfg(unix)]
|
||||
{
|
||||
cmd.daemon_opts.pid_file = Some(cmd.pid_file.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--pid-file=") => {
|
||||
cmd.pid_file = PathBuf::from(s.trim_start_matches("--pid-file="));
|
||||
#[cfg(unix)]
|
||||
{
|
||||
cmd.daemon_opts.pid_file = Some(cmd.pid_file.clone());
|
||||
}
|
||||
}
|
||||
// Config path (positional, non-flag argument)
|
||||
s if !s.starts_with('-') => {
|
||||
cmd.config_path = s.to_string();
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
|
||||
cmd
|
||||
}
|
||||
|
||||
/// Execute a subcommand that doesn't require starting the server.
|
||||
/// Returns `Some(exit_code)` if the command was handled, `None` if server should start.
|
||||
#[cfg(unix)]
|
||||
pub fn execute_subcommand(cmd: &ParsedCommand) -> Option<i32> {
|
||||
match cmd.subcommand {
|
||||
Subcommand::Stop => Some(cmd_stop(&cmd.pid_file)),
|
||||
Subcommand::Reload => Some(cmd_reload(&cmd.pid_file)),
|
||||
Subcommand::Status => Some(cmd_status(&cmd.pid_file)),
|
||||
Subcommand::Init => {
|
||||
if let Some(opts) = cmd.init_opts.clone() {
|
||||
match run_init(opts) {
|
||||
Ok(()) => Some(0),
|
||||
Err(e) => {
|
||||
eprintln!("[telemt] Init failed: {}", e);
|
||||
Some(1)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
Some(1)
|
||||
}
|
||||
}
|
||||
// Run and Start need the server
|
||||
Subcommand::Run | Subcommand::Start => None,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
pub fn execute_subcommand(cmd: &ParsedCommand) -> Option<i32> {
|
||||
match cmd.subcommand {
|
||||
Subcommand::Stop | Subcommand::Reload | Subcommand::Status => {
|
||||
eprintln!("[telemt] Subcommand not supported on this platform");
|
||||
Some(1)
|
||||
}
|
||||
Subcommand::Init => {
|
||||
if let Some(opts) = cmd.init_opts.clone() {
|
||||
match run_init(opts) {
|
||||
Ok(()) => Some(0),
|
||||
Err(e) => {
|
||||
eprintln!("[telemt] Init failed: {}", e);
|
||||
Some(1)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
Some(1)
|
||||
}
|
||||
}
|
||||
Subcommand::Run | Subcommand::Start => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Stop command: send SIGTERM to the running daemon.
|
||||
#[cfg(unix)]
|
||||
fn cmd_stop(pid_file: &Path) -> i32 {
|
||||
use nix::sys::signal::Signal;
|
||||
|
||||
println!("Stopping telemt daemon...");
|
||||
|
||||
match daemon::signal_pid_file(pid_file, Signal::SIGTERM) {
|
||||
Ok(()) => {
|
||||
println!("Stop signal sent successfully");
|
||||
|
||||
// Wait for process to exit (up to 10 seconds)
|
||||
for _ in 0..20 {
|
||||
std::thread::sleep(std::time::Duration::from_millis(500));
|
||||
if let daemon::DaemonStatus::NotRunning = daemon::check_status(pid_file) {
|
||||
println!("Daemon stopped");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
println!("Daemon may still be shutting down");
|
||||
0
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("Failed to stop daemon: {}", e);
|
||||
1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Reload command: send SIGHUP to trigger config reload.
|
||||
#[cfg(unix)]
|
||||
fn cmd_reload(pid_file: &Path) -> i32 {
|
||||
use nix::sys::signal::Signal;
|
||||
|
||||
println!("Reloading telemt configuration...");
|
||||
|
||||
match daemon::signal_pid_file(pid_file, Signal::SIGHUP) {
|
||||
Ok(()) => {
|
||||
println!("Reload signal sent successfully");
|
||||
0
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("Failed to reload daemon: {}", e);
|
||||
1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Status command: check if daemon is running.
|
||||
#[cfg(unix)]
|
||||
fn cmd_status(pid_file: &Path) -> i32 {
|
||||
match daemon::check_status(pid_file) {
|
||||
daemon::DaemonStatus::Running(pid) => {
|
||||
println!("telemt is running (pid {})", pid);
|
||||
0
|
||||
}
|
||||
daemon::DaemonStatus::Stale(pid) => {
|
||||
println!("telemt is not running (stale pid file, was pid {})", pid);
|
||||
// Clean up stale PID file
|
||||
let _ = std::fs::remove_file(pid_file);
|
||||
1
|
||||
}
|
||||
daemon::DaemonStatus::NotRunning => {
|
||||
println!("telemt is not running");
|
||||
1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Options for the init command
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct InitOptions {
|
||||
pub port: u16,
|
||||
pub domain: String,
|
||||
|
|
@ -15,6 +274,64 @@ pub struct InitOptions {
|
|||
pub no_start: bool,
|
||||
}
|
||||
|
||||
/// Parse daemon-related options from CLI args.
|
||||
#[cfg(unix)]
|
||||
pub fn parse_daemon_args(args: &[String]) -> DaemonOptions {
|
||||
let mut opts = DaemonOptions::default();
|
||||
let mut i = 0;
|
||||
|
||||
while i < args.len() {
|
||||
match args[i].as_str() {
|
||||
"--daemon" | "-d" => {
|
||||
opts.daemonize = true;
|
||||
}
|
||||
"--foreground" | "-f" => {
|
||||
opts.foreground = true;
|
||||
}
|
||||
"--pid-file" => {
|
||||
i += 1;
|
||||
if i < args.len() {
|
||||
opts.pid_file = Some(PathBuf::from(&args[i]));
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--pid-file=") => {
|
||||
opts.pid_file = Some(PathBuf::from(s.trim_start_matches("--pid-file=")));
|
||||
}
|
||||
"--run-as-user" => {
|
||||
i += 1;
|
||||
if i < args.len() {
|
||||
opts.user = Some(args[i].clone());
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--run-as-user=") => {
|
||||
opts.user = Some(s.trim_start_matches("--run-as-user=").to_string());
|
||||
}
|
||||
"--run-as-group" => {
|
||||
i += 1;
|
||||
if i < args.len() {
|
||||
opts.group = Some(args[i].clone());
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--run-as-group=") => {
|
||||
opts.group = Some(s.trim_start_matches("--run-as-group=").to_string());
|
||||
}
|
||||
"--working-dir" => {
|
||||
i += 1;
|
||||
if i < args.len() {
|
||||
opts.working_dir = Some(PathBuf::from(&args[i]));
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--working-dir=") => {
|
||||
opts.working_dir = Some(PathBuf::from(s.trim_start_matches("--working-dir=")));
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
|
||||
opts
|
||||
}
|
||||
|
||||
impl Default for InitOptions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
|
|
@ -84,10 +401,16 @@ pub fn parse_init_args(args: &[String]) -> Option<InitOptions> {
|
|||
|
||||
/// Run the fire-and-forget setup.
|
||||
pub fn run_init(opts: InitOptions) -> Result<(), Box<dyn std::error::Error>> {
|
||||
use crate::service::{self, InitSystem, ServiceOptions};
|
||||
|
||||
eprintln!("[telemt] Fire-and-forget setup");
|
||||
eprintln!();
|
||||
|
||||
// 1. Generate or validate secret
|
||||
// 1. Detect init system
|
||||
let init_system = service::detect_init_system();
|
||||
eprintln!("[+] Detected init system: {}", init_system);
|
||||
|
||||
// 2. Generate or validate secret
|
||||
let secret = match opts.secret {
|
||||
Some(s) => {
|
||||
if s.len() != 32 || !s.chars().all(|c| c.is_ascii_hexdigit()) {
|
||||
|
|
@ -104,50 +427,74 @@ pub fn run_init(opts: InitOptions) -> Result<(), Box<dyn std::error::Error>> {
|
|||
eprintln!("[+] Port: {}", opts.port);
|
||||
eprintln!("[+] Domain: {}", opts.domain);
|
||||
|
||||
// 2. Create config directory
|
||||
// 3. Create config directory
|
||||
fs::create_dir_all(&opts.config_dir)?;
|
||||
let config_path = opts.config_dir.join("config.toml");
|
||||
|
||||
// 3. Write config
|
||||
// 4. Write config
|
||||
let config_content = generate_config(&opts.username, &secret, opts.port, &opts.domain);
|
||||
fs::write(&config_path, &config_content)?;
|
||||
eprintln!("[+] Config written to {}", config_path.display());
|
||||
|
||||
// 4. Write systemd unit
|
||||
// 5. Generate and write service file
|
||||
let exe_path =
|
||||
std::env::current_exe().unwrap_or_else(|_| PathBuf::from("/usr/local/bin/telemt"));
|
||||
|
||||
let unit_path = Path::new("/etc/systemd/system/telemt.service");
|
||||
let unit_content = generate_systemd_unit(&exe_path, &config_path);
|
||||
let service_opts = ServiceOptions {
|
||||
exe_path: &exe_path,
|
||||
config_path: &config_path,
|
||||
user: None, // Let systemd/init handle user
|
||||
group: None,
|
||||
pid_file: "/var/run/telemt.pid",
|
||||
working_dir: Some("/var/lib/telemt"),
|
||||
description: "Telemt MTProxy - Telegram MTProto Proxy",
|
||||
};
|
||||
|
||||
match fs::write(unit_path, &unit_content) {
|
||||
let service_path = service::service_file_path(init_system);
|
||||
let service_content = service::generate_service_file(init_system, &service_opts);
|
||||
|
||||
// Ensure parent directory exists
|
||||
if let Some(parent) = Path::new(service_path).parent() {
|
||||
let _ = fs::create_dir_all(parent);
|
||||
}
|
||||
|
||||
match fs::write(service_path, &service_content) {
|
||||
Ok(()) => {
|
||||
eprintln!("[+] Systemd unit written to {}", unit_path.display());
|
||||
eprintln!("[+] Service file written to {}", service_path);
|
||||
|
||||
// Make script executable for OpenRC/FreeBSD
|
||||
#[cfg(unix)]
|
||||
if init_system == InitSystem::OpenRC || init_system == InitSystem::FreeBSDRc {
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
let mut perms = fs::metadata(service_path)?.permissions();
|
||||
perms.set_mode(0o755);
|
||||
fs::set_permissions(service_path, perms)?;
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("[!] Cannot write systemd unit (run as root?): {}", e);
|
||||
eprintln!("[!] Manual unit file content:");
|
||||
eprintln!("{}", unit_content);
|
||||
eprintln!("[!] Cannot write service file (run as root?): {}", e);
|
||||
eprintln!("[!] Manual service file content:");
|
||||
eprintln!("{}", service_content);
|
||||
|
||||
// Still print links and config
|
||||
// Still print links and installation instructions
|
||||
eprintln!();
|
||||
eprintln!("{}", service::installation_instructions(init_system));
|
||||
print_links(&opts.username, &secret, opts.port, &opts.domain);
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
// 5. Reload systemd
|
||||
// 6. Install and enable service based on init system
|
||||
match init_system {
|
||||
InitSystem::Systemd => {
|
||||
run_cmd("systemctl", &["daemon-reload"]);
|
||||
|
||||
// 6. Enable service
|
||||
run_cmd("systemctl", &["enable", "telemt.service"]);
|
||||
eprintln!("[+] Service enabled");
|
||||
|
||||
// 7. Start service (unless --no-start)
|
||||
if !opts.no_start {
|
||||
run_cmd("systemctl", &["start", "telemt.service"]);
|
||||
eprintln!("[+] Service started");
|
||||
|
||||
// Brief delay then check status
|
||||
std::thread::sleep(std::time::Duration::from_secs(1));
|
||||
let status = Command::new("systemctl")
|
||||
.args(["is-active", "telemt.service"])
|
||||
|
|
@ -166,10 +513,40 @@ pub fn run_init(opts: InitOptions) -> Result<(), Box<dyn std::error::Error>> {
|
|||
eprintln!("[+] Service not started (--no-start)");
|
||||
eprintln!("[+] Start manually: systemctl start telemt.service");
|
||||
}
|
||||
}
|
||||
InitSystem::OpenRC => {
|
||||
run_cmd("rc-update", &["add", "telemt", "default"]);
|
||||
eprintln!("[+] Service enabled");
|
||||
|
||||
if !opts.no_start {
|
||||
run_cmd("rc-service", &["telemt", "start"]);
|
||||
eprintln!("[+] Service started");
|
||||
} else {
|
||||
eprintln!("[+] Service not started (--no-start)");
|
||||
eprintln!("[+] Start manually: rc-service telemt start");
|
||||
}
|
||||
}
|
||||
InitSystem::FreeBSDRc => {
|
||||
run_cmd("sysrc", &["telemt_enable=YES"]);
|
||||
eprintln!("[+] Service enabled");
|
||||
|
||||
if !opts.no_start {
|
||||
run_cmd("service", &["telemt", "start"]);
|
||||
eprintln!("[+] Service started");
|
||||
} else {
|
||||
eprintln!("[+] Service not started (--no-start)");
|
||||
eprintln!("[+] Start manually: service telemt start");
|
||||
}
|
||||
}
|
||||
InitSystem::Unknown => {
|
||||
eprintln!("[!] Unknown init system - service file written but not installed");
|
||||
eprintln!("[!] You may need to install it manually");
|
||||
}
|
||||
}
|
||||
|
||||
eprintln!();
|
||||
|
||||
// 8. Print links
|
||||
// 7. Print links
|
||||
print_links(&opts.username, &secret, opts.port, &opts.domain);
|
||||
|
||||
Ok(())
|
||||
|
|
@ -207,6 +584,7 @@ me_pool_drain_soft_evict_cooldown_ms = 1000
|
|||
me_bind_stale_mode = "never"
|
||||
me_pool_min_fresh_ratio = 0.8
|
||||
me_reinit_drain_timeout_secs = 90
|
||||
tg_connect = 10
|
||||
|
||||
[network]
|
||||
ipv4 = true
|
||||
|
|
@ -232,8 +610,8 @@ ip = "0.0.0.0"
|
|||
ip = "::"
|
||||
|
||||
[timeouts]
|
||||
client_handshake = 15
|
||||
tg_connect = 10
|
||||
client_first_byte_idle_secs = 300
|
||||
client_handshake = 60
|
||||
client_keepalive = 60
|
||||
client_ack = 300
|
||||
|
||||
|
|
@ -245,6 +623,7 @@ fake_cert_len = 2048
|
|||
tls_full_cert_ttl_secs = 90
|
||||
|
||||
[access]
|
||||
user_max_tcp_conns_global_each = 0
|
||||
replay_check_len = 65536
|
||||
replay_window_secs = 120
|
||||
ignore_time_skew = false
|
||||
|
|
@ -264,35 +643,6 @@ weight = 10
|
|||
)
|
||||
}
|
||||
|
||||
fn generate_systemd_unit(exe_path: &Path, config_path: &Path) -> String {
|
||||
format!(
|
||||
r#"[Unit]
|
||||
Description=Telemt MTProxy
|
||||
Documentation=https://github.com/telemt/telemt
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStart={exe} {config}
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
LimitNOFILE=65535
|
||||
# Security hardening
|
||||
NoNewPrivileges=true
|
||||
ProtectSystem=strict
|
||||
ProtectHome=true
|
||||
ReadWritePaths=/etc/telemt
|
||||
PrivateTmp=true
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
"#,
|
||||
exe = exe_path.display(),
|
||||
config = config_path.display(),
|
||||
)
|
||||
}
|
||||
|
||||
fn run_cmd(cmd: &str, args: &[&str]) {
|
||||
match Command::new(cmd).args(args).output() {
|
||||
Ok(output) => {
|
||||
|
|
|
|||
|
|
@ -29,6 +29,8 @@ const DEFAULT_ME_D2C_FLUSH_BATCH_MAX_FRAMES: usize = 32;
|
|||
const DEFAULT_ME_D2C_FLUSH_BATCH_MAX_BYTES: usize = 128 * 1024;
|
||||
const DEFAULT_ME_D2C_FLUSH_BATCH_MAX_DELAY_US: u64 = 500;
|
||||
const DEFAULT_ME_D2C_ACK_FLUSH_IMMEDIATE: bool = true;
|
||||
const DEFAULT_ME_QUOTA_SOFT_OVERSHOOT_BYTES: u64 = 64 * 1024;
|
||||
const DEFAULT_ME_D2C_FRAME_BUF_SHRINK_THRESHOLD_BYTES: usize = 256 * 1024;
|
||||
const DEFAULT_DIRECT_RELAY_COPY_BUF_C2S_BYTES: usize = 64 * 1024;
|
||||
const DEFAULT_DIRECT_RELAY_COPY_BUF_S2C_BYTES: usize = 256 * 1024;
|
||||
const DEFAULT_ME_WRITER_PICK_SAMPLE_SIZE: u8 = 3;
|
||||
|
|
@ -46,6 +48,10 @@ const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_BUDGET_PER_CORE: u16 = 16;
|
|||
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_COOLDOWN_MS: u64 = 1000;
|
||||
const DEFAULT_USER_MAX_UNIQUE_IPS_WINDOW_SECS: u64 = 30;
|
||||
const DEFAULT_ACCEPT_PERMIT_TIMEOUT_MS: u64 = 250;
|
||||
const DEFAULT_CONNTRACK_CONTROL_ENABLED: bool = true;
|
||||
const DEFAULT_CONNTRACK_PRESSURE_HIGH_WATERMARK_PCT: u8 = 85;
|
||||
const DEFAULT_CONNTRACK_PRESSURE_LOW_WATERMARK_PCT: u8 = 70;
|
||||
const DEFAULT_CONNTRACK_DELETE_BUDGET_PER_SEC: u64 = 4096;
|
||||
const DEFAULT_UPSTREAM_CONNECT_RETRY_ATTEMPTS: u32 = 2;
|
||||
const DEFAULT_UPSTREAM_UNHEALTHY_FAIL_THRESHOLD: u32 = 5;
|
||||
const DEFAULT_UPSTREAM_CONNECT_BUDGET_MS: u64 = 3000;
|
||||
|
|
@ -69,6 +75,22 @@ pub(crate) fn default_tls_fetch_scope() -> String {
|
|||
String::new()
|
||||
}
|
||||
|
||||
pub(crate) fn default_tls_fetch_attempt_timeout_ms() -> u64 {
|
||||
5_000
|
||||
}
|
||||
|
||||
pub(crate) fn default_tls_fetch_total_budget_ms() -> u64 {
|
||||
15_000
|
||||
}
|
||||
|
||||
pub(crate) fn default_tls_fetch_strict_route() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_tls_fetch_profile_cache_ttl_secs() -> u64 {
|
||||
600
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_port() -> u16 {
|
||||
443
|
||||
}
|
||||
|
|
@ -78,7 +100,7 @@ pub(crate) fn default_fake_cert_len() -> usize {
|
|||
}
|
||||
|
||||
pub(crate) fn default_tls_front_dir() -> String {
|
||||
"tlsfront".to_string()
|
||||
"/etc/telemt/tlsfront".to_string()
|
||||
}
|
||||
|
||||
pub(crate) fn default_replay_check_len() -> usize {
|
||||
|
|
@ -92,7 +114,11 @@ pub(crate) fn default_replay_window_secs() -> u64 {
|
|||
}
|
||||
|
||||
pub(crate) fn default_handshake_timeout() -> u64 {
|
||||
30
|
||||
60
|
||||
}
|
||||
|
||||
pub(crate) fn default_client_first_byte_idle_secs() -> u64 {
|
||||
300
|
||||
}
|
||||
|
||||
pub(crate) fn default_relay_idle_policy_v2_enabled() -> bool {
|
||||
|
|
@ -183,14 +209,38 @@ pub(crate) fn default_proxy_protocol_header_timeout_ms() -> u64 {
|
|||
500
|
||||
}
|
||||
|
||||
pub(crate) fn default_proxy_protocol_trusted_cidrs() -> Vec<IpNetwork> {
|
||||
vec!["0.0.0.0/0".parse().unwrap(), "::/0".parse().unwrap()]
|
||||
}
|
||||
|
||||
pub(crate) fn default_server_max_connections() -> u32 {
|
||||
10_000
|
||||
}
|
||||
|
||||
pub(crate) fn default_listen_backlog() -> u32 {
|
||||
1024
|
||||
}
|
||||
|
||||
pub(crate) fn default_accept_permit_timeout_ms() -> u64 {
|
||||
DEFAULT_ACCEPT_PERMIT_TIMEOUT_MS
|
||||
}
|
||||
|
||||
pub(crate) fn default_conntrack_control_enabled() -> bool {
|
||||
DEFAULT_CONNTRACK_CONTROL_ENABLED
|
||||
}
|
||||
|
||||
pub(crate) fn default_conntrack_pressure_high_watermark_pct() -> u8 {
|
||||
DEFAULT_CONNTRACK_PRESSURE_HIGH_WATERMARK_PCT
|
||||
}
|
||||
|
||||
pub(crate) fn default_conntrack_pressure_low_watermark_pct() -> u8 {
|
||||
DEFAULT_CONNTRACK_PRESSURE_LOW_WATERMARK_PCT
|
||||
}
|
||||
|
||||
pub(crate) fn default_conntrack_delete_budget_per_sec() -> u64 {
|
||||
DEFAULT_CONNTRACK_DELETE_BUDGET_PER_SEC
|
||||
}
|
||||
|
||||
pub(crate) fn default_prefer_4() -> u8 {
|
||||
4
|
||||
}
|
||||
|
|
@ -251,6 +301,10 @@ pub(crate) fn default_me2dc_fallback() -> bool {
|
|||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_me2dc_fast() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_keepalive_interval() -> u64 {
|
||||
8
|
||||
}
|
||||
|
|
@ -387,6 +441,14 @@ pub(crate) fn default_me_d2c_ack_flush_immediate() -> bool {
|
|||
DEFAULT_ME_D2C_ACK_FLUSH_IMMEDIATE
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_quota_soft_overshoot_bytes() -> u64 {
|
||||
DEFAULT_ME_QUOTA_SOFT_OVERSHOOT_BYTES
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_d2c_frame_buf_shrink_threshold_bytes() -> usize {
|
||||
DEFAULT_ME_D2C_FRAME_BUF_SHRINK_THRESHOLD_BYTES
|
||||
}
|
||||
|
||||
pub(crate) fn default_direct_relay_copy_buf_c2s_bytes() -> usize {
|
||||
DEFAULT_DIRECT_RELAY_COPY_BUF_C2S_BYTES
|
||||
}
|
||||
|
|
@ -496,7 +558,7 @@ pub(crate) fn default_beobachten_flush_secs() -> u64 {
|
|||
}
|
||||
|
||||
pub(crate) fn default_beobachten_file() -> String {
|
||||
"cache/beobachten.txt".to_string()
|
||||
"/etc/telemt/beobachten.txt".to_string()
|
||||
}
|
||||
|
||||
pub(crate) fn default_tls_new_session_tickets() -> u8 {
|
||||
|
|
@ -543,6 +605,20 @@ pub(crate) fn default_mask_shape_above_cap_blur_max_bytes() -> usize {
|
|||
512
|
||||
}
|
||||
|
||||
#[cfg(not(test))]
|
||||
pub(crate) fn default_mask_relay_max_bytes() -> usize {
|
||||
5 * 1024 * 1024
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn default_mask_relay_max_bytes() -> usize {
|
||||
32 * 1024
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_classifier_prefetch_timeout_ms() -> u64 {
|
||||
5
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_timing_normalization_enabled() -> bool {
|
||||
false
|
||||
}
|
||||
|
|
@ -755,6 +831,10 @@ pub(crate) fn default_user_max_unique_ips_window_secs() -> u64 {
|
|||
DEFAULT_USER_MAX_UNIQUE_IPS_WINDOW_SECS
|
||||
}
|
||||
|
||||
pub(crate) fn default_user_max_tcp_conns_global_each() -> usize {
|
||||
0
|
||||
}
|
||||
|
||||
pub(crate) fn default_user_max_unique_ips_global_each() -> usize {
|
||||
0
|
||||
}
|
||||
|
|
|
|||
|
|
@ -106,6 +106,8 @@ pub struct HotFields {
|
|||
pub me_d2c_flush_batch_max_bytes: usize,
|
||||
pub me_d2c_flush_batch_max_delay_us: u64,
|
||||
pub me_d2c_ack_flush_immediate: bool,
|
||||
pub me_quota_soft_overshoot_bytes: u64,
|
||||
pub me_d2c_frame_buf_shrink_threshold_bytes: usize,
|
||||
pub direct_relay_copy_buf_c2s_bytes: usize,
|
||||
pub direct_relay_copy_buf_s2c_bytes: usize,
|
||||
pub me_health_interval_ms_unhealthy: u64,
|
||||
|
|
@ -115,6 +117,7 @@ pub struct HotFields {
|
|||
pub users: std::collections::HashMap<String, String>,
|
||||
pub user_ad_tags: std::collections::HashMap<String, String>,
|
||||
pub user_max_tcp_conns: std::collections::HashMap<String, usize>,
|
||||
pub user_max_tcp_conns_global_each: usize,
|
||||
pub user_expirations: std::collections::HashMap<String, chrono::DateTime<chrono::Utc>>,
|
||||
pub user_data_quota: std::collections::HashMap<String, u64>,
|
||||
pub user_max_unique_ips: std::collections::HashMap<String, usize>,
|
||||
|
|
@ -225,6 +228,10 @@ impl HotFields {
|
|||
me_d2c_flush_batch_max_bytes: cfg.general.me_d2c_flush_batch_max_bytes,
|
||||
me_d2c_flush_batch_max_delay_us: cfg.general.me_d2c_flush_batch_max_delay_us,
|
||||
me_d2c_ack_flush_immediate: cfg.general.me_d2c_ack_flush_immediate,
|
||||
me_quota_soft_overshoot_bytes: cfg.general.me_quota_soft_overshoot_bytes,
|
||||
me_d2c_frame_buf_shrink_threshold_bytes: cfg
|
||||
.general
|
||||
.me_d2c_frame_buf_shrink_threshold_bytes,
|
||||
direct_relay_copy_buf_c2s_bytes: cfg.general.direct_relay_copy_buf_c2s_bytes,
|
||||
direct_relay_copy_buf_s2c_bytes: cfg.general.direct_relay_copy_buf_s2c_bytes,
|
||||
me_health_interval_ms_unhealthy: cfg.general.me_health_interval_ms_unhealthy,
|
||||
|
|
@ -234,6 +241,7 @@ impl HotFields {
|
|||
users: cfg.access.users.clone(),
|
||||
user_ad_tags: cfg.access.user_ad_tags.clone(),
|
||||
user_max_tcp_conns: cfg.access.user_max_tcp_conns.clone(),
|
||||
user_max_tcp_conns_global_each: cfg.access.user_max_tcp_conns_global_each,
|
||||
user_expirations: cfg.access.user_expirations.clone(),
|
||||
user_data_quota: cfg.access.user_data_quota.clone(),
|
||||
user_max_unique_ips: cfg.access.user_max_unique_ips.clone(),
|
||||
|
|
@ -511,6 +519,9 @@ fn overlay_hot_fields(old: &ProxyConfig, new: &ProxyConfig) -> ProxyConfig {
|
|||
cfg.general.me_d2c_flush_batch_max_bytes = new.general.me_d2c_flush_batch_max_bytes;
|
||||
cfg.general.me_d2c_flush_batch_max_delay_us = new.general.me_d2c_flush_batch_max_delay_us;
|
||||
cfg.general.me_d2c_ack_flush_immediate = new.general.me_d2c_ack_flush_immediate;
|
||||
cfg.general.me_quota_soft_overshoot_bytes = new.general.me_quota_soft_overshoot_bytes;
|
||||
cfg.general.me_d2c_frame_buf_shrink_threshold_bytes =
|
||||
new.general.me_d2c_frame_buf_shrink_threshold_bytes;
|
||||
cfg.general.direct_relay_copy_buf_c2s_bytes = new.general.direct_relay_copy_buf_c2s_bytes;
|
||||
cfg.general.direct_relay_copy_buf_s2c_bytes = new.general.direct_relay_copy_buf_s2c_bytes;
|
||||
cfg.general.me_health_interval_ms_unhealthy = new.general.me_health_interval_ms_unhealthy;
|
||||
|
|
@ -521,6 +532,7 @@ fn overlay_hot_fields(old: &ProxyConfig, new: &ProxyConfig) -> ProxyConfig {
|
|||
cfg.access.users = new.access.users.clone();
|
||||
cfg.access.user_ad_tags = new.access.user_ad_tags.clone();
|
||||
cfg.access.user_max_tcp_conns = new.access.user_max_tcp_conns.clone();
|
||||
cfg.access.user_max_tcp_conns_global_each = new.access.user_max_tcp_conns_global_each;
|
||||
cfg.access.user_expirations = new.access.user_expirations.clone();
|
||||
cfg.access.user_data_quota = new.access.user_data_quota.clone();
|
||||
cfg.access.user_max_unique_ips = new.access.user_max_unique_ips.clone();
|
||||
|
|
@ -528,6 +540,10 @@ fn overlay_hot_fields(old: &ProxyConfig, new: &ProxyConfig) -> ProxyConfig {
|
|||
cfg.access.user_max_unique_ips_mode = new.access.user_max_unique_ips_mode;
|
||||
cfg.access.user_max_unique_ips_window_secs = new.access.user_max_unique_ips_window_secs;
|
||||
|
||||
if cfg.rebuild_runtime_user_auth().is_err() {
|
||||
cfg.runtime_user_auth = None;
|
||||
}
|
||||
|
||||
cfg
|
||||
}
|
||||
|
||||
|
|
@ -561,6 +577,7 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
|
|||
}
|
||||
if old.server.proxy_protocol != new.server.proxy_protocol
|
||||
|| !listeners_equal(&old.server.listeners, &new.server.listeners)
|
||||
|| old.server.listen_backlog != new.server.listen_backlog
|
||||
|| old.server.listen_addr_ipv4 != new.server.listen_addr_ipv4
|
||||
|| old.server.listen_addr_ipv6 != new.server.listen_addr_ipv6
|
||||
|| old.server.listen_tcp != new.server.listen_tcp
|
||||
|
|
@ -593,6 +610,9 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
|
|||
|| old.censorship.mask_shape_above_cap_blur != new.censorship.mask_shape_above_cap_blur
|
||||
|| old.censorship.mask_shape_above_cap_blur_max_bytes
|
||||
!= new.censorship.mask_shape_above_cap_blur_max_bytes
|
||||
|| old.censorship.mask_relay_max_bytes != new.censorship.mask_relay_max_bytes
|
||||
|| old.censorship.mask_classifier_prefetch_timeout_ms
|
||||
!= new.censorship.mask_classifier_prefetch_timeout_ms
|
||||
|| old.censorship.mask_timing_normalization_enabled
|
||||
!= new.censorship.mask_timing_normalization_enabled
|
||||
|| old.censorship.mask_timing_normalization_floor_ms
|
||||
|
|
@ -639,6 +659,9 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
|
|||
}
|
||||
if old.general.me_route_no_writer_mode != new.general.me_route_no_writer_mode
|
||||
|| old.general.me_route_no_writer_wait_ms != new.general.me_route_no_writer_wait_ms
|
||||
|| old.general.me_route_hybrid_max_wait_ms != new.general.me_route_hybrid_max_wait_ms
|
||||
|| old.general.me_route_blocking_send_timeout_ms
|
||||
!= new.general.me_route_blocking_send_timeout_ms
|
||||
|| old.general.me_route_inline_recovery_attempts
|
||||
!= new.general.me_route_inline_recovery_attempts
|
||||
|| old.general.me_route_inline_recovery_wait_ms
|
||||
|
|
@ -657,9 +680,11 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
|
|||
warned = true;
|
||||
warn!("config reload: general.me_init_retry_attempts changed; restart required");
|
||||
}
|
||||
if old.general.me2dc_fallback != new.general.me2dc_fallback {
|
||||
if old.general.me2dc_fallback != new.general.me2dc_fallback
|
||||
|| old.general.me2dc_fast != new.general.me2dc_fast
|
||||
{
|
||||
warned = true;
|
||||
warn!("config reload: general.me2dc_fallback changed; restart required");
|
||||
warn!("config reload: general.me2dc_fallback/me2dc_fast changed; restart required");
|
||||
}
|
||||
if old.general.proxy_config_v4_cache_path != new.general.proxy_config_v4_cache_path
|
||||
|| old.general.proxy_config_v6_cache_path != new.general.proxy_config_v6_cache_path
|
||||
|
|
@ -678,6 +703,7 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
|
|||
if old.general.upstream_connect_retry_attempts != new.general.upstream_connect_retry_attempts
|
||||
|| old.general.upstream_connect_retry_backoff_ms
|
||||
!= new.general.upstream_connect_retry_backoff_ms
|
||||
|| old.general.tg_connect != new.general.tg_connect
|
||||
|| old.general.upstream_unhealthy_fail_threshold
|
||||
!= new.general.upstream_unhealthy_fail_threshold
|
||||
|| old.general.upstream_connect_failfast_hard_errors
|
||||
|
|
@ -1030,15 +1056,20 @@ fn log_changes(
|
|||
|| old_hot.me_d2c_flush_batch_max_bytes != new_hot.me_d2c_flush_batch_max_bytes
|
||||
|| old_hot.me_d2c_flush_batch_max_delay_us != new_hot.me_d2c_flush_batch_max_delay_us
|
||||
|| old_hot.me_d2c_ack_flush_immediate != new_hot.me_d2c_ack_flush_immediate
|
||||
|| old_hot.me_quota_soft_overshoot_bytes != new_hot.me_quota_soft_overshoot_bytes
|
||||
|| old_hot.me_d2c_frame_buf_shrink_threshold_bytes
|
||||
!= new_hot.me_d2c_frame_buf_shrink_threshold_bytes
|
||||
|| old_hot.direct_relay_copy_buf_c2s_bytes != new_hot.direct_relay_copy_buf_c2s_bytes
|
||||
|| old_hot.direct_relay_copy_buf_s2c_bytes != new_hot.direct_relay_copy_buf_s2c_bytes
|
||||
{
|
||||
info!(
|
||||
"config reload: relay_tuning: me_d2c_frames={} me_d2c_bytes={} me_d2c_delay_us={} me_ack_flush_immediate={} direct_buf_c2s={} direct_buf_s2c={}",
|
||||
"config reload: relay_tuning: me_d2c_frames={} me_d2c_bytes={} me_d2c_delay_us={} me_ack_flush_immediate={} me_quota_soft_overshoot_bytes={} me_d2c_frame_buf_shrink_threshold_bytes={} direct_buf_c2s={} direct_buf_s2c={}",
|
||||
new_hot.me_d2c_flush_batch_max_frames,
|
||||
new_hot.me_d2c_flush_batch_max_bytes,
|
||||
new_hot.me_d2c_flush_batch_max_delay_us,
|
||||
new_hot.me_d2c_ack_flush_immediate,
|
||||
new_hot.me_quota_soft_overshoot_bytes,
|
||||
new_hot.me_d2c_frame_buf_shrink_threshold_bytes,
|
||||
new_hot.direct_relay_copy_buf_c2s_bytes,
|
||||
new_hot.direct_relay_copy_buf_s2c_bytes,
|
||||
);
|
||||
|
|
@ -1121,6 +1152,12 @@ fn log_changes(
|
|||
new_hot.user_max_tcp_conns.len()
|
||||
);
|
||||
}
|
||||
if old_hot.user_max_tcp_conns_global_each != new_hot.user_max_tcp_conns_global_each {
|
||||
info!(
|
||||
"config reload: user_max_tcp_conns policy global_each={}",
|
||||
new_hot.user_max_tcp_conns_global_each
|
||||
);
|
||||
}
|
||||
if old_hot.user_expirations != new_hot.user_expirations {
|
||||
info!(
|
||||
"config reload: user_expirations updated ({} entries)",
|
||||
|
|
|
|||
|
|
@ -1,9 +1,10 @@
|
|||
#![allow(deprecated)]
|
||||
|
||||
use std::collections::{BTreeSet, HashMap};
|
||||
use std::collections::{BTreeSet, HashMap, HashSet};
|
||||
use std::hash::{DefaultHasher, Hash, Hasher};
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
|
||||
use rand::RngExt;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
|
@ -15,6 +16,13 @@ use crate::error::{ProxyError, Result};
|
|||
use super::defaults::*;
|
||||
use super::types::*;
|
||||
|
||||
const ACCESS_SECRET_BYTES: usize = 16;
|
||||
const MAX_ME_WRITER_CMD_CHANNEL_CAPACITY: usize = 16_384;
|
||||
const MAX_ME_ROUTE_CHANNEL_CAPACITY: usize = 8_192;
|
||||
const MAX_ME_C2ME_CHANNEL_CAPACITY: usize = 8_192;
|
||||
const MIN_MAX_CLIENT_FRAME_BYTES: usize = 4 * 1024;
|
||||
const MAX_MAX_CLIENT_FRAME_BYTES: usize = 16 * 1024 * 1024;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub(crate) struct LoadedConfig {
|
||||
pub(crate) config: ProxyConfig,
|
||||
|
|
@ -22,6 +30,111 @@ pub(crate) struct LoadedConfig {
|
|||
pub(crate) rendered_hash: u64,
|
||||
}
|
||||
|
||||
/// Precomputed, immutable user authentication data used by handshake hot paths.
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub(crate) struct UserAuthSnapshot {
|
||||
entries: Vec<UserAuthEntry>,
|
||||
by_name: HashMap<String, u32>,
|
||||
sni_index: HashMap<u64, Vec<u32>>,
|
||||
sni_initial_index: HashMap<u8, Vec<u32>>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub(crate) struct UserAuthEntry {
|
||||
pub(crate) user: String,
|
||||
pub(crate) secret: [u8; ACCESS_SECRET_BYTES],
|
||||
}
|
||||
|
||||
impl UserAuthSnapshot {
|
||||
fn from_users(users: &HashMap<String, String>) -> Result<Self> {
|
||||
let mut entries = Vec::with_capacity(users.len());
|
||||
let mut by_name = HashMap::with_capacity(users.len());
|
||||
let mut sni_index = HashMap::with_capacity(users.len());
|
||||
let mut sni_initial_index = HashMap::with_capacity(users.len());
|
||||
|
||||
for (user, secret_hex) in users {
|
||||
let decoded = hex::decode(secret_hex).map_err(|_| ProxyError::InvalidSecret {
|
||||
user: user.clone(),
|
||||
reason: "Must be 32 hex characters".to_string(),
|
||||
})?;
|
||||
if decoded.len() != ACCESS_SECRET_BYTES {
|
||||
return Err(ProxyError::InvalidSecret {
|
||||
user: user.clone(),
|
||||
reason: "Must be 32 hex characters".to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
let user_id = u32::try_from(entries.len()).map_err(|_| {
|
||||
ProxyError::Config("Too many users for runtime auth snapshot".to_string())
|
||||
})?;
|
||||
|
||||
let mut secret = [0u8; ACCESS_SECRET_BYTES];
|
||||
secret.copy_from_slice(&decoded);
|
||||
entries.push(UserAuthEntry {
|
||||
user: user.clone(),
|
||||
secret,
|
||||
});
|
||||
by_name.insert(user.clone(), user_id);
|
||||
sni_index
|
||||
.entry(Self::sni_lookup_hash(user))
|
||||
.or_insert_with(Vec::new)
|
||||
.push(user_id);
|
||||
if let Some(initial) = user
|
||||
.as_bytes()
|
||||
.first()
|
||||
.map(|byte| byte.to_ascii_lowercase())
|
||||
{
|
||||
sni_initial_index
|
||||
.entry(initial)
|
||||
.or_insert_with(Vec::new)
|
||||
.push(user_id);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(Self {
|
||||
entries,
|
||||
by_name,
|
||||
sni_index,
|
||||
sni_initial_index,
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn entries(&self) -> &[UserAuthEntry] {
|
||||
&self.entries
|
||||
}
|
||||
|
||||
pub(crate) fn user_id_by_name(&self, user: &str) -> Option<u32> {
|
||||
self.by_name.get(user).copied()
|
||||
}
|
||||
|
||||
pub(crate) fn entry_by_id(&self, user_id: u32) -> Option<&UserAuthEntry> {
|
||||
let idx = usize::try_from(user_id).ok()?;
|
||||
self.entries.get(idx)
|
||||
}
|
||||
|
||||
pub(crate) fn sni_candidates(&self, sni: &str) -> Option<&[u32]> {
|
||||
self.sni_index
|
||||
.get(&Self::sni_lookup_hash(sni))
|
||||
.map(Vec::as_slice)
|
||||
}
|
||||
|
||||
pub(crate) fn sni_initial_candidates(&self, sni: &str) -> Option<&[u32]> {
|
||||
let initial = sni
|
||||
.as_bytes()
|
||||
.first()
|
||||
.map(|byte| byte.to_ascii_lowercase())?;
|
||||
self.sni_initial_index.get(&initial).map(Vec::as_slice)
|
||||
}
|
||||
|
||||
fn sni_lookup_hash(value: &str) -> u64 {
|
||||
let mut hasher = DefaultHasher::new();
|
||||
for byte in value.bytes() {
|
||||
hasher.write_u8(byte.to_ascii_lowercase());
|
||||
}
|
||||
hasher.finish()
|
||||
}
|
||||
}
|
||||
|
||||
fn normalize_config_path(path: &Path) -> PathBuf {
|
||||
path.canonicalize().unwrap_or_else(|_| {
|
||||
if path.is_absolute() {
|
||||
|
|
@ -196,6 +309,10 @@ pub struct ProxyConfig {
|
|||
/// If not set, defaults to 2 (matching Telegram's official `default 2;` in proxy-multi.conf).
|
||||
#[serde(default)]
|
||||
pub default_dc: Option<u8>,
|
||||
|
||||
/// Precomputed authentication snapshot for handshake hot paths.
|
||||
#[serde(skip)]
|
||||
pub(crate) runtime_user_auth: Option<Arc<UserAuthSnapshot>>,
|
||||
}
|
||||
|
||||
impl ProxyConfig {
|
||||
|
|
@ -346,6 +463,12 @@ impl ProxyConfig {
|
|||
));
|
||||
}
|
||||
|
||||
if config.general.tg_connect == 0 {
|
||||
return Err(ProxyError::Config(
|
||||
"general.tg_connect must be > 0".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.general.upstream_unhealthy_fail_threshold == 0 {
|
||||
return Err(ProxyError::Config(
|
||||
"general.upstream_unhealthy_fail_threshold must be > 0".to_string(),
|
||||
|
|
@ -430,6 +553,24 @@ impl ProxyConfig {
|
|||
));
|
||||
}
|
||||
|
||||
if config.censorship.mask_relay_max_bytes == 0 {
|
||||
return Err(ProxyError::Config(
|
||||
"censorship.mask_relay_max_bytes must be > 0".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.censorship.mask_relay_max_bytes > 67_108_864 {
|
||||
return Err(ProxyError::Config(
|
||||
"censorship.mask_relay_max_bytes must be <= 67108864".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if !(5..=50).contains(&config.censorship.mask_classifier_prefetch_timeout_ms) {
|
||||
return Err(ProxyError::Config(
|
||||
"censorship.mask_classifier_prefetch_timeout_ms must be within [5, 50]".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.censorship.mask_timing_normalization_ceiling_ms
|
||||
< config.censorship.mask_timing_normalization_floor_ms
|
||||
{
|
||||
|
|
@ -490,18 +631,41 @@ impl ProxyConfig {
|
|||
"general.me_writer_cmd_channel_capacity must be > 0".to_string(),
|
||||
));
|
||||
}
|
||||
if config.general.me_writer_cmd_channel_capacity > MAX_ME_WRITER_CMD_CHANNEL_CAPACITY {
|
||||
return Err(ProxyError::Config(format!(
|
||||
"general.me_writer_cmd_channel_capacity must be within [1, {MAX_ME_WRITER_CMD_CHANNEL_CAPACITY}]"
|
||||
)));
|
||||
}
|
||||
|
||||
if config.general.me_route_channel_capacity == 0 {
|
||||
return Err(ProxyError::Config(
|
||||
"general.me_route_channel_capacity must be > 0".to_string(),
|
||||
));
|
||||
}
|
||||
if config.general.me_route_channel_capacity > MAX_ME_ROUTE_CHANNEL_CAPACITY {
|
||||
return Err(ProxyError::Config(format!(
|
||||
"general.me_route_channel_capacity must be within [1, {MAX_ME_ROUTE_CHANNEL_CAPACITY}]"
|
||||
)));
|
||||
}
|
||||
|
||||
if config.general.me_c2me_channel_capacity == 0 {
|
||||
return Err(ProxyError::Config(
|
||||
"general.me_c2me_channel_capacity must be > 0".to_string(),
|
||||
));
|
||||
}
|
||||
if config.general.me_c2me_channel_capacity > MAX_ME_C2ME_CHANNEL_CAPACITY {
|
||||
return Err(ProxyError::Config(format!(
|
||||
"general.me_c2me_channel_capacity must be within [1, {MAX_ME_C2ME_CHANNEL_CAPACITY}]"
|
||||
)));
|
||||
}
|
||||
|
||||
if !(MIN_MAX_CLIENT_FRAME_BYTES..=MAX_MAX_CLIENT_FRAME_BYTES)
|
||||
.contains(&config.general.max_client_frame)
|
||||
{
|
||||
return Err(ProxyError::Config(format!(
|
||||
"general.max_client_frame must be within [{MIN_MAX_CLIENT_FRAME_BYTES}, {MAX_MAX_CLIENT_FRAME_BYTES}]"
|
||||
)));
|
||||
}
|
||||
|
||||
if config.general.me_c2me_send_timeout_ms > 60_000 {
|
||||
return Err(ProxyError::Config(
|
||||
|
|
@ -533,6 +697,21 @@ impl ProxyConfig {
|
|||
));
|
||||
}
|
||||
|
||||
if config.general.me_quota_soft_overshoot_bytes > 16 * 1024 * 1024 {
|
||||
return Err(ProxyError::Config(
|
||||
"general.me_quota_soft_overshoot_bytes must be within [0, 16777216]".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if !(4096..=16 * 1024 * 1024)
|
||||
.contains(&config.general.me_d2c_frame_buf_shrink_threshold_bytes)
|
||||
{
|
||||
return Err(ProxyError::Config(
|
||||
"general.me_d2c_frame_buf_shrink_threshold_bytes must be within [4096, 16777216]"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if !(4096..=1024 * 1024).contains(&config.general.direct_relay_copy_buf_c2s_bytes) {
|
||||
return Err(ProxyError::Config(
|
||||
"general.direct_relay_copy_buf_c2s_bytes must be within [4096, 1048576]"
|
||||
|
|
@ -883,6 +1062,43 @@ impl ProxyConfig {
|
|||
));
|
||||
}
|
||||
|
||||
if config.server.conntrack_control.pressure_high_watermark_pct == 0
|
||||
|| config.server.conntrack_control.pressure_high_watermark_pct > 100
|
||||
{
|
||||
return Err(ProxyError::Config(
|
||||
"server.conntrack_control.pressure_high_watermark_pct must be within [1, 100]"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.server.conntrack_control.pressure_low_watermark_pct
|
||||
>= config.server.conntrack_control.pressure_high_watermark_pct
|
||||
{
|
||||
return Err(ProxyError::Config(
|
||||
"server.conntrack_control.pressure_low_watermark_pct must be < pressure_high_watermark_pct"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.server.conntrack_control.delete_budget_per_sec == 0 {
|
||||
return Err(ProxyError::Config(
|
||||
"server.conntrack_control.delete_budget_per_sec must be > 0".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if matches!(config.server.conntrack_control.mode, ConntrackMode::Hybrid)
|
||||
&& config
|
||||
.server
|
||||
.conntrack_control
|
||||
.hybrid_listener_ips
|
||||
.is_empty()
|
||||
{
|
||||
return Err(ProxyError::Config(
|
||||
"server.conntrack_control.hybrid_listener_ips must be non-empty in mode=hybrid"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.general.effective_me_pool_force_close_secs() > 0
|
||||
&& config.general.effective_me_pool_force_close_secs()
|
||||
< config.general.me_pool_drain_ttl_secs
|
||||
|
|
@ -944,6 +1160,28 @@ impl ProxyConfig {
|
|||
// Normalize optional TLS fetch scope: whitespace-only values disable scoped routing.
|
||||
config.censorship.tls_fetch_scope = config.censorship.tls_fetch_scope.trim().to_string();
|
||||
|
||||
if config.censorship.tls_fetch.profiles.is_empty() {
|
||||
config.censorship.tls_fetch.profiles = TlsFetchConfig::default().profiles;
|
||||
} else {
|
||||
let mut seen = HashSet::new();
|
||||
config
|
||||
.censorship
|
||||
.tls_fetch
|
||||
.profiles
|
||||
.retain(|profile| seen.insert(*profile));
|
||||
}
|
||||
|
||||
if config.censorship.tls_fetch.attempt_timeout_ms == 0 {
|
||||
return Err(ProxyError::Config(
|
||||
"censorship.tls_fetch.attempt_timeout_ms must be > 0".to_string(),
|
||||
));
|
||||
}
|
||||
if config.censorship.tls_fetch.total_budget_ms == 0 {
|
||||
return Err(ProxyError::Config(
|
||||
"censorship.tls_fetch.total_budget_ms must be > 0".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
// Merge primary + extra TLS domains, deduplicate (primary always first).
|
||||
if !config.censorship.tls_domains.is_empty() {
|
||||
let mut all = Vec::with_capacity(1 + config.censorship.tls_domains.len());
|
||||
|
|
@ -1066,6 +1304,7 @@ impl ProxyConfig {
|
|||
.or_insert_with(|| vec!["91.105.192.100:443".to_string()]);
|
||||
|
||||
validate_upstreams(&config)?;
|
||||
config.rebuild_runtime_user_auth()?;
|
||||
|
||||
Ok(LoadedConfig {
|
||||
config,
|
||||
|
|
@ -1074,6 +1313,16 @@ impl ProxyConfig {
|
|||
})
|
||||
}
|
||||
|
||||
pub(crate) fn rebuild_runtime_user_auth(&mut self) -> Result<()> {
|
||||
let snapshot = UserAuthSnapshot::from_users(&self.access.users)?;
|
||||
self.runtime_user_auth = Some(Arc::new(snapshot));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub(crate) fn runtime_user_auth(&self) -> Option<&UserAuthSnapshot> {
|
||||
self.runtime_user_auth.as_deref()
|
||||
}
|
||||
|
||||
pub fn validate(&self) -> Result<()> {
|
||||
if self.access.users.is_empty() {
|
||||
return Err(ProxyError::Config("No users configured".to_string()));
|
||||
|
|
@ -1121,6 +1370,14 @@ mod load_security_tests;
|
|||
#[path = "tests/load_mask_shape_security_tests.rs"]
|
||||
mod load_mask_shape_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/load_mask_classifier_prefetch_timeout_security_tests.rs"]
|
||||
mod load_mask_classifier_prefetch_timeout_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/load_memory_envelope_tests.rs"]
|
||||
mod load_memory_envelope_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
|
@ -1158,6 +1415,7 @@ mod tests {
|
|||
default_me_init_retry_attempts()
|
||||
);
|
||||
assert_eq!(cfg.general.me2dc_fallback, default_me2dc_fallback());
|
||||
assert_eq!(cfg.general.me2dc_fast, default_me2dc_fast());
|
||||
assert_eq!(
|
||||
cfg.general.proxy_config_v4_cache_path,
|
||||
default_proxy_config_v4_cache_path()
|
||||
|
|
@ -1226,6 +1484,11 @@ mod tests {
|
|||
assert_eq!(cfg.general.update_every, default_update_every());
|
||||
assert_eq!(cfg.server.listen_addr_ipv4, default_listen_addr_ipv4());
|
||||
assert_eq!(cfg.server.listen_addr_ipv6, default_listen_addr_ipv6_opt());
|
||||
assert_eq!(
|
||||
cfg.server.proxy_protocol_trusted_cidrs,
|
||||
default_proxy_protocol_trusted_cidrs()
|
||||
);
|
||||
assert_eq!(cfg.censorship.unknown_sni_action, UnknownSniAction::Drop);
|
||||
assert_eq!(cfg.server.api.listen, default_api_listen());
|
||||
assert_eq!(cfg.server.api.whitelist, default_api_whitelist());
|
||||
assert_eq!(
|
||||
|
|
@ -1256,7 +1519,36 @@ mod tests {
|
|||
cfg.server.api.runtime_edge_events_capacity,
|
||||
default_api_runtime_edge_events_capacity()
|
||||
);
|
||||
assert_eq!(
|
||||
cfg.server.conntrack_control.inline_conntrack_control,
|
||||
default_conntrack_control_enabled()
|
||||
);
|
||||
assert_eq!(cfg.server.conntrack_control.mode, ConntrackMode::default());
|
||||
assert_eq!(
|
||||
cfg.server.conntrack_control.backend,
|
||||
ConntrackBackend::default()
|
||||
);
|
||||
assert_eq!(
|
||||
cfg.server.conntrack_control.profile,
|
||||
ConntrackPressureProfile::default()
|
||||
);
|
||||
assert_eq!(
|
||||
cfg.server.conntrack_control.pressure_high_watermark_pct,
|
||||
default_conntrack_pressure_high_watermark_pct()
|
||||
);
|
||||
assert_eq!(
|
||||
cfg.server.conntrack_control.pressure_low_watermark_pct,
|
||||
default_conntrack_pressure_low_watermark_pct()
|
||||
);
|
||||
assert_eq!(
|
||||
cfg.server.conntrack_control.delete_budget_per_sec,
|
||||
default_conntrack_delete_budget_per_sec()
|
||||
);
|
||||
assert_eq!(cfg.access.users, default_access_users());
|
||||
assert_eq!(
|
||||
cfg.access.user_max_tcp_conns_global_each,
|
||||
default_user_max_tcp_conns_global_each()
|
||||
);
|
||||
assert_eq!(
|
||||
cfg.access.user_max_unique_ips_mode,
|
||||
UserMaxUniqueIpsMode::default()
|
||||
|
|
@ -1292,6 +1584,7 @@ mod tests {
|
|||
default_me_init_retry_attempts()
|
||||
);
|
||||
assert_eq!(general.me2dc_fallback, default_me2dc_fallback());
|
||||
assert_eq!(general.me2dc_fast, default_me2dc_fast());
|
||||
assert_eq!(
|
||||
general.proxy_config_v4_cache_path,
|
||||
default_proxy_config_v4_cache_path()
|
||||
|
|
@ -1358,6 +1651,14 @@ mod tests {
|
|||
|
||||
let server = ServerConfig::default();
|
||||
assert_eq!(server.listen_addr_ipv6, Some(default_listen_addr_ipv6()));
|
||||
assert_eq!(
|
||||
server.proxy_protocol_trusted_cidrs,
|
||||
default_proxy_protocol_trusted_cidrs()
|
||||
);
|
||||
assert_eq!(
|
||||
AntiCensorshipConfig::default().unknown_sni_action,
|
||||
UnknownSniAction::Drop
|
||||
);
|
||||
assert_eq!(server.api.listen, default_api_listen());
|
||||
assert_eq!(server.api.whitelist, default_api_whitelist());
|
||||
assert_eq!(
|
||||
|
|
@ -1388,9 +1689,123 @@ mod tests {
|
|||
server.api.runtime_edge_events_capacity,
|
||||
default_api_runtime_edge_events_capacity()
|
||||
);
|
||||
assert_eq!(
|
||||
server.conntrack_control.inline_conntrack_control,
|
||||
default_conntrack_control_enabled()
|
||||
);
|
||||
assert_eq!(server.conntrack_control.mode, ConntrackMode::default());
|
||||
assert_eq!(
|
||||
server.conntrack_control.backend,
|
||||
ConntrackBackend::default()
|
||||
);
|
||||
assert_eq!(
|
||||
server.conntrack_control.profile,
|
||||
ConntrackPressureProfile::default()
|
||||
);
|
||||
assert_eq!(
|
||||
server.conntrack_control.pressure_high_watermark_pct,
|
||||
default_conntrack_pressure_high_watermark_pct()
|
||||
);
|
||||
assert_eq!(
|
||||
server.conntrack_control.pressure_low_watermark_pct,
|
||||
default_conntrack_pressure_low_watermark_pct()
|
||||
);
|
||||
assert_eq!(
|
||||
server.conntrack_control.delete_budget_per_sec,
|
||||
default_conntrack_delete_budget_per_sec()
|
||||
);
|
||||
|
||||
let access = AccessConfig::default();
|
||||
assert_eq!(access.users, default_access_users());
|
||||
assert_eq!(
|
||||
access.user_max_tcp_conns_global_each,
|
||||
default_user_max_tcp_conns_global_each()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn proxy_protocol_trusted_cidrs_missing_uses_trust_all_but_explicit_empty_stays_empty() {
|
||||
let cfg_missing: ProxyConfig = toml::from_str(
|
||||
r#"
|
||||
[server]
|
||||
[general]
|
||||
[network]
|
||||
[access]
|
||||
"#,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
cfg_missing.server.proxy_protocol_trusted_cidrs,
|
||||
default_proxy_protocol_trusted_cidrs()
|
||||
);
|
||||
|
||||
let cfg_explicit_empty: ProxyConfig = toml::from_str(
|
||||
r#"
|
||||
[server]
|
||||
proxy_protocol_trusted_cidrs = []
|
||||
|
||||
[general]
|
||||
[network]
|
||||
[access]
|
||||
"#,
|
||||
)
|
||||
.unwrap();
|
||||
assert!(
|
||||
cfg_explicit_empty
|
||||
.server
|
||||
.proxy_protocol_trusted_cidrs
|
||||
.is_empty()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn unknown_sni_action_parses_and_defaults_to_drop() {
|
||||
let cfg_default: ProxyConfig = toml::from_str(
|
||||
r#"
|
||||
[server]
|
||||
[general]
|
||||
[network]
|
||||
[access]
|
||||
[censorship]
|
||||
"#,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
cfg_default.censorship.unknown_sni_action,
|
||||
UnknownSniAction::Drop
|
||||
);
|
||||
|
||||
let cfg_mask: ProxyConfig = toml::from_str(
|
||||
r#"
|
||||
[server]
|
||||
[general]
|
||||
[network]
|
||||
[access]
|
||||
[censorship]
|
||||
unknown_sni_action = "mask"
|
||||
"#,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
cfg_mask.censorship.unknown_sni_action,
|
||||
UnknownSniAction::Mask
|
||||
);
|
||||
|
||||
let cfg_accept: ProxyConfig = toml::from_str(
|
||||
r#"
|
||||
[server]
|
||||
[general]
|
||||
[network]
|
||||
[access]
|
||||
[censorship]
|
||||
unknown_sni_action = "accept"
|
||||
"#,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
cfg_accept.censorship.unknown_sni_action,
|
||||
UnknownSniAction::Accept
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
|
@ -1764,6 +2179,26 @@ mod tests {
|
|||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tg_connect_zero_is_rejected() {
|
||||
let toml = r#"
|
||||
[general]
|
||||
tg_connect = 0
|
||||
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_tg_connect_zero_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let err = ProxyConfig::load(&path).unwrap_err().to_string();
|
||||
assert!(err.contains("general.tg_connect must be > 0"));
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rpc_proxy_req_every_out_of_range_is_rejected() {
|
||||
let toml = r#"
|
||||
|
|
@ -2227,6 +2662,118 @@ mod tests {
|
|||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn conntrack_pressure_high_watermark_out_of_range_is_rejected() {
|
||||
let toml = r#"
|
||||
[server.conntrack_control]
|
||||
pressure_high_watermark_pct = 0
|
||||
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_conntrack_high_watermark_invalid_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let err = ProxyConfig::load(&path).unwrap_err().to_string();
|
||||
assert!(err.contains(
|
||||
"server.conntrack_control.pressure_high_watermark_pct must be within [1, 100]"
|
||||
));
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn conntrack_pressure_low_watermark_must_be_below_high() {
|
||||
let toml = r#"
|
||||
[server.conntrack_control]
|
||||
pressure_high_watermark_pct = 50
|
||||
pressure_low_watermark_pct = 50
|
||||
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_conntrack_low_watermark_invalid_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let err = ProxyConfig::load(&path).unwrap_err().to_string();
|
||||
assert!(
|
||||
err.contains(
|
||||
"server.conntrack_control.pressure_low_watermark_pct must be < pressure_high_watermark_pct"
|
||||
)
|
||||
);
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn conntrack_delete_budget_zero_is_rejected() {
|
||||
let toml = r#"
|
||||
[server.conntrack_control]
|
||||
delete_budget_per_sec = 0
|
||||
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_conntrack_delete_budget_invalid_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let err = ProxyConfig::load(&path).unwrap_err().to_string();
|
||||
assert!(err.contains("server.conntrack_control.delete_budget_per_sec must be > 0"));
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn conntrack_hybrid_mode_requires_listener_allow_list() {
|
||||
let toml = r#"
|
||||
[server.conntrack_control]
|
||||
mode = "hybrid"
|
||||
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_conntrack_hybrid_requires_ips_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let err = ProxyConfig::load(&path).unwrap_err().to_string();
|
||||
assert!(err.contains(
|
||||
"server.conntrack_control.hybrid_listener_ips must be non-empty in mode=hybrid"
|
||||
));
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn conntrack_profile_is_loaded_from_config() {
|
||||
let toml = r#"
|
||||
[server.conntrack_control]
|
||||
profile = "aggressive"
|
||||
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_conntrack_profile_parse_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let cfg = ProxyConfig::load(&path).unwrap();
|
||||
assert_eq!(
|
||||
cfg.server.conntrack_control.profile,
|
||||
ConntrackPressureProfile::Aggressive
|
||||
);
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn force_close_default_matches_drain_ttl() {
|
||||
let toml = r#"
|
||||
|
|
@ -2340,6 +2887,94 @@ mod tests {
|
|||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tls_fetch_defaults_are_applied() {
|
||||
let toml = r#"
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_tls_fetch_defaults_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let cfg = ProxyConfig::load(&path).unwrap();
|
||||
assert_eq!(
|
||||
cfg.censorship.tls_fetch.profiles,
|
||||
TlsFetchConfig::default().profiles
|
||||
);
|
||||
assert!(cfg.censorship.tls_fetch.strict_route);
|
||||
assert_eq!(cfg.censorship.tls_fetch.attempt_timeout_ms, 5_000);
|
||||
assert_eq!(cfg.censorship.tls_fetch.total_budget_ms, 15_000);
|
||||
assert_eq!(cfg.censorship.tls_fetch.profile_cache_ttl_secs, 600);
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tls_fetch_profiles_are_deduplicated_preserving_order() {
|
||||
let toml = r#"
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
[censorship.tls_fetch]
|
||||
profiles = ["compat_tls12", "modern_chrome_like", "compat_tls12", "legacy_minimal"]
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_tls_fetch_profiles_dedup_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let cfg = ProxyConfig::load(&path).unwrap();
|
||||
assert_eq!(
|
||||
cfg.censorship.tls_fetch.profiles,
|
||||
vec![
|
||||
TlsFetchProfile::CompatTls12,
|
||||
TlsFetchProfile::ModernChromeLike,
|
||||
TlsFetchProfile::LegacyMinimal
|
||||
]
|
||||
);
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tls_fetch_attempt_timeout_zero_is_rejected() {
|
||||
let toml = r#"
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
[censorship.tls_fetch]
|
||||
attempt_timeout_ms = 0
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_tls_fetch_attempt_timeout_zero_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let err = ProxyConfig::load(&path).unwrap_err().to_string();
|
||||
assert!(err.contains("censorship.tls_fetch.attempt_timeout_ms must be > 0"));
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tls_fetch_total_budget_zero_is_rejected() {
|
||||
let toml = r#"
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
[censorship.tls_fetch]
|
||||
total_budget_ms = 0
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_tls_fetch_total_budget_zero_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let err = ProxyConfig::load(&path).unwrap_err().to_string();
|
||||
assert!(err.contains("censorship.tls_fetch.total_budget_ms must be > 0"));
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_ad_tag_is_disabled_during_load() {
|
||||
let toml = r#"
|
||||
|
|
|
|||
|
|
@ -17,6 +17,28 @@ fn remove_temp_config(path: &PathBuf) {
|
|||
let _ = fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn default_timeouts_enable_apple_compatible_handshake_profile() {
|
||||
let cfg = ProxyConfig::default();
|
||||
assert_eq!(cfg.timeouts.client_first_byte_idle_secs, 300);
|
||||
assert_eq!(cfg.timeouts.client_handshake, 60);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_accepts_zero_first_byte_idle_timeout_as_legacy_opt_out() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[timeouts]
|
||||
client_first_byte_idle_secs = 0
|
||||
"#,
|
||||
);
|
||||
|
||||
let cfg = ProxyConfig::load(&path).expect("config with zero first-byte idle timeout must load");
|
||||
assert_eq!(cfg.timeouts.client_first_byte_idle_secs, 0);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_relay_hard_idle_smaller_than_soft_idle_with_clear_error() {
|
||||
let path = write_temp_config(
|
||||
|
|
|
|||
|
|
@ -0,0 +1,76 @@
|
|||
use super::*;
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
fn write_temp_config(contents: &str) -> PathBuf {
|
||||
let nonce = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("system time must be after unix epoch")
|
||||
.as_nanos();
|
||||
let path = std::env::temp_dir().join(format!(
|
||||
"telemt-load-mask-prefetch-timeout-security-{nonce}.toml"
|
||||
));
|
||||
fs::write(&path, contents).expect("temp config write must succeed");
|
||||
path
|
||||
}
|
||||
|
||||
fn remove_temp_config(path: &PathBuf) {
|
||||
let _ = fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_mask_classifier_prefetch_timeout_below_min_bound() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_classifier_prefetch_timeout_ms = 4
|
||||
"#,
|
||||
);
|
||||
|
||||
let err = ProxyConfig::load(&path)
|
||||
.expect_err("prefetch timeout below minimum security bound must be rejected");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("censorship.mask_classifier_prefetch_timeout_ms must be within [5, 50]"),
|
||||
"error must explain timeout bound invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_mask_classifier_prefetch_timeout_above_max_bound() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_classifier_prefetch_timeout_ms = 51
|
||||
"#,
|
||||
);
|
||||
|
||||
let err = ProxyConfig::load(&path)
|
||||
.expect_err("prefetch timeout above max security bound must be rejected");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("censorship.mask_classifier_prefetch_timeout_ms must be within [5, 50]"),
|
||||
"error must explain timeout bound invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_accepts_mask_classifier_prefetch_timeout_within_bounds() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_classifier_prefetch_timeout_ms = 20
|
||||
"#,
|
||||
);
|
||||
|
||||
let cfg =
|
||||
ProxyConfig::load(&path).expect("prefetch timeout within security bounds must be accepted");
|
||||
assert_eq!(cfg.censorship.mask_classifier_prefetch_timeout_ms, 20);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
|
@ -236,3 +236,57 @@ mask_shape_above_cap_blur_max_bytes = 8
|
|||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_zero_mask_relay_max_bytes() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_relay_max_bytes = 0
|
||||
"#,
|
||||
);
|
||||
|
||||
let err = ProxyConfig::load(&path).expect_err("mask_relay_max_bytes must be > 0");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("censorship.mask_relay_max_bytes must be > 0"),
|
||||
"error must explain non-zero relay cap invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_mask_relay_max_bytes_above_upper_bound() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_relay_max_bytes = 67108865
|
||||
"#,
|
||||
);
|
||||
|
||||
let err =
|
||||
ProxyConfig::load(&path).expect_err("mask_relay_max_bytes above hard cap must be rejected");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("censorship.mask_relay_max_bytes must be <= 67108864"),
|
||||
"error must explain relay cap upper bound invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_accepts_valid_mask_relay_max_bytes() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_relay_max_bytes = 8388608
|
||||
"#,
|
||||
);
|
||||
|
||||
let cfg = ProxyConfig::load(&path).expect("valid mask_relay_max_bytes must be accepted");
|
||||
assert_eq!(cfg.censorship.mask_relay_max_bytes, 8_388_608);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,117 @@
|
|||
use super::*;
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
fn write_temp_config(contents: &str) -> PathBuf {
|
||||
let nonce = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("system time must be after unix epoch")
|
||||
.as_nanos();
|
||||
let path = std::env::temp_dir().join(format!("telemt-load-memory-envelope-{nonce}.toml"));
|
||||
fs::write(&path, contents).expect("temp config write must succeed");
|
||||
path
|
||||
}
|
||||
|
||||
fn remove_temp_config(path: &PathBuf) {
|
||||
let _ = fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_writer_cmd_capacity_above_upper_bound() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[general]
|
||||
me_writer_cmd_channel_capacity = 16385
|
||||
"#,
|
||||
);
|
||||
|
||||
let err =
|
||||
ProxyConfig::load(&path).expect_err("writer command capacity above hard cap must fail");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("general.me_writer_cmd_channel_capacity must be within [1, 16384]"),
|
||||
"error must explain writer command capacity hard cap, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_route_channel_capacity_above_upper_bound() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[general]
|
||||
me_route_channel_capacity = 8193
|
||||
"#,
|
||||
);
|
||||
|
||||
let err =
|
||||
ProxyConfig::load(&path).expect_err("route channel capacity above hard cap must fail");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("general.me_route_channel_capacity must be within [1, 8192]"),
|
||||
"error must explain route channel hard cap, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_c2me_channel_capacity_above_upper_bound() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[general]
|
||||
me_c2me_channel_capacity = 8193
|
||||
"#,
|
||||
);
|
||||
|
||||
let err = ProxyConfig::load(&path).expect_err("c2me channel capacity above hard cap must fail");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("general.me_c2me_channel_capacity must be within [1, 8192]"),
|
||||
"error must explain c2me channel hard cap, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_max_client_frame_above_upper_bound() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[general]
|
||||
max_client_frame = 16777217
|
||||
"#,
|
||||
);
|
||||
|
||||
let err = ProxyConfig::load(&path).expect_err("max_client_frame above hard cap must fail");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("general.max_client_frame must be within [4096, 16777216]"),
|
||||
"error must explain max_client_frame hard cap, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_accepts_memory_limits_at_hard_upper_bounds() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[general]
|
||||
me_writer_cmd_channel_capacity = 16384
|
||||
me_route_channel_capacity = 8192
|
||||
me_c2me_channel_capacity = 8192
|
||||
max_client_frame = 16777216
|
||||
"#,
|
||||
);
|
||||
|
||||
let cfg = ProxyConfig::load(&path).expect("hard upper bound values must be accepted");
|
||||
assert_eq!(cfg.general.me_writer_cmd_channel_capacity, 16384);
|
||||
assert_eq!(cfg.general.me_route_channel_capacity, 8192);
|
||||
assert_eq!(cfg.general.me_c2me_channel_capacity, 8192);
|
||||
assert_eq!(cfg.general.max_client_frame, 16 * 1024 * 1024);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
|
@ -429,6 +429,11 @@ pub struct GeneralConfig {
|
|||
#[serde(default = "default_me2dc_fallback")]
|
||||
pub me2dc_fallback: bool,
|
||||
|
||||
/// Fast ME->Direct fallback mode for new sessions.
|
||||
/// Active only when both `use_middle_proxy=true` and `me2dc_fallback=true`.
|
||||
#[serde(default = "default_me2dc_fast")]
|
||||
pub me2dc_fast: bool,
|
||||
|
||||
/// Enable ME keepalive padding frames.
|
||||
#[serde(default = "default_true")]
|
||||
pub me_keepalive_enabled: bool,
|
||||
|
|
@ -468,7 +473,7 @@ pub struct GeneralConfig {
|
|||
pub me_c2me_send_timeout_ms: u64,
|
||||
|
||||
/// Bounded wait in milliseconds for routing ME DATA to per-connection queue.
|
||||
/// `0` keeps legacy no-wait behavior.
|
||||
/// `0` keeps non-blocking routing; values >0 enable bounded wait for compatibility.
|
||||
#[serde(default = "default_me_reader_route_data_wait_ms")]
|
||||
pub me_reader_route_data_wait_ms: u64,
|
||||
|
||||
|
|
@ -489,6 +494,14 @@ pub struct GeneralConfig {
|
|||
#[serde(default = "default_me_d2c_ack_flush_immediate")]
|
||||
pub me_d2c_ack_flush_immediate: bool,
|
||||
|
||||
/// Additional bytes above strict per-user quota allowed in hot-path soft mode.
|
||||
#[serde(default = "default_me_quota_soft_overshoot_bytes")]
|
||||
pub me_quota_soft_overshoot_bytes: u64,
|
||||
|
||||
/// Shrink threshold for reusable ME->Client frame assembly buffer.
|
||||
#[serde(default = "default_me_d2c_frame_buf_shrink_threshold_bytes")]
|
||||
pub me_d2c_frame_buf_shrink_threshold_bytes: usize,
|
||||
|
||||
/// Copy buffer size for client->DC direction in direct relay.
|
||||
#[serde(default = "default_direct_relay_copy_buf_c2s_bytes")]
|
||||
pub direct_relay_copy_buf_c2s_bytes: usize,
|
||||
|
|
@ -650,6 +663,10 @@ pub struct GeneralConfig {
|
|||
#[serde(default = "default_upstream_connect_budget_ms")]
|
||||
pub upstream_connect_budget_ms: u64,
|
||||
|
||||
/// Per-attempt TCP connect timeout to Telegram DC (seconds).
|
||||
#[serde(default = "default_connect_timeout")]
|
||||
pub tg_connect: u64,
|
||||
|
||||
/// Consecutive failed requests before upstream is marked unhealthy.
|
||||
#[serde(default = "default_upstream_unhealthy_fail_threshold")]
|
||||
pub upstream_unhealthy_fail_threshold: u32,
|
||||
|
|
@ -931,6 +948,7 @@ impl Default for GeneralConfig {
|
|||
middle_proxy_warm_standby: default_middle_proxy_warm_standby(),
|
||||
me_init_retry_attempts: default_me_init_retry_attempts(),
|
||||
me2dc_fallback: default_me2dc_fallback(),
|
||||
me2dc_fast: default_me2dc_fast(),
|
||||
me_keepalive_enabled: default_true(),
|
||||
me_keepalive_interval_secs: default_keepalive_interval(),
|
||||
me_keepalive_jitter_secs: default_keepalive_jitter(),
|
||||
|
|
@ -945,6 +963,9 @@ impl Default for GeneralConfig {
|
|||
me_d2c_flush_batch_max_bytes: default_me_d2c_flush_batch_max_bytes(),
|
||||
me_d2c_flush_batch_max_delay_us: default_me_d2c_flush_batch_max_delay_us(),
|
||||
me_d2c_ack_flush_immediate: default_me_d2c_ack_flush_immediate(),
|
||||
me_quota_soft_overshoot_bytes: default_me_quota_soft_overshoot_bytes(),
|
||||
me_d2c_frame_buf_shrink_threshold_bytes:
|
||||
default_me_d2c_frame_buf_shrink_threshold_bytes(),
|
||||
direct_relay_copy_buf_c2s_bytes: default_direct_relay_copy_buf_c2s_bytes(),
|
||||
direct_relay_copy_buf_s2c_bytes: default_direct_relay_copy_buf_s2c_bytes(),
|
||||
me_warmup_stagger_enabled: default_true(),
|
||||
|
|
@ -990,6 +1011,7 @@ impl Default for GeneralConfig {
|
|||
upstream_connect_retry_attempts: default_upstream_connect_retry_attempts(),
|
||||
upstream_connect_retry_backoff_ms: default_upstream_connect_retry_backoff_ms(),
|
||||
upstream_connect_budget_ms: default_upstream_connect_budget_ms(),
|
||||
tg_connect: default_connect_timeout(),
|
||||
upstream_unhealthy_fail_threshold: default_upstream_unhealthy_fail_threshold(),
|
||||
upstream_connect_failfast_hard_errors: default_upstream_connect_failfast_hard_errors(),
|
||||
stun_iface_mismatch_ignore: false,
|
||||
|
|
@ -1194,6 +1216,118 @@ impl Default for ApiConfig {
|
|||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum ConntrackMode {
|
||||
#[default]
|
||||
Tracked,
|
||||
Notrack,
|
||||
Hybrid,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum ConntrackBackend {
|
||||
#[default]
|
||||
Auto,
|
||||
Nftables,
|
||||
Iptables,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum ConntrackPressureProfile {
|
||||
Conservative,
|
||||
#[default]
|
||||
Balanced,
|
||||
Aggressive,
|
||||
}
|
||||
|
||||
impl ConntrackPressureProfile {
|
||||
pub fn client_first_byte_idle_cap_secs(self) -> u64 {
|
||||
match self {
|
||||
Self::Conservative => 30,
|
||||
Self::Balanced => 20,
|
||||
Self::Aggressive => 10,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn direct_activity_timeout_secs(self) -> u64 {
|
||||
match self {
|
||||
Self::Conservative => 180,
|
||||
Self::Balanced => 120,
|
||||
Self::Aggressive => 60,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn middle_soft_idle_cap_secs(self) -> u64 {
|
||||
match self {
|
||||
Self::Conservative => 60,
|
||||
Self::Balanced => 30,
|
||||
Self::Aggressive => 20,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn middle_hard_idle_cap_secs(self) -> u64 {
|
||||
match self {
|
||||
Self::Conservative => 180,
|
||||
Self::Balanced => 90,
|
||||
Self::Aggressive => 60,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ConntrackControlConfig {
|
||||
/// Enables runtime conntrack-control worker for pressure mitigation.
|
||||
#[serde(default = "default_conntrack_control_enabled")]
|
||||
pub inline_conntrack_control: bool,
|
||||
|
||||
/// Conntrack mode for listener ingress traffic.
|
||||
#[serde(default)]
|
||||
pub mode: ConntrackMode,
|
||||
|
||||
/// Netfilter backend used to reconcile notrack rules.
|
||||
#[serde(default)]
|
||||
pub backend: ConntrackBackend,
|
||||
|
||||
/// Pressure profile for timeout caps under resource saturation.
|
||||
#[serde(default)]
|
||||
pub profile: ConntrackPressureProfile,
|
||||
|
||||
/// Listener IP allow-list for hybrid mode.
|
||||
/// Ignored in tracked/notrack mode.
|
||||
#[serde(default)]
|
||||
pub hybrid_listener_ips: Vec<IpAddr>,
|
||||
|
||||
/// Pressure high watermark as percentage.
|
||||
#[serde(default = "default_conntrack_pressure_high_watermark_pct")]
|
||||
pub pressure_high_watermark_pct: u8,
|
||||
|
||||
/// Pressure low watermark as percentage.
|
||||
#[serde(default = "default_conntrack_pressure_low_watermark_pct")]
|
||||
pub pressure_low_watermark_pct: u8,
|
||||
|
||||
/// Maximum conntrack delete operations per second.
|
||||
#[serde(default = "default_conntrack_delete_budget_per_sec")]
|
||||
pub delete_budget_per_sec: u64,
|
||||
}
|
||||
|
||||
impl Default for ConntrackControlConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
inline_conntrack_control: default_conntrack_control_enabled(),
|
||||
mode: ConntrackMode::default(),
|
||||
backend: ConntrackBackend::default(),
|
||||
profile: ConntrackPressureProfile::default(),
|
||||
hybrid_listener_ips: Vec::new(),
|
||||
pressure_high_watermark_pct: default_conntrack_pressure_high_watermark_pct(),
|
||||
pressure_low_watermark_pct: default_conntrack_pressure_low_watermark_pct(),
|
||||
delete_budget_per_sec: default_conntrack_delete_budget_per_sec(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ServerConfig {
|
||||
#[serde(default = "default_port")]
|
||||
|
|
@ -1229,9 +1363,10 @@ pub struct ServerConfig {
|
|||
|
||||
/// Trusted source CIDRs allowed to send incoming PROXY protocol headers.
|
||||
///
|
||||
/// When non-empty, connections from addresses outside this allowlist are
|
||||
/// rejected before `src_addr` is applied.
|
||||
#[serde(default)]
|
||||
/// If this field is omitted in config, it defaults to trust-all CIDRs
|
||||
/// (`0.0.0.0/0` and `::/0`). If it is explicitly set to an empty list,
|
||||
/// all PROXY protocol headers are rejected.
|
||||
#[serde(default = "default_proxy_protocol_trusted_cidrs")]
|
||||
pub proxy_protocol_trusted_cidrs: Vec<IpNetwork>,
|
||||
|
||||
/// Port for the Prometheus-compatible metrics endpoint.
|
||||
|
|
@ -1254,6 +1389,11 @@ pub struct ServerConfig {
|
|||
#[serde(default)]
|
||||
pub listeners: Vec<ListenerConfig>,
|
||||
|
||||
/// TCP `listen(2)` backlog for client-facing sockets (also used for the metrics HTTP listener).
|
||||
/// The effective queue is capped by the kernel (for example `somaxconn` on Linux).
|
||||
#[serde(default = "default_listen_backlog")]
|
||||
pub listen_backlog: u32,
|
||||
|
||||
/// Maximum number of concurrent client connections.
|
||||
/// 0 means unlimited.
|
||||
#[serde(default = "default_server_max_connections")]
|
||||
|
|
@ -1263,6 +1403,10 @@ pub struct ServerConfig {
|
|||
/// `0` keeps legacy unbounded wait behavior.
|
||||
#[serde(default = "default_accept_permit_timeout_ms")]
|
||||
pub accept_permit_timeout_ms: u64,
|
||||
|
||||
/// Runtime conntrack control and pressure policy.
|
||||
#[serde(default)]
|
||||
pub conntrack_control: ConntrackControlConfig,
|
||||
}
|
||||
|
||||
impl Default for ServerConfig {
|
||||
|
|
@ -1276,20 +1420,28 @@ impl Default for ServerConfig {
|
|||
listen_tcp: None,
|
||||
proxy_protocol: false,
|
||||
proxy_protocol_header_timeout_ms: default_proxy_protocol_header_timeout_ms(),
|
||||
proxy_protocol_trusted_cidrs: Vec::new(),
|
||||
proxy_protocol_trusted_cidrs: default_proxy_protocol_trusted_cidrs(),
|
||||
metrics_port: None,
|
||||
metrics_listen: None,
|
||||
metrics_whitelist: default_metrics_whitelist(),
|
||||
api: ApiConfig::default(),
|
||||
listeners: Vec::new(),
|
||||
listen_backlog: default_listen_backlog(),
|
||||
max_connections: default_server_max_connections(),
|
||||
accept_permit_timeout_ms: default_accept_permit_timeout_ms(),
|
||||
conntrack_control: ConntrackControlConfig::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct TimeoutsConfig {
|
||||
/// Maximum idle wait in seconds for the first client byte before handshake parsing starts.
|
||||
/// `0` disables the separate idle phase and keeps legacy timeout behavior.
|
||||
#[serde(default = "default_client_first_byte_idle_secs")]
|
||||
pub client_first_byte_idle_secs: u64,
|
||||
|
||||
/// Maximum active handshake duration in seconds after the first client byte is received.
|
||||
#[serde(default = "default_handshake_timeout")]
|
||||
pub client_handshake: u64,
|
||||
|
||||
|
|
@ -1311,9 +1463,6 @@ pub struct TimeoutsConfig {
|
|||
#[serde(default = "default_relay_idle_grace_after_downstream_activity_secs")]
|
||||
pub relay_idle_grace_after_downstream_activity_secs: u64,
|
||||
|
||||
#[serde(default = "default_connect_timeout")]
|
||||
pub tg_connect: u64,
|
||||
|
||||
#[serde(default = "default_keepalive")]
|
||||
pub client_keepalive: u64,
|
||||
|
||||
|
|
@ -1332,13 +1481,13 @@ pub struct TimeoutsConfig {
|
|||
impl Default for TimeoutsConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
client_first_byte_idle_secs: default_client_first_byte_idle_secs(),
|
||||
client_handshake: default_handshake_timeout(),
|
||||
relay_idle_policy_v2_enabled: default_relay_idle_policy_v2_enabled(),
|
||||
relay_client_idle_soft_secs: default_relay_client_idle_soft_secs(),
|
||||
relay_client_idle_hard_secs: default_relay_client_idle_hard_secs(),
|
||||
relay_idle_grace_after_downstream_activity_secs:
|
||||
default_relay_idle_grace_after_downstream_activity_secs(),
|
||||
tg_connect: default_connect_timeout(),
|
||||
client_keepalive: default_keepalive(),
|
||||
client_ack: default_ack_timeout(),
|
||||
me_one_retry: default_me_one_retry(),
|
||||
|
|
@ -1347,6 +1496,91 @@ impl Default for TimeoutsConfig {
|
|||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum UnknownSniAction {
|
||||
#[default]
|
||||
Drop,
|
||||
Mask,
|
||||
Accept,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "snake_case")]
|
||||
pub enum TlsFetchProfile {
|
||||
ModernChromeLike,
|
||||
ModernFirefoxLike,
|
||||
CompatTls12,
|
||||
LegacyMinimal,
|
||||
}
|
||||
|
||||
impl TlsFetchProfile {
|
||||
pub fn as_str(self) -> &'static str {
|
||||
match self {
|
||||
TlsFetchProfile::ModernChromeLike => "modern_chrome_like",
|
||||
TlsFetchProfile::ModernFirefoxLike => "modern_firefox_like",
|
||||
TlsFetchProfile::CompatTls12 => "compat_tls12",
|
||||
TlsFetchProfile::LegacyMinimal => "legacy_minimal",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn default_tls_fetch_profiles() -> Vec<TlsFetchProfile> {
|
||||
vec![
|
||||
TlsFetchProfile::ModernChromeLike,
|
||||
TlsFetchProfile::ModernFirefoxLike,
|
||||
TlsFetchProfile::CompatTls12,
|
||||
TlsFetchProfile::LegacyMinimal,
|
||||
]
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct TlsFetchConfig {
|
||||
/// Ordered list of ClientHello profiles used for adaptive fallback.
|
||||
#[serde(default = "default_tls_fetch_profiles")]
|
||||
pub profiles: Vec<TlsFetchProfile>,
|
||||
|
||||
/// When true and upstream route is configured, TLS fetch fails closed on
|
||||
/// upstream connect errors and does not fallback to direct TCP.
|
||||
#[serde(default = "default_tls_fetch_strict_route")]
|
||||
pub strict_route: bool,
|
||||
|
||||
/// Timeout per one profile attempt in milliseconds.
|
||||
#[serde(default = "default_tls_fetch_attempt_timeout_ms")]
|
||||
pub attempt_timeout_ms: u64,
|
||||
|
||||
/// Total wall-clock budget in milliseconds across all profile attempts.
|
||||
#[serde(default = "default_tls_fetch_total_budget_ms")]
|
||||
pub total_budget_ms: u64,
|
||||
|
||||
/// Adds GREASE-style values into selected ClientHello extensions.
|
||||
#[serde(default)]
|
||||
pub grease_enabled: bool,
|
||||
|
||||
/// Produces deterministic ClientHello randomness for debugging/tests.
|
||||
#[serde(default)]
|
||||
pub deterministic: bool,
|
||||
|
||||
/// TTL for winner-profile cache entries in seconds.
|
||||
/// Set to 0 to disable profile cache.
|
||||
#[serde(default = "default_tls_fetch_profile_cache_ttl_secs")]
|
||||
pub profile_cache_ttl_secs: u64,
|
||||
}
|
||||
|
||||
impl Default for TlsFetchConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
profiles: default_tls_fetch_profiles(),
|
||||
strict_route: default_tls_fetch_strict_route(),
|
||||
attempt_timeout_ms: default_tls_fetch_attempt_timeout_ms(),
|
||||
total_budget_ms: default_tls_fetch_total_budget_ms(),
|
||||
grease_enabled: false,
|
||||
deterministic: false,
|
||||
profile_cache_ttl_secs: default_tls_fetch_profile_cache_ttl_secs(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct AntiCensorshipConfig {
|
||||
#[serde(default = "default_tls_domain")]
|
||||
|
|
@ -1356,11 +1590,19 @@ pub struct AntiCensorshipConfig {
|
|||
#[serde(default)]
|
||||
pub tls_domains: Vec<String>,
|
||||
|
||||
/// Policy for TLS ClientHello with unknown (non-configured) SNI.
|
||||
#[serde(default)]
|
||||
pub unknown_sni_action: UnknownSniAction,
|
||||
|
||||
/// Upstream scope used for TLS front metadata fetches.
|
||||
/// Empty value keeps default upstream routing behavior.
|
||||
#[serde(default = "default_tls_fetch_scope")]
|
||||
pub tls_fetch_scope: String,
|
||||
|
||||
/// Fetch strategy for TLS front metadata bootstrap and periodic refresh.
|
||||
#[serde(default)]
|
||||
pub tls_fetch: TlsFetchConfig,
|
||||
|
||||
#[serde(default = "default_true")]
|
||||
pub mask: bool,
|
||||
|
||||
|
|
@ -1440,6 +1682,14 @@ pub struct AntiCensorshipConfig {
|
|||
#[serde(default = "default_mask_shape_above_cap_blur_max_bytes")]
|
||||
pub mask_shape_above_cap_blur_max_bytes: usize,
|
||||
|
||||
/// Maximum bytes relayed per direction on unauthenticated masking fallback paths.
|
||||
#[serde(default = "default_mask_relay_max_bytes")]
|
||||
pub mask_relay_max_bytes: usize,
|
||||
|
||||
/// Prefetch timeout (ms) for extending fragmented masking classifier window.
|
||||
#[serde(default = "default_mask_classifier_prefetch_timeout_ms")]
|
||||
pub mask_classifier_prefetch_timeout_ms: u64,
|
||||
|
||||
/// Enable outcome-time normalization envelope for masking fallback.
|
||||
#[serde(default = "default_mask_timing_normalization_enabled")]
|
||||
pub mask_timing_normalization_enabled: bool,
|
||||
|
|
@ -1458,7 +1708,9 @@ impl Default for AntiCensorshipConfig {
|
|||
Self {
|
||||
tls_domain: default_tls_domain(),
|
||||
tls_domains: Vec::new(),
|
||||
unknown_sni_action: UnknownSniAction::Drop,
|
||||
tls_fetch_scope: default_tls_fetch_scope(),
|
||||
tls_fetch: TlsFetchConfig::default(),
|
||||
mask: default_true(),
|
||||
mask_host: None,
|
||||
mask_port: default_mask_port(),
|
||||
|
|
@ -1478,6 +1730,8 @@ impl Default for AntiCensorshipConfig {
|
|||
mask_shape_bucket_cap_bytes: default_mask_shape_bucket_cap_bytes(),
|
||||
mask_shape_above_cap_blur: default_mask_shape_above_cap_blur(),
|
||||
mask_shape_above_cap_blur_max_bytes: default_mask_shape_above_cap_blur_max_bytes(),
|
||||
mask_relay_max_bytes: default_mask_relay_max_bytes(),
|
||||
mask_classifier_prefetch_timeout_ms: default_mask_classifier_prefetch_timeout_ms(),
|
||||
mask_timing_normalization_enabled: default_mask_timing_normalization_enabled(),
|
||||
mask_timing_normalization_floor_ms: default_mask_timing_normalization_floor_ms(),
|
||||
mask_timing_normalization_ceiling_ms: default_mask_timing_normalization_ceiling_ms(),
|
||||
|
|
@ -1497,6 +1751,12 @@ pub struct AccessConfig {
|
|||
#[serde(default)]
|
||||
pub user_max_tcp_conns: HashMap<String, usize>,
|
||||
|
||||
/// Global per-user TCP connection limit applied when a user has no
|
||||
/// positive individual override.
|
||||
/// `0` disables the inherited limit.
|
||||
#[serde(default = "default_user_max_tcp_conns_global_each")]
|
||||
pub user_max_tcp_conns_global_each: usize,
|
||||
|
||||
#[serde(default)]
|
||||
pub user_expirations: HashMap<String, DateTime<Utc>>,
|
||||
|
||||
|
|
@ -1533,6 +1793,7 @@ impl Default for AccessConfig {
|
|||
users: default_access_users(),
|
||||
user_ad_tags: HashMap::new(),
|
||||
user_max_tcp_conns: HashMap::new(),
|
||||
user_max_tcp_conns_global_each: default_user_max_tcp_conns_global_each(),
|
||||
user_expirations: HashMap::new(),
|
||||
user_data_quota: HashMap::new(),
|
||||
user_max_unique_ips: HashMap::new(),
|
||||
|
|
|
|||
|
|
@ -0,0 +1,755 @@
|
|||
use std::collections::BTreeSet;
|
||||
use std::net::IpAddr;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use tokio::io::AsyncWriteExt;
|
||||
use tokio::process::Command;
|
||||
use tokio::sync::{mpsc, watch};
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
use crate::config::{ConntrackBackend, ConntrackMode, ProxyConfig};
|
||||
use crate::proxy::middle_relay::note_global_relay_pressure;
|
||||
use crate::proxy::shared_state::{ConntrackCloseEvent, ConntrackCloseReason, ProxySharedState};
|
||||
use crate::stats::Stats;
|
||||
|
||||
const CONNTRACK_EVENT_QUEUE_CAPACITY: usize = 32_768;
|
||||
const PRESSURE_RELEASE_TICKS: u8 = 3;
|
||||
const PRESSURE_SAMPLE_INTERVAL: Duration = Duration::from_secs(1);
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
enum NetfilterBackend {
|
||||
Nftables,
|
||||
Iptables,
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
struct PressureSample {
|
||||
conn_pct: Option<u8>,
|
||||
fd_pct: Option<u8>,
|
||||
accept_timeout_delta: u64,
|
||||
me_queue_pressure_delta: u64,
|
||||
}
|
||||
|
||||
struct PressureState {
|
||||
active: bool,
|
||||
low_streak: u8,
|
||||
prev_accept_timeout_total: u64,
|
||||
prev_me_queue_pressure_total: u64,
|
||||
}
|
||||
|
||||
impl PressureState {
|
||||
fn new(stats: &Stats) -> Self {
|
||||
Self {
|
||||
active: false,
|
||||
low_streak: 0,
|
||||
prev_accept_timeout_total: stats.get_accept_permit_timeout_total(),
|
||||
prev_me_queue_pressure_total: stats.get_me_c2me_send_full_total(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn spawn_conntrack_controller(
|
||||
config_rx: watch::Receiver<Arc<ProxyConfig>>,
|
||||
stats: Arc<Stats>,
|
||||
shared: Arc<ProxySharedState>,
|
||||
) {
|
||||
if !cfg!(target_os = "linux") {
|
||||
let enabled = config_rx
|
||||
.borrow()
|
||||
.server
|
||||
.conntrack_control
|
||||
.inline_conntrack_control;
|
||||
stats.set_conntrack_control_enabled(enabled);
|
||||
stats.set_conntrack_control_available(false);
|
||||
stats.set_conntrack_pressure_active(false);
|
||||
stats.set_conntrack_event_queue_depth(0);
|
||||
stats.set_conntrack_rule_apply_ok(false);
|
||||
shared.disable_conntrack_close_sender();
|
||||
shared.set_conntrack_pressure_active(false);
|
||||
if enabled {
|
||||
warn!(
|
||||
"conntrack control is configured but unsupported on this OS; disabling runtime worker"
|
||||
);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
let (tx, rx) = mpsc::channel(CONNTRACK_EVENT_QUEUE_CAPACITY);
|
||||
shared.set_conntrack_close_sender(tx);
|
||||
tokio::spawn(async move {
|
||||
run_conntrack_controller(config_rx, stats, shared, rx).await;
|
||||
});
|
||||
}
|
||||
|
||||
async fn run_conntrack_controller(
|
||||
mut config_rx: watch::Receiver<Arc<ProxyConfig>>,
|
||||
stats: Arc<Stats>,
|
||||
shared: Arc<ProxySharedState>,
|
||||
mut close_rx: mpsc::Receiver<ConntrackCloseEvent>,
|
||||
) {
|
||||
let mut cfg = config_rx.borrow().clone();
|
||||
let mut pressure_state = PressureState::new(stats.as_ref());
|
||||
let mut delete_budget_tokens = cfg.server.conntrack_control.delete_budget_per_sec;
|
||||
let mut backend = pick_backend(cfg.server.conntrack_control.backend);
|
||||
|
||||
apply_runtime_state(
|
||||
stats.as_ref(),
|
||||
shared.as_ref(),
|
||||
&cfg,
|
||||
backend.is_some(),
|
||||
false,
|
||||
);
|
||||
reconcile_rules(&cfg, backend, stats.as_ref()).await;
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
changed = config_rx.changed() => {
|
||||
if changed.is_err() {
|
||||
break;
|
||||
}
|
||||
cfg = config_rx.borrow_and_update().clone();
|
||||
backend = pick_backend(cfg.server.conntrack_control.backend);
|
||||
delete_budget_tokens = cfg.server.conntrack_control.delete_budget_per_sec;
|
||||
apply_runtime_state(stats.as_ref(), shared.as_ref(), &cfg, backend.is_some(), pressure_state.active);
|
||||
reconcile_rules(&cfg, backend, stats.as_ref()).await;
|
||||
}
|
||||
event = close_rx.recv() => {
|
||||
let Some(event) = event else {
|
||||
break;
|
||||
};
|
||||
stats.set_conntrack_event_queue_depth(close_rx.len() as u64);
|
||||
if !cfg.server.conntrack_control.inline_conntrack_control {
|
||||
continue;
|
||||
}
|
||||
if !pressure_state.active {
|
||||
continue;
|
||||
}
|
||||
if !matches!(event.reason, ConntrackCloseReason::Timeout | ConntrackCloseReason::Pressure | ConntrackCloseReason::Reset) {
|
||||
continue;
|
||||
}
|
||||
if delete_budget_tokens == 0 {
|
||||
continue;
|
||||
}
|
||||
stats.increment_conntrack_delete_attempt_total();
|
||||
match delete_conntrack_entry(event).await {
|
||||
DeleteOutcome::Deleted => {
|
||||
delete_budget_tokens = delete_budget_tokens.saturating_sub(1);
|
||||
stats.increment_conntrack_delete_success_total();
|
||||
}
|
||||
DeleteOutcome::NotFound => {
|
||||
delete_budget_tokens = delete_budget_tokens.saturating_sub(1);
|
||||
stats.increment_conntrack_delete_not_found_total();
|
||||
}
|
||||
DeleteOutcome::Error => {
|
||||
delete_budget_tokens = delete_budget_tokens.saturating_sub(1);
|
||||
stats.increment_conntrack_delete_error_total();
|
||||
}
|
||||
}
|
||||
}
|
||||
_ = tokio::time::sleep(PRESSURE_SAMPLE_INTERVAL) => {
|
||||
delete_budget_tokens = cfg.server.conntrack_control.delete_budget_per_sec;
|
||||
stats.set_conntrack_event_queue_depth(close_rx.len() as u64);
|
||||
let sample = collect_pressure_sample(stats.as_ref(), &cfg, &mut pressure_state);
|
||||
update_pressure_state(
|
||||
stats.as_ref(),
|
||||
shared.as_ref(),
|
||||
&cfg,
|
||||
&sample,
|
||||
&mut pressure_state,
|
||||
);
|
||||
if pressure_state.active {
|
||||
note_global_relay_pressure(shared.as_ref());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
shared.disable_conntrack_close_sender();
|
||||
shared.set_conntrack_pressure_active(false);
|
||||
stats.set_conntrack_pressure_active(false);
|
||||
}
|
||||
|
||||
fn apply_runtime_state(
|
||||
stats: &Stats,
|
||||
shared: &ProxySharedState,
|
||||
cfg: &ProxyConfig,
|
||||
backend_available: bool,
|
||||
pressure_active: bool,
|
||||
) {
|
||||
let enabled = cfg.server.conntrack_control.inline_conntrack_control;
|
||||
let available = enabled && backend_available && has_cap_net_admin();
|
||||
if enabled && !available {
|
||||
warn!(
|
||||
"conntrack control enabled but unavailable (missing CAP_NET_ADMIN or backend binaries)"
|
||||
);
|
||||
}
|
||||
stats.set_conntrack_control_enabled(enabled);
|
||||
stats.set_conntrack_control_available(available);
|
||||
shared.set_conntrack_pressure_active(enabled && pressure_active);
|
||||
stats.set_conntrack_pressure_active(enabled && pressure_active);
|
||||
}
|
||||
|
||||
fn collect_pressure_sample(
|
||||
stats: &Stats,
|
||||
cfg: &ProxyConfig,
|
||||
state: &mut PressureState,
|
||||
) -> PressureSample {
|
||||
let current_connections = stats.get_current_connections_total();
|
||||
let conn_pct = if cfg.server.max_connections == 0 {
|
||||
None
|
||||
} else {
|
||||
Some(
|
||||
((current_connections.saturating_mul(100)) / u64::from(cfg.server.max_connections))
|
||||
.min(100) as u8,
|
||||
)
|
||||
};
|
||||
|
||||
let fd_pct = fd_usage_pct();
|
||||
|
||||
let accept_total = stats.get_accept_permit_timeout_total();
|
||||
let accept_delta = accept_total.saturating_sub(state.prev_accept_timeout_total);
|
||||
state.prev_accept_timeout_total = accept_total;
|
||||
|
||||
let me_total = stats.get_me_c2me_send_full_total();
|
||||
let me_delta = me_total.saturating_sub(state.prev_me_queue_pressure_total);
|
||||
state.prev_me_queue_pressure_total = me_total;
|
||||
|
||||
PressureSample {
|
||||
conn_pct,
|
||||
fd_pct,
|
||||
accept_timeout_delta: accept_delta,
|
||||
me_queue_pressure_delta: me_delta,
|
||||
}
|
||||
}
|
||||
|
||||
/// Applies hysteresis to the latest pressure sample, toggling pressure mode
/// in `state` and mirroring the flag into `shared` and `stats`.
///
/// Activation is immediate on any high signal; deactivation requires
/// `PRESSURE_RELEASE_TICKS` consecutive all-clear samples.
fn update_pressure_state(
    stats: &Stats,
    shared: &ProxySharedState,
    cfg: &ProxyConfig,
    sample: &PressureSample,
    state: &mut PressureState,
) {
    // Feature disabled: force pressure mode off everywhere and reset the
    // release streak, then bail out.
    if !cfg.server.conntrack_control.inline_conntrack_control {
        if state.active {
            state.active = false;
            state.low_streak = 0;
            shared.set_conntrack_pressure_active(false);
            stats.set_conntrack_pressure_active(false);
            info!("Conntrack pressure mode deactivated (feature disabled)");
        }
        return;
    }

    let high = cfg.server.conntrack_control.pressure_high_watermark_pct;
    let low = cfg.server.conntrack_control.pressure_low_watermark_pct;

    // Any single gauge at/over the high watermark, or any accept-timeout /
    // queue-full event during the interval, counts as pressure.
    let high_hit = sample.conn_pct.is_some_and(|v| v >= high)
        || sample.fd_pct.is_some_and(|v| v >= high)
        || sample.accept_timeout_delta > 0
        || sample.me_queue_pressure_delta > 0;

    // Every gauge must be at/below the low watermark (or unavailable) and no
    // events observed for the sample to count toward release.
    let low_clear = sample.conn_pct.is_none_or(|v| v <= low)
        && sample.fd_pct.is_none_or(|v| v <= low)
        && sample.accept_timeout_delta == 0
        && sample.me_queue_pressure_delta == 0;

    // Rising edge: activate immediately, resetting the release streak.
    if !state.active && high_hit {
        state.active = true;
        state.low_streak = 0;
        shared.set_conntrack_pressure_active(true);
        stats.set_conntrack_pressure_active(true);
        info!(
            conn_pct = ?sample.conn_pct,
            fd_pct = ?sample.fd_pct,
            accept_timeout_delta = sample.accept_timeout_delta,
            me_queue_pressure_delta = sample.me_queue_pressure_delta,
            "Conntrack pressure mode activated"
        );
        return;
    }

    // Falling edge: count consecutive calm samples before releasing.
    if state.active && low_clear {
        state.low_streak = state.low_streak.saturating_add(1);
        if state.low_streak >= PRESSURE_RELEASE_TICKS {
            state.active = false;
            state.low_streak = 0;
            shared.set_conntrack_pressure_active(false);
            stats.set_conntrack_pressure_active(false);
            info!("Conntrack pressure mode deactivated");
        }
        return;
    }

    // Neither edge: any in-between sample breaks the release streak.
    state.low_streak = 0;
}
|
||||
|
||||
async fn reconcile_rules(cfg: &ProxyConfig, backend: Option<NetfilterBackend>, stats: &Stats) {
|
||||
if !cfg.server.conntrack_control.inline_conntrack_control {
|
||||
clear_notrack_rules_all_backends().await;
|
||||
stats.set_conntrack_rule_apply_ok(true);
|
||||
return;
|
||||
}
|
||||
|
||||
if !has_cap_net_admin() {
|
||||
stats.set_conntrack_rule_apply_ok(false);
|
||||
return;
|
||||
}
|
||||
|
||||
let Some(backend) = backend else {
|
||||
stats.set_conntrack_rule_apply_ok(false);
|
||||
return;
|
||||
};
|
||||
|
||||
let apply_result = match backend {
|
||||
NetfilterBackend::Nftables => apply_nft_rules(cfg).await,
|
||||
NetfilterBackend::Iptables => apply_iptables_rules(cfg).await,
|
||||
};
|
||||
|
||||
if let Err(error) = apply_result {
|
||||
warn!(error = %error, "Failed to reconcile conntrack/notrack rules");
|
||||
stats.set_conntrack_rule_apply_ok(false);
|
||||
} else {
|
||||
stats.set_conntrack_rule_apply_ok(true);
|
||||
}
|
||||
}
|
||||
|
||||
fn pick_backend(configured: ConntrackBackend) -> Option<NetfilterBackend> {
|
||||
match configured {
|
||||
ConntrackBackend::Auto => {
|
||||
if command_exists("nft") {
|
||||
Some(NetfilterBackend::Nftables)
|
||||
} else if command_exists("iptables") {
|
||||
Some(NetfilterBackend::Iptables)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
ConntrackBackend::Nftables => command_exists("nft").then_some(NetfilterBackend::Nftables),
|
||||
ConntrackBackend::Iptables => {
|
||||
command_exists("iptables").then_some(NetfilterBackend::Iptables)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true if `binary` exists as a regular file in any `PATH` directory.
///
/// An unset or empty `PATH` yields `false`. Note this checks presence only,
/// not the executable permission bit.
fn command_exists(binary: &str) -> bool {
    let Some(path_var) = std::env::var_os("PATH") else {
        return false;
    };
    // `is_file()` already returns false for missing paths, so the previous
    // separate `exists()` check was redundant (and cost an extra stat).
    std::env::split_paths(&path_var).any(|dir| dir.join(binary).is_file())
}
|
||||
|
||||
/// Computes the destination addresses that should receive notrack rules,
/// split by address family.
///
/// In each returned list, `Some(ip)` means "match that destination address"
/// and `None` means "match any destination" (the listener binds an
/// unspecified address). `BTreeSet` deduplicates targets and gives a
/// deterministic ordering.
fn notrack_targets(cfg: &ProxyConfig) -> (Vec<Option<IpAddr>>, Vec<Option<IpAddr>>) {
    let mode = cfg.server.conntrack_control.mode;
    let mut v4_targets: BTreeSet<Option<IpAddr>> = BTreeSet::new();
    let mut v6_targets: BTreeSet<Option<IpAddr>> = BTreeSet::new();

    match mode {
        // Tracked mode installs no notrack rules at all.
        ConntrackMode::Tracked => {}
        ConntrackMode::Notrack => {
            if cfg.server.listeners.is_empty() {
                // Legacy single-address config: parse the optional v4/v6
                // listen address strings; unparsable values are skipped.
                if let Some(ipv4) = cfg
                    .server
                    .listen_addr_ipv4
                    .as_ref()
                    .and_then(|s| s.parse::<IpAddr>().ok())
                {
                    if ipv4.is_unspecified() {
                        v4_targets.insert(None);
                    } else {
                        v4_targets.insert(Some(ipv4));
                    }
                }
                if let Some(ipv6) = cfg
                    .server
                    .listen_addr_ipv6
                    .as_ref()
                    .and_then(|s| s.parse::<IpAddr>().ok())
                {
                    if ipv6.is_unspecified() {
                        v6_targets.insert(None);
                    } else {
                        v6_targets.insert(Some(ipv6));
                    }
                }
            } else {
                // Multi-listener config: classify each listener address by
                // family and by whether it is a wildcard bind.
                for listener in &cfg.server.listeners {
                    if listener.ip.is_ipv4() {
                        if listener.ip.is_unspecified() {
                            v4_targets.insert(None);
                        } else {
                            v4_targets.insert(Some(listener.ip));
                        }
                    } else if listener.ip.is_unspecified() {
                        v6_targets.insert(None);
                    } else {
                        v6_targets.insert(Some(listener.ip));
                    }
                }
            }
        }
        // Hybrid mode: only the explicitly configured addresses get notrack
        // treatment; wildcards are not supported here.
        ConntrackMode::Hybrid => {
            for ip in &cfg.server.conntrack_control.hybrid_listener_ips {
                if ip.is_ipv4() {
                    v4_targets.insert(Some(*ip));
                } else {
                    v6_targets.insert(Some(*ip));
                }
            }
        }
    }

    (
        v4_targets.into_iter().collect(),
        v6_targets.into_iter().collect(),
    )
}
|
||||
|
||||
async fn apply_nft_rules(cfg: &ProxyConfig) -> Result<(), String> {
|
||||
let _ = run_command(
|
||||
"nft",
|
||||
&["delete", "table", "inet", "telemt_conntrack"],
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
if matches!(cfg.server.conntrack_control.mode, ConntrackMode::Tracked) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let (v4_targets, v6_targets) = notrack_targets(cfg);
|
||||
let mut rules = Vec::new();
|
||||
for ip in v4_targets {
|
||||
let rule = if let Some(ip) = ip {
|
||||
format!("tcp dport {} ip daddr {} notrack", cfg.server.port, ip)
|
||||
} else {
|
||||
format!("tcp dport {} notrack", cfg.server.port)
|
||||
};
|
||||
rules.push(rule);
|
||||
}
|
||||
for ip in v6_targets {
|
||||
let rule = if let Some(ip) = ip {
|
||||
format!("tcp dport {} ip6 daddr {} notrack", cfg.server.port, ip)
|
||||
} else {
|
||||
format!("tcp dport {} notrack", cfg.server.port)
|
||||
};
|
||||
rules.push(rule);
|
||||
}
|
||||
|
||||
let rule_blob = if rules.is_empty() {
|
||||
String::new()
|
||||
} else {
|
||||
format!(" {}\n", rules.join("\n "))
|
||||
};
|
||||
let script = format!(
|
||||
"table inet telemt_conntrack {{\n chain preraw {{\n type filter hook prerouting priority raw; policy accept;\n{rule_blob} }}\n}}\n"
|
||||
);
|
||||
run_command("nft", &["-f", "-"], Some(script)).await
|
||||
}
|
||||
|
||||
async fn apply_iptables_rules(cfg: &ProxyConfig) -> Result<(), String> {
|
||||
apply_iptables_rules_for_binary("iptables", cfg, true).await?;
|
||||
apply_iptables_rules_for_binary("ip6tables", cfg, false).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Rebuilds the TELEMT_NOTRACK chain for one iptables binary (iptables for
/// IPv4, ip6tables for IPv6).
///
/// Sequence: tear down any previous chain (detach from PREROUTING, flush,
/// delete — each best-effort), then, unless in Tracked mode, recreate the
/// chain, attach it idempotently, and append one CT --notrack rule per
/// target address.
async fn apply_iptables_rules_for_binary(
    binary: &str,
    cfg: &ProxyConfig,
    ipv4: bool,
) -> Result<(), String> {
    // A missing binary is not an error: the other family may still work.
    if !command_exists(binary) {
        return Ok(());
    }
    let chain = "TELEMT_NOTRACK";
    // Best-effort teardown; these fail harmlessly when the chain is absent.
    let _ = run_command(
        binary,
        &["-t", "raw", "-D", "PREROUTING", "-j", chain],
        None,
    )
    .await;
    let _ = run_command(binary, &["-t", "raw", "-F", chain], None).await;
    let _ = run_command(binary, &["-t", "raw", "-X", chain], None).await;

    // Tracked mode: teardown only, nothing to install.
    if matches!(cfg.server.conntrack_control.mode, ConntrackMode::Tracked) {
        return Ok(());
    }

    // Recreate the chain (flush again in case -N found an existing chain).
    run_command(binary, &["-t", "raw", "-N", chain], None).await?;
    run_command(binary, &["-t", "raw", "-F", chain], None).await?;
    // Idempotent attach: -C checks whether the jump already exists; only
    // insert at position 1 when it does not.
    if run_command(
        binary,
        &["-t", "raw", "-C", "PREROUTING", "-j", chain],
        None,
    )
    .await
    .is_err()
    {
        run_command(
            binary,
            &["-t", "raw", "-I", "PREROUTING", "1", "-j", chain],
            None,
        )
        .await?;
    }

    // One CT --notrack rule per target; `None` targets omit -d and match any
    // destination address.
    let (v4_targets, v6_targets) = notrack_targets(cfg);
    let selected = if ipv4 { v4_targets } else { v6_targets };
    for ip in selected {
        let mut args = vec![
            "-t".to_string(),
            "raw".to_string(),
            "-A".to_string(),
            chain.to_string(),
            "-p".to_string(),
            "tcp".to_string(),
            "--dport".to_string(),
            cfg.server.port.to_string(),
        ];
        if let Some(ip) = ip {
            args.push("-d".to_string());
            args.push(ip.to_string());
        }
        args.push("-j".to_string());
        args.push("CT".to_string());
        args.push("--notrack".to_string());
        let arg_refs: Vec<&str> = args.iter().map(String::as_str).collect();
        run_command(binary, &arg_refs, None).await?;
    }
    Ok(())
}
|
||||
|
||||
async fn clear_notrack_rules_all_backends() {
|
||||
let _ = run_command(
|
||||
"nft",
|
||||
&["delete", "table", "inet", "telemt_conntrack"],
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
let _ = run_command(
|
||||
"iptables",
|
||||
&["-t", "raw", "-D", "PREROUTING", "-j", "TELEMT_NOTRACK"],
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
let _ = run_command("iptables", &["-t", "raw", "-F", "TELEMT_NOTRACK"], None).await;
|
||||
let _ = run_command("iptables", &["-t", "raw", "-X", "TELEMT_NOTRACK"], None).await;
|
||||
let _ = run_command(
|
||||
"ip6tables",
|
||||
&["-t", "raw", "-D", "PREROUTING", "-j", "TELEMT_NOTRACK"],
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
let _ = run_command("ip6tables", &["-t", "raw", "-F", "TELEMT_NOTRACK"], None).await;
|
||||
let _ = run_command("ip6tables", &["-t", "raw", "-X", "TELEMT_NOTRACK"], None).await;
|
||||
}
|
||||
|
||||
/// Result of attempting to delete a single conntrack entry.
enum DeleteOutcome {
    /// The entry existed and was removed.
    Deleted,
    /// `conntrack` ran but matched no entry (already expired or removed).
    NotFound,
    /// The tool is missing or the delete command failed.
    Error,
}
|
||||
|
||||
async fn delete_conntrack_entry(event: ConntrackCloseEvent) -> DeleteOutcome {
|
||||
if !command_exists("conntrack") {
|
||||
return DeleteOutcome::Error;
|
||||
}
|
||||
let args = vec![
|
||||
"-D".to_string(),
|
||||
"-p".to_string(),
|
||||
"tcp".to_string(),
|
||||
"-s".to_string(),
|
||||
event.src.ip().to_string(),
|
||||
"--sport".to_string(),
|
||||
event.src.port().to_string(),
|
||||
"-d".to_string(),
|
||||
event.dst.ip().to_string(),
|
||||
"--dport".to_string(),
|
||||
event.dst.port().to_string(),
|
||||
];
|
||||
let arg_refs: Vec<&str> = args.iter().map(String::as_str).collect();
|
||||
match run_command("conntrack", &arg_refs, None).await {
|
||||
Ok(()) => DeleteOutcome::Deleted,
|
||||
Err(error) => {
|
||||
if error.contains("0 flow entries have been deleted") {
|
||||
DeleteOutcome::NotFound
|
||||
} else {
|
||||
debug!(error = %error, "conntrack delete failed");
|
||||
DeleteOutcome::Error
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn run_command(binary: &str, args: &[&str], stdin: Option<String>) -> Result<(), String> {
|
||||
if !command_exists(binary) {
|
||||
return Err(format!("{binary} is not available"));
|
||||
}
|
||||
let mut command = Command::new(binary);
|
||||
command.args(args);
|
||||
if stdin.is_some() {
|
||||
command.stdin(std::process::Stdio::piped());
|
||||
}
|
||||
command.stdout(std::process::Stdio::null());
|
||||
command.stderr(std::process::Stdio::piped());
|
||||
let mut child = command
|
||||
.spawn()
|
||||
.map_err(|e| format!("spawn {binary} failed: {e}"))?;
|
||||
if let Some(blob) = stdin
|
||||
&& let Some(mut writer) = child.stdin.take()
|
||||
{
|
||||
writer
|
||||
.write_all(blob.as_bytes())
|
||||
.await
|
||||
.map_err(|e| format!("stdin write {binary} failed: {e}"))?;
|
||||
}
|
||||
let output = child
|
||||
.wait_with_output()
|
||||
.await
|
||||
.map_err(|e| format!("wait {binary} failed: {e}"))?;
|
||||
if output.status.success() {
|
||||
return Ok(());
|
||||
}
|
||||
let stderr = String::from_utf8_lossy(&output.stderr).trim().to_string();
|
||||
Err(if stderr.is_empty() {
|
||||
format!("{binary} exited with status {}", output.status)
|
||||
} else {
|
||||
stderr
|
||||
})
|
||||
}
|
||||
|
||||
/// Returns this process's file-descriptor usage as a percentage (capped at
/// 100) of the RLIMIT_NOFILE soft limit, or `None` when either side is
/// unavailable (non-Linux, zero limit, or /proc unreadable).
fn fd_usage_pct() -> Option<u8> {
    let soft_limit = nofile_soft_limit()?;
    if soft_limit == 0 {
        return None;
    }
    // NOTE(review): the directory handle opened by read_dir presumably shows
    // up in /proc/self/fd itself, making the count one higher than steady
    // state — negligible at these scales, but worth confirming.
    let fd_count = std::fs::read_dir("/proc/self/fd").ok()?.count() as u64;
    Some(((fd_count.saturating_mul(100)) / soft_limit).min(100) as u8)
}
|
||||
|
||||
/// Returns the RLIMIT_NOFILE soft limit on Linux, `None` elsewhere or when
/// getrlimit fails.
fn nofile_soft_limit() -> Option<u64> {
    #[cfg(target_os = "linux")]
    {
        let mut lim = libc::rlimit {
            rlim_cur: 0,
            rlim_max: 0,
        };
        // SAFETY: `lim` is a valid, writable rlimit struct and
        // RLIMIT_NOFILE is a valid resource identifier.
        let rc = unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut lim) };
        if rc != 0 {
            return None;
        }
        return Some(lim.rlim_cur);
    }
    #[cfg(not(target_os = "linux"))]
    {
        None
    }
}
|
||||
|
||||
/// Best-effort check for CAP_NET_ADMIN in this process's effective
/// capability set (Linux only; always false elsewhere or on any read/parse
/// failure).
fn has_cap_net_admin() -> bool {
    #[cfg(target_os = "linux")]
    {
        // Bit index of CAP_NET_ADMIN in the kernel capability bitmask.
        const CAP_NET_ADMIN_BIT: u64 = 12;

        let Ok(status) = std::fs::read_to_string("/proc/self/status") else {
            return false;
        };
        // The CapEff line carries the effective capability set as hex.
        status
            .lines()
            .find_map(|line| line.strip_prefix("CapEff:"))
            .and_then(|raw| u64::from_str_radix(raw.trim(), 16).ok())
            .is_some_and(|bits| bits & (1u64 << CAP_NET_ADMIN_BIT) != 0)
    }
    #[cfg(not(target_os = "linux"))]
    {
        false
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::ProxyConfig;

    /// Any accept-permit timeout during the interval must activate pressure
    /// mode immediately, even with low gauge readings.
    #[test]
    fn pressure_activates_on_accept_timeout_spike() {
        let stats = Stats::new();
        let shared = ProxySharedState::new();
        let mut cfg = ProxyConfig::default();
        cfg.server.conntrack_control.inline_conntrack_control = true;
        let mut state = PressureState::new(&stats);
        let sample = PressureSample {
            conn_pct: Some(10),
            fd_pct: Some(10),
            accept_timeout_delta: 1,
            me_queue_pressure_delta: 0,
        };

        update_pressure_state(&stats, shared.as_ref(), &cfg, &sample, &mut state);

        assert!(state.active);
        assert!(shared.conntrack_pressure_active());
        assert!(stats.get_conntrack_pressure_active());
    }

    /// Pressure mode must stay active through the hysteresis window and
    /// release only on the PRESSURE_RELEASE_TICKS-th calm sample.
    #[test]
    fn pressure_releases_after_hysteresis_window() {
        let stats = Stats::new();
        let shared = ProxySharedState::new();
        let mut cfg = ProxyConfig::default();
        cfg.server.conntrack_control.inline_conntrack_control = true;
        let mut state = PressureState::new(&stats);

        // Drive activation with high gauge readings.
        let high_sample = PressureSample {
            conn_pct: Some(95),
            fd_pct: Some(95),
            accept_timeout_delta: 0,
            me_queue_pressure_delta: 0,
        };
        update_pressure_state(&stats, shared.as_ref(), &cfg, &high_sample, &mut state);
        assert!(state.active);

        // Calm samples: still active for the first two ticks.
        let low_sample = PressureSample {
            conn_pct: Some(10),
            fd_pct: Some(10),
            accept_timeout_delta: 0,
            me_queue_pressure_delta: 0,
        };
        update_pressure_state(&stats, shared.as_ref(), &cfg, &low_sample, &mut state);
        assert!(state.active);
        update_pressure_state(&stats, shared.as_ref(), &cfg, &low_sample, &mut state);
        assert!(state.active);
        update_pressure_state(&stats, shared.as_ref(), &cfg, &low_sample, &mut state);

        // Third calm tick releases pressure mode everywhere.
        assert!(!state.active);
        assert!(!shared.conntrack_pressure_active());
        assert!(!stats.get_conntrack_pressure_active());
    }

    /// With the feature disabled, even extreme samples must never flip
    /// pressure mode on.
    #[test]
    fn pressure_does_not_activate_when_disabled() {
        let stats = Stats::new();
        let shared = ProxySharedState::new();
        let mut cfg = ProxyConfig::default();
        cfg.server.conntrack_control.inline_conntrack_control = false;
        let mut state = PressureState::new(&stats);
        let sample = PressureSample {
            conn_pct: Some(100),
            fd_pct: Some(100),
            accept_timeout_delta: 10,
            me_queue_pressure_delta: 10,
        };

        update_pressure_state(&stats, shared.as_ref(), &cfg, &sample, &mut state);

        assert!(!state.active);
        assert!(!shared.conntrack_pressure_active());
        assert!(!stats.get_conntrack_pressure_active());
    }
}
|
||||
|
|
@ -0,0 +1,577 @@
|
|||
//! Unix daemon support for telemt.
|
||||
//!
|
||||
//! Provides classic Unix daemonization (double-fork), PID file management,
|
||||
//! and privilege dropping for running telemt as a background service.
|
||||
|
||||
use std::fs::{self, File, OpenOptions};
|
||||
use std::io::{self, Read, Write};
|
||||
use std::os::unix::fs::OpenOptionsExt;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use nix::fcntl::{Flock, FlockArg};
|
||||
use nix::unistd::{self, ForkResult, Gid, Pid, Uid, chdir, close, fork, getpid, setsid};
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
/// Default PID file location used when none is given on the command line.
pub const DEFAULT_PID_FILE: &str = "/var/run/telemt.pid";
|
||||
|
||||
/// Daemon configuration options parsed from the CLI.
#[derive(Debug, Clone, Default)]
pub struct DaemonOptions {
    /// Run as daemon (fork to background).
    pub daemonize: bool,
    /// Path to the PID file; `None` falls back to [`DEFAULT_PID_FILE`].
    pub pid_file: Option<PathBuf>,
    /// User to run as after binding sockets.
    pub user: Option<String>,
    /// Group to run as after binding sockets.
    pub group: Option<String>,
    /// Working directory for the daemon; `None` means `/`.
    pub working_dir: Option<PathBuf>,
    /// Explicit foreground mode (for systemd Type=simple); overrides
    /// `daemonize`.
    pub foreground: bool,
}
|
||||
|
||||
impl DaemonOptions {
|
||||
/// Returns the effective PID file path.
|
||||
pub fn pid_file_path(&self) -> &Path {
|
||||
self.pid_file
|
||||
.as_deref()
|
||||
.unwrap_or(Path::new(DEFAULT_PID_FILE))
|
||||
}
|
||||
|
||||
/// Returns true if we should actually daemonize.
|
||||
/// Foreground flag takes precedence.
|
||||
pub fn should_daemonize(&self) -> bool {
|
||||
self.daemonize && !self.foreground
|
||||
}
|
||||
}
|
||||
|
||||
/// Error types for daemon operations.
#[derive(Debug, thiserror::Error)]
pub enum DaemonError {
    /// A fork() call failed (either stage of the double fork).
    #[error("fork failed: {0}")]
    ForkFailed(#[source] nix::Error),

    /// setsid() failed while becoming a session leader.
    #[error("setsid failed: {0}")]
    SetsidFailed(#[source] nix::Error),

    /// Changing the daemon's working directory failed.
    #[error("chdir failed: {0}")]
    ChdirFailed(#[source] nix::Error),

    /// /dev/null could not be opened for stdio redirection.
    #[error("failed to open /dev/null: {0}")]
    DevNullFailed(#[source] io::Error),

    /// dup2 onto one of the standard descriptors failed.
    #[error("failed to redirect stdio: {0}")]
    RedirectFailed(#[source] nix::Error),

    /// Any PID-file problem (read/write/lock/remove), with details.
    #[error("PID file error: {0}")]
    PidFile(String),

    /// The PID file points at a live process.
    #[error("another instance is already running (pid {0})")]
    AlreadyRunning(i32),

    /// The --user name has no passwd entry.
    #[error("user '{0}' not found")]
    UserNotFound(String),

    /// The --group name has no group entry.
    #[error("group '{0}' not found")]
    GroupNotFound(String),

    /// setuid/setgid/setgroups/fchown failed during privilege drop.
    #[error("failed to set uid/gid: {0}")]
    PrivilegeDrop(#[source] nix::Error),

    /// Any other I/O error.
    #[error("io error: {0}")]
    Io(#[from] io::Error),
}
|
||||
|
||||
/// Result of a successful daemonize() call, telling the caller which side of
/// the first fork it is on.
#[derive(Debug)]
pub enum DaemonizeResult {
    /// We are the original parent process and should exit promptly.
    Parent,
    /// We are the detached daemon child and should continue running.
    Child,
}
|
||||
|
||||
/// Performs classic Unix double-fork daemonization.
///
/// This detaches the process from the controlling terminal:
/// 1. First fork - parent exits, child continues
/// 2. setsid() - become session leader
/// 3. Second fork - ensure we can never acquire a controlling terminal
/// 4. chdir - don't hold the start directory open (defaults to "/")
/// 5. Redirect stdin/stdout/stderr to /dev/null
///
/// Returns `DaemonizeResult::Parent` in the original parent (which should
/// exit), or `DaemonizeResult::Child` in the final daemon child.
pub fn daemonize(working_dir: Option<&Path>) -> Result<DaemonizeResult, DaemonError> {
    // First fork: detach from the caller (shell/job control).
    // SAFETY-adjacent note: fork() is marked unsafe by nix because of
    // multi-threaded callers; this is expected to run before any runtime
    // threads are spawned — NOTE(review): confirm call site ordering.
    match unsafe { fork() } {
        Ok(ForkResult::Parent { .. }) => {
            // Parent returns so the caller can exit cleanly.
            return Ok(DaemonizeResult::Parent);
        }
        Ok(ForkResult::Child) => {
            // Child continues
        }
        Err(e) => return Err(DaemonError::ForkFailed(e)),
    }

    // Create new session, become session leader (drops the controlling tty).
    setsid().map_err(DaemonError::SetsidFailed)?;

    // Second fork: a non-session-leader can never reacquire a controlling
    // terminal by opening a tty.
    match unsafe { fork() } {
        Ok(ForkResult::Parent { .. }) => {
            // Intermediate parent exits immediately; exit() does not run
            // destructors, which is fine — nothing was acquired yet.
            std::process::exit(0);
        }
        Ok(ForkResult::Child) => {
            // Final daemon child continues
        }
        Err(e) => return Err(DaemonError::ForkFailed(e)),
    }

    // Change working directory so the daemon doesn't pin a mount point.
    let target_dir = working_dir.unwrap_or(Path::new("/"));
    chdir(target_dir).map_err(DaemonError::ChdirFailed)?;

    // Redirect stdin, stdout, stderr to /dev/null.
    redirect_stdio_to_devnull()?;

    Ok(DaemonizeResult::Child)
}
|
||||
|
||||
/// Redirects stdin, stdout, and stderr to /dev/null.
|
||||
fn redirect_stdio_to_devnull() -> Result<(), DaemonError> {
|
||||
let devnull = File::options()
|
||||
.read(true)
|
||||
.write(true)
|
||||
.open("/dev/null")
|
||||
.map_err(DaemonError::DevNullFailed)?;
|
||||
|
||||
let devnull_fd = std::os::unix::io::AsRawFd::as_raw_fd(&devnull);
|
||||
|
||||
// Use libc::dup2 directly for redirecting standard file descriptors
|
||||
// nix 0.31's dup2 requires OwnedFd which doesn't work well with stdio fds
|
||||
unsafe {
|
||||
// Redirect stdin (fd 0)
|
||||
if libc::dup2(devnull_fd, 0) < 0 {
|
||||
return Err(DaemonError::RedirectFailed(nix::errno::Errno::last()));
|
||||
}
|
||||
// Redirect stdout (fd 1)
|
||||
if libc::dup2(devnull_fd, 1) < 0 {
|
||||
return Err(DaemonError::RedirectFailed(nix::errno::Errno::last()));
|
||||
}
|
||||
// Redirect stderr (fd 2)
|
||||
if libc::dup2(devnull_fd, 2) < 0 {
|
||||
return Err(DaemonError::RedirectFailed(nix::errno::Errno::last()));
|
||||
}
|
||||
}
|
||||
|
||||
// Close original devnull fd if it's not one of the standard fds
|
||||
if devnull_fd > 2 {
|
||||
let _ = close(devnull_fd);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// PID file manager with flock-based locking.
pub struct PidFile {
    // Filesystem path of the PID file.
    path: PathBuf,
    // Open handle kept for the daemon's lifetime; also used for fchown
    // during privilege drop.
    file: Option<File>,
    // Set once acquire() succeeded; gates cleanup in Drop.
    locked: bool,
}
|
||||
|
||||
impl PidFile {
    /// Creates a new PID file manager for the given path. No file is touched
    /// until `acquire()`.
    pub fn new<P: AsRef<Path>>(path: P) -> Self {
        Self {
            path: path.as_ref().to_path_buf(),
            file: None,
            locked: false,
        }
    }

    /// Checks if another instance is already running.
    ///
    /// Returns the PID of the running instance if one exists. A PID file
    /// whose process is gone is treated as stale and removed.
    pub fn check_running(&self) -> Result<Option<i32>, DaemonError> {
        if !self.path.exists() {
            return Ok(None);
        }

        // Try to read existing PID
        let mut contents = String::new();
        File::open(&self.path)
            .and_then(|mut f| f.read_to_string(&mut contents))
            .map_err(|e| {
                DaemonError::PidFile(format!("cannot read {}: {}", self.path.display(), e))
            })?;

        let pid: i32 = contents
            .trim()
            .parse()
            .map_err(|_| DaemonError::PidFile(format!("invalid PID in {}", self.path.display())))?;

        // Liveness check via kill(pid, 0).
        if is_process_running(pid) {
            Ok(Some(pid))
        } else {
            // Stale PID file: best-effort removal.
            debug!(pid, path = %self.path.display(), "Removing stale PID file");
            let _ = fs::remove_file(&self.path);
            Ok(None)
        }
    }

    /// Acquires the PID file lock and writes the current PID.
    ///
    /// Fails if another instance is already running.
    ///
    /// NOTE(review): the Flock taken below is released again before the File
    /// is stored in `self.file` (both `flock.unlock()` calls return the bare
    /// File), so at steady state the file is held open but NOT flock-held.
    /// Mutual exclusion therefore rests on `check_running`'s PID liveness
    /// test, not on the advisory lock — confirm whether the lock should be
    /// retained for the daemon's lifetime instead.
    pub fn acquire(&mut self) -> Result<(), DaemonError> {
        // Check for running instance first
        if let Some(pid) = self.check_running()? {
            return Err(DaemonError::AlreadyRunning(pid));
        }

        // Ensure parent directory exists
        if let Some(parent) = self.path.parent() {
            if !parent.exists() {
                fs::create_dir_all(parent).map_err(|e| {
                    DaemonError::PidFile(format!(
                        "cannot create directory {}: {}",
                        parent.display(),
                        e
                    ))
                })?;
            }
        }

        // Open/create PID file (world-readable, owner-writable), truncating
        // any stale content.
        let file = OpenOptions::new()
            .write(true)
            .create(true)
            .truncate(true)
            .mode(0o644)
            .open(&self.path)
            .map_err(|e| {
                DaemonError::PidFile(format!("cannot open {}: {}", self.path.display(), e))
            })?;

        // Try to acquire exclusive lock (non-blocking); if it fails, another
        // instance most likely raced us — report its PID when we can find it.
        let flock = Flock::lock(file, FlockArg::LockExclusiveNonblock).map_err(|(_, errno)| {
            if let Some(pid) = self.check_running().ok().flatten() {
                DaemonError::AlreadyRunning(pid)
            } else {
                DaemonError::PidFile(format!("cannot lock {}: {}", self.path.display(), errno))
            }
        })?;

        // Unlock to get the File back, then write our PID to it.
        let pid = getpid();
        let mut file = flock
            .unlock()
            .map_err(|(_, errno)| DaemonError::PidFile(format!("unlock failed: {}", errno)))?;

        writeln!(file, "{}", pid).map_err(|e| {
            DaemonError::PidFile(format!(
                "cannot write PID to {}: {}",
                self.path.display(),
                e
            ))
        })?;

        // Re-acquire the lock to prove exclusivity still holds after the
        // write...
        let flock = Flock::lock(file, FlockArg::LockExclusiveNonblock).map_err(|(_, errno)| {
            DaemonError::PidFile(format!("cannot re-lock {}: {}", self.path.display(), errno))
        })?;

        // ...then unlock again so the bare File can be stored (see the
        // NOTE(review) above: the lock is not held past this point).
        self.file = Some(flock.unlock().map_err(|(_, errno)| {
            DaemonError::PidFile(format!("unlock for storage failed: {}", errno))
        })?);
        self.locked = true;

        info!(pid = pid.as_raw(), path = %self.path.display(), "PID file created");
        Ok(())
    }

    /// Releases the PID file handle and removes the file from disk.
    pub fn release(&mut self) -> Result<(), DaemonError> {
        // Drop our open handle first so removal is clean.
        if let Some(file) = self.file.take() {
            drop(file);
        }
        self.locked = false;

        if self.path.exists() {
            fs::remove_file(&self.path).map_err(|e| {
                DaemonError::PidFile(format!("cannot remove {}: {}", self.path.display(), e))
            })?;
            debug!(path = %self.path.display(), "PID file removed");
        }

        Ok(())
    }

    /// Returns the path to this PID file.
    #[allow(dead_code)]
    pub fn path(&self) -> &Path {
        &self.path
    }
}
|
||||
|
||||
impl Drop for PidFile {
|
||||
fn drop(&mut self) {
|
||||
if self.locked {
|
||||
if let Err(e) = self.release() {
|
||||
warn!(error = %e, "Failed to clean up PID file on drop");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Checks if a process with the given PID is running.
fn is_process_running(pid: i32) -> bool {
    // kill(pid, 0) checks if the process exists without sending a signal.
    // Note: this also returns false when the process exists but we lack
    // permission to signal it (kill yields EPERM, mapped to Err here).
    nix::sys::signal::kill(Pid::from_raw(pid), None).is_ok()
}
|
||||
|
||||
/// Drops privileges to the specified user and group.
///
/// This should be called after binding privileged ports but before entering
/// the main event loop. Order matters: the PID file is chowned while we are
/// still root, the group is dropped before the user (setgid fails after
/// setuid to non-root), and finally a write probe verifies the new uid can
/// still clean up the PID directory on shutdown.
pub fn drop_privileges(
    user: Option<&str>,
    group: Option<&str>,
    pid_file: Option<&PidFile>,
) -> Result<(), DaemonError> {
    // Target group: explicit --group wins, otherwise the user's primary GID.
    let target_gid = if let Some(group_name) = group {
        Some(lookup_group(group_name)?)
    } else if let Some(user_name) = user {
        Some(lookup_user_primary_gid(user_name)?)
    } else {
        None
    };

    let target_uid = if let Some(user_name) = user {
        Some(lookup_user(user_name)?)
    } else {
        None
    };

    // Chown the PID file to the target identity while we still can, so the
    // unprivileged daemon can rewrite/remove it later.
    if (target_uid.is_some() || target_gid.is_some())
        && let Some(file) = pid_file.and_then(|pid| pid.file.as_ref())
    {
        unistd::fchown(file, target_uid, target_gid).map_err(DaemonError::PrivilegeDrop)?;
    }

    // Drop the group first (and clear supplementary groups) — this requires
    // root and must precede setuid.
    if let Some(gid) = target_gid {
        unistd::setgid(gid).map_err(DaemonError::PrivilegeDrop)?;
        unistd::setgroups(&[gid]).map_err(DaemonError::PrivilegeDrop)?;
        info!(gid = gid.as_raw(), "Dropped group privileges");
    }

    if let Some(uid) = target_uid {
        unistd::setuid(uid).map_err(DaemonError::PrivilegeDrop)?;
        info!(uid = uid.as_raw(), "Dropped user privileges");

        // After dropping to a non-root uid, prove we can still create and
        // remove files in the PID directory; otherwise shutdown cleanup of
        // the PID file would silently fail much later.
        if uid.as_raw() != 0
            && let Some(pid) = pid_file
        {
            let parent = pid.path.parent().unwrap_or(Path::new("."));
            // Unique probe name so concurrent instances cannot collide.
            let probe_path = parent.join(format!(
                ".telemt_pid_probe_{}_{}",
                std::process::id(),
                getpid().as_raw()
            ));
            OpenOptions::new()
                .write(true)
                .create_new(true)
                .mode(0o600)
                .open(&probe_path)
                .map_err(|e| {
                    DaemonError::PidFile(format!(
                        "cannot create probe in PID directory {} as uid {} (pid cleanup will fail): {}",
                        parent.display(),
                        uid.as_raw(),
                        e
                    ))
                })?;
            fs::remove_file(&probe_path).map_err(|e| {
                DaemonError::PidFile(format!(
                    "cannot remove probe in PID directory {} as uid {} (pid cleanup will fail): {}",
                    parent.display(),
                    uid.as_raw(),
                    e
                ))
            })?;
        }
    }

    Ok(())
}
|
||||
|
||||
/// Looks up a user by name and returns their UID.
fn lookup_user(name: &str) -> Result<Uid, DaemonError> {
    // A name containing an interior NUL can't be a valid account name.
    let c_name =
        std::ffi::CString::new(name).map_err(|_| DaemonError::UserNotFound(name.to_string()))?;

    // SAFETY: c_name is a valid NUL-terminated string; the returned pointer
    // is checked for NULL before dereferencing, and the field is copied out
    // before the call returns. NOTE(review): getpwnam's result points into
    // static storage and is not thread-safe; presumably this only runs
    // during single-threaded startup — confirm.
    unsafe {
        let pwd = libc::getpwnam(c_name.as_ptr());
        if pwd.is_null() {
            Err(DaemonError::UserNotFound(name.to_string()))
        } else {
            Ok(Uid::from_raw((*pwd).pw_uid))
        }
    }
}
|
||||
|
||||
/// Looks up a user's primary GID by username.
fn lookup_user_primary_gid(name: &str) -> Result<Gid, DaemonError> {
    let c_name =
        std::ffi::CString::new(name).map_err(|_| DaemonError::UserNotFound(name.to_string()))?;

    // SAFETY: c_name is a valid NUL-terminated string; the pointer is
    // NULL-checked and pw_gid is copied out immediately.
    unsafe {
        let pwd = libc::getpwnam(c_name.as_ptr());
        if pwd.is_null() {
            Err(DaemonError::UserNotFound(name.to_string()))
        } else {
            Ok(Gid::from_raw((*pwd).pw_gid))
        }
    }
}
|
||||
|
||||
/// Looks up a group by name and returns its GID.
fn lookup_group(name: &str) -> Result<Gid, DaemonError> {
    let c_name =
        std::ffi::CString::new(name).map_err(|_| DaemonError::GroupNotFound(name.to_string()))?;

    // SAFETY: c_name is a valid NUL-terminated string; the pointer is
    // NULL-checked and gr_gid is copied out immediately.
    unsafe {
        let grp = libc::getgrnam(c_name.as_ptr());
        if grp.is_null() {
            Err(DaemonError::GroupNotFound(name.to_string()))
        } else {
            Ok(Gid::from_raw((*grp).gr_gid))
        }
    }
}
|
||||
|
||||
/// Reads PID from a PID file.
|
||||
#[allow(dead_code)]
|
||||
pub fn read_pid_file<P: AsRef<Path>>(path: P) -> Result<i32, DaemonError> {
|
||||
let path = path.as_ref();
|
||||
let mut contents = String::new();
|
||||
File::open(path)
|
||||
.and_then(|mut f| f.read_to_string(&mut contents))
|
||||
.map_err(|e| DaemonError::PidFile(format!("cannot read {}: {}", path.display(), e)))?;
|
||||
|
||||
contents
|
||||
.trim()
|
||||
.parse()
|
||||
.map_err(|_| DaemonError::PidFile(format!("invalid PID in {}", path.display())))
|
||||
}
|
||||
|
||||
/// Sends a signal to the process specified in a PID file.
///
/// # Errors
/// Returns `DaemonError::PidFile` when the PID file cannot be read/parsed,
/// when the recorded process is not running, or when `kill(2)` fails.
#[allow(dead_code)]
pub fn signal_pid_file<P: AsRef<Path>>(
    path: P,
    signal: nix::sys::signal::Signal,
) -> Result<(), DaemonError> {
    let pid = read_pid_file(&path)?;

    // Pre-check produces a clearer error than a raw ESRCH from kill().
    // NOTE(review): inherently TOCTOU — the process can exit between this
    // check and the kill() below; kill's own error still covers that window.
    if !is_process_running(pid) {
        return Err(DaemonError::PidFile(format!(
            "process {} from {} is not running",
            pid,
            path.as_ref().display()
        )));
    }

    nix::sys::signal::kill(Pid::from_raw(pid), signal)
        .map_err(|e| DaemonError::PidFile(format!("cannot signal process {}: {}", pid, e)))?;

    Ok(())
}
|
||||
|
||||
/// Returns the status of the daemon based on PID file.
///
/// Produced by [`check_status`]; `Stale` lets callers distinguish a crashed
/// daemon (leftover PID file) from one that never started.
#[allow(dead_code)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum DaemonStatus {
    /// Daemon is running with the given PID.
    Running(i32),
    /// PID file exists but process is not running.
    Stale(i32),
    /// No PID file exists.
    NotRunning,
}
|
||||
|
||||
/// Checks the daemon status from a PID file.
|
||||
#[allow(dead_code)]
|
||||
pub fn check_status<P: AsRef<Path>>(path: P) -> DaemonStatus {
|
||||
let path = path.as_ref();
|
||||
|
||||
if !path.exists() {
|
||||
return DaemonStatus::NotRunning;
|
||||
}
|
||||
|
||||
match read_pid_file(path) {
|
||||
Ok(pid) => {
|
||||
if is_process_running(pid) {
|
||||
DaemonStatus::Running(pid)
|
||||
} else {
|
||||
DaemonStatus::Stale(pid)
|
||||
}
|
||||
}
|
||||
Err(_) => DaemonStatus::NotRunning,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Builds a per-process temp path so parallel test runs (or two CI jobs
    /// on the same host) cannot collide on a shared fixed /tmp filename.
    fn unique_tmp_path(tag: &str) -> String {
        std::env::temp_dir()
            .join(format!("telemt_test_{}_{}.pid", tag, std::process::id()))
            .to_string_lossy()
            .into_owned()
    }

    #[test]
    fn test_daemon_options_default() {
        let opts = DaemonOptions::default();
        assert!(!opts.daemonize);
        assert!(!opts.should_daemonize());
        assert_eq!(opts.pid_file_path(), Path::new(DEFAULT_PID_FILE));
    }

    #[test]
    fn test_daemon_options_foreground_overrides() {
        // `foreground: true` must win over `daemonize: true`.
        let opts = DaemonOptions {
            daemonize: true,
            foreground: true,
            ..Default::default()
        };
        assert!(!opts.should_daemonize());
    }

    #[test]
    fn test_check_status_not_running() {
        let path = unique_tmp_path("nonexistent");
        let _ = fs::remove_file(&path);
        assert_eq!(check_status(&path), DaemonStatus::NotRunning);
    }

    #[test]
    fn test_pid_file_basic() {
        let path = unique_tmp_path("pidfile");
        let _ = fs::remove_file(&path);

        let mut pf = PidFile::new(&path);
        assert!(pf.check_running().unwrap().is_none());

        pf.acquire().unwrap();
        assert!(Path::new(&path).exists());

        // Read it back: the file must contain our own PID.
        let pid = read_pid_file(&path).unwrap();
        assert_eq!(pid, std::process::id() as i32);

        pf.release().unwrap();
        assert!(!Path::new(&path).exists());
    }
}
|
||||
|
|
@ -216,6 +216,9 @@ pub enum ProxyError {
|
|||
#[error("Invalid proxy protocol header")]
|
||||
InvalidProxyProtocol,
|
||||
|
||||
#[error("Unknown TLS SNI")]
|
||||
UnknownTlsSni,
|
||||
|
||||
#[error("Proxy error: {0}")]
|
||||
Proxy(String),
|
||||
|
||||
|
|
|
|||
|
|
@ -26,6 +26,15 @@ pub struct UserIpTracker {
|
|||
cleanup_drain_lock: Arc<AsyncMutex<()>>,
|
||||
}
|
||||
|
||||
/// Point-in-time memory-usage counters for the user-IP tracker,
/// as produced by `memory_stats`.
#[derive(Debug, Clone, Copy)]
pub struct UserIpTrackerMemoryStats {
    /// Number of distinct users in the active-IPs map.
    pub active_users: usize,
    /// Number of distinct users in the recent-IPs map.
    pub recent_users: usize,
    /// Total IP entries summed across all users in the active map.
    pub active_entries: usize,
    /// Total IP entries summed across all users in the recent map.
    pub recent_entries: usize,
    /// Number of pending items in the cleanup queue.
    pub cleanup_queue_len: usize,
}
|
||||
|
||||
impl UserIpTracker {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
|
|
@ -141,6 +150,13 @@ impl UserIpTracker {
|
|||
|
||||
let mut active_ips = self.active_ips.write().await;
|
||||
let mut recent_ips = self.recent_ips.write().await;
|
||||
let window = *self.limit_window.read().await;
|
||||
let now = Instant::now();
|
||||
|
||||
for user_recent in recent_ips.values_mut() {
|
||||
Self::prune_recent(user_recent, now, window);
|
||||
}
|
||||
|
||||
let mut users =
|
||||
Vec::<String>::with_capacity(active_ips.len().saturating_add(recent_ips.len()));
|
||||
users.extend(active_ips.keys().cloned());
|
||||
|
|
@ -166,6 +182,26 @@ impl UserIpTracker {
|
|||
}
|
||||
}
|
||||
|
||||
/// Returns a point-in-time snapshot of the tracker's memory footprint.
///
/// The cleanup queue (a `std` mutex) is sampled first; its guard is a
/// temporary dropped at the end of that statement, so it is never held
/// across the awaits that follow.
pub async fn memory_stats(&self) -> UserIpTrackerMemoryStats {
    // A poisoned mutex still yields usable data here (we only read a count),
    // so recover the inner value instead of panicking.
    let cleanup_queue_len = self
        .cleanup_queue
        .lock()
        .unwrap_or_else(|poisoned| poisoned.into_inner())
        .len();
    let active_ips = self.active_ips.read().await;
    let recent_ips = self.recent_ips.read().await;
    // Entry counts are the sum of per-user map sizes (total tracked IPs).
    let active_entries = active_ips.values().map(HashMap::len).sum();
    let recent_entries = recent_ips.values().map(HashMap::len).sum();

    UserIpTrackerMemoryStats {
        active_users: active_ips.len(),
        recent_users: recent_ips.len(),
        active_entries,
        recent_entries,
        cleanup_queue_len,
    }
}
|
||||
|
||||
pub async fn set_limit_policy(&self, mode: UserMaxUniqueIpsMode, window_secs: u64) {
|
||||
{
|
||||
let mut current_mode = self.limit_mode.write().await;
|
||||
|
|
@ -451,6 +487,7 @@ impl Default for UserIpTracker {
|
|||
mod tests {
|
||||
use super::*;
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
fn test_ipv4(oct1: u8, oct2: u8, oct3: u8, oct4: u8) -> IpAddr {
|
||||
IpAddr::V4(Ipv4Addr::new(oct1, oct2, oct3, oct4))
|
||||
|
|
@ -764,4 +801,54 @@ mod tests {
|
|||
tokio::time::sleep(Duration::from_millis(1100)).await;
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_memory_stats_reports_queue_and_entry_counts() {
|
||||
let tracker = UserIpTracker::new();
|
||||
tracker.set_user_limit("test_user", 4).await;
|
||||
let ip1 = test_ipv4(10, 2, 0, 1);
|
||||
let ip2 = test_ipv4(10, 2, 0, 2);
|
||||
|
||||
tracker.check_and_add("test_user", ip1).await.unwrap();
|
||||
tracker.check_and_add("test_user", ip2).await.unwrap();
|
||||
tracker.enqueue_cleanup("test_user".to_string(), ip1);
|
||||
|
||||
let snapshot = tracker.memory_stats().await;
|
||||
assert_eq!(snapshot.active_users, 1);
|
||||
assert_eq!(snapshot.recent_users, 1);
|
||||
assert_eq!(snapshot.active_entries, 2);
|
||||
assert_eq!(snapshot.recent_entries, 2);
|
||||
assert_eq!(snapshot.cleanup_queue_len, 1);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_compact_prunes_stale_recent_entries() {
|
||||
let tracker = UserIpTracker::new();
|
||||
tracker
|
||||
.set_limit_policy(UserMaxUniqueIpsMode::TimeWindow, 1)
|
||||
.await;
|
||||
|
||||
let stale_user = "stale-user".to_string();
|
||||
let stale_ip = test_ipv4(10, 3, 0, 1);
|
||||
{
|
||||
let mut recent_ips = tracker.recent_ips.write().await;
|
||||
recent_ips
|
||||
.entry(stale_user.clone())
|
||||
.or_insert_with(HashMap::new)
|
||||
.insert(stale_ip, Instant::now() - Duration::from_secs(5));
|
||||
}
|
||||
|
||||
tracker.last_compact_epoch_secs.store(0, Ordering::Relaxed);
|
||||
tracker
|
||||
.check_and_add("trigger-user", test_ipv4(10, 3, 0, 2))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let recent_ips = tracker.recent_ips.read().await;
|
||||
let stale_exists = recent_ips
|
||||
.get(&stale_user)
|
||||
.map(|ips| ips.contains_key(&stale_ip))
|
||||
.unwrap_or(false);
|
||||
assert!(!stale_exists);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,343 @@
|
|||
//! Logging configuration for telemt.
|
||||
//!
|
||||
//! Supports multiple log destinations:
|
||||
//! - stderr (default, works with systemd journald)
|
||||
//! - syslog (Unix only, for traditional init systems)
|
||||
//! - file (with optional rotation)
|
||||
|
||||
#![allow(dead_code)] // Infrastructure module - used via CLI flags
|
||||
|
||||
use std::path::Path;
|
||||
|
||||
use tracing_subscriber::layer::SubscriberExt;
|
||||
use tracing_subscriber::util::SubscriberInitExt;
|
||||
use tracing_subscriber::{EnvFilter, fmt, reload};
|
||||
|
||||
/// Log destination configuration.
#[derive(Debug, Clone, Default)]
pub enum LogDestination {
    /// Log to stderr (default, captured by systemd journald).
    #[default]
    Stderr,
    /// Log to syslog (Unix only).
    #[cfg(unix)]
    Syslog,
    /// Log to a file with optional rotation.
    File {
        /// Log file path; with rotation this becomes the filename prefix.
        path: String,
        /// Rotate daily if true.
        rotate_daily: bool,
    },
}
|
||||
|
||||
/// Logging options parsed from CLI/config.
#[derive(Debug, Clone, Default)]
pub struct LoggingOptions {
    /// Where to send logs.
    pub destination: LogDestination,
    /// Disable ANSI colors (relevant only for the stderr destination).
    pub disable_colors: bool,
}
|
||||
|
||||
/// Guard that must be held to keep file logging active.
/// When dropped, flushes and closes log files.
pub struct LoggingGuard {
    // Worker guard from tracing-appender's non-blocking writer; `None` for
    // stderr/syslog where there is no background writer to flush.
    _guard: Option<tracing_appender::non_blocking::WorkerGuard>,
}

impl LoggingGuard {
    /// Wraps an optional non-blocking worker guard.
    fn new(guard: Option<tracing_appender::non_blocking::WorkerGuard>) -> Self {
        Self { _guard: guard }
    }

    /// Creates a no-op guard for stderr/syslog logging.
    pub fn noop() -> Self {
        Self { _guard: None }
    }
}
|
||||
|
||||
/// Initialize the tracing subscriber with the specified options.
|
||||
///
|
||||
/// Returns a reload handle for dynamic log level changes and a guard
|
||||
/// that must be kept alive for file logging.
|
||||
pub fn init_logging(
|
||||
opts: &LoggingOptions,
|
||||
initial_filter: &str,
|
||||
) -> (
|
||||
reload::Handle<EnvFilter, impl tracing::Subscriber + Send + Sync>,
|
||||
LoggingGuard,
|
||||
) {
|
||||
let (filter_layer, filter_handle) = reload::Layer::new(EnvFilter::new(initial_filter));
|
||||
|
||||
match &opts.destination {
|
||||
LogDestination::Stderr => {
|
||||
let fmt_layer = fmt::Layer::default()
|
||||
.with_ansi(!opts.disable_colors)
|
||||
.with_target(true);
|
||||
|
||||
tracing_subscriber::registry()
|
||||
.with(filter_layer)
|
||||
.with(fmt_layer)
|
||||
.init();
|
||||
|
||||
(filter_handle, LoggingGuard::noop())
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
LogDestination::Syslog => {
|
||||
// Use a custom fmt layer that writes to syslog
|
||||
let fmt_layer = fmt::Layer::default()
|
||||
.with_ansi(false)
|
||||
.with_target(false)
|
||||
.with_level(false)
|
||||
.without_time()
|
||||
.with_writer(SyslogMakeWriter::new());
|
||||
|
||||
tracing_subscriber::registry()
|
||||
.with(filter_layer)
|
||||
.with(fmt_layer)
|
||||
.init();
|
||||
|
||||
(filter_handle, LoggingGuard::noop())
|
||||
}
|
||||
|
||||
LogDestination::File { path, rotate_daily } => {
|
||||
let (non_blocking, guard) = if *rotate_daily {
|
||||
// Extract directory and filename prefix
|
||||
let path = Path::new(path);
|
||||
let dir = path.parent().unwrap_or(Path::new("/var/log"));
|
||||
let prefix = path
|
||||
.file_name()
|
||||
.and_then(|s| s.to_str())
|
||||
.unwrap_or("telemt");
|
||||
|
||||
let file_appender = tracing_appender::rolling::daily(dir, prefix);
|
||||
tracing_appender::non_blocking(file_appender)
|
||||
} else {
|
||||
let file = std::fs::OpenOptions::new()
|
||||
.create(true)
|
||||
.append(true)
|
||||
.open(path)
|
||||
.expect("Failed to open log file");
|
||||
tracing_appender::non_blocking(file)
|
||||
};
|
||||
|
||||
let fmt_layer = fmt::Layer::default()
|
||||
.with_ansi(false)
|
||||
.with_target(true)
|
||||
.with_writer(non_blocking);
|
||||
|
||||
tracing_subscriber::registry()
|
||||
.with(filter_layer)
|
||||
.with(fmt_layer)
|
||||
.init();
|
||||
|
||||
(filter_handle, LoggingGuard::new(Some(guard)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Syslog writer for tracing.
///
/// Factory handed to `fmt::Layer::with_writer`; produces one `SyslogWriter`
/// per event so the priority can follow the event's level.
#[cfg(unix)]
#[derive(Clone, Copy)]
struct SyslogMakeWriter;

#[cfg(unix)]
#[derive(Clone, Copy)]
struct SyslogWriter {
    // syslog(3) priority applied to every line written through this writer.
    priority: libc::c_int,
}
|
||||
|
||||
#[cfg(unix)]
impl SyslogMakeWriter {
    /// Creates the writer factory, opening the syslog connection exactly
    /// once for the lifetime of the process.
    fn new() -> Self {
        // Open syslog connection on first use
        static INIT: std::sync::Once = std::sync::Once::new();
        INIT.call_once(|| {
            // SAFETY: `ident` points to a 'static NUL-terminated byte
            // literal. openlog(3) retains the ident pointer, so it must
            // stay valid for the whole process — a 'static literal does.
            unsafe {
                // Open syslog with ident "telemt", LOG_PID, LOG_DAEMON facility
                let ident = b"telemt\0".as_ptr() as *const libc::c_char;
                libc::openlog(ident, libc::LOG_PID | libc::LOG_NDELAY, libc::LOG_DAEMON);
            }
        });
        Self
    }
}
|
||||
|
||||
#[cfg(unix)]
|
||||
fn syslog_priority_for_level(level: &tracing::Level) -> libc::c_int {
|
||||
match *level {
|
||||
tracing::Level::ERROR => libc::LOG_ERR,
|
||||
tracing::Level::WARN => libc::LOG_WARNING,
|
||||
tracing::Level::INFO => libc::LOG_INFO,
|
||||
tracing::Level::DEBUG => libc::LOG_DEBUG,
|
||||
tracing::Level::TRACE => libc::LOG_DEBUG,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
impl std::io::Write for SyslogWriter {
|
||||
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
|
||||
// Convert to C string, stripping newlines
|
||||
let msg = String::from_utf8_lossy(buf);
|
||||
let msg = msg.trim_end();
|
||||
|
||||
if msg.is_empty() {
|
||||
return Ok(buf.len());
|
||||
}
|
||||
|
||||
// Write to syslog
|
||||
let c_msg = std::ffi::CString::new(msg.as_bytes())
|
||||
.unwrap_or_else(|_| std::ffi::CString::new("(invalid utf8)").unwrap());
|
||||
|
||||
unsafe {
|
||||
libc::syslog(
|
||||
self.priority,
|
||||
b"%s\0".as_ptr() as *const libc::c_char,
|
||||
c_msg.as_ptr(),
|
||||
);
|
||||
}
|
||||
|
||||
Ok(buf.len())
|
||||
}
|
||||
|
||||
fn flush(&mut self) -> std::io::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(unix)]
impl<'a> tracing_subscriber::fmt::MakeWriter<'a> for SyslogMakeWriter {
    type Writer = SyslogWriter;

    /// Fallback writer used when no event metadata is available; logs at
    /// LOG_INFO.
    fn make_writer(&'a self) -> Self::Writer {
        SyslogWriter {
            priority: libc::LOG_INFO,
        }
    }

    /// Per-event writer carrying the syslog priority derived from the
    /// event's tracing level.
    fn make_writer_for(&'a self, meta: &tracing::Metadata<'_>) -> Self::Writer {
        SyslogWriter {
            priority: syslog_priority_for_level(meta.level()),
        }
    }
}
|
||||
|
||||
/// Parse log destination from CLI arguments.
|
||||
pub fn parse_log_destination(args: &[String]) -> LogDestination {
|
||||
let mut i = 0;
|
||||
while i < args.len() {
|
||||
match args[i].as_str() {
|
||||
#[cfg(unix)]
|
||||
"--syslog" => {
|
||||
return LogDestination::Syslog;
|
||||
}
|
||||
"--log-file" => {
|
||||
i += 1;
|
||||
if i < args.len() {
|
||||
return LogDestination::File {
|
||||
path: args[i].clone(),
|
||||
rotate_daily: false,
|
||||
};
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--log-file=") => {
|
||||
return LogDestination::File {
|
||||
path: s.trim_start_matches("--log-file=").to_string(),
|
||||
rotate_daily: false,
|
||||
};
|
||||
}
|
||||
"--log-file-daily" => {
|
||||
i += 1;
|
||||
if i < args.len() {
|
||||
return LogDestination::File {
|
||||
path: args[i].clone(),
|
||||
rotate_daily: true,
|
||||
};
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--log-file-daily=") => {
|
||||
return LogDestination::File {
|
||||
path: s.trim_start_matches("--log-file-daily=").to_string(),
|
||||
rotate_daily: true,
|
||||
};
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
LogDestination::Stderr
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // With no recognized flags the destination defaults to stderr.
    #[test]
    fn test_parse_log_destination_default() {
        let args: Vec<String> = vec![];
        assert!(matches!(
            parse_log_destination(&args),
            LogDestination::Stderr
        ));
    }

    // Space-separated `--log-file <path>` form, no rotation.
    #[test]
    fn test_parse_log_destination_file() {
        let args = vec!["--log-file".to_string(), "/var/log/telemt.log".to_string()];
        match parse_log_destination(&args) {
            LogDestination::File { path, rotate_daily } => {
                assert_eq!(path, "/var/log/telemt.log");
                assert!(!rotate_daily);
            }
            _ => panic!("Expected File destination"),
        }
    }

    // `--log-file-daily=<path>` form enables daily rotation.
    #[test]
    fn test_parse_log_destination_file_daily() {
        let args = vec!["--log-file-daily=/var/log/telemt".to_string()];
        match parse_log_destination(&args) {
            LogDestination::File { path, rotate_daily } => {
                assert_eq!(path, "/var/log/telemt");
                assert!(rotate_daily);
            }
            _ => panic!("Expected File destination"),
        }
    }

    #[cfg(unix)]
    #[test]
    fn test_parse_log_destination_syslog() {
        let args = vec!["--syslog".to_string()];
        assert!(matches!(
            parse_log_destination(&args),
            LogDestination::Syslog
        ));
    }

    // Every tracing level must map onto its syslog priority; TRACE shares
    // LOG_DEBUG because syslog has no finer level.
    #[cfg(unix)]
    #[test]
    fn test_syslog_priority_for_level_mapping() {
        assert_eq!(
            syslog_priority_for_level(&tracing::Level::ERROR),
            libc::LOG_ERR
        );
        assert_eq!(
            syslog_priority_for_level(&tracing::Level::WARN),
            libc::LOG_WARNING
        );
        assert_eq!(
            syslog_priority_for_level(&tracing::Level::INFO),
            libc::LOG_INFO
        );
        assert_eq!(
            syslog_priority_for_level(&tracing::Level::DEBUG),
            libc::LOG_DEBUG
        );
        assert_eq!(
            syslog_priority_for_level(&tracing::Level::TRACE),
            libc::LOG_DEBUG
        );
    }
}
|
||||
|
|
@ -21,10 +21,29 @@ pub(crate) async fn configure_admission_gate(
|
|||
if config.general.use_middle_proxy {
|
||||
if let Some(pool) = me_pool.as_ref() {
|
||||
let initial_ready = pool.admission_ready_conditional_cast().await;
|
||||
admission_tx.send_replace(initial_ready);
|
||||
let _ = route_runtime.set_mode(RelayRouteMode::Middle);
|
||||
let mut fallback_enabled = config.general.me2dc_fallback;
|
||||
let mut fast_fallback_enabled = fallback_enabled && config.general.me2dc_fast;
|
||||
let (initial_gate_open, initial_route_mode, initial_fallback_reason) = if initial_ready
|
||||
{
|
||||
(true, RelayRouteMode::Middle, None)
|
||||
} else if fast_fallback_enabled {
|
||||
(
|
||||
true,
|
||||
RelayRouteMode::Direct,
|
||||
Some("fast_not_ready_fallback"),
|
||||
)
|
||||
} else {
|
||||
(false, RelayRouteMode::Middle, None)
|
||||
};
|
||||
admission_tx.send_replace(initial_gate_open);
|
||||
let _ = route_runtime.set_mode(initial_route_mode);
|
||||
if initial_ready {
|
||||
info!("Conditional-admission gate: open / ME pool READY");
|
||||
} else if let Some(reason) = initial_fallback_reason {
|
||||
warn!(
|
||||
fallback_reason = reason,
|
||||
"Conditional-admission gate opened in ME fast fallback mode"
|
||||
);
|
||||
} else {
|
||||
warn!("Conditional-admission gate: closed / ME pool is NOT ready)");
|
||||
}
|
||||
|
|
@ -34,10 +53,9 @@ pub(crate) async fn configure_admission_gate(
|
|||
let route_runtime_gate = route_runtime.clone();
|
||||
let mut config_rx_gate = config_rx.clone();
|
||||
let mut admission_poll_ms = config.general.me_admission_poll_ms.max(1);
|
||||
let mut fallback_enabled = config.general.me2dc_fallback;
|
||||
tokio::spawn(async move {
|
||||
let mut gate_open = initial_ready;
|
||||
let mut route_mode = RelayRouteMode::Middle;
|
||||
let mut gate_open = initial_gate_open;
|
||||
let mut route_mode = initial_route_mode;
|
||||
let mut ready_observed = initial_ready;
|
||||
let mut not_ready_since = if initial_ready {
|
||||
None
|
||||
|
|
@ -53,16 +71,23 @@ pub(crate) async fn configure_admission_gate(
|
|||
let cfg = config_rx_gate.borrow_and_update().clone();
|
||||
admission_poll_ms = cfg.general.me_admission_poll_ms.max(1);
|
||||
fallback_enabled = cfg.general.me2dc_fallback;
|
||||
fast_fallback_enabled = cfg.general.me2dc_fallback && cfg.general.me2dc_fast;
|
||||
continue;
|
||||
}
|
||||
_ = tokio::time::sleep(Duration::from_millis(admission_poll_ms)) => {}
|
||||
}
|
||||
let ready = pool_for_gate.admission_ready_conditional_cast().await;
|
||||
let now = Instant::now();
|
||||
let (next_gate_open, next_route_mode, next_fallback_active) = if ready {
|
||||
let (next_gate_open, next_route_mode, next_fallback_reason) = if ready {
|
||||
ready_observed = true;
|
||||
not_ready_since = None;
|
||||
(true, RelayRouteMode::Middle, false)
|
||||
(true, RelayRouteMode::Middle, None)
|
||||
} else if fast_fallback_enabled {
|
||||
(
|
||||
true,
|
||||
RelayRouteMode::Direct,
|
||||
Some("fast_not_ready_fallback"),
|
||||
)
|
||||
} else {
|
||||
let not_ready_started_at = *not_ready_since.get_or_insert(now);
|
||||
let not_ready_for = now.saturating_duration_since(not_ready_started_at);
|
||||
|
|
@ -72,11 +97,12 @@ pub(crate) async fn configure_admission_gate(
|
|||
STARTUP_FALLBACK_AFTER
|
||||
};
|
||||
if fallback_enabled && not_ready_for > fallback_after {
|
||||
(true, RelayRouteMode::Direct, true)
|
||||
(true, RelayRouteMode::Direct, Some("strict_grace_fallback"))
|
||||
} else {
|
||||
(false, RelayRouteMode::Middle, false)
|
||||
(false, RelayRouteMode::Middle, None)
|
||||
}
|
||||
};
|
||||
let next_fallback_active = next_fallback_reason.is_some();
|
||||
|
||||
if next_route_mode != route_mode {
|
||||
route_mode = next_route_mode;
|
||||
|
|
@ -88,6 +114,8 @@ pub(crate) async fn configure_admission_gate(
|
|||
"Middle-End routing restored for new sessions"
|
||||
);
|
||||
} else {
|
||||
let fallback_reason = next_fallback_reason.unwrap_or("unknown");
|
||||
if fallback_reason == "strict_grace_fallback" {
|
||||
let fallback_after = if ready_observed {
|
||||
RUNTIME_FALLBACK_AFTER
|
||||
} else {
|
||||
|
|
@ -97,8 +125,17 @@ pub(crate) async fn configure_admission_gate(
|
|||
target_mode = route_mode.as_str(),
|
||||
cutover_generation = snapshot.generation,
|
||||
grace_secs = fallback_after.as_secs(),
|
||||
fallback_reason,
|
||||
"ME pool stayed not-ready beyond grace; routing new sessions via Direct-DC"
|
||||
);
|
||||
} else {
|
||||
warn!(
|
||||
target_mode = route_mode.as_str(),
|
||||
cutover_generation = snapshot.generation,
|
||||
fallback_reason,
|
||||
"ME pool not-ready; routing new sessions via Direct-DC (fast mode)"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -108,7 +145,10 @@ pub(crate) async fn configure_admission_gate(
|
|||
admission_tx_gate.send_replace(gate_open);
|
||||
if gate_open {
|
||||
if next_fallback_active {
|
||||
warn!("Conditional-admission gate opened in ME fallback mode");
|
||||
warn!(
|
||||
fallback_reason = next_fallback_reason.unwrap_or("unknown"),
|
||||
"Conditional-admission gate opened in ME fallback mode"
|
||||
);
|
||||
} else {
|
||||
info!("Conditional-admission gate opened / ME pool READY");
|
||||
}
|
||||
|
|
|
|||
|
|
@ -8,31 +8,66 @@ use tracing::{debug, error, info, warn};
|
|||
|
||||
use crate::cli;
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::logging::LogDestination;
|
||||
use crate::transport::UpstreamManager;
|
||||
use crate::transport::middle_proxy::{
|
||||
ProxyConfigData, fetch_proxy_config_with_raw, load_proxy_config_cache, save_proxy_config_cache,
|
||||
ProxyConfigData, fetch_proxy_config_with_raw_via_upstream, load_proxy_config_cache,
|
||||
save_proxy_config_cache,
|
||||
};
|
||||
|
||||
pub(crate) fn resolve_runtime_config_path(
|
||||
config_path_cli: &str,
|
||||
startup_cwd: &std::path::Path,
|
||||
config_path_explicit: bool,
|
||||
) -> PathBuf {
|
||||
if config_path_explicit {
|
||||
let raw = PathBuf::from(config_path_cli);
|
||||
let absolute = if raw.is_absolute() {
|
||||
raw
|
||||
} else {
|
||||
startup_cwd.join(raw)
|
||||
};
|
||||
absolute.canonicalize().unwrap_or(absolute)
|
||||
return absolute.canonicalize().unwrap_or(absolute);
|
||||
}
|
||||
|
||||
let etc_telemt = std::path::Path::new("/etc/telemt");
|
||||
let candidates = [
|
||||
startup_cwd.join("config.toml"),
|
||||
startup_cwd.join("telemt.toml"),
|
||||
etc_telemt.join("telemt.toml"),
|
||||
etc_telemt.join("config.toml"),
|
||||
];
|
||||
for candidate in candidates {
|
||||
if candidate.is_file() {
|
||||
return candidate.canonicalize().unwrap_or(candidate);
|
||||
}
|
||||
}
|
||||
|
||||
startup_cwd.join("config.toml")
|
||||
}
|
||||
|
||||
pub(crate) fn parse_cli() -> (String, Option<PathBuf>, bool, Option<String>) {
|
||||
/// Parsed CLI arguments.
pub(crate) struct CliArgs {
    /// Config file path (a default when not supplied on the command line).
    pub config_path: String,
    /// True when the config path came from the command line rather than
    /// the built-in default.
    pub config_path_explicit: bool,
    /// Optional data/working directory override.
    pub data_path: Option<PathBuf>,
    /// Suppress info logs (`--silent`/`-s`).
    pub silent: bool,
    /// Log level override (`--log-level`).
    pub log_level: Option<String>,
    /// Where log output goes (stderr/syslog/file).
    pub log_destination: LogDestination,
}
|
||||
|
||||
pub(crate) fn parse_cli() -> CliArgs {
|
||||
let mut config_path = "config.toml".to_string();
|
||||
let mut config_path_explicit = false;
|
||||
let mut data_path: Option<PathBuf> = None;
|
||||
let mut silent = false;
|
||||
let mut log_level: Option<String> = None;
|
||||
|
||||
let args: Vec<String> = std::env::args().skip(1).collect();
|
||||
|
||||
// Parse log destination
|
||||
let log_destination = crate::logging::parse_log_destination(&args);
|
||||
|
||||
// Check for --init first (handled before tokio)
|
||||
if let Some(init_opts) = cli::parse_init_args(&args) {
|
||||
if let Err(e) = cli::run_init(init_opts) {
|
||||
|
|
@ -59,6 +94,20 @@ pub(crate) fn parse_cli() -> (String, Option<PathBuf>, bool, Option<String>) {
|
|||
s.trim_start_matches("--data-path=").to_string(),
|
||||
));
|
||||
}
|
||||
"--working-dir" => {
|
||||
i += 1;
|
||||
if i < args.len() {
|
||||
data_path = Some(PathBuf::from(args[i].clone()));
|
||||
} else {
|
||||
eprintln!("Missing value for --working-dir");
|
||||
std::process::exit(0);
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--working-dir=") => {
|
||||
data_path = Some(PathBuf::from(
|
||||
s.trim_start_matches("--working-dir=").to_string(),
|
||||
));
|
||||
}
|
||||
"--silent" | "-s" => {
|
||||
silent = true;
|
||||
}
|
||||
|
|
@ -72,38 +121,35 @@ pub(crate) fn parse_cli() -> (String, Option<PathBuf>, bool, Option<String>) {
|
|||
log_level = Some(s.trim_start_matches("--log-level=").to_string());
|
||||
}
|
||||
"--help" | "-h" => {
|
||||
eprintln!("Usage: telemt [config.toml] [OPTIONS]");
|
||||
eprintln!();
|
||||
eprintln!("Options:");
|
||||
eprintln!(
|
||||
" --data-path <DIR> Set data directory (absolute path; overrides config value)"
|
||||
);
|
||||
eprintln!(" --silent, -s Suppress info logs");
|
||||
eprintln!(" --log-level <LEVEL> debug|verbose|normal|silent");
|
||||
eprintln!(" --help, -h Show this help");
|
||||
eprintln!();
|
||||
eprintln!("Setup (fire-and-forget):");
|
||||
eprintln!(
|
||||
" --init Generate config, install systemd service, start"
|
||||
);
|
||||
eprintln!(" --port <PORT> Listen port (default: 443)");
|
||||
eprintln!(
|
||||
" --domain <DOMAIN> TLS domain for masking (default: www.google.com)"
|
||||
);
|
||||
eprintln!(
|
||||
" --secret <HEX> 32-char hex secret (auto-generated if omitted)"
|
||||
);
|
||||
eprintln!(" --user <NAME> Username (default: user)");
|
||||
eprintln!(" --config-dir <DIR> Config directory (default: /etc/telemt)");
|
||||
eprintln!(" --no-start Don't start the service after install");
|
||||
print_help();
|
||||
std::process::exit(0);
|
||||
}
|
||||
"--version" | "-V" => {
|
||||
println!("telemt {}", env!("CARGO_PKG_VERSION"));
|
||||
std::process::exit(0);
|
||||
}
|
||||
// Skip daemon-related flags (already parsed)
|
||||
"--daemon" | "-d" | "--foreground" | "-f" => {}
|
||||
s if s.starts_with("--pid-file") => {
|
||||
if !s.contains('=') {
|
||||
i += 1; // skip value
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--run-as-user") => {
|
||||
if !s.contains('=') {
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--run-as-group") => {
|
||||
if !s.contains('=') {
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
s if !s.starts_with('-') => {
|
||||
if !matches!(s, "run" | "start" | "stop" | "reload" | "status") {
|
||||
config_path = s.to_string();
|
||||
config_path_explicit = true;
|
||||
}
|
||||
}
|
||||
other => {
|
||||
eprintln!("Unknown option: {}", other);
|
||||
|
|
@ -112,7 +158,75 @@ pub(crate) fn parse_cli() -> (String, Option<PathBuf>, bool, Option<String>) {
|
|||
i += 1;
|
||||
}
|
||||
|
||||
(config_path, data_path, silent, log_level)
|
||||
CliArgs {
|
||||
config_path,
|
||||
config_path_explicit,
|
||||
data_path,
|
||||
silent,
|
||||
log_level,
|
||||
log_destination,
|
||||
}
|
||||
}
|
||||
|
||||
/// Prints usage/help text to stderr (used by `--help`/`-h`).
///
/// Unix-only commands and daemon options are conditionally compiled so the
/// help output always matches what the binary actually supports.
fn print_help() {
    eprintln!("Usage: telemt [COMMAND] [OPTIONS] [config.toml]");
    eprintln!();
    eprintln!("Commands:");
    eprintln!(" run Run in foreground (default if no command given)");
    #[cfg(unix)]
    {
        eprintln!(" start Start as background daemon");
        eprintln!(" stop Stop a running daemon");
        eprintln!(" reload Reload configuration (send SIGHUP)");
        eprintln!(" status Check if daemon is running");
    }
    eprintln!();
    eprintln!("Options:");
    eprintln!(
        " --data-path <DIR> Set data directory (absolute path; overrides config value)"
    );
    eprintln!(" --working-dir <DIR> Alias for --data-path");
    eprintln!(" --silent, -s Suppress info logs");
    eprintln!(" --log-level <LEVEL> debug|verbose|normal|silent");
    eprintln!(" --help, -h Show this help");
    eprintln!(" --version, -V Show version");
    eprintln!();
    eprintln!("Logging options:");
    eprintln!(" --log-file <PATH> Log to file (default: stderr)");
    eprintln!(" --log-file-daily <PATH> Log to file with daily rotation");
    #[cfg(unix)]
    eprintln!(" --syslog Log to syslog (Unix only)");
    eprintln!();
    #[cfg(unix)]
    {
        eprintln!("Daemon options (Unix only):");
        eprintln!(" --daemon, -d Fork to background (daemonize)");
        eprintln!(" --foreground, -f Explicit foreground mode (for systemd)");
        eprintln!(" --pid-file <PATH> PID file path (default: /var/run/telemt.pid)");
        eprintln!(" --run-as-user <USER> Drop privileges to this user after binding");
        eprintln!(" --run-as-group <GROUP> Drop privileges to this group after binding");
        eprintln!(" --working-dir <DIR> Working directory for daemon mode");
        eprintln!();
    }
    eprintln!("Setup (fire-and-forget):");
    eprintln!(" --init Generate config, install systemd service, start");
    eprintln!(" --port <PORT> Listen port (default: 443)");
    eprintln!(" --domain <DOMAIN> TLS domain for masking (default: www.google.com)");
    eprintln!(" --secret <HEX> 32-char hex secret (auto-generated if omitted)");
    eprintln!(" --user <NAME> Username (default: user)");
    eprintln!(" --config-dir <DIR> Config directory (default: /etc/telemt)");
    eprintln!(" --no-start Don't start the service after install");
    #[cfg(unix)]
    {
        eprintln!();
        eprintln!("Examples:");
        eprintln!(" telemt config.toml Run in foreground");
        eprintln!(" telemt start config.toml Start as daemon");
        eprintln!(" telemt start --pid-file /tmp/t.pid Start with custom PID file");
        eprintln!(" telemt stop Stop daemon");
        eprintln!(" telemt reload Reload configuration");
        eprintln!(" telemt status Check daemon status");
    }
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
|
@ -130,7 +244,7 @@ mod tests {
|
|||
let target = startup_cwd.join("config.toml");
|
||||
std::fs::write(&target, " ").unwrap();
|
||||
|
||||
let resolved = resolve_runtime_config_path("config.toml", &startup_cwd);
|
||||
let resolved = resolve_runtime_config_path("config.toml", &startup_cwd, true);
|
||||
assert_eq!(resolved, target.canonicalize().unwrap());
|
||||
|
||||
let _ = std::fs::remove_file(&target);
|
||||
|
|
@ -146,11 +260,45 @@ mod tests {
|
|||
let startup_cwd = std::env::temp_dir().join(format!("telemt_cfg_path_missing_{nonce}"));
|
||||
std::fs::create_dir_all(&startup_cwd).unwrap();
|
||||
|
||||
let resolved = resolve_runtime_config_path("missing.toml", &startup_cwd);
|
||||
let resolved = resolve_runtime_config_path("missing.toml", &startup_cwd, true);
|
||||
assert_eq!(resolved, startup_cwd.join("missing.toml"));
|
||||
|
||||
let _ = std::fs::remove_dir(&startup_cwd);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resolve_runtime_config_path_uses_startup_candidates_when_not_explicit() {
|
||||
let nonce = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_nanos();
|
||||
let startup_cwd =
|
||||
std::env::temp_dir().join(format!("telemt_cfg_startup_candidates_{nonce}"));
|
||||
std::fs::create_dir_all(&startup_cwd).unwrap();
|
||||
let telemt = startup_cwd.join("telemt.toml");
|
||||
std::fs::write(&telemt, " ").unwrap();
|
||||
|
||||
let resolved = resolve_runtime_config_path("config.toml", &startup_cwd, false);
|
||||
assert_eq!(resolved, telemt.canonicalize().unwrap());
|
||||
|
||||
let _ = std::fs::remove_file(&telemt);
|
||||
let _ = std::fs::remove_dir(&startup_cwd);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resolve_runtime_config_path_defaults_to_startup_config_when_none_found() {
|
||||
let nonce = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_nanos();
|
||||
let startup_cwd = std::env::temp_dir().join(format!("telemt_cfg_startup_default_{nonce}"));
|
||||
std::fs::create_dir_all(&startup_cwd).unwrap();
|
||||
|
||||
let resolved = resolve_runtime_config_path("config.toml", &startup_cwd, false);
|
||||
assert_eq!(resolved, startup_cwd.join("config.toml"));
|
||||
|
||||
let _ = std::fs::remove_dir(&startup_cwd);
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn print_proxy_links(host: &str, port: u16, config: &ProxyConfig) {
|
||||
|
|
@ -288,9 +436,10 @@ pub(crate) async fn load_startup_proxy_config_snapshot(
|
|||
cache_path: Option<&str>,
|
||||
me2dc_fallback: bool,
|
||||
label: &'static str,
|
||||
upstream: Option<std::sync::Arc<UpstreamManager>>,
|
||||
) -> Option<ProxyConfigData> {
|
||||
loop {
|
||||
match fetch_proxy_config_with_raw(url).await {
|
||||
match fetch_proxy_config_with_raw_via_upstream(url, upstream.clone()).await {
|
||||
Ok((cfg, raw)) => {
|
||||
if !cfg.map.is_empty() {
|
||||
if let Some(path) = cache_path
|
||||
|
|
|
|||
|
|
@ -14,6 +14,7 @@ use crate::crypto::SecureRandom;
|
|||
use crate::ip_tracker::UserIpTracker;
|
||||
use crate::proxy::ClientHandler;
|
||||
use crate::proxy::route_mode::{ROUTE_SWITCH_ERROR_MSG, RouteRuntimeController};
|
||||
use crate::proxy::shared_state::ProxySharedState;
|
||||
use crate::startup::{COMPONENT_LISTENERS_BIND, StartupTracker};
|
||||
use crate::stats::beobachten::BeobachtenStore;
|
||||
use crate::stats::{ReplayChecker, Stats};
|
||||
|
|
@ -49,6 +50,7 @@ pub(crate) async fn bind_listeners(
|
|||
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
beobachten: Arc<BeobachtenStore>,
|
||||
shared: Arc<ProxySharedState>,
|
||||
max_connections: Arc<Semaphore>,
|
||||
) -> Result<BoundListeners, Box<dyn Error>> {
|
||||
startup_tracker
|
||||
|
|
@ -72,6 +74,7 @@ pub(crate) async fn bind_listeners(
|
|||
let options = ListenOptions {
|
||||
reuse_port: listener_conf.reuse_allow,
|
||||
ipv6_only: listener_conf.ip.is_ipv6(),
|
||||
backlog: config.server.listen_backlog,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
|
|
@ -223,6 +226,7 @@ pub(crate) async fn bind_listeners(
|
|||
let tls_cache = tls_cache.clone();
|
||||
let ip_tracker = ip_tracker.clone();
|
||||
let beobachten = beobachten.clone();
|
||||
let shared = shared.clone();
|
||||
let max_connections_unix = max_connections.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
|
|
@ -258,6 +262,7 @@ pub(crate) async fn bind_listeners(
|
|||
break;
|
||||
}
|
||||
Err(_) => {
|
||||
stats.increment_accept_permit_timeout_total();
|
||||
debug!(
|
||||
timeout_ms = accept_permit_timeout_ms,
|
||||
"Dropping accepted unix connection: permit wait timeout"
|
||||
|
|
@ -283,11 +288,12 @@ pub(crate) async fn bind_listeners(
|
|||
let tls_cache = tls_cache.clone();
|
||||
let ip_tracker = ip_tracker.clone();
|
||||
let beobachten = beobachten.clone();
|
||||
let shared = shared.clone();
|
||||
let proxy_protocol_enabled = config.server.proxy_protocol;
|
||||
|
||||
tokio::spawn(async move {
|
||||
let _permit = permit;
|
||||
if let Err(e) = crate::proxy::client::handle_client_stream(
|
||||
if let Err(e) = crate::proxy::client::handle_client_stream_with_shared(
|
||||
stream,
|
||||
fake_peer,
|
||||
config,
|
||||
|
|
@ -301,6 +307,7 @@ pub(crate) async fn bind_listeners(
|
|||
tls_cache,
|
||||
ip_tracker,
|
||||
beobachten,
|
||||
shared,
|
||||
proxy_protocol_enabled,
|
||||
)
|
||||
.await
|
||||
|
|
@ -350,6 +357,7 @@ pub(crate) fn spawn_tcp_accept_loops(
|
|||
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
beobachten: Arc<BeobachtenStore>,
|
||||
shared: Arc<ProxySharedState>,
|
||||
max_connections: Arc<Semaphore>,
|
||||
) {
|
||||
for (listener, listener_proxy_protocol) in listeners {
|
||||
|
|
@ -365,6 +373,7 @@ pub(crate) fn spawn_tcp_accept_loops(
|
|||
let tls_cache = tls_cache.clone();
|
||||
let ip_tracker = ip_tracker.clone();
|
||||
let beobachten = beobachten.clone();
|
||||
let shared = shared.clone();
|
||||
let max_connections_tcp = max_connections.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
|
|
@ -399,6 +408,7 @@ pub(crate) fn spawn_tcp_accept_loops(
|
|||
break;
|
||||
}
|
||||
Err(_) => {
|
||||
stats.increment_accept_permit_timeout_total();
|
||||
debug!(
|
||||
peer = %peer_addr,
|
||||
timeout_ms = accept_permit_timeout_ms,
|
||||
|
|
@ -420,13 +430,14 @@ pub(crate) fn spawn_tcp_accept_loops(
|
|||
let tls_cache = tls_cache.clone();
|
||||
let ip_tracker = ip_tracker.clone();
|
||||
let beobachten = beobachten.clone();
|
||||
let shared = shared.clone();
|
||||
let proxy_protocol_enabled = listener_proxy_protocol;
|
||||
let real_peer_report = Arc::new(std::sync::Mutex::new(None));
|
||||
let real_peer_report_for_handler = real_peer_report.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
let _permit = permit;
|
||||
if let Err(e) = ClientHandler::new(
|
||||
if let Err(e) = ClientHandler::new_with_shared(
|
||||
stream,
|
||||
peer_addr,
|
||||
config,
|
||||
|
|
@ -440,6 +451,7 @@ pub(crate) fn spawn_tcp_accept_loops(
|
|||
tls_cache,
|
||||
ip_tracker,
|
||||
beobachten,
|
||||
shared,
|
||||
proxy_protocol_enabled,
|
||||
real_peer_report_for_handler,
|
||||
)
|
||||
|
|
|
|||
|
|
@ -63,9 +63,10 @@ pub(crate) async fn initialize_me_pool(
|
|||
let proxy_secret_path = config.general.proxy_secret_path.as_deref();
|
||||
let pool_size = config.general.middle_proxy_pool_size.max(1);
|
||||
let proxy_secret = loop {
|
||||
match crate::transport::middle_proxy::fetch_proxy_secret(
|
||||
match crate::transport::middle_proxy::fetch_proxy_secret_with_upstream(
|
||||
proxy_secret_path,
|
||||
config.general.proxy_secret_len_max,
|
||||
Some(upstream_manager.clone()),
|
||||
)
|
||||
.await
|
||||
{
|
||||
|
|
@ -129,6 +130,7 @@ pub(crate) async fn initialize_me_pool(
|
|||
config.general.proxy_config_v4_cache_path.as_deref(),
|
||||
me2dc_fallback,
|
||||
"getProxyConfig",
|
||||
Some(upstream_manager.clone()),
|
||||
)
|
||||
.await;
|
||||
if cfg_v4.is_some() {
|
||||
|
|
@ -160,6 +162,7 @@ pub(crate) async fn initialize_me_pool(
|
|||
config.general.proxy_config_v6_cache_path.as_deref(),
|
||||
me2dc_fallback,
|
||||
"getProxyConfigV6",
|
||||
Some(upstream_manager.clone()),
|
||||
)
|
||||
.await;
|
||||
if cfg_v6.is_some() {
|
||||
|
|
@ -274,6 +277,8 @@ pub(crate) async fn initialize_me_pool(
|
|||
config.general.me_warn_rate_limit_ms,
|
||||
config.general.me_route_no_writer_mode,
|
||||
config.general.me_route_no_writer_wait_ms,
|
||||
config.general.me_route_hybrid_max_wait_ms,
|
||||
config.general.me_route_blocking_send_timeout_ms,
|
||||
config.general.me_route_inline_recovery_attempts,
|
||||
config.general.me_route_inline_recovery_wait_ms,
|
||||
);
|
||||
|
|
|
|||
|
|
@ -29,10 +29,12 @@ use tracing_subscriber::{EnvFilter, fmt, prelude::*, reload};
|
|||
|
||||
use crate::api;
|
||||
use crate::config::{LogLevel, ProxyConfig};
|
||||
use crate::conntrack_control;
|
||||
use crate::crypto::SecureRandom;
|
||||
use crate::ip_tracker::UserIpTracker;
|
||||
use crate::network::probe::{decide_network_capabilities, log_probe_result, run_probe};
|
||||
use crate::proxy::route_mode::{RelayRouteMode, RouteRuntimeController};
|
||||
use crate::proxy::shared_state::ProxySharedState;
|
||||
use crate::startup::{
|
||||
COMPONENT_API_BOOTSTRAP, COMPONENT_CONFIG_LOAD, COMPONENT_ME_POOL_CONSTRUCT,
|
||||
COMPONENT_ME_POOL_INIT_STAGE1, COMPONENT_ME_PROXY_CONFIG_V4, COMPONENT_ME_PROXY_CONFIG_V6,
|
||||
|
|
@ -47,8 +49,55 @@ use crate::transport::UpstreamManager;
|
|||
use crate::transport::middle_proxy::MePool;
|
||||
use helpers::{parse_cli, resolve_runtime_config_path};
|
||||
|
||||
#[cfg(unix)]
|
||||
use crate::daemon::{DaemonOptions, PidFile, drop_privileges};
|
||||
|
||||
/// Runs the full telemt runtime startup pipeline and blocks until shutdown.
|
||||
///
|
||||
/// On Unix, daemon options should be handled before calling this function
|
||||
/// (daemonization must happen before tokio runtime starts).
|
||||
#[cfg(unix)]
|
||||
pub async fn run_with_daemon(
|
||||
daemon_opts: DaemonOptions,
|
||||
) -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
run_inner(daemon_opts).await
|
||||
}
|
||||
|
||||
/// Runs the full telemt runtime startup pipeline and blocks until shutdown.
|
||||
///
|
||||
/// This is the main entry point for non-daemon mode or when called as a library.
|
||||
#[allow(dead_code)]
|
||||
pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
#[cfg(unix)]
|
||||
{
|
||||
// Parse CLI to get daemon options even in simple run() path
|
||||
let args: Vec<String> = std::env::args().skip(1).collect();
|
||||
let daemon_opts = crate::cli::parse_daemon_args(&args);
|
||||
run_inner(daemon_opts).await
|
||||
}
|
||||
#[cfg(not(unix))]
|
||||
{
|
||||
run_inner().await
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
async fn run_inner(
|
||||
daemon_opts: DaemonOptions,
|
||||
) -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
// Acquire PID file if daemonizing or if explicitly requested
|
||||
// Keep it alive until shutdown (underscore prefix = intentionally kept for RAII cleanup)
|
||||
let _pid_file = if daemon_opts.daemonize || daemon_opts.pid_file.is_some() {
|
||||
let mut pf = PidFile::new(daemon_opts.pid_file_path());
|
||||
if let Err(e) = pf.acquire() {
|
||||
eprintln!("[telemt] {}", e);
|
||||
std::process::exit(1);
|
||||
}
|
||||
Some(pf)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let process_started_at = Instant::now();
|
||||
let process_started_at_epoch_secs = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
|
|
@ -61,7 +110,13 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
Some("load and validate config".to_string()),
|
||||
)
|
||||
.await;
|
||||
let (config_path_cli, data_path, cli_silent, cli_log_level) = parse_cli();
|
||||
let cli_args = parse_cli();
|
||||
let config_path_cli = cli_args.config_path;
|
||||
let config_path_explicit = cli_args.config_path_explicit;
|
||||
let data_path = cli_args.data_path;
|
||||
let cli_silent = cli_args.silent;
|
||||
let cli_log_level = cli_args.log_level;
|
||||
let log_destination = cli_args.log_destination;
|
||||
let startup_cwd = match std::env::current_dir() {
|
||||
Ok(cwd) => cwd,
|
||||
Err(e) => {
|
||||
|
|
@ -69,7 +124,8 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
std::process::exit(1);
|
||||
}
|
||||
};
|
||||
let config_path = resolve_runtime_config_path(&config_path_cli, &startup_cwd);
|
||||
let mut config_path =
|
||||
resolve_runtime_config_path(&config_path_cli, &startup_cwd, config_path_explicit);
|
||||
|
||||
let mut config = match ProxyConfig::load(&config_path) {
|
||||
Ok(c) => c,
|
||||
|
|
@ -79,11 +135,99 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
std::process::exit(1);
|
||||
} else {
|
||||
let default = ProxyConfig::default();
|
||||
std::fs::write(&config_path, toml::to_string_pretty(&default).unwrap()).unwrap();
|
||||
|
||||
let serialized =
|
||||
match toml::to_string_pretty(&default).or_else(|_| toml::to_string(&default)) {
|
||||
Ok(value) => Some(value),
|
||||
Err(serialize_error) => {
|
||||
eprintln!(
|
||||
"[telemt] Warning: failed to serialize default config: {}",
|
||||
serialize_error
|
||||
);
|
||||
None
|
||||
}
|
||||
};
|
||||
|
||||
if config_path_explicit {
|
||||
if let Some(serialized) = serialized.as_ref() {
|
||||
if let Err(write_error) = std::fs::write(&config_path, serialized) {
|
||||
eprintln!(
|
||||
"[telemt] Error: failed to create explicit config at {}: {}",
|
||||
config_path.display(),
|
||||
write_error
|
||||
);
|
||||
std::process::exit(1);
|
||||
}
|
||||
eprintln!(
|
||||
"[telemt] Created default config at {}",
|
||||
config_path.display()
|
||||
);
|
||||
} else {
|
||||
eprintln!(
|
||||
"[telemt] Warning: running with in-memory default config without writing to disk"
|
||||
);
|
||||
}
|
||||
} else {
|
||||
let system_dir = std::path::Path::new("/etc/telemt");
|
||||
let system_config_path = system_dir.join("telemt.toml");
|
||||
let startup_config_path = startup_cwd.join("config.toml");
|
||||
let mut persisted = false;
|
||||
|
||||
if let Some(serialized) = serialized.as_ref() {
|
||||
match std::fs::create_dir_all(system_dir) {
|
||||
Ok(()) => match std::fs::write(&system_config_path, serialized) {
|
||||
Ok(()) => {
|
||||
config_path = system_config_path;
|
||||
eprintln!(
|
||||
"[telemt] Created default config at {}",
|
||||
config_path.display()
|
||||
);
|
||||
persisted = true;
|
||||
}
|
||||
Err(write_error) => {
|
||||
eprintln!(
|
||||
"[telemt] Warning: failed to write default config at {}: {}",
|
||||
system_config_path.display(),
|
||||
write_error
|
||||
);
|
||||
}
|
||||
},
|
||||
Err(create_error) => {
|
||||
eprintln!(
|
||||
"[telemt] Warning: failed to create {}: {}",
|
||||
system_dir.display(),
|
||||
create_error
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if !persisted {
|
||||
match std::fs::write(&startup_config_path, serialized) {
|
||||
Ok(()) => {
|
||||
config_path = startup_config_path;
|
||||
eprintln!(
|
||||
"[telemt] Created default config at {}",
|
||||
config_path.display()
|
||||
);
|
||||
persisted = true;
|
||||
}
|
||||
Err(write_error) => {
|
||||
eprintln!(
|
||||
"[telemt] Warning: failed to write default config at {}: {}",
|
||||
startup_config_path.display(),
|
||||
write_error
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !persisted {
|
||||
eprintln!(
|
||||
"[telemt] Warning: running with in-memory default config without writing to disk"
|
||||
);
|
||||
}
|
||||
}
|
||||
default
|
||||
}
|
||||
}
|
||||
|
|
@ -115,8 +259,7 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
);
|
||||
std::process::exit(1);
|
||||
}
|
||||
} else {
|
||||
if let Err(e) = std::fs::create_dir_all(data_path) {
|
||||
} else if let Err(e) = std::fs::create_dir_all(data_path) {
|
||||
eprintln!(
|
||||
"[telemt] Can't create data_path {}: {}",
|
||||
data_path.display(),
|
||||
|
|
@ -124,7 +267,6 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
);
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
if let Err(e) = std::env::set_current_dir(data_path) {
|
||||
eprintln!(
|
||||
|
|
@ -161,17 +303,43 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
)
|
||||
.await;
|
||||
|
||||
// Configure color output based on config
|
||||
// Initialize logging based on destination
|
||||
let _logging_guard: Option<crate::logging::LoggingGuard>;
|
||||
match log_destination {
|
||||
crate::logging::LogDestination::Stderr => {
|
||||
// Default: log to stderr (works with systemd journald)
|
||||
let fmt_layer = if config.general.disable_colors {
|
||||
fmt::Layer::default().with_ansi(false)
|
||||
} else {
|
||||
fmt::Layer::default().with_ansi(true)
|
||||
};
|
||||
|
||||
tracing_subscriber::registry()
|
||||
.with(filter_layer)
|
||||
.with(fmt_layer)
|
||||
.init();
|
||||
_logging_guard = None;
|
||||
}
|
||||
#[cfg(unix)]
|
||||
crate::logging::LogDestination::Syslog => {
|
||||
// Syslog: for OpenRC/FreeBSD
|
||||
let logging_opts = crate::logging::LoggingOptions {
|
||||
destination: log_destination,
|
||||
disable_colors: true,
|
||||
};
|
||||
let (_, guard) = crate::logging::init_logging(&logging_opts, "info");
|
||||
_logging_guard = Some(guard);
|
||||
}
|
||||
crate::logging::LogDestination::File { .. } => {
|
||||
// File logging with optional rotation
|
||||
let logging_opts = crate::logging::LoggingOptions {
|
||||
destination: log_destination,
|
||||
disable_colors: true,
|
||||
};
|
||||
let (_, guard) = crate::logging::init_logging(&logging_opts, "info");
|
||||
_logging_guard = Some(guard);
|
||||
}
|
||||
}
|
||||
|
||||
startup_tracker
|
||||
.complete_component(
|
||||
COMPONENT_TRACING_INIT,
|
||||
|
|
@ -225,6 +393,7 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
config.general.upstream_connect_retry_attempts,
|
||||
config.general.upstream_connect_retry_backoff_ms,
|
||||
config.general.upstream_connect_budget_ms,
|
||||
config.general.tg_connect,
|
||||
config.general.upstream_unhealthy_fail_threshold,
|
||||
config.general.upstream_connect_failfast_hard_errors,
|
||||
stats.clone(),
|
||||
|
|
@ -554,6 +723,12 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
)
|
||||
.await;
|
||||
let _admission_tx_hold = admission_tx;
|
||||
let shared_state = ProxySharedState::new();
|
||||
conntrack_control::spawn_conntrack_controller(
|
||||
config_rx.clone(),
|
||||
stats.clone(),
|
||||
shared_state.clone(),
|
||||
);
|
||||
|
||||
let bound = listeners::bind_listeners(
|
||||
&config,
|
||||
|
|
@ -574,6 +749,7 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
tls_cache.clone(),
|
||||
ip_tracker.clone(),
|
||||
beobachten.clone(),
|
||||
shared_state.clone(),
|
||||
max_connections.clone(),
|
||||
)
|
||||
.await?;
|
||||
|
|
@ -585,6 +761,18 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
std::process::exit(1);
|
||||
}
|
||||
|
||||
// Drop privileges after binding sockets (which may require root for port < 1024)
|
||||
if daemon_opts.user.is_some() || daemon_opts.group.is_some() {
|
||||
if let Err(e) = drop_privileges(
|
||||
daemon_opts.user.as_deref(),
|
||||
daemon_opts.group.as_deref(),
|
||||
_pid_file.as_ref(),
|
||||
) {
|
||||
error!(error = %e, "Failed to drop privileges");
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
runtime_tasks::apply_runtime_log_filter(
|
||||
has_rust_log,
|
||||
&effective_log_level,
|
||||
|
|
@ -598,6 +786,7 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
&startup_tracker,
|
||||
stats.clone(),
|
||||
beobachten.clone(),
|
||||
shared_state.clone(),
|
||||
ip_tracker.clone(),
|
||||
config_rx.clone(),
|
||||
)
|
||||
|
|
@ -605,6 +794,9 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
|
||||
runtime_tasks::mark_runtime_ready(&startup_tracker).await;
|
||||
|
||||
// Spawn signal handlers for SIGUSR1/SIGUSR2 (non-shutdown signals)
|
||||
shutdown::spawn_signal_handlers(stats.clone(), process_started_at);
|
||||
|
||||
listeners::spawn_tcp_accept_loops(
|
||||
listeners,
|
||||
config_rx.clone(),
|
||||
|
|
@ -619,10 +811,11 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
tls_cache.clone(),
|
||||
ip_tracker.clone(),
|
||||
beobachten.clone(),
|
||||
shared_state,
|
||||
max_connections.clone(),
|
||||
);
|
||||
|
||||
shutdown::wait_for_shutdown(process_started_at, me_pool).await;
|
||||
shutdown::wait_for_shutdown(process_started_at, me_pool, stats).await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
|
|
|||
|
|
@ -13,6 +13,7 @@ use crate::crypto::SecureRandom;
|
|||
use crate::ip_tracker::UserIpTracker;
|
||||
use crate::metrics;
|
||||
use crate::network::probe::NetworkProbe;
|
||||
use crate::proxy::shared_state::ProxySharedState;
|
||||
use crate::startup::{
|
||||
COMPONENT_CONFIG_WATCHER_START, COMPONENT_METRICS_START, COMPONENT_RUNTIME_READY,
|
||||
StartupTracker,
|
||||
|
|
@ -287,6 +288,7 @@ pub(crate) async fn spawn_metrics_if_configured(
|
|||
startup_tracker: &Arc<StartupTracker>,
|
||||
stats: Arc<Stats>,
|
||||
beobachten: Arc<BeobachtenStore>,
|
||||
shared_state: Arc<ProxySharedState>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
config_rx: watch::Receiver<Arc<ProxyConfig>>,
|
||||
) {
|
||||
|
|
@ -320,15 +322,19 @@ pub(crate) async fn spawn_metrics_if_configured(
|
|||
.await;
|
||||
let stats = stats.clone();
|
||||
let beobachten = beobachten.clone();
|
||||
let shared_state = shared_state.clone();
|
||||
let config_rx_metrics = config_rx.clone();
|
||||
let ip_tracker_metrics = ip_tracker.clone();
|
||||
let whitelist = config.server.metrics_whitelist.clone();
|
||||
let listen_backlog = config.server.listen_backlog;
|
||||
tokio::spawn(async move {
|
||||
metrics::serve(
|
||||
port,
|
||||
listen,
|
||||
listen_backlog,
|
||||
stats,
|
||||
beobachten,
|
||||
shared_state,
|
||||
ip_tracker_metrics,
|
||||
config_rx_metrics,
|
||||
whitelist,
|
||||
|
|
|
|||
|
|
@ -1,25 +1,100 @@
|
|||
//! Shutdown and signal handling for telemt.
|
||||
//!
|
||||
//! Handles graceful shutdown on various signals:
|
||||
//! - SIGINT (Ctrl+C) / SIGTERM: Graceful shutdown
|
||||
//! - SIGQUIT: Graceful shutdown with stats dump
|
||||
//! - SIGUSR1: Reserved for log rotation (logs acknowledgment)
|
||||
//! - SIGUSR2: Dump runtime status to log
|
||||
//!
|
||||
//! SIGHUP is handled separately in config/hot_reload.rs for config reload.
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
#[cfg(not(unix))]
|
||||
use tokio::signal;
|
||||
use tracing::{error, info, warn};
|
||||
#[cfg(unix)]
|
||||
use tokio::signal::unix::{SignalKind, signal};
|
||||
use tracing::{info, warn};
|
||||
|
||||
use crate::stats::Stats;
|
||||
use crate::transport::middle_proxy::MePool;
|
||||
|
||||
use super::helpers::{format_uptime, unit_label};
|
||||
|
||||
pub(crate) async fn wait_for_shutdown(process_started_at: Instant, me_pool: Option<Arc<MePool>>) {
|
||||
match signal::ctrl_c().await {
|
||||
Ok(()) => {
|
||||
/// Signal that triggered shutdown.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum ShutdownSignal {
|
||||
/// SIGINT (Ctrl+C)
|
||||
Interrupt,
|
||||
/// SIGTERM
|
||||
Terminate,
|
||||
/// SIGQUIT (with stats dump)
|
||||
Quit,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for ShutdownSignal {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
ShutdownSignal::Interrupt => write!(f, "SIGINT"),
|
||||
ShutdownSignal::Terminate => write!(f, "SIGTERM"),
|
||||
ShutdownSignal::Quit => write!(f, "SIGQUIT"),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Waits for a shutdown signal and performs graceful shutdown.
|
||||
pub(crate) async fn wait_for_shutdown(
|
||||
process_started_at: Instant,
|
||||
me_pool: Option<Arc<MePool>>,
|
||||
stats: Arc<Stats>,
|
||||
) {
|
||||
let signal = wait_for_shutdown_signal().await;
|
||||
perform_shutdown(signal, process_started_at, me_pool, &stats).await;
|
||||
}
|
||||
|
||||
/// Waits for any shutdown signal (SIGINT, SIGTERM, SIGQUIT).
|
||||
#[cfg(unix)]
|
||||
async fn wait_for_shutdown_signal() -> ShutdownSignal {
|
||||
let mut sigint = signal(SignalKind::interrupt()).expect("Failed to register SIGINT handler");
|
||||
let mut sigterm = signal(SignalKind::terminate()).expect("Failed to register SIGTERM handler");
|
||||
let mut sigquit = signal(SignalKind::quit()).expect("Failed to register SIGQUIT handler");
|
||||
|
||||
tokio::select! {
|
||||
_ = sigint.recv() => ShutdownSignal::Interrupt,
|
||||
_ = sigterm.recv() => ShutdownSignal::Terminate,
|
||||
_ = sigquit.recv() => ShutdownSignal::Quit,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
async fn wait_for_shutdown_signal() -> ShutdownSignal {
|
||||
signal::ctrl_c().await.expect("Failed to listen for Ctrl+C");
|
||||
ShutdownSignal::Interrupt
|
||||
}
|
||||
|
||||
/// Performs graceful shutdown sequence.
|
||||
async fn perform_shutdown(
|
||||
signal: ShutdownSignal,
|
||||
process_started_at: Instant,
|
||||
me_pool: Option<Arc<MePool>>,
|
||||
stats: &Stats,
|
||||
) {
|
||||
let shutdown_started_at = Instant::now();
|
||||
info!(signal = %signal, "Received shutdown signal");
|
||||
|
||||
// Dump stats if SIGQUIT
|
||||
if signal == ShutdownSignal::Quit {
|
||||
dump_stats(stats, process_started_at);
|
||||
}
|
||||
|
||||
info!("Shutting down...");
|
||||
let uptime_secs = process_started_at.elapsed().as_secs();
|
||||
info!("Uptime: {}", format_uptime(uptime_secs));
|
||||
|
||||
// Graceful ME pool shutdown
|
||||
if let Some(pool) = &me_pool {
|
||||
match tokio::time::timeout(
|
||||
Duration::from_secs(2),
|
||||
pool.shutdown_send_close_conn_all(),
|
||||
)
|
||||
match tokio::time::timeout(Duration::from_secs(2), pool.shutdown_send_close_conn_all())
|
||||
.await
|
||||
{
|
||||
Ok(total) => {
|
||||
|
|
@ -33,13 +108,99 @@ pub(crate) async fn wait_for_shutdown(process_started_at: Instant, me_pool: Opti
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
let shutdown_secs = shutdown_started_at.elapsed().as_secs();
|
||||
info!(
|
||||
"Shutdown completed successfully in {} {}.",
|
||||
shutdown_secs,
|
||||
unit_label(shutdown_secs, "second", "seconds")
|
||||
);
|
||||
}
|
||||
Err(e) => error!("Signal error: {}", e),
|
||||
}
|
||||
}
|
||||
|
||||
/// Dumps runtime statistics to the log.
|
||||
fn dump_stats(stats: &Stats, process_started_at: Instant) {
|
||||
let uptime_secs = process_started_at.elapsed().as_secs();
|
||||
|
||||
info!("=== Runtime Statistics Dump ===");
|
||||
info!("Uptime: {}", format_uptime(uptime_secs));
|
||||
|
||||
// Connection stats
|
||||
info!(
|
||||
"Connections: total={}, current={} (direct={}, me={}), bad={}",
|
||||
stats.get_connects_all(),
|
||||
stats.get_current_connections_total(),
|
||||
stats.get_current_connections_direct(),
|
||||
stats.get_current_connections_me(),
|
||||
stats.get_connects_bad(),
|
||||
);
|
||||
|
||||
// ME pool stats
|
||||
info!(
|
||||
"ME keepalive: sent={}, pong={}, failed={}, timeout={}",
|
||||
stats.get_me_keepalive_sent(),
|
||||
stats.get_me_keepalive_pong(),
|
||||
stats.get_me_keepalive_failed(),
|
||||
stats.get_me_keepalive_timeout(),
|
||||
);
|
||||
|
||||
// Relay stats
|
||||
info!(
|
||||
"Relay idle: soft_mark={}, hard_close={}, pressure_evict={}",
|
||||
stats.get_relay_idle_soft_mark_total(),
|
||||
stats.get_relay_idle_hard_close_total(),
|
||||
stats.get_relay_pressure_evict_total(),
|
||||
);
|
||||
|
||||
info!("=== End Statistics Dump ===");
|
||||
}
|
||||
|
||||
/// Spawns a background task to handle operational signals (SIGUSR1, SIGUSR2).
|
||||
///
|
||||
/// These signals don't trigger shutdown but perform specific actions:
|
||||
/// - SIGUSR1: Log rotation acknowledgment (for external log rotation tools)
|
||||
/// - SIGUSR2: Dump runtime status to log
|
||||
#[cfg(unix)]
|
||||
pub(crate) fn spawn_signal_handlers(stats: Arc<Stats>, process_started_at: Instant) {
|
||||
tokio::spawn(async move {
|
||||
let mut sigusr1 =
|
||||
signal(SignalKind::user_defined1()).expect("Failed to register SIGUSR1 handler");
|
||||
let mut sigusr2 =
|
||||
signal(SignalKind::user_defined2()).expect("Failed to register SIGUSR2 handler");
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = sigusr1.recv() => {
|
||||
handle_sigusr1();
|
||||
}
|
||||
_ = sigusr2.recv() => {
|
||||
handle_sigusr2(&stats, process_started_at);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/// No-op on non-Unix platforms.
|
||||
#[cfg(not(unix))]
|
||||
pub(crate) fn spawn_signal_handlers(_stats: Arc<Stats>, _process_started_at: Instant) {
|
||||
// No SIGUSR1/SIGUSR2 on non-Unix
|
||||
}
|
||||
|
||||
/// Handles SIGUSR1 - log rotation signal.
|
||||
///
|
||||
/// This signal is typically sent by logrotate or similar tools after
|
||||
/// rotating log files. Since tracing-subscriber doesn't natively support
|
||||
/// reopening files, we just acknowledge the signal. If file logging is
|
||||
/// added in the future, this would reopen log file handles.
|
||||
#[cfg(unix)]
|
||||
fn handle_sigusr1() {
|
||||
info!("SIGUSR1 received - log rotation acknowledged");
|
||||
// Future: If using file-based logging, reopen file handles here
|
||||
}
|
||||
|
||||
/// Handles SIGUSR2 - dump runtime status.
|
||||
#[cfg(unix)]
|
||||
fn handle_sigusr2(stats: &Stats, process_started_at: Instant) {
|
||||
info!("SIGUSR2 received - dumping runtime status");
|
||||
dump_stats(stats, process_started_at);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ use tracing::warn;
|
|||
use crate::config::ProxyConfig;
|
||||
use crate::startup::{COMPONENT_TLS_FRONT_BOOTSTRAP, StartupTracker};
|
||||
use crate::tls_front::TlsFrontCache;
|
||||
use crate::tls_front::fetcher::TlsFetchStrategy;
|
||||
use crate::transport::UpstreamManager;
|
||||
|
||||
pub(crate) async fn bootstrap_tls_front(
|
||||
|
|
@ -40,7 +41,17 @@ pub(crate) async fn bootstrap_tls_front(
|
|||
let mask_unix_sock = config.censorship.mask_unix_sock.clone();
|
||||
let tls_fetch_scope = (!config.censorship.tls_fetch_scope.is_empty())
|
||||
.then(|| config.censorship.tls_fetch_scope.clone());
|
||||
let fetch_timeout = Duration::from_secs(5);
|
||||
let tls_fetch = config.censorship.tls_fetch.clone();
|
||||
let fetch_strategy = TlsFetchStrategy {
|
||||
profiles: tls_fetch.profiles,
|
||||
strict_route: tls_fetch.strict_route,
|
||||
attempt_timeout: Duration::from_millis(tls_fetch.attempt_timeout_ms.max(1)),
|
||||
total_budget: Duration::from_millis(tls_fetch.total_budget_ms.max(1)),
|
||||
grease_enabled: tls_fetch.grease_enabled,
|
||||
deterministic: tls_fetch.deterministic,
|
||||
profile_cache_ttl: Duration::from_secs(tls_fetch.profile_cache_ttl_secs),
|
||||
};
|
||||
let fetch_timeout = fetch_strategy.total_budget;
|
||||
|
||||
let cache_initial = cache.clone();
|
||||
let domains_initial = tls_domains.to_vec();
|
||||
|
|
@ -48,6 +59,7 @@ pub(crate) async fn bootstrap_tls_front(
|
|||
let unix_sock_initial = mask_unix_sock.clone();
|
||||
let scope_initial = tls_fetch_scope.clone();
|
||||
let upstream_initial = upstream_manager.clone();
|
||||
let strategy_initial = fetch_strategy.clone();
|
||||
tokio::spawn(async move {
|
||||
let mut join = tokio::task::JoinSet::new();
|
||||
for domain in domains_initial {
|
||||
|
|
@ -56,12 +68,13 @@ pub(crate) async fn bootstrap_tls_front(
|
|||
let unix_sock_domain = unix_sock_initial.clone();
|
||||
let scope_domain = scope_initial.clone();
|
||||
let upstream_domain = upstream_initial.clone();
|
||||
let strategy_domain = strategy_initial.clone();
|
||||
join.spawn(async move {
|
||||
match crate::tls_front::fetcher::fetch_real_tls(
|
||||
match crate::tls_front::fetcher::fetch_real_tls_with_strategy(
|
||||
&host_domain,
|
||||
port,
|
||||
&domain,
|
||||
fetch_timeout,
|
||||
&strategy_domain,
|
||||
Some(upstream_domain),
|
||||
scope_domain.as_deref(),
|
||||
proxy_protocol,
|
||||
|
|
@ -107,6 +120,7 @@ pub(crate) async fn bootstrap_tls_front(
|
|||
let unix_sock_refresh = mask_unix_sock.clone();
|
||||
let scope_refresh = tls_fetch_scope.clone();
|
||||
let upstream_refresh = upstream_manager.clone();
|
||||
let strategy_refresh = fetch_strategy.clone();
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
let base_secs = rand::rng().random_range(4 * 3600..=6 * 3600);
|
||||
|
|
@ -120,12 +134,13 @@ pub(crate) async fn bootstrap_tls_front(
|
|||
let unix_sock_domain = unix_sock_refresh.clone();
|
||||
let scope_domain = scope_refresh.clone();
|
||||
let upstream_domain = upstream_refresh.clone();
|
||||
let strategy_domain = strategy_refresh.clone();
|
||||
join.spawn(async move {
|
||||
match crate::tls_front::fetcher::fetch_real_tls(
|
||||
match crate::tls_front::fetcher::fetch_real_tls_with_strategy(
|
||||
&host_domain,
|
||||
port,
|
||||
&domain,
|
||||
fetch_timeout,
|
||||
&strategy_domain,
|
||||
Some(upstream_domain),
|
||||
scope_domain.as_deref(),
|
||||
proxy_protocol,
|
||||
|
|
|
|||
59
src/main.rs
59
src/main.rs
|
|
@ -3,23 +3,28 @@
|
|||
mod api;
|
||||
mod cli;
|
||||
mod config;
|
||||
mod conntrack_control;
|
||||
mod crypto;
|
||||
#[cfg(unix)]
|
||||
mod daemon;
|
||||
mod error;
|
||||
mod ip_tracker;
|
||||
#[cfg(test)]
|
||||
#[path = "tests/ip_tracker_hotpath_adversarial_tests.rs"]
|
||||
mod ip_tracker_hotpath_adversarial_tests;
|
||||
#[cfg(test)]
|
||||
#[path = "tests/ip_tracker_encapsulation_adversarial_tests.rs"]
|
||||
mod ip_tracker_encapsulation_adversarial_tests;
|
||||
#[cfg(test)]
|
||||
#[path = "tests/ip_tracker_hotpath_adversarial_tests.rs"]
|
||||
mod ip_tracker_hotpath_adversarial_tests;
|
||||
#[cfg(test)]
|
||||
#[path = "tests/ip_tracker_regression_tests.rs"]
|
||||
mod ip_tracker_regression_tests;
|
||||
mod logging;
|
||||
mod maestro;
|
||||
mod metrics;
|
||||
mod network;
|
||||
mod protocol;
|
||||
mod proxy;
|
||||
mod service;
|
||||
mod startup;
|
||||
mod stats;
|
||||
mod stream;
|
||||
|
|
@ -27,7 +32,49 @@ mod tls_front;
|
|||
mod transport;
|
||||
mod util;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
maestro::run().await
|
||||
fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
// Install rustls crypto provider early
|
||||
let _ = rustls::crypto::ring::default_provider().install_default();
|
||||
|
||||
let args: Vec<String> = std::env::args().skip(1).collect();
|
||||
let cmd = cli::parse_command(&args);
|
||||
|
||||
// Handle subcommands that don't need the server (stop, reload, status, init)
|
||||
if let Some(exit_code) = cli::execute_subcommand(&cmd) {
|
||||
std::process::exit(exit_code);
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
{
|
||||
let daemon_opts = cmd.daemon_opts;
|
||||
|
||||
// Daemonize BEFORE runtime
|
||||
if daemon_opts.should_daemonize() {
|
||||
match daemon::daemonize(daemon_opts.working_dir.as_deref()) {
|
||||
Ok(daemon::DaemonizeResult::Parent) => {
|
||||
std::process::exit(0);
|
||||
}
|
||||
Ok(daemon::DaemonizeResult::Child) => {
|
||||
// continue
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("[telemt] Daemonization failed: {}", e);
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tokio::runtime::Builder::new_multi_thread()
|
||||
.enable_all()
|
||||
.build()?
|
||||
.block_on(maestro::run_with_daemon(daemon_opts))
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
{
|
||||
tokio::runtime::Builder::new_multi_thread()
|
||||
.enable_all()
|
||||
.build()?
|
||||
.block_on(maestro::run())
|
||||
}
|
||||
}
|
||||
|
|
|
|||
949
src/metrics.rs
949
src/metrics.rs
File diff suppressed because it is too large
Load Diff
|
|
@ -24,6 +24,8 @@ const DIRECT_S2C_CAP_BYTES: usize = 512 * 1024;
|
|||
const ME_FRAMES_CAP: usize = 96;
|
||||
const ME_BYTES_CAP: usize = 384 * 1024;
|
||||
const ME_DELAY_MIN_US: u64 = 150;
|
||||
const MAX_USER_PROFILES_ENTRIES: usize = 50_000;
|
||||
const MAX_USER_KEY_BYTES: usize = 512;
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
|
||||
pub enum AdaptiveTier {
|
||||
|
|
@ -234,32 +236,50 @@ fn profiles() -> &'static DashMap<String, UserAdaptiveProfile> {
|
|||
}
|
||||
|
||||
pub fn seed_tier_for_user(user: &str) -> AdaptiveTier {
|
||||
if user.len() > MAX_USER_KEY_BYTES {
|
||||
return AdaptiveTier::Base;
|
||||
}
|
||||
let now = Instant::now();
|
||||
if let Some(entry) = profiles().get(user) {
|
||||
let value = entry.value();
|
||||
if now.duration_since(value.seen_at) <= PROFILE_TTL {
|
||||
let value = *entry.value();
|
||||
drop(entry);
|
||||
if now.saturating_duration_since(value.seen_at) <= PROFILE_TTL {
|
||||
return value.tier;
|
||||
}
|
||||
profiles().remove_if(user, |_, v| {
|
||||
now.saturating_duration_since(v.seen_at) > PROFILE_TTL
|
||||
});
|
||||
}
|
||||
AdaptiveTier::Base
|
||||
}
|
||||
|
||||
pub fn record_user_tier(user: &str, tier: AdaptiveTier) {
|
||||
if user.len() > MAX_USER_KEY_BYTES {
|
||||
return;
|
||||
}
|
||||
let now = Instant::now();
|
||||
if let Some(mut entry) = profiles().get_mut(user) {
|
||||
let existing = *entry;
|
||||
let effective = if now.duration_since(existing.seen_at) > PROFILE_TTL {
|
||||
let mut was_vacant = false;
|
||||
match profiles().entry(user.to_string()) {
|
||||
dashmap::mapref::entry::Entry::Occupied(mut entry) => {
|
||||
let existing = *entry.get();
|
||||
let effective = if now.saturating_duration_since(existing.seen_at) > PROFILE_TTL {
|
||||
tier
|
||||
} else {
|
||||
max(existing.tier, tier)
|
||||
};
|
||||
*entry = UserAdaptiveProfile {
|
||||
entry.insert(UserAdaptiveProfile {
|
||||
tier: effective,
|
||||
seen_at: now,
|
||||
};
|
||||
return;
|
||||
});
|
||||
}
|
||||
dashmap::mapref::entry::Entry::Vacant(slot) => {
|
||||
slot.insert(UserAdaptiveProfile { tier, seen_at: now });
|
||||
was_vacant = true;
|
||||
}
|
||||
}
|
||||
if was_vacant && profiles().len() > MAX_USER_PROFILES_ENTRIES {
|
||||
profiles().retain(|_, v| now.saturating_duration_since(v.seen_at) <= PROFILE_TTL);
|
||||
}
|
||||
profiles().insert(user.to_string(), UserAdaptiveProfile { tier, seen_at: now });
|
||||
}
|
||||
|
||||
pub fn direct_copy_buffers_for_tier(
|
||||
|
|
@ -310,6 +330,14 @@ fn scale(base: usize, numerator: usize, denominator: usize, cap: usize) -> usize
|
|||
scaled.min(cap).max(1)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/adaptive_buffers_security_tests.rs"]
|
||||
mod adaptive_buffers_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/adaptive_buffers_record_race_security_tests.rs"]
|
||||
mod adaptive_buffers_record_race_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
|
|
|||
|
|
@ -80,11 +80,16 @@ use crate::transport::middle_proxy::MePool;
|
|||
use crate::transport::socket::normalize_ip;
|
||||
use crate::transport::{UpstreamManager, configure_client_socket, parse_proxy_protocol};
|
||||
|
||||
use crate::proxy::direct_relay::handle_via_direct;
|
||||
use crate::proxy::handshake::{HandshakeSuccess, handle_mtproto_handshake, handle_tls_handshake};
|
||||
use crate::proxy::direct_relay::handle_via_direct_with_shared;
|
||||
use crate::proxy::handshake::{
|
||||
HandshakeSuccess, handle_mtproto_handshake_with_shared, handle_tls_handshake_with_shared,
|
||||
};
|
||||
#[cfg(test)]
|
||||
use crate::proxy::handshake::{handle_mtproto_handshake, handle_tls_handshake};
|
||||
use crate::proxy::masking::handle_bad_client;
|
||||
use crate::proxy::middle_relay::handle_via_middle_proxy;
|
||||
use crate::proxy::route_mode::{RelayRouteMode, RouteRuntimeController};
|
||||
use crate::proxy::shared_state::ProxySharedState;
|
||||
|
||||
fn beobachten_ttl(config: &ProxyConfig) -> Duration {
|
||||
const BEOBACHTEN_TTL_MAX_MINUTES: u64 = 24 * 60;
|
||||
|
|
@ -186,6 +191,90 @@ fn handshake_timeout_with_mask_grace(config: &ProxyConfig) -> Duration {
|
|||
}
|
||||
}
|
||||
|
||||
fn effective_client_first_byte_idle_secs(config: &ProxyConfig, shared: &ProxySharedState) -> u64 {
|
||||
let idle_secs = config.timeouts.client_first_byte_idle_secs;
|
||||
if idle_secs == 0 {
|
||||
return 0;
|
||||
}
|
||||
if shared.conntrack_pressure_active() {
|
||||
idle_secs.min(
|
||||
config
|
||||
.server
|
||||
.conntrack_control
|
||||
.profile
|
||||
.client_first_byte_idle_cap_secs(),
|
||||
)
|
||||
} else {
|
||||
idle_secs
|
||||
}
|
||||
}
|
||||
|
||||
const MASK_CLASSIFIER_PREFETCH_WINDOW: usize = 16;
|
||||
#[cfg(test)]
|
||||
const MASK_CLASSIFIER_PREFETCH_TIMEOUT: Duration = Duration::from_millis(5);
|
||||
|
||||
fn mask_classifier_prefetch_timeout(config: &ProxyConfig) -> Duration {
|
||||
Duration::from_millis(config.censorship.mask_classifier_prefetch_timeout_ms)
|
||||
}
|
||||
|
||||
fn should_prefetch_mask_classifier_window(initial_data: &[u8]) -> bool {
|
||||
if initial_data.len() >= MASK_CLASSIFIER_PREFETCH_WINDOW {
|
||||
return false;
|
||||
}
|
||||
|
||||
if initial_data.is_empty() {
|
||||
// Empty initial_data means there is no client probe prefix to refine.
|
||||
// Prefetching in this case can consume fallback relay payload bytes and
|
||||
// accidentally route them through shaping heuristics.
|
||||
return false;
|
||||
}
|
||||
|
||||
if initial_data[0] == 0x16 || initial_data.starts_with(b"SSH-") {
|
||||
return false;
|
||||
}
|
||||
|
||||
initial_data
|
||||
.iter()
|
||||
.all(|b| b.is_ascii_alphabetic() || *b == b' ')
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
async fn extend_masking_initial_window<R>(reader: &mut R, initial_data: &mut Vec<u8>)
|
||||
where
|
||||
R: AsyncRead + Unpin,
|
||||
{
|
||||
extend_masking_initial_window_with_timeout(
|
||||
reader,
|
||||
initial_data,
|
||||
MASK_CLASSIFIER_PREFETCH_TIMEOUT,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
async fn extend_masking_initial_window_with_timeout<R>(
|
||||
reader: &mut R,
|
||||
initial_data: &mut Vec<u8>,
|
||||
prefetch_timeout: Duration,
|
||||
) where
|
||||
R: AsyncRead + Unpin,
|
||||
{
|
||||
if !should_prefetch_mask_classifier_window(initial_data) {
|
||||
return;
|
||||
}
|
||||
|
||||
let need = MASK_CLASSIFIER_PREFETCH_WINDOW.saturating_sub(initial_data.len());
|
||||
if need == 0 {
|
||||
return;
|
||||
}
|
||||
|
||||
let mut extra = [0u8; MASK_CLASSIFIER_PREFETCH_WINDOW];
|
||||
if let Ok(Ok(n)) = timeout(prefetch_timeout, reader.read(&mut extra[..need])).await
|
||||
&& n > 0
|
||||
{
|
||||
initial_data.extend_from_slice(&extra[..n]);
|
||||
}
|
||||
}
|
||||
|
||||
fn masking_outcome<R, W>(
|
||||
reader: R,
|
||||
writer: W,
|
||||
|
|
@ -200,6 +289,15 @@ where
|
|||
W: AsyncWrite + Unpin + Send + 'static,
|
||||
{
|
||||
HandshakeOutcome::NeedsMasking(Box::pin(async move {
|
||||
let mut reader = reader;
|
||||
let mut initial_data = initial_data;
|
||||
extend_masking_initial_window_with_timeout(
|
||||
&mut reader,
|
||||
&mut initial_data,
|
||||
mask_classifier_prefetch_timeout(&config),
|
||||
)
|
||||
.await;
|
||||
|
||||
handle_bad_client(
|
||||
reader,
|
||||
writer,
|
||||
|
|
@ -242,13 +340,20 @@ fn record_handshake_failure_class(
|
|||
record_beobachten_class(beobachten, config, peer_ip, class);
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn increment_bad_on_unknown_tls_sni(stats: &Stats, error: &ProxyError) {
|
||||
if matches!(error, ProxyError::UnknownTlsSni) {
|
||||
stats.increment_connects_bad();
|
||||
}
|
||||
}
|
||||
|
||||
fn is_trusted_proxy_source(peer_ip: IpAddr, trusted: &[IpNetwork]) -> bool {
|
||||
if trusted.is_empty() {
|
||||
static EMPTY_PROXY_TRUST_WARNED: OnceLock<AtomicBool> = OnceLock::new();
|
||||
let warned = EMPTY_PROXY_TRUST_WARNED.get_or_init(|| AtomicBool::new(false));
|
||||
if !warned.swap(true, Ordering::Relaxed) {
|
||||
warn!(
|
||||
"PROXY protocol enabled but server.proxy_protocol_trusted_cidrs is empty; rejecting all PROXY headers by default"
|
||||
"PROXY protocol enabled but server.proxy_protocol_trusted_cidrs is empty; rejecting all PROXY headers"
|
||||
);
|
||||
}
|
||||
return false;
|
||||
|
|
@ -260,7 +365,48 @@ fn synthetic_local_addr(port: u16) -> SocketAddr {
|
|||
SocketAddr::from(([0, 0, 0, 0], port))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub async fn handle_client_stream<S>(
|
||||
stream: S,
|
||||
peer: SocketAddr,
|
||||
config: Arc<ProxyConfig>,
|
||||
stats: Arc<Stats>,
|
||||
upstream_manager: Arc<UpstreamManager>,
|
||||
replay_checker: Arc<ReplayChecker>,
|
||||
buffer_pool: Arc<BufferPool>,
|
||||
rng: Arc<SecureRandom>,
|
||||
me_pool: Option<Arc<MePool>>,
|
||||
route_runtime: Arc<RouteRuntimeController>,
|
||||
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
beobachten: Arc<BeobachtenStore>,
|
||||
proxy_protocol_enabled: bool,
|
||||
) -> Result<()>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin + Send + 'static,
|
||||
{
|
||||
handle_client_stream_with_shared(
|
||||
stream,
|
||||
peer,
|
||||
config,
|
||||
stats,
|
||||
upstream_manager,
|
||||
replay_checker,
|
||||
buffer_pool,
|
||||
rng,
|
||||
me_pool,
|
||||
route_runtime,
|
||||
tls_cache,
|
||||
ip_tracker,
|
||||
beobachten,
|
||||
ProxySharedState::new(),
|
||||
proxy_protocol_enabled,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub async fn handle_client_stream_with_shared<S>(
|
||||
mut stream: S,
|
||||
peer: SocketAddr,
|
||||
config: Arc<ProxyConfig>,
|
||||
|
|
@ -274,6 +420,7 @@ pub async fn handle_client_stream<S>(
|
|||
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
beobachten: Arc<BeobachtenStore>,
|
||||
shared: Arc<ProxySharedState>,
|
||||
proxy_protocol_enabled: bool,
|
||||
) -> Result<()>
|
||||
where
|
||||
|
|
@ -334,16 +481,69 @@ where
|
|||
|
||||
debug!(peer = %real_peer, "New connection (generic stream)");
|
||||
|
||||
let first_byte_idle_secs = effective_client_first_byte_idle_secs(&config, shared.as_ref());
|
||||
let first_byte = if first_byte_idle_secs == 0 {
|
||||
None
|
||||
} else {
|
||||
let idle_timeout = Duration::from_secs(first_byte_idle_secs);
|
||||
let mut first_byte = [0u8; 1];
|
||||
match timeout(idle_timeout, stream.read(&mut first_byte)).await {
|
||||
Ok(Ok(0)) => {
|
||||
debug!(peer = %real_peer, "Connection closed before first client byte");
|
||||
return Ok(());
|
||||
}
|
||||
Ok(Ok(_)) => Some(first_byte[0]),
|
||||
Ok(Err(e))
|
||||
if matches!(
|
||||
e.kind(),
|
||||
std::io::ErrorKind::UnexpectedEof
|
||||
| std::io::ErrorKind::ConnectionReset
|
||||
| std::io::ErrorKind::ConnectionAborted
|
||||
| std::io::ErrorKind::BrokenPipe
|
||||
| std::io::ErrorKind::NotConnected
|
||||
) =>
|
||||
{
|
||||
debug!(
|
||||
peer = %real_peer,
|
||||
error = %e,
|
||||
"Connection closed before first client byte"
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
Ok(Err(e)) => {
|
||||
debug!(
|
||||
peer = %real_peer,
|
||||
error = %e,
|
||||
"Failed while waiting for first client byte"
|
||||
);
|
||||
return Err(ProxyError::Io(e));
|
||||
}
|
||||
Err(_) => {
|
||||
debug!(
|
||||
peer = %real_peer,
|
||||
idle_secs = first_byte_idle_secs,
|
||||
"Closing idle pooled connection before first client byte"
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let handshake_timeout = handshake_timeout_with_mask_grace(&config);
|
||||
let stats_for_timeout = stats.clone();
|
||||
let config_for_timeout = config.clone();
|
||||
let beobachten_for_timeout = beobachten.clone();
|
||||
let peer_for_timeout = real_peer.ip();
|
||||
|
||||
// Phase 1: handshake (with timeout)
|
||||
// Phase 2: active handshake (with timeout after the first client byte)
|
||||
let outcome = match timeout(handshake_timeout, async {
|
||||
let mut first_bytes = [0u8; 5];
|
||||
if let Some(first_byte) = first_byte {
|
||||
first_bytes[0] = first_byte;
|
||||
stream.read_exact(&mut first_bytes[1..]).await?;
|
||||
} else {
|
||||
stream.read_exact(&mut first_bytes).await?;
|
||||
}
|
||||
|
||||
let is_tls = tls::is_tls_handshake(&first_bytes[..3]);
|
||||
debug!(peer = %real_peer, is_tls = is_tls, "Handshake type detected");
|
||||
|
|
@ -416,9 +616,10 @@ where
|
|||
|
||||
let (read_half, write_half) = tokio::io::split(stream);
|
||||
|
||||
let (mut tls_reader, tls_writer, tls_user) = match handle_tls_handshake(
|
||||
let (mut tls_reader, tls_writer, tls_user) = match handle_tls_handshake_with_shared(
|
||||
&handshake, read_half, write_half, real_peer,
|
||||
&config, &replay_checker, &rng, tls_cache.clone(),
|
||||
shared.as_ref(),
|
||||
).await {
|
||||
HandshakeResult::Success(result) => result,
|
||||
HandshakeResult::BadClient { reader, writer } => {
|
||||
|
|
@ -433,7 +634,10 @@ where
|
|||
beobachten.clone(),
|
||||
));
|
||||
}
|
||||
HandshakeResult::Error(e) => return Err(e),
|
||||
HandshakeResult::Error(e) => {
|
||||
increment_bad_on_unknown_tls_sni(stats.as_ref(), &e);
|
||||
return Err(e);
|
||||
}
|
||||
};
|
||||
|
||||
debug!(peer = %peer, "Reading MTProto handshake through TLS");
|
||||
|
|
@ -441,9 +645,10 @@ where
|
|||
let mtproto_handshake: [u8; HANDSHAKE_LEN] = mtproto_data[..].try_into()
|
||||
.map_err(|_| ProxyError::InvalidHandshake("Short MTProto handshake".into()))?;
|
||||
|
||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake_with_shared(
|
||||
&mtproto_handshake, tls_reader, tls_writer, real_peer,
|
||||
&config, &replay_checker, true, Some(tls_user.as_str()),
|
||||
shared.as_ref(),
|
||||
).await {
|
||||
HandshakeResult::Success(result) => result,
|
||||
HandshakeResult::BadClient { reader, writer } => {
|
||||
|
|
@ -477,11 +682,12 @@ where
|
|||
};
|
||||
|
||||
Ok(HandshakeOutcome::NeedsRelay(Box::pin(
|
||||
RunningClientHandler::handle_authenticated_static(
|
||||
RunningClientHandler::handle_authenticated_static_with_shared(
|
||||
crypto_reader, crypto_writer, success,
|
||||
upstream_manager, stats, config, buffer_pool, rng, me_pool,
|
||||
route_runtime.clone(),
|
||||
local_addr, real_peer, ip_tracker.clone(),
|
||||
shared.clone(),
|
||||
),
|
||||
)))
|
||||
} else {
|
||||
|
|
@ -507,9 +713,10 @@ where
|
|||
|
||||
let (read_half, write_half) = tokio::io::split(stream);
|
||||
|
||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake_with_shared(
|
||||
&handshake, read_half, write_half, real_peer,
|
||||
&config, &replay_checker, false, None,
|
||||
shared.as_ref(),
|
||||
).await {
|
||||
HandshakeResult::Success(result) => result,
|
||||
HandshakeResult::BadClient { reader, writer } => {
|
||||
|
|
@ -528,7 +735,7 @@ where
|
|||
};
|
||||
|
||||
Ok(HandshakeOutcome::NeedsRelay(Box::pin(
|
||||
RunningClientHandler::handle_authenticated_static(
|
||||
RunningClientHandler::handle_authenticated_static_with_shared(
|
||||
crypto_reader,
|
||||
crypto_writer,
|
||||
success,
|
||||
|
|
@ -542,6 +749,7 @@ where
|
|||
local_addr,
|
||||
real_peer,
|
||||
ip_tracker.clone(),
|
||||
shared.clone(),
|
||||
)
|
||||
)))
|
||||
}
|
||||
|
|
@ -594,10 +802,12 @@ pub struct RunningClientHandler {
|
|||
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
beobachten: Arc<BeobachtenStore>,
|
||||
shared: Arc<ProxySharedState>,
|
||||
proxy_protocol_enabled: bool,
|
||||
}
|
||||
|
||||
impl ClientHandler {
|
||||
#[cfg(test)]
|
||||
pub fn new(
|
||||
stream: TcpStream,
|
||||
peer: SocketAddr,
|
||||
|
|
@ -614,6 +824,45 @@ impl ClientHandler {
|
|||
beobachten: Arc<BeobachtenStore>,
|
||||
proxy_protocol_enabled: bool,
|
||||
real_peer_report: Arc<std::sync::Mutex<Option<SocketAddr>>>,
|
||||
) -> RunningClientHandler {
|
||||
Self::new_with_shared(
|
||||
stream,
|
||||
peer,
|
||||
config,
|
||||
stats,
|
||||
upstream_manager,
|
||||
replay_checker,
|
||||
buffer_pool,
|
||||
rng,
|
||||
me_pool,
|
||||
route_runtime,
|
||||
tls_cache,
|
||||
ip_tracker,
|
||||
beobachten,
|
||||
ProxySharedState::new(),
|
||||
proxy_protocol_enabled,
|
||||
real_peer_report,
|
||||
)
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn new_with_shared(
|
||||
stream: TcpStream,
|
||||
peer: SocketAddr,
|
||||
config: Arc<ProxyConfig>,
|
||||
stats: Arc<Stats>,
|
||||
upstream_manager: Arc<UpstreamManager>,
|
||||
replay_checker: Arc<ReplayChecker>,
|
||||
buffer_pool: Arc<BufferPool>,
|
||||
rng: Arc<SecureRandom>,
|
||||
me_pool: Option<Arc<MePool>>,
|
||||
route_runtime: Arc<RouteRuntimeController>,
|
||||
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
beobachten: Arc<BeobachtenStore>,
|
||||
shared: Arc<ProxySharedState>,
|
||||
proxy_protocol_enabled: bool,
|
||||
real_peer_report: Arc<std::sync::Mutex<Option<SocketAddr>>>,
|
||||
) -> RunningClientHandler {
|
||||
let normalized_peer = normalize_ip(peer);
|
||||
RunningClientHandler {
|
||||
|
|
@ -632,6 +881,7 @@ impl ClientHandler {
|
|||
tls_cache,
|
||||
ip_tracker,
|
||||
beobachten,
|
||||
shared,
|
||||
proxy_protocol_enabled,
|
||||
}
|
||||
}
|
||||
|
|
@ -651,36 +901,9 @@ impl RunningClientHandler {
|
|||
debug!(peer = %peer, error = %e, "Failed to configure client socket");
|
||||
}
|
||||
|
||||
let handshake_timeout = handshake_timeout_with_mask_grace(&self.config);
|
||||
let stats = self.stats.clone();
|
||||
let config_for_timeout = self.config.clone();
|
||||
let beobachten_for_timeout = self.beobachten.clone();
|
||||
let peer_for_timeout = peer.ip();
|
||||
|
||||
// Phase 1: handshake (with timeout)
|
||||
let outcome = match timeout(handshake_timeout, self.do_handshake()).await {
|
||||
Ok(Ok(outcome)) => outcome,
|
||||
Ok(Err(e)) => {
|
||||
debug!(peer = %peer, error = %e, "Handshake failed");
|
||||
record_handshake_failure_class(
|
||||
&beobachten_for_timeout,
|
||||
&config_for_timeout,
|
||||
peer_for_timeout,
|
||||
&e,
|
||||
);
|
||||
return Err(e);
|
||||
}
|
||||
Err(_) => {
|
||||
stats.increment_handshake_timeouts();
|
||||
debug!(peer = %peer, "Handshake timeout");
|
||||
record_beobachten_class(
|
||||
&beobachten_for_timeout,
|
||||
&config_for_timeout,
|
||||
peer_for_timeout,
|
||||
"other",
|
||||
);
|
||||
return Err(ProxyError::TgHandshakeTimeout);
|
||||
}
|
||||
let outcome = match self.do_handshake().await? {
|
||||
Some(outcome) => outcome,
|
||||
None => return Ok(()),
|
||||
};
|
||||
|
||||
// Phase 2: relay (WITHOUT handshake timeout — relay has its own activity timeouts)
|
||||
|
|
@ -689,7 +912,7 @@ impl RunningClientHandler {
|
|||
}
|
||||
}
|
||||
|
||||
async fn do_handshake(mut self) -> Result<HandshakeOutcome> {
|
||||
async fn do_handshake(mut self) -> Result<Option<HandshakeOutcome>> {
|
||||
let mut local_addr = self.stream.local_addr().map_err(ProxyError::Io)?;
|
||||
|
||||
if self.proxy_protocol_enabled {
|
||||
|
|
@ -764,8 +987,70 @@ impl RunningClientHandler {
|
|||
}
|
||||
}
|
||||
|
||||
let first_byte_idle_secs =
|
||||
effective_client_first_byte_idle_secs(&self.config, self.shared.as_ref());
|
||||
let first_byte = if first_byte_idle_secs == 0 {
|
||||
None
|
||||
} else {
|
||||
let idle_timeout = Duration::from_secs(first_byte_idle_secs);
|
||||
let mut first_byte = [0u8; 1];
|
||||
match timeout(idle_timeout, self.stream.read(&mut first_byte)).await {
|
||||
Ok(Ok(0)) => {
|
||||
debug!(peer = %self.peer, "Connection closed before first client byte");
|
||||
return Ok(None);
|
||||
}
|
||||
Ok(Ok(_)) => Some(first_byte[0]),
|
||||
Ok(Err(e))
|
||||
if matches!(
|
||||
e.kind(),
|
||||
std::io::ErrorKind::UnexpectedEof
|
||||
| std::io::ErrorKind::ConnectionReset
|
||||
| std::io::ErrorKind::ConnectionAborted
|
||||
| std::io::ErrorKind::BrokenPipe
|
||||
| std::io::ErrorKind::NotConnected
|
||||
) =>
|
||||
{
|
||||
debug!(
|
||||
peer = %self.peer,
|
||||
error = %e,
|
||||
"Connection closed before first client byte"
|
||||
);
|
||||
return Ok(None);
|
||||
}
|
||||
Ok(Err(e)) => {
|
||||
debug!(
|
||||
peer = %self.peer,
|
||||
error = %e,
|
||||
"Failed while waiting for first client byte"
|
||||
);
|
||||
return Err(ProxyError::Io(e));
|
||||
}
|
||||
Err(_) => {
|
||||
debug!(
|
||||
peer = %self.peer,
|
||||
idle_secs = first_byte_idle_secs,
|
||||
"Closing idle pooled connection before first client byte"
|
||||
);
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let handshake_timeout = handshake_timeout_with_mask_grace(&self.config);
|
||||
let stats = self.stats.clone();
|
||||
let config_for_timeout = self.config.clone();
|
||||
let beobachten_for_timeout = self.beobachten.clone();
|
||||
let peer_for_timeout = self.peer.ip();
|
||||
let peer_for_log = self.peer;
|
||||
|
||||
let outcome = match timeout(handshake_timeout, async {
|
||||
let mut first_bytes = [0u8; 5];
|
||||
if let Some(first_byte) = first_byte {
|
||||
first_bytes[0] = first_byte;
|
||||
self.stream.read_exact(&mut first_bytes[1..]).await?;
|
||||
} else {
|
||||
self.stream.read_exact(&mut first_bytes).await?;
|
||||
}
|
||||
|
||||
let is_tls = tls::is_tls_handshake(&first_bytes[..3]);
|
||||
let peer = self.peer;
|
||||
|
|
@ -777,6 +1062,34 @@ impl RunningClientHandler {
|
|||
} else {
|
||||
self.handle_direct_client(first_bytes, local_addr).await
|
||||
}
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(Ok(outcome)) => outcome,
|
||||
Ok(Err(e)) => {
|
||||
debug!(peer = %peer_for_log, error = %e, "Handshake failed");
|
||||
record_handshake_failure_class(
|
||||
&beobachten_for_timeout,
|
||||
&config_for_timeout,
|
||||
peer_for_timeout,
|
||||
&e,
|
||||
);
|
||||
return Err(e);
|
||||
}
|
||||
Err(_) => {
|
||||
stats.increment_handshake_timeouts();
|
||||
debug!(peer = %peer_for_log, "Handshake timeout");
|
||||
record_beobachten_class(
|
||||
&beobachten_for_timeout,
|
||||
&config_for_timeout,
|
||||
peer_for_timeout,
|
||||
"other",
|
||||
);
|
||||
return Err(ProxyError::TgHandshakeTimeout);
|
||||
}
|
||||
};
|
||||
|
||||
Ok(Some(outcome))
|
||||
}
|
||||
|
||||
async fn handle_tls_client(
|
||||
|
|
@ -859,7 +1172,7 @@ impl RunningClientHandler {
|
|||
|
||||
let (read_half, write_half) = self.stream.into_split();
|
||||
|
||||
let (mut tls_reader, tls_writer, tls_user) = match handle_tls_handshake(
|
||||
let (mut tls_reader, tls_writer, tls_user) = match handle_tls_handshake_with_shared(
|
||||
&handshake,
|
||||
read_half,
|
||||
write_half,
|
||||
|
|
@ -868,6 +1181,7 @@ impl RunningClientHandler {
|
|||
&replay_checker,
|
||||
&self.rng,
|
||||
self.tls_cache.clone(),
|
||||
self.shared.as_ref(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
|
|
@ -884,7 +1198,10 @@ impl RunningClientHandler {
|
|||
self.beobachten.clone(),
|
||||
));
|
||||
}
|
||||
HandshakeResult::Error(e) => return Err(e),
|
||||
HandshakeResult::Error(e) => {
|
||||
increment_bad_on_unknown_tls_sni(stats.as_ref(), &e);
|
||||
return Err(e);
|
||||
}
|
||||
};
|
||||
|
||||
debug!(peer = %peer, "Reading MTProto handshake through TLS");
|
||||
|
|
@ -893,7 +1210,7 @@ impl RunningClientHandler {
|
|||
.try_into()
|
||||
.map_err(|_| ProxyError::InvalidHandshake("Short MTProto handshake".into()))?;
|
||||
|
||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake_with_shared(
|
||||
&mtproto_handshake,
|
||||
tls_reader,
|
||||
tls_writer,
|
||||
|
|
@ -902,6 +1219,7 @@ impl RunningClientHandler {
|
|||
&replay_checker,
|
||||
true,
|
||||
Some(tls_user.as_str()),
|
||||
self.shared.as_ref(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
|
|
@ -938,7 +1256,7 @@ impl RunningClientHandler {
|
|||
};
|
||||
|
||||
Ok(HandshakeOutcome::NeedsRelay(Box::pin(
|
||||
Self::handle_authenticated_static(
|
||||
Self::handle_authenticated_static_with_shared(
|
||||
crypto_reader,
|
||||
crypto_writer,
|
||||
success,
|
||||
|
|
@ -952,6 +1270,7 @@ impl RunningClientHandler {
|
|||
local_addr,
|
||||
peer,
|
||||
self.ip_tracker,
|
||||
self.shared,
|
||||
),
|
||||
)))
|
||||
}
|
||||
|
|
@ -990,7 +1309,7 @@ impl RunningClientHandler {
|
|||
|
||||
let (read_half, write_half) = self.stream.into_split();
|
||||
|
||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake_with_shared(
|
||||
&handshake,
|
||||
read_half,
|
||||
write_half,
|
||||
|
|
@ -999,6 +1318,7 @@ impl RunningClientHandler {
|
|||
&replay_checker,
|
||||
false,
|
||||
None,
|
||||
self.shared.as_ref(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
|
|
@ -1019,7 +1339,7 @@ impl RunningClientHandler {
|
|||
};
|
||||
|
||||
Ok(HandshakeOutcome::NeedsRelay(Box::pin(
|
||||
Self::handle_authenticated_static(
|
||||
Self::handle_authenticated_static_with_shared(
|
||||
crypto_reader,
|
||||
crypto_writer,
|
||||
success,
|
||||
|
|
@ -1033,6 +1353,7 @@ impl RunningClientHandler {
|
|||
local_addr,
|
||||
peer,
|
||||
self.ip_tracker,
|
||||
self.shared,
|
||||
),
|
||||
)))
|
||||
}
|
||||
|
|
@ -1041,6 +1362,7 @@ impl RunningClientHandler {
|
|||
/// Two modes:
|
||||
/// - Direct: TCP relay to TG DC (existing behavior)
|
||||
/// - Middle Proxy: RPC multiplex through ME pool (new — supports CDN DCs)
|
||||
#[cfg(test)]
|
||||
async fn handle_authenticated_static<R, W>(
|
||||
client_reader: CryptoReader<R>,
|
||||
client_writer: CryptoWriter<W>,
|
||||
|
|
@ -1056,6 +1378,45 @@ impl RunningClientHandler {
|
|||
peer_addr: SocketAddr,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
) -> Result<()>
|
||||
where
|
||||
R: AsyncRead + Unpin + Send + 'static,
|
||||
W: AsyncWrite + Unpin + Send + 'static,
|
||||
{
|
||||
Self::handle_authenticated_static_with_shared(
|
||||
client_reader,
|
||||
client_writer,
|
||||
success,
|
||||
upstream_manager,
|
||||
stats,
|
||||
config,
|
||||
buffer_pool,
|
||||
rng,
|
||||
me_pool,
|
||||
route_runtime,
|
||||
local_addr,
|
||||
peer_addr,
|
||||
ip_tracker,
|
||||
ProxySharedState::new(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn handle_authenticated_static_with_shared<R, W>(
|
||||
client_reader: CryptoReader<R>,
|
||||
client_writer: CryptoWriter<W>,
|
||||
success: HandshakeSuccess,
|
||||
upstream_manager: Arc<UpstreamManager>,
|
||||
stats: Arc<Stats>,
|
||||
config: Arc<ProxyConfig>,
|
||||
buffer_pool: Arc<BufferPool>,
|
||||
rng: Arc<SecureRandom>,
|
||||
me_pool: Option<Arc<MePool>>,
|
||||
route_runtime: Arc<RouteRuntimeController>,
|
||||
local_addr: SocketAddr,
|
||||
peer_addr: SocketAddr,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
shared: Arc<ProxySharedState>,
|
||||
) -> Result<()>
|
||||
where
|
||||
R: AsyncRead + Unpin + Send + 'static,
|
||||
W: AsyncWrite + Unpin + Send + 'static,
|
||||
|
|
@ -1097,11 +1458,12 @@ impl RunningClientHandler {
|
|||
route_runtime.subscribe(),
|
||||
route_snapshot,
|
||||
session_id,
|
||||
shared.clone(),
|
||||
)
|
||||
.await
|
||||
} else {
|
||||
warn!("use_middle_proxy=true but MePool not initialized, falling back to direct");
|
||||
handle_via_direct(
|
||||
handle_via_direct_with_shared(
|
||||
client_reader,
|
||||
client_writer,
|
||||
success,
|
||||
|
|
@ -1113,12 +1475,14 @@ impl RunningClientHandler {
|
|||
route_runtime.subscribe(),
|
||||
route_snapshot,
|
||||
session_id,
|
||||
local_addr,
|
||||
shared.clone(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
} else {
|
||||
// Direct mode (original behavior)
|
||||
handle_via_direct(
|
||||
handle_via_direct_with_shared(
|
||||
client_reader,
|
||||
client_writer,
|
||||
success,
|
||||
|
|
@ -1130,6 +1494,8 @@ impl RunningClientHandler {
|
|||
route_runtime.subscribe(),
|
||||
route_snapshot,
|
||||
session_id,
|
||||
local_addr,
|
||||
shared.clone(),
|
||||
)
|
||||
.await
|
||||
};
|
||||
|
|
@ -1153,7 +1519,7 @@ impl RunningClientHandler {
|
|||
}
|
||||
|
||||
if let Some(quota) = config.access.user_data_quota.get(user)
|
||||
&& stats.get_user_total_octets(user) >= *quota
|
||||
&& stats.get_user_quota_used(user) >= *quota
|
||||
{
|
||||
return Err(ProxyError::DataQuotaExceeded {
|
||||
user: user.to_string(),
|
||||
|
|
@ -1164,7 +1530,11 @@ impl RunningClientHandler {
|
|||
.access
|
||||
.user_max_tcp_conns
|
||||
.get(user)
|
||||
.map(|v| *v as u64);
|
||||
.copied()
|
||||
.filter(|limit| *limit > 0)
|
||||
.or((config.access.user_max_tcp_conns_global_each > 0)
|
||||
.then_some(config.access.user_max_tcp_conns_global_each))
|
||||
.map(|v| v as u64);
|
||||
if !stats.try_acquire_user_curr_connects(user, limit) {
|
||||
return Err(ProxyError::ConnectionLimitExceeded {
|
||||
user: user.to_string(),
|
||||
|
|
@ -1212,7 +1582,7 @@ impl RunningClientHandler {
|
|||
}
|
||||
|
||||
if let Some(quota) = config.access.user_data_quota.get(user)
|
||||
&& stats.get_user_total_octets(user) >= *quota
|
||||
&& stats.get_user_quota_used(user) >= *quota
|
||||
{
|
||||
return Err(ProxyError::DataQuotaExceeded {
|
||||
user: user.to_string(),
|
||||
|
|
@ -1223,7 +1593,11 @@ impl RunningClientHandler {
|
|||
.access
|
||||
.user_max_tcp_conns
|
||||
.get(user)
|
||||
.map(|v| *v as u64);
|
||||
.copied()
|
||||
.filter(|limit| *limit > 0)
|
||||
.or((config.access.user_max_tcp_conns_global_each > 0)
|
||||
.then_some(config.access.user_max_tcp_conns_global_each))
|
||||
.map(|v| v as u64);
|
||||
if !stats.try_acquire_user_curr_connects(user, limit) {
|
||||
return Err(ProxyError::ConnectionLimitExceeded {
|
||||
user: user.to_string(),
|
||||
|
|
@ -1321,6 +1695,38 @@ mod masking_shape_classifier_fuzz_redteam_expected_fail_tests;
|
|||
#[path = "tests/client_masking_probe_evasion_blackhat_tests.rs"]
|
||||
mod masking_probe_evasion_blackhat_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_masking_fragmented_classifier_security_tests.rs"]
|
||||
mod masking_fragmented_classifier_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_masking_replay_timing_security_tests.rs"]
|
||||
mod masking_replay_timing_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_masking_http2_fragmented_preface_security_tests.rs"]
|
||||
mod masking_http2_fragmented_preface_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_masking_prefetch_invariant_security_tests.rs"]
|
||||
mod masking_prefetch_invariant_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_masking_prefetch_timing_matrix_security_tests.rs"]
|
||||
mod masking_prefetch_timing_matrix_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_masking_prefetch_config_runtime_security_tests.rs"]
|
||||
mod masking_prefetch_config_runtime_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_masking_prefetch_config_pipeline_integration_security_tests.rs"]
|
||||
mod masking_prefetch_config_pipeline_integration_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_masking_prefetch_strict_boundary_security_tests.rs"]
|
||||
mod masking_prefetch_strict_boundary_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_beobachten_ttl_bounds_security_tests.rs"]
|
||||
mod beobachten_ttl_bounds_security_tests;
|
||||
|
|
@ -1328,3 +1734,15 @@ mod beobachten_ttl_bounds_security_tests;
|
|||
#[cfg(test)]
|
||||
#[path = "tests/client_tls_record_wrap_hardening_security_tests.rs"]
|
||||
mod tls_record_wrap_hardening_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_clever_advanced_tests.rs"]
|
||||
mod client_clever_advanced_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_more_advanced_tests.rs"]
|
||||
mod client_more_advanced_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_deep_invariants_tests.rs"]
|
||||
mod client_deep_invariants_tests;
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ use std::net::SocketAddr;
|
|||
use std::path::{Component, Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::sync::{Mutex, OnceLock};
|
||||
use std::time::Duration;
|
||||
|
||||
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadHalf, WriteHalf, split};
|
||||
use tokio::sync::watch;
|
||||
|
|
@ -16,11 +17,13 @@ use crate::crypto::SecureRandom;
|
|||
use crate::error::{ProxyError, Result};
|
||||
use crate::protocol::constants::*;
|
||||
use crate::proxy::handshake::{HandshakeSuccess, encrypt_tg_nonce_with_ciphers, generate_tg_nonce};
|
||||
use crate::proxy::relay::relay_bidirectional;
|
||||
use crate::proxy::route_mode::{
|
||||
ROUTE_SWITCH_ERROR_MSG, RelayRouteMode, RouteCutoverState, affected_cutover_state,
|
||||
cutover_stagger_delay,
|
||||
};
|
||||
use crate::proxy::shared_state::{
|
||||
ConntrackCloseEvent, ConntrackClosePublishResult, ConntrackCloseReason, ProxySharedState,
|
||||
};
|
||||
use crate::stats::Stats;
|
||||
use crate::stream::{BufferPool, CryptoReader, CryptoWriter};
|
||||
use crate::transport::UpstreamManager;
|
||||
|
|
@ -225,7 +228,43 @@ fn unknown_dc_test_lock() -> &'static Mutex<()> {
|
|||
TEST_LOCK.get_or_init(|| Mutex::new(()))
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub(crate) async fn handle_via_direct<R, W>(
|
||||
client_reader: CryptoReader<R>,
|
||||
client_writer: CryptoWriter<W>,
|
||||
success: HandshakeSuccess,
|
||||
upstream_manager: Arc<UpstreamManager>,
|
||||
stats: Arc<Stats>,
|
||||
config: Arc<ProxyConfig>,
|
||||
buffer_pool: Arc<BufferPool>,
|
||||
rng: Arc<SecureRandom>,
|
||||
route_rx: watch::Receiver<RouteCutoverState>,
|
||||
route_snapshot: RouteCutoverState,
|
||||
session_id: u64,
|
||||
) -> Result<()>
|
||||
where
|
||||
R: AsyncRead + Unpin + Send + 'static,
|
||||
W: AsyncWrite + Unpin + Send + 'static,
|
||||
{
|
||||
handle_via_direct_with_shared(
|
||||
client_reader,
|
||||
client_writer,
|
||||
success,
|
||||
upstream_manager,
|
||||
stats,
|
||||
config.clone(),
|
||||
buffer_pool,
|
||||
rng,
|
||||
route_rx,
|
||||
route_snapshot,
|
||||
session_id,
|
||||
SocketAddr::from(([0, 0, 0, 0], config.server.port)),
|
||||
ProxySharedState::new(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
pub(crate) async fn handle_via_direct_with_shared<R, W>(
|
||||
client_reader: CryptoReader<R>,
|
||||
client_writer: CryptoWriter<W>,
|
||||
success: HandshakeSuccess,
|
||||
|
|
@ -237,6 +276,8 @@ pub(crate) async fn handle_via_direct<R, W>(
|
|||
mut route_rx: watch::Receiver<RouteCutoverState>,
|
||||
route_snapshot: RouteCutoverState,
|
||||
session_id: u64,
|
||||
local_addr: SocketAddr,
|
||||
shared: Arc<ProxySharedState>,
|
||||
) -> Result<()>
|
||||
where
|
||||
R: AsyncRead + Unpin + Send + 'static,
|
||||
|
|
@ -276,7 +317,19 @@ where
|
|||
stats.increment_user_connects(user);
|
||||
let _direct_connection_lease = stats.acquire_direct_connection_lease();
|
||||
|
||||
let relay_result = relay_bidirectional(
|
||||
let buffer_pool_trim = Arc::clone(&buffer_pool);
|
||||
let relay_activity_timeout = if shared.conntrack_pressure_active() {
|
||||
Duration::from_secs(
|
||||
config
|
||||
.server
|
||||
.conntrack_control
|
||||
.profile
|
||||
.direct_activity_timeout_secs(),
|
||||
)
|
||||
} else {
|
||||
Duration::from_secs(1800)
|
||||
};
|
||||
let relay_result = crate::proxy::relay::relay_bidirectional_with_activity_timeout(
|
||||
client_reader,
|
||||
client_writer,
|
||||
tg_reader,
|
||||
|
|
@ -287,6 +340,7 @@ where
|
|||
Arc::clone(&stats),
|
||||
config.access.user_data_quota.get(user).copied(),
|
||||
buffer_pool,
|
||||
relay_activity_timeout,
|
||||
);
|
||||
tokio::pin!(relay_result);
|
||||
let relay_result = loop {
|
||||
|
|
@ -321,9 +375,59 @@ where
|
|||
Err(e) => debug!(user = %user, error = %e, "Direct relay ended with error"),
|
||||
}
|
||||
|
||||
buffer_pool_trim.trim_to(buffer_pool_trim.max_buffers().min(64));
|
||||
let pool_snapshot = buffer_pool_trim.stats();
|
||||
stats.set_buffer_pool_gauges(
|
||||
pool_snapshot.pooled,
|
||||
pool_snapshot.allocated,
|
||||
pool_snapshot.allocated.saturating_sub(pool_snapshot.pooled),
|
||||
);
|
||||
|
||||
let close_reason = classify_conntrack_close_reason(&relay_result);
|
||||
let publish_result = shared.publish_conntrack_close_event(ConntrackCloseEvent {
|
||||
src: success.peer,
|
||||
dst: local_addr,
|
||||
reason: close_reason,
|
||||
});
|
||||
if !matches!(
|
||||
publish_result,
|
||||
ConntrackClosePublishResult::Sent | ConntrackClosePublishResult::Disabled
|
||||
) {
|
||||
stats.increment_conntrack_close_event_drop_total();
|
||||
}
|
||||
|
||||
relay_result
|
||||
}
|
||||
|
||||
fn classify_conntrack_close_reason(result: &Result<()>) -> ConntrackCloseReason {
|
||||
match result {
|
||||
Ok(()) => ConntrackCloseReason::NormalEof,
|
||||
Err(crate::error::ProxyError::Io(error))
|
||||
if matches!(error.kind(), std::io::ErrorKind::TimedOut) =>
|
||||
{
|
||||
ConntrackCloseReason::Timeout
|
||||
}
|
||||
Err(crate::error::ProxyError::Io(error))
|
||||
if matches!(
|
||||
error.kind(),
|
||||
std::io::ErrorKind::ConnectionReset
|
||||
| std::io::ErrorKind::ConnectionAborted
|
||||
| std::io::ErrorKind::BrokenPipe
|
||||
| std::io::ErrorKind::NotConnected
|
||||
| std::io::ErrorKind::UnexpectedEof
|
||||
) =>
|
||||
{
|
||||
ConntrackCloseReason::Reset
|
||||
}
|
||||
Err(crate::error::ProxyError::Proxy(message))
|
||||
if message.contains("pressure") || message.contains("evicted") =>
|
||||
{
|
||||
ConntrackCloseReason::Pressure
|
||||
}
|
||||
Err(_) => ConntrackCloseReason::Other,
|
||||
}
|
||||
}
|
||||
|
||||
fn get_dc_addr_static(dc_idx: i16, config: &ProxyConfig) -> Result<SocketAddr> {
|
||||
let prefer_v6 = config.network.prefer == 6 && config.network.ipv6.unwrap_or(true);
|
||||
let datacenters = if prefer_v6 {
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -4,14 +4,23 @@ use crate::config::ProxyConfig;
|
|||
use crate::network::dns_overrides::resolve_socket_addr;
|
||||
use crate::stats::beobachten::BeobachtenStore;
|
||||
use crate::transport::proxy_protocol::{ProxyProtocolV1Builder, ProxyProtocolV2Builder};
|
||||
use rand::{Rng, RngExt};
|
||||
use std::net::SocketAddr;
|
||||
#[cfg(unix)]
|
||||
use nix::ifaddrs::getifaddrs;
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, RngExt, SeedableRng};
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::str;
|
||||
use std::time::Duration;
|
||||
#[cfg(test)]
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
#[cfg(unix)]
|
||||
use std::sync::{Mutex, OnceLock};
|
||||
use std::time::{Duration, Instant as StdInstant};
|
||||
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
|
||||
use tokio::net::TcpStream;
|
||||
#[cfg(unix)]
|
||||
use tokio::net::UnixStream;
|
||||
#[cfg(unix)]
|
||||
use tokio::sync::Mutex as AsyncMutex;
|
||||
use tokio::time::{Instant, timeout};
|
||||
use tracing::debug;
|
||||
|
||||
|
|
@ -30,28 +39,55 @@ const MASK_RELAY_IDLE_TIMEOUT: Duration = Duration::from_secs(5);
|
|||
#[cfg(test)]
|
||||
const MASK_RELAY_IDLE_TIMEOUT: Duration = Duration::from_millis(100);
|
||||
const MASK_BUFFER_SIZE: usize = 8192;
|
||||
#[cfg(unix)]
|
||||
#[cfg(not(test))]
|
||||
const LOCAL_INTERFACE_CACHE_TTL: Duration = Duration::from_secs(300);
|
||||
#[cfg(all(unix, test))]
|
||||
const LOCAL_INTERFACE_CACHE_TTL: Duration = Duration::from_secs(1);
|
||||
|
||||
struct CopyOutcome {
|
||||
total: usize,
|
||||
ended_by_eof: bool,
|
||||
}
|
||||
|
||||
async fn copy_with_idle_timeout<R, W>(reader: &mut R, writer: &mut W) -> CopyOutcome
|
||||
async fn copy_with_idle_timeout<R, W>(
|
||||
reader: &mut R,
|
||||
writer: &mut W,
|
||||
byte_cap: usize,
|
||||
shutdown_on_eof: bool,
|
||||
) -> CopyOutcome
|
||||
where
|
||||
R: AsyncRead + Unpin,
|
||||
W: AsyncWrite + Unpin,
|
||||
{
|
||||
let mut buf = [0u8; MASK_BUFFER_SIZE];
|
||||
let mut buf = Box::new([0u8; MASK_BUFFER_SIZE]);
|
||||
let mut total = 0usize;
|
||||
let mut ended_by_eof = false;
|
||||
|
||||
if byte_cap == 0 {
|
||||
return CopyOutcome {
|
||||
total,
|
||||
ended_by_eof,
|
||||
};
|
||||
}
|
||||
|
||||
loop {
|
||||
let read_res = timeout(MASK_RELAY_IDLE_TIMEOUT, reader.read(&mut buf)).await;
|
||||
let remaining_budget = byte_cap.saturating_sub(total);
|
||||
if remaining_budget == 0 {
|
||||
break;
|
||||
}
|
||||
|
||||
let read_len = remaining_budget.min(MASK_BUFFER_SIZE);
|
||||
let read_res = timeout(MASK_RELAY_IDLE_TIMEOUT, reader.read(&mut buf[..read_len])).await;
|
||||
let n = match read_res {
|
||||
Ok(Ok(n)) => n,
|
||||
Ok(Err(_)) | Err(_) => break,
|
||||
};
|
||||
if n == 0 {
|
||||
ended_by_eof = true;
|
||||
if shutdown_on_eof {
|
||||
let _ = timeout(MASK_RELAY_IDLE_TIMEOUT, writer.shutdown()).await;
|
||||
}
|
||||
break;
|
||||
}
|
||||
total = total.saturating_add(n);
|
||||
|
|
@ -68,6 +104,31 @@ where
|
|||
}
|
||||
}
|
||||
|
||||
fn is_http_probe(data: &[u8]) -> bool {
|
||||
// RFC 7540 section 3.5: HTTP/2 client preface starts with "PRI ".
|
||||
const HTTP_METHODS: [&[u8]; 10] = [
|
||||
b"GET ", b"POST", b"HEAD", b"PUT ", b"DELETE", b"OPTIONS", b"CONNECT", b"TRACE", b"PATCH",
|
||||
b"PRI ",
|
||||
];
|
||||
|
||||
if data.is_empty() {
|
||||
return false;
|
||||
}
|
||||
|
||||
let window = &data[..data.len().min(16)];
|
||||
for method in HTTP_METHODS {
|
||||
if data.len() >= method.len() && window.starts_with(method) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (2..=3).contains(&window.len()) && method.starts_with(window) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
fn next_mask_shape_bucket(total: usize, floor: usize, cap: usize) -> usize {
|
||||
if total == 0 || floor == 0 || cap < floor {
|
||||
return total;
|
||||
|
|
@ -125,6 +186,11 @@ async fn maybe_write_shape_padding<W>(
|
|||
let mut remaining = target_total - total_sent;
|
||||
let mut pad_chunk = [0u8; 1024];
|
||||
let deadline = Instant::now() + MASK_TIMEOUT;
|
||||
// Use a Send RNG so relay futures remain spawn-safe under Tokio.
|
||||
let mut rng = {
|
||||
let mut seed_source = rand::rng();
|
||||
StdRng::from_rng(&mut seed_source)
|
||||
};
|
||||
|
||||
while remaining > 0 {
|
||||
let now = Instant::now();
|
||||
|
|
@ -133,10 +199,7 @@ async fn maybe_write_shape_padding<W>(
|
|||
}
|
||||
|
||||
let write_len = remaining.min(pad_chunk.len());
|
||||
{
|
||||
let mut rng = rand::rng();
|
||||
rng.fill_bytes(&mut pad_chunk[..write_len]);
|
||||
}
|
||||
let write_budget = deadline.saturating_duration_since(now);
|
||||
match timeout(write_budget, mask_write.write_all(&pad_chunk[..write_len])).await {
|
||||
Ok(Ok(())) => {}
|
||||
|
|
@ -167,11 +230,11 @@ where
|
|||
}
|
||||
}
|
||||
|
||||
async fn consume_client_data_with_timeout<R>(reader: R)
|
||||
async fn consume_client_data_with_timeout_and_cap<R>(reader: R, byte_cap: usize)
|
||||
where
|
||||
R: AsyncRead + Unpin,
|
||||
{
|
||||
if timeout(MASK_RELAY_TIMEOUT, consume_client_data(reader))
|
||||
if timeout(MASK_RELAY_TIMEOUT, consume_client_data(reader, byte_cap))
|
||||
.await
|
||||
.is_err()
|
||||
{
|
||||
|
|
@ -186,15 +249,63 @@ async fn wait_mask_connect_budget(started: Instant) {
|
|||
}
|
||||
}
|
||||
|
||||
// Log-normal sample bounded to [floor, ceiling]. Median = sqrt(floor * ceiling).
|
||||
// Implements Box-Muller transform for standard normal sampling — no external
|
||||
// dependency on rand_distr (which is incompatible with rand 0.10).
|
||||
// sigma is chosen so ~99% of raw samples land inside [floor, ceiling] before clamp.
|
||||
// When floor > ceiling (misconfiguration), returns ceiling (the smaller value).
|
||||
// When floor == ceiling, returns that value. When both are 0, returns 0.
|
||||
pub(crate) fn sample_lognormal_percentile_bounded(
|
||||
floor: u64,
|
||||
ceiling: u64,
|
||||
rng: &mut impl Rng,
|
||||
) -> u64 {
|
||||
if ceiling == 0 && floor == 0 {
|
||||
return 0;
|
||||
}
|
||||
if floor > ceiling {
|
||||
return ceiling;
|
||||
}
|
||||
if floor == ceiling {
|
||||
return floor;
|
||||
}
|
||||
let floor_f = floor.max(1) as f64;
|
||||
let ceiling_f = ceiling.max(1) as f64;
|
||||
let mu = (floor_f.ln() + ceiling_f.ln()) / 2.0;
|
||||
// 4.65 ≈ 2 * 2.326 (double-sided z-score for 99th percentile)
|
||||
let sigma = ((ceiling_f / floor_f).ln() / 4.65).max(0.01);
|
||||
// Box-Muller transform: two uniform samples → one standard normal sample
|
||||
let u1: f64 = rng.random_range(f64::MIN_POSITIVE..1.0);
|
||||
let u2: f64 = rng.random_range(0.0_f64..std::f64::consts::TAU);
|
||||
let normal_sample = (-2.0_f64 * u1.ln()).sqrt() * u2.cos();
|
||||
let raw = (mu + sigma * normal_sample).exp();
|
||||
if raw.is_finite() {
|
||||
(raw as u64).clamp(floor, ceiling)
|
||||
} else {
|
||||
((floor_f * ceiling_f).sqrt()) as u64
|
||||
}
|
||||
}
|
||||
|
||||
fn mask_outcome_target_budget(config: &ProxyConfig) -> Duration {
|
||||
if config.censorship.mask_timing_normalization_enabled {
|
||||
let floor = config.censorship.mask_timing_normalization_floor_ms;
|
||||
let ceiling = config.censorship.mask_timing_normalization_ceiling_ms;
|
||||
if floor == 0 {
|
||||
if ceiling == 0 {
|
||||
return Duration::from_millis(0);
|
||||
}
|
||||
// floor=0 stays uniform: log-normal cannot model distribution anchored at zero
|
||||
let mut rng = rand::rng();
|
||||
return Duration::from_millis(rng.random_range(0..=ceiling));
|
||||
}
|
||||
if ceiling > floor {
|
||||
let mut rng = rand::rng();
|
||||
return Duration::from_millis(rng.random_range(floor..=ceiling));
|
||||
return Duration::from_millis(sample_lognormal_percentile_bounded(
|
||||
floor, ceiling, &mut rng,
|
||||
));
|
||||
}
|
||||
return Duration::from_millis(floor);
|
||||
// ceiling <= floor: use the larger value (fail-closed: preserve longer delay)
|
||||
return Duration::from_millis(floor.max(ceiling));
|
||||
}
|
||||
|
||||
MASK_TIMEOUT
|
||||
|
|
@ -219,14 +330,7 @@ async fn wait_mask_outcome_budget(started: Instant, config: &ProxyConfig) {
|
|||
/// Detect client type based on initial data
|
||||
fn detect_client_type(data: &[u8]) -> &'static str {
|
||||
// Check for HTTP request
|
||||
if data.len() > 4
|
||||
&& (data.starts_with(b"GET ")
|
||||
|| data.starts_with(b"POST")
|
||||
|| data.starts_with(b"HEAD")
|
||||
|| data.starts_with(b"PUT ")
|
||||
|| data.starts_with(b"DELETE")
|
||||
|| data.starts_with(b"OPTIONS"))
|
||||
{
|
||||
if is_http_probe(data) {
|
||||
return "HTTP";
|
||||
}
|
||||
|
||||
|
|
@ -248,6 +352,247 @@ fn detect_client_type(data: &[u8]) -> &'static str {
|
|||
"unknown"
|
||||
}
|
||||
|
||||
fn parse_mask_host_ip_literal(host: &str) -> Option<IpAddr> {
|
||||
if host.starts_with('[') && host.ends_with(']') {
|
||||
return host[1..host.len() - 1].parse::<IpAddr>().ok();
|
||||
}
|
||||
host.parse::<IpAddr>().ok()
|
||||
}
|
||||
|
||||
fn canonical_ip(ip: IpAddr) -> IpAddr {
|
||||
match ip {
|
||||
IpAddr::V6(v6) => v6
|
||||
.to_ipv4_mapped()
|
||||
.map(IpAddr::V4)
|
||||
.unwrap_or(IpAddr::V6(v6)),
|
||||
IpAddr::V4(v4) => IpAddr::V4(v4),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
fn collect_local_interface_ips() -> Vec<IpAddr> {
|
||||
#[cfg(test)]
|
||||
LOCAL_INTERFACE_ENUMERATIONS.fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
let mut out = Vec::new();
|
||||
if let Ok(addrs) = getifaddrs() {
|
||||
for iface in addrs {
|
||||
if let Some(address) = iface.address {
|
||||
if let Some(v4) = address.as_sockaddr_in() {
|
||||
out.push(canonical_ip(IpAddr::V4(v4.ip())));
|
||||
} else if let Some(v6) = address.as_sockaddr_in6() {
|
||||
out.push(canonical_ip(IpAddr::V6(v6.ip())));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
fn choose_interface_snapshot(previous: &[IpAddr], refreshed: Vec<IpAddr>) -> Vec<IpAddr> {
|
||||
if refreshed.is_empty() && !previous.is_empty() {
|
||||
return previous.to_vec();
|
||||
}
|
||||
|
||||
refreshed
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
#[derive(Default)]
|
||||
struct LocalInterfaceCache {
|
||||
ips: Vec<IpAddr>,
|
||||
refreshed_at: Option<StdInstant>,
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
static LOCAL_INTERFACE_CACHE: OnceLock<Mutex<LocalInterfaceCache>> = OnceLock::new();
|
||||
|
||||
#[cfg(unix)]
|
||||
static LOCAL_INTERFACE_REFRESH_LOCK: OnceLock<AsyncMutex<()>> = OnceLock::new();
|
||||
|
||||
#[cfg(all(unix, test))]
|
||||
fn local_interface_ips() -> Vec<IpAddr> {
|
||||
let cache = LOCAL_INTERFACE_CACHE.get_or_init(|| Mutex::new(LocalInterfaceCache::default()));
|
||||
let mut guard = cache.lock().unwrap_or_else(|poison| poison.into_inner());
|
||||
|
||||
let stale = guard
|
||||
.refreshed_at
|
||||
.is_none_or(|at| at.elapsed() >= LOCAL_INTERFACE_CACHE_TTL);
|
||||
if stale {
|
||||
let refreshed = collect_local_interface_ips();
|
||||
guard.ips = choose_interface_snapshot(&guard.ips, refreshed);
|
||||
guard.refreshed_at = Some(StdInstant::now());
|
||||
}
|
||||
|
||||
guard.ips.clone()
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
async fn local_interface_ips_async() -> Vec<IpAddr> {
|
||||
let cache = LOCAL_INTERFACE_CACHE.get_or_init(|| Mutex::new(LocalInterfaceCache::default()));
|
||||
|
||||
{
|
||||
let guard = cache.lock().unwrap_or_else(|poison| poison.into_inner());
|
||||
let stale = guard
|
||||
.refreshed_at
|
||||
.is_none_or(|at| at.elapsed() >= LOCAL_INTERFACE_CACHE_TTL);
|
||||
if !stale {
|
||||
return guard.ips.clone();
|
||||
}
|
||||
}
|
||||
|
||||
let refresh_lock = LOCAL_INTERFACE_REFRESH_LOCK.get_or_init(|| AsyncMutex::new(()));
|
||||
let _refresh_guard = refresh_lock.lock().await;
|
||||
|
||||
{
|
||||
let guard = cache.lock().unwrap_or_else(|poison| poison.into_inner());
|
||||
let stale = guard
|
||||
.refreshed_at
|
||||
.is_none_or(|at| at.elapsed() >= LOCAL_INTERFACE_CACHE_TTL);
|
||||
if !stale {
|
||||
return guard.ips.clone();
|
||||
}
|
||||
}
|
||||
|
||||
let refreshed = tokio::task::spawn_blocking(collect_local_interface_ips)
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
|
||||
let mut guard = cache.lock().unwrap_or_else(|poison| poison.into_inner());
|
||||
let stale = guard
|
||||
.refreshed_at
|
||||
.is_none_or(|at| at.elapsed() >= LOCAL_INTERFACE_CACHE_TTL);
|
||||
if stale {
|
||||
guard.ips = choose_interface_snapshot(&guard.ips, refreshed);
|
||||
guard.refreshed_at = Some(StdInstant::now());
|
||||
}
|
||||
|
||||
guard.ips.clone()
|
||||
}
|
||||
|
||||
#[cfg(all(not(unix), test))]
|
||||
fn local_interface_ips() -> Vec<IpAddr> {
|
||||
Vec::new()
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
async fn local_interface_ips_async() -> Vec<IpAddr> {
|
||||
Vec::new()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
static LOCAL_INTERFACE_ENUMERATIONS: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
#[cfg(test)]
|
||||
fn reset_local_interface_enumerations_for_tests() {
|
||||
LOCAL_INTERFACE_ENUMERATIONS.store(0, Ordering::Relaxed);
|
||||
|
||||
#[cfg(unix)]
|
||||
if let Some(cache) = LOCAL_INTERFACE_CACHE.get() {
|
||||
let mut guard = cache.lock().unwrap_or_else(|poison| poison.into_inner());
|
||||
guard.ips.clear();
|
||||
guard.refreshed_at = None;
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn local_interface_enumerations_for_tests() -> usize {
|
||||
LOCAL_INTERFACE_ENUMERATIONS.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
fn is_mask_target_local_listener_with_interfaces(
|
||||
mask_host: &str,
|
||||
mask_port: u16,
|
||||
local_addr: SocketAddr,
|
||||
resolved_override: Option<SocketAddr>,
|
||||
interface_ips: &[IpAddr],
|
||||
) -> bool {
|
||||
if mask_port != local_addr.port() {
|
||||
return false;
|
||||
}
|
||||
|
||||
let local_ip = canonical_ip(local_addr.ip());
|
||||
let literal_mask_ip = parse_mask_host_ip_literal(mask_host).map(canonical_ip);
|
||||
|
||||
if let Some(addr) = resolved_override {
|
||||
let resolved_ip = canonical_ip(addr.ip());
|
||||
if resolved_ip == local_ip {
|
||||
return true;
|
||||
}
|
||||
|
||||
if local_ip.is_unspecified()
|
||||
&& (resolved_ip.is_loopback()
|
||||
|| resolved_ip.is_unspecified()
|
||||
|| interface_ips.contains(&resolved_ip))
|
||||
{
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(mask_ip) = literal_mask_ip {
|
||||
if mask_ip == local_ip {
|
||||
return true;
|
||||
}
|
||||
|
||||
if local_ip.is_unspecified()
|
||||
&& (mask_ip.is_loopback()
|
||||
|| mask_ip.is_unspecified()
|
||||
|| interface_ips.contains(&mask_ip))
|
||||
{
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn is_mask_target_local_listener(
|
||||
mask_host: &str,
|
||||
mask_port: u16,
|
||||
local_addr: SocketAddr,
|
||||
resolved_override: Option<SocketAddr>,
|
||||
) -> bool {
|
||||
if mask_port != local_addr.port() {
|
||||
return false;
|
||||
}
|
||||
|
||||
let interfaces = local_interface_ips();
|
||||
is_mask_target_local_listener_with_interfaces(
|
||||
mask_host,
|
||||
mask_port,
|
||||
local_addr,
|
||||
resolved_override,
|
||||
&interfaces,
|
||||
)
|
||||
}
|
||||
|
||||
async fn is_mask_target_local_listener_async(
|
||||
mask_host: &str,
|
||||
mask_port: u16,
|
||||
local_addr: SocketAddr,
|
||||
resolved_override: Option<SocketAddr>,
|
||||
) -> bool {
|
||||
if mask_port != local_addr.port() {
|
||||
return false;
|
||||
}
|
||||
|
||||
let interfaces = local_interface_ips_async().await;
|
||||
is_mask_target_local_listener_with_interfaces(
|
||||
mask_host,
|
||||
mask_port,
|
||||
local_addr,
|
||||
resolved_override,
|
||||
&interfaces,
|
||||
)
|
||||
}
|
||||
|
||||
fn masking_beobachten_ttl(config: &ProxyConfig) -> Duration {
|
||||
let minutes = config.general.beobachten_minutes;
|
||||
let clamped = minutes.clamp(1, 24 * 60);
|
||||
Duration::from_secs(clamped.saturating_mul(60))
|
||||
}
|
||||
|
||||
fn build_mask_proxy_header(
|
||||
version: u8,
|
||||
peer: SocketAddr,
|
||||
|
|
@ -290,13 +635,14 @@ pub async fn handle_bad_client<R, W>(
|
|||
{
|
||||
let client_type = detect_client_type(initial_data);
|
||||
if config.general.beobachten {
|
||||
let ttl = Duration::from_secs(config.general.beobachten_minutes.saturating_mul(60));
|
||||
let ttl = masking_beobachten_ttl(config);
|
||||
beobachten.record(client_type, peer.ip(), ttl);
|
||||
}
|
||||
|
||||
if !config.censorship.mask {
|
||||
// Masking disabled, just consume data
|
||||
consume_client_data_with_timeout(reader).await;
|
||||
consume_client_data_with_timeout_and_cap(reader, config.censorship.mask_relay_max_bytes)
|
||||
.await;
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -341,6 +687,7 @@ pub async fn handle_bad_client<R, W>(
|
|||
config.censorship.mask_shape_above_cap_blur,
|
||||
config.censorship.mask_shape_above_cap_blur_max_bytes,
|
||||
config.censorship.mask_shape_hardening_aggressive_mode,
|
||||
config.censorship.mask_relay_max_bytes,
|
||||
),
|
||||
)
|
||||
.await
|
||||
|
|
@ -353,12 +700,20 @@ pub async fn handle_bad_client<R, W>(
|
|||
Ok(Err(e)) => {
|
||||
wait_mask_connect_budget_if_needed(connect_started, config).await;
|
||||
debug!(error = %e, "Failed to connect to mask unix socket");
|
||||
consume_client_data_with_timeout(reader).await;
|
||||
consume_client_data_with_timeout_and_cap(
|
||||
reader,
|
||||
config.censorship.mask_relay_max_bytes,
|
||||
)
|
||||
.await;
|
||||
wait_mask_outcome_budget(outcome_started, config).await;
|
||||
}
|
||||
Err(_) => {
|
||||
debug!("Timeout connecting to mask unix socket");
|
||||
consume_client_data_with_timeout(reader).await;
|
||||
consume_client_data_with_timeout_and_cap(
|
||||
reader,
|
||||
config.censorship.mask_relay_max_bytes,
|
||||
)
|
||||
.await;
|
||||
wait_mask_outcome_budget(outcome_started, config).await;
|
||||
}
|
||||
}
|
||||
|
|
@ -372,6 +727,29 @@ pub async fn handle_bad_client<R, W>(
|
|||
.unwrap_or(&config.censorship.tls_domain);
|
||||
let mask_port = config.censorship.mask_port;
|
||||
|
||||
// Fail closed when fallback points at our own listener endpoint.
|
||||
// Self-referential masking can create recursive proxy loops under
|
||||
// misconfiguration and leak distinguishable load spikes to adversaries.
|
||||
let resolved_mask_addr = resolve_socket_addr(mask_host, mask_port);
|
||||
if is_mask_target_local_listener_async(mask_host, mask_port, local_addr, resolved_mask_addr)
|
||||
.await
|
||||
{
|
||||
let outcome_started = Instant::now();
|
||||
debug!(
|
||||
client_type = client_type,
|
||||
host = %mask_host,
|
||||
port = mask_port,
|
||||
local = %local_addr,
|
||||
"Mask target resolves to local listener; refusing self-referential masking fallback"
|
||||
);
|
||||
consume_client_data_with_timeout_and_cap(reader, config.censorship.mask_relay_max_bytes)
|
||||
.await;
|
||||
wait_mask_outcome_budget(outcome_started, config).await;
|
||||
return;
|
||||
}
|
||||
|
||||
let outcome_started = Instant::now();
|
||||
|
||||
debug!(
|
||||
client_type = client_type,
|
||||
host = %mask_host,
|
||||
|
|
@ -381,10 +759,9 @@ pub async fn handle_bad_client<R, W>(
|
|||
);
|
||||
|
||||
// Apply runtime DNS override for mask target when configured.
|
||||
let mask_addr = resolve_socket_addr(mask_host, mask_port)
|
||||
let mask_addr = resolved_mask_addr
|
||||
.map(|addr| addr.to_string())
|
||||
.unwrap_or_else(|| format!("{}:{}", mask_host, mask_port));
|
||||
let outcome_started = Instant::now();
|
||||
let connect_started = Instant::now();
|
||||
let connect_result = timeout(MASK_TIMEOUT, TcpStream::connect(&mask_addr)).await;
|
||||
match connect_result {
|
||||
|
|
@ -413,6 +790,7 @@ pub async fn handle_bad_client<R, W>(
|
|||
config.censorship.mask_shape_above_cap_blur,
|
||||
config.censorship.mask_shape_above_cap_blur_max_bytes,
|
||||
config.censorship.mask_shape_hardening_aggressive_mode,
|
||||
config.censorship.mask_relay_max_bytes,
|
||||
),
|
||||
)
|
||||
.await
|
||||
|
|
@ -425,12 +803,20 @@ pub async fn handle_bad_client<R, W>(
|
|||
Ok(Err(e)) => {
|
||||
wait_mask_connect_budget_if_needed(connect_started, config).await;
|
||||
debug!(error = %e, "Failed to connect to mask host");
|
||||
consume_client_data_with_timeout(reader).await;
|
||||
consume_client_data_with_timeout_and_cap(
|
||||
reader,
|
||||
config.censorship.mask_relay_max_bytes,
|
||||
)
|
||||
.await;
|
||||
wait_mask_outcome_budget(outcome_started, config).await;
|
||||
}
|
||||
Err(_) => {
|
||||
debug!("Timeout connecting to mask host");
|
||||
consume_client_data_with_timeout(reader).await;
|
||||
consume_client_data_with_timeout_and_cap(
|
||||
reader,
|
||||
config.censorship.mask_relay_max_bytes,
|
||||
)
|
||||
.await;
|
||||
wait_mask_outcome_budget(outcome_started, config).await;
|
||||
}
|
||||
}
|
||||
|
|
@ -449,6 +835,7 @@ async fn relay_to_mask<R, W, MR, MW>(
|
|||
shape_above_cap_blur: bool,
|
||||
shape_above_cap_blur_max_bytes: usize,
|
||||
shape_hardening_aggressive_mode: bool,
|
||||
mask_relay_max_bytes: usize,
|
||||
) where
|
||||
R: AsyncRead + Unpin + Send + 'static,
|
||||
W: AsyncWrite + Unpin + Send + 'static,
|
||||
|
|
@ -464,8 +851,18 @@ async fn relay_to_mask<R, W, MR, MW>(
|
|||
}
|
||||
|
||||
let (upstream_copy, downstream_copy) = tokio::join!(
|
||||
async { copy_with_idle_timeout(&mut reader, &mut mask_write).await },
|
||||
async { copy_with_idle_timeout(&mut mask_read, &mut writer).await }
|
||||
async {
|
||||
copy_with_idle_timeout(
|
||||
&mut reader,
|
||||
&mut mask_write,
|
||||
mask_relay_max_bytes,
|
||||
!shape_hardening_enabled,
|
||||
)
|
||||
.await
|
||||
},
|
||||
async {
|
||||
copy_with_idle_timeout(&mut mask_read, &mut writer, mask_relay_max_bytes, true).await
|
||||
}
|
||||
);
|
||||
|
||||
let total_sent = initial_data.len().saturating_add(upstream_copy.total);
|
||||
|
|
@ -491,13 +888,36 @@ async fn relay_to_mask<R, W, MR, MW>(
|
|||
let _ = writer.shutdown().await;
|
||||
}
|
||||
|
||||
/// Just consume all data from client without responding
|
||||
async fn consume_client_data<R: AsyncRead + Unpin>(mut reader: R) {
|
||||
let mut buf = vec![0u8; MASK_BUFFER_SIZE];
|
||||
while let Ok(n) = reader.read(&mut buf).await {
|
||||
/// Just consume all data from client without responding.
|
||||
async fn consume_client_data<R: AsyncRead + Unpin>(mut reader: R, byte_cap: usize) {
|
||||
if byte_cap == 0 {
|
||||
return;
|
||||
}
|
||||
|
||||
// Keep drain path fail-closed under slow-loris stalls.
|
||||
let mut buf = Box::new([0u8; MASK_BUFFER_SIZE]);
|
||||
let mut total = 0usize;
|
||||
|
||||
loop {
|
||||
let remaining_budget = byte_cap.saturating_sub(total);
|
||||
if remaining_budget == 0 {
|
||||
break;
|
||||
}
|
||||
|
||||
let read_len = remaining_budget.min(MASK_BUFFER_SIZE);
|
||||
let n = match timeout(MASK_RELAY_IDLE_TIMEOUT, reader.read(&mut buf[..read_len])).await {
|
||||
Ok(Ok(n)) => n,
|
||||
Ok(Err(_)) | Err(_) => break,
|
||||
};
|
||||
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
|
||||
total = total.saturating_add(n);
|
||||
if total >= byte_cap {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -521,6 +941,10 @@ mod masking_shape_above_cap_blur_security_tests;
|
|||
#[path = "tests/masking_timing_normalization_security_tests.rs"]
|
||||
mod masking_timing_normalization_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_timing_budget_coupling_security_tests.rs"]
|
||||
mod masking_timing_budget_coupling_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_ab_envelope_blur_integration_security_tests.rs"]
|
||||
mod masking_ab_envelope_blur_integration_security_tests;
|
||||
|
|
@ -548,3 +972,83 @@ mod masking_aggressive_mode_security_tests;
|
|||
#[cfg(test)]
|
||||
#[path = "tests/masking_timing_sidechannel_redteam_expected_fail_tests.rs"]
|
||||
mod masking_timing_sidechannel_redteam_expected_fail_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_self_target_loop_security_tests.rs"]
|
||||
mod masking_self_target_loop_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_classification_completeness_security_tests.rs"]
|
||||
mod masking_classification_completeness_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_relay_guardrails_security_tests.rs"]
|
||||
mod masking_relay_guardrails_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_connect_failure_close_matrix_security_tests.rs"]
|
||||
mod masking_connect_failure_close_matrix_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_additional_hardening_security_tests.rs"]
|
||||
mod masking_additional_hardening_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_consume_idle_timeout_security_tests.rs"]
|
||||
mod masking_consume_idle_timeout_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_http2_probe_classification_security_tests.rs"]
|
||||
mod masking_http2_probe_classification_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_http_probe_boundary_security_tests.rs"]
|
||||
mod masking_http_probe_boundary_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_rng_hoist_perf_regression_tests.rs"]
|
||||
mod masking_rng_hoist_perf_regression_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_http2_preface_integration_security_tests.rs"]
|
||||
mod masking_http2_preface_integration_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_consume_stress_adversarial_tests.rs"]
|
||||
mod masking_consume_stress_adversarial_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_interface_cache_security_tests.rs"]
|
||||
mod masking_interface_cache_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_interface_cache_defense_in_depth_security_tests.rs"]
|
||||
mod masking_interface_cache_defense_in_depth_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_interface_cache_concurrency_security_tests.rs"]
|
||||
mod masking_interface_cache_concurrency_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_production_cap_regression_security_tests.rs"]
|
||||
mod masking_production_cap_regression_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_extended_attack_surface_security_tests.rs"]
|
||||
mod masking_extended_attack_surface_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_padding_timeout_adversarial_tests.rs"]
|
||||
mod masking_padding_timeout_adversarial_tests;
|
||||
|
||||
#[cfg(all(test, feature = "redteam_offline_expected_fail"))]
|
||||
#[path = "tests/masking_offline_target_redteam_expected_fail_tests.rs"]
|
||||
mod masking_offline_target_redteam_expected_fail_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_baseline_invariant_tests.rs"]
|
||||
mod masking_baseline_invariant_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_lognormal_timing_security_tests.rs"]
|
||||
mod masking_lognormal_timing_security_tests;
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -67,6 +67,7 @@ pub mod middle_relay;
|
|||
pub mod relay;
|
||||
pub mod route_mode;
|
||||
pub mod session_eviction;
|
||||
pub mod shared_state;
|
||||
|
||||
pub use client::ClientHandler;
|
||||
#[allow(unused_imports)]
|
||||
|
|
@ -75,3 +76,15 @@ pub use handshake::*;
|
|||
pub use masking::*;
|
||||
#[allow(unused_imports)]
|
||||
pub use relay::*;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/test_harness_common.rs"]
|
||||
mod test_harness_common;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/proxy_shared_state_isolation_tests.rs"]
|
||||
mod proxy_shared_state_isolation_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/proxy_shared_state_parallel_execution_tests.rs"]
|
||||
mod proxy_shared_state_parallel_execution_tests;
|
||||
|
|
|
|||
|
|
@ -52,13 +52,12 @@
|
|||
//! - `SharedCounters` (atomics) let the watchdog read stats without locking
|
||||
|
||||
use crate::error::{ProxyError, Result};
|
||||
use crate::stats::Stats;
|
||||
use crate::stats::{Stats, UserStats};
|
||||
use crate::stream::BufferPool;
|
||||
use dashmap::DashMap;
|
||||
use std::io;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
|
||||
use std::sync::{Arc, Mutex, OnceLock};
|
||||
use std::task::{Context, Poll};
|
||||
use std::time::Duration;
|
||||
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf, copy_bidirectional_with_sizes};
|
||||
|
|
@ -71,6 +70,7 @@ use tracing::{debug, trace, warn};
|
|||
///
|
||||
/// iOS keeps Telegram connections alive in background for up to 30 minutes.
|
||||
/// Closing earlier causes unnecessary reconnects and handshake overhead.
|
||||
#[allow(dead_code)]
|
||||
const ACTIVITY_TIMEOUT: Duration = Duration::from_secs(1800);
|
||||
|
||||
/// Watchdog check interval — also used for periodic rate logging.
|
||||
|
|
@ -209,12 +209,10 @@ struct StatsIo<S> {
|
|||
counters: Arc<SharedCounters>,
|
||||
stats: Arc<Stats>,
|
||||
user: String,
|
||||
user_stats: Arc<UserStats>,
|
||||
quota_limit: Option<u64>,
|
||||
quota_exceeded: Arc<AtomicBool>,
|
||||
quota_read_wake_scheduled: bool,
|
||||
quota_write_wake_scheduled: bool,
|
||||
quota_read_retry_active: Arc<AtomicBool>,
|
||||
quota_write_retry_active: Arc<AtomicBool>,
|
||||
quota_bytes_since_check: u64,
|
||||
epoch: Instant,
|
||||
}
|
||||
|
||||
|
|
@ -230,30 +228,21 @@ impl<S> StatsIo<S> {
|
|||
) -> Self {
|
||||
// Mark initial activity so the watchdog doesn't fire before data flows
|
||||
counters.touch(Instant::now(), epoch);
|
||||
let user_stats = stats.get_or_create_user_stats_handle(&user);
|
||||
Self {
|
||||
inner,
|
||||
counters,
|
||||
stats,
|
||||
user,
|
||||
user_stats,
|
||||
quota_limit,
|
||||
quota_exceeded,
|
||||
quota_read_wake_scheduled: false,
|
||||
quota_write_wake_scheduled: false,
|
||||
quota_read_retry_active: Arc::new(AtomicBool::new(false)),
|
||||
quota_write_retry_active: Arc::new(AtomicBool::new(false)),
|
||||
quota_bytes_since_check: 0,
|
||||
epoch,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> Drop for StatsIo<S> {
|
||||
fn drop(&mut self) {
|
||||
self.quota_read_retry_active.store(false, Ordering::Relaxed);
|
||||
self.quota_write_retry_active
|
||||
.store(false, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct QuotaIoSentinel;
|
||||
|
||||
|
|
@ -277,84 +266,24 @@ fn is_quota_io_error(err: &io::Error) -> bool {
|
|||
.is_some()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
const QUOTA_CONTENTION_RETRY_INTERVAL: Duration = Duration::from_millis(1);
|
||||
#[cfg(not(test))]
|
||||
const QUOTA_CONTENTION_RETRY_INTERVAL: Duration = Duration::from_millis(2);
|
||||
const QUOTA_NEAR_LIMIT_BYTES: u64 = 64 * 1024;
|
||||
const QUOTA_LARGE_CHARGE_BYTES: u64 = 16 * 1024;
|
||||
const QUOTA_ADAPTIVE_INTERVAL_MIN_BYTES: u64 = 4 * 1024;
|
||||
const QUOTA_ADAPTIVE_INTERVAL_MAX_BYTES: u64 = 64 * 1024;
|
||||
const QUOTA_RESERVE_SPIN_RETRIES: usize = 64;
|
||||
const QUOTA_RESERVE_MAX_ROUNDS: usize = 8;
|
||||
|
||||
fn spawn_quota_retry_waker(retry_active: Arc<AtomicBool>, waker: std::task::Waker) {
|
||||
tokio::task::spawn(async move {
|
||||
loop {
|
||||
if !retry_active.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
tokio::time::sleep(QUOTA_CONTENTION_RETRY_INTERVAL).await;
|
||||
if !retry_active.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
waker.wake_by_ref();
|
||||
}
|
||||
});
|
||||
#[inline]
|
||||
fn quota_adaptive_interval_bytes(remaining_before: u64) -> u64 {
|
||||
remaining_before.saturating_div(2).clamp(
|
||||
QUOTA_ADAPTIVE_INTERVAL_MIN_BYTES,
|
||||
QUOTA_ADAPTIVE_INTERVAL_MAX_BYTES,
|
||||
)
|
||||
}
|
||||
|
||||
static QUOTA_USER_LOCKS: OnceLock<DashMap<String, Arc<Mutex<()>>>> = OnceLock::new();
|
||||
static QUOTA_USER_OVERFLOW_LOCKS: OnceLock<Vec<Arc<Mutex<()>>>> = OnceLock::new();
|
||||
|
||||
#[cfg(test)]
|
||||
const QUOTA_USER_LOCKS_MAX: usize = 64;
|
||||
#[cfg(not(test))]
|
||||
const QUOTA_USER_LOCKS_MAX: usize = 4_096;
|
||||
#[cfg(test)]
|
||||
const QUOTA_OVERFLOW_LOCK_STRIPES: usize = 16;
|
||||
#[cfg(not(test))]
|
||||
const QUOTA_OVERFLOW_LOCK_STRIPES: usize = 256;
|
||||
|
||||
#[cfg(test)]
|
||||
fn quota_user_lock_test_guard() -> &'static Mutex<()> {
|
||||
static TEST_LOCK: OnceLock<Mutex<()>> = OnceLock::new();
|
||||
TEST_LOCK.get_or_init(|| Mutex::new(()))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn quota_user_lock_test_scope() -> std::sync::MutexGuard<'static, ()> {
|
||||
quota_user_lock_test_guard()
|
||||
.lock()
|
||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
||||
}
|
||||
|
||||
fn quota_overflow_user_lock(user: &str) -> Arc<Mutex<()>> {
|
||||
let stripes = QUOTA_USER_OVERFLOW_LOCKS.get_or_init(|| {
|
||||
(0..QUOTA_OVERFLOW_LOCK_STRIPES)
|
||||
.map(|_| Arc::new(Mutex::new(())))
|
||||
.collect()
|
||||
});
|
||||
|
||||
let hash = crc32fast::hash(user.as_bytes()) as usize;
|
||||
Arc::clone(&stripes[hash % stripes.len()])
|
||||
}
|
||||
|
||||
fn quota_user_lock(user: &str) -> Arc<Mutex<()>> {
|
||||
let locks = QUOTA_USER_LOCKS.get_or_init(DashMap::new);
|
||||
if let Some(existing) = locks.get(user) {
|
||||
return Arc::clone(existing.value());
|
||||
}
|
||||
|
||||
if locks.len() >= QUOTA_USER_LOCKS_MAX {
|
||||
locks.retain(|_, value| Arc::strong_count(value) > 1);
|
||||
}
|
||||
|
||||
if locks.len() >= QUOTA_USER_LOCKS_MAX {
|
||||
return quota_overflow_user_lock(user);
|
||||
}
|
||||
|
||||
let created = Arc::new(Mutex::new(()));
|
||||
match locks.entry(user.to_string()) {
|
||||
dashmap::mapref::entry::Entry::Occupied(entry) => Arc::clone(entry.get()),
|
||||
dashmap::mapref::entry::Entry::Vacant(entry) => {
|
||||
entry.insert(Arc::clone(&created));
|
||||
created
|
||||
}
|
||||
}
|
||||
#[inline]
|
||||
fn should_immediate_quota_check(remaining_before: u64, charge_bytes: u64) -> bool {
|
||||
remaining_before <= QUOTA_NEAR_LIMIT_BYTES || charge_bytes >= QUOTA_LARGE_CHARGE_BYTES
|
||||
}
|
||||
|
||||
impl<S: AsyncRead + Unpin> AsyncRead for StatsIo<S> {
|
||||
|
|
@ -364,81 +293,90 @@ impl<S: AsyncRead + Unpin> AsyncRead for StatsIo<S> {
|
|||
buf: &mut ReadBuf<'_>,
|
||||
) -> Poll<io::Result<()>> {
|
||||
let this = self.get_mut();
|
||||
if this.quota_exceeded.load(Ordering::Relaxed) {
|
||||
if this.quota_exceeded.load(Ordering::Acquire) {
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
}
|
||||
|
||||
let quota_lock = this
|
||||
.quota_limit
|
||||
.is_some()
|
||||
.then(|| quota_user_lock(&this.user));
|
||||
let _quota_guard = if let Some(lock) = quota_lock.as_ref() {
|
||||
match lock.try_lock() {
|
||||
Ok(guard) => {
|
||||
this.quota_read_wake_scheduled = false;
|
||||
this.quota_read_retry_active.store(false, Ordering::Relaxed);
|
||||
Some(guard)
|
||||
}
|
||||
Err(_) => {
|
||||
if !this.quota_read_wake_scheduled {
|
||||
this.quota_read_wake_scheduled = true;
|
||||
this.quota_read_retry_active.store(true, Ordering::Relaxed);
|
||||
spawn_quota_retry_waker(
|
||||
Arc::clone(&this.quota_read_retry_active),
|
||||
cx.waker().clone(),
|
||||
);
|
||||
}
|
||||
return Poll::Pending;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
if let Some(limit) = this.quota_limit
|
||||
&& this.stats.get_user_total_octets(&this.user) >= limit
|
||||
{
|
||||
this.quota_exceeded.store(true, Ordering::Relaxed);
|
||||
let mut remaining_before = None;
|
||||
if let Some(limit) = this.quota_limit {
|
||||
let used_before = this.user_stats.quota_used();
|
||||
let remaining = limit.saturating_sub(used_before);
|
||||
if remaining == 0 {
|
||||
this.quota_exceeded.store(true, Ordering::Release);
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
}
|
||||
remaining_before = Some(remaining);
|
||||
}
|
||||
|
||||
let before = buf.filled().len();
|
||||
|
||||
match Pin::new(&mut this.inner).poll_read(cx, buf) {
|
||||
Poll::Ready(Ok(())) => {
|
||||
let n = buf.filled().len() - before;
|
||||
if n > 0 {
|
||||
let mut reached_quota_boundary = false;
|
||||
if let Some(limit) = this.quota_limit {
|
||||
let used = this.stats.get_user_total_octets(&this.user);
|
||||
if used >= limit {
|
||||
this.quota_exceeded.store(true, Ordering::Relaxed);
|
||||
let n_to_charge = n as u64;
|
||||
|
||||
if let (Some(limit), Some(remaining)) = (this.quota_limit, remaining_before) {
|
||||
let mut reserved_total = None;
|
||||
let mut reserve_rounds = 0usize;
|
||||
while reserved_total.is_none() {
|
||||
let mut saw_contention = false;
|
||||
for _ in 0..QUOTA_RESERVE_SPIN_RETRIES {
|
||||
match this.user_stats.quota_try_reserve(n_to_charge, limit) {
|
||||
Ok(total) => {
|
||||
reserved_total = Some(total);
|
||||
break;
|
||||
}
|
||||
Err(crate::stats::QuotaReserveError::LimitExceeded) => {
|
||||
this.quota_exceeded.store(true, Ordering::Release);
|
||||
buf.set_filled(before);
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
}
|
||||
|
||||
let remaining = limit - used;
|
||||
if (n as u64) > remaining {
|
||||
// Fail closed: when a single read chunk would cross quota,
|
||||
// stop relay immediately without accounting beyond the cap.
|
||||
this.quota_exceeded.store(true, Ordering::Relaxed);
|
||||
Err(crate::stats::QuotaReserveError::Contended) => {
|
||||
saw_contention = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
if reserved_total.is_none() {
|
||||
reserve_rounds = reserve_rounds.saturating_add(1);
|
||||
if reserve_rounds >= QUOTA_RESERVE_MAX_ROUNDS {
|
||||
this.quota_exceeded.store(true, Ordering::Release);
|
||||
buf.set_filled(before);
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
}
|
||||
if saw_contention {
|
||||
std::thread::yield_now();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
reached_quota_boundary = (n as u64) == remaining;
|
||||
if should_immediate_quota_check(remaining, n_to_charge) {
|
||||
this.quota_bytes_since_check = 0;
|
||||
} else {
|
||||
this.quota_bytes_since_check =
|
||||
this.quota_bytes_since_check.saturating_add(n_to_charge);
|
||||
let interval = quota_adaptive_interval_bytes(remaining);
|
||||
if this.quota_bytes_since_check >= interval {
|
||||
this.quota_bytes_since_check = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if reserved_total.unwrap_or(0) >= limit {
|
||||
this.quota_exceeded.store(true, Ordering::Release);
|
||||
}
|
||||
}
|
||||
|
||||
// C→S: client sent data
|
||||
this.counters
|
||||
.c2s_bytes
|
||||
.fetch_add(n as u64, Ordering::Relaxed);
|
||||
.fetch_add(n_to_charge, Ordering::Relaxed);
|
||||
this.counters.c2s_ops.fetch_add(1, Ordering::Relaxed);
|
||||
this.counters.touch(Instant::now(), this.epoch);
|
||||
|
||||
this.stats.add_user_octets_from(&this.user, n as u64);
|
||||
this.stats.increment_user_msgs_from(&this.user);
|
||||
|
||||
if reached_quota_boundary {
|
||||
this.quota_exceeded.store(true, Ordering::Relaxed);
|
||||
}
|
||||
this.stats
|
||||
.add_user_octets_from_handle(this.user_stats.as_ref(), n_to_charge);
|
||||
this.stats
|
||||
.increment_user_msgs_from_handle(this.user_stats.as_ref());
|
||||
|
||||
trace!(user = %this.user, bytes = n, "C->S");
|
||||
}
|
||||
|
|
@ -456,82 +394,158 @@ impl<S: AsyncWrite + Unpin> AsyncWrite for StatsIo<S> {
|
|||
buf: &[u8],
|
||||
) -> Poll<io::Result<usize>> {
|
||||
let this = self.get_mut();
|
||||
if this.quota_exceeded.load(Ordering::Relaxed) {
|
||||
if this.quota_exceeded.load(Ordering::Acquire) {
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
}
|
||||
|
||||
let quota_lock = this
|
||||
.quota_limit
|
||||
.is_some()
|
||||
.then(|| quota_user_lock(&this.user));
|
||||
let _quota_guard = if let Some(lock) = quota_lock.as_ref() {
|
||||
match lock.try_lock() {
|
||||
Ok(guard) => {
|
||||
this.quota_write_wake_scheduled = false;
|
||||
this.quota_write_retry_active
|
||||
.store(false, Ordering::Relaxed);
|
||||
Some(guard)
|
||||
let mut remaining_before = None;
|
||||
let mut reserved_bytes = 0u64;
|
||||
let mut write_buf = buf;
|
||||
if let Some(limit) = this.quota_limit {
|
||||
if !buf.is_empty() {
|
||||
let mut reserve_rounds = 0usize;
|
||||
while reserved_bytes == 0 {
|
||||
let used_before = this.user_stats.quota_used();
|
||||
let remaining = limit.saturating_sub(used_before);
|
||||
if remaining == 0 {
|
||||
this.quota_exceeded.store(true, Ordering::Release);
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
}
|
||||
Err(_) => {
|
||||
if !this.quota_write_wake_scheduled {
|
||||
this.quota_write_wake_scheduled = true;
|
||||
this.quota_write_retry_active.store(true, Ordering::Relaxed);
|
||||
spawn_quota_retry_waker(
|
||||
Arc::clone(&this.quota_write_retry_active),
|
||||
cx.waker().clone(),
|
||||
);
|
||||
remaining_before = Some(remaining);
|
||||
|
||||
let desired = remaining.min(buf.len() as u64);
|
||||
let mut saw_contention = false;
|
||||
for _ in 0..QUOTA_RESERVE_SPIN_RETRIES {
|
||||
match this.user_stats.quota_try_reserve(desired, limit) {
|
||||
Ok(_) => {
|
||||
reserved_bytes = desired;
|
||||
write_buf = &buf[..desired as usize];
|
||||
break;
|
||||
}
|
||||
Err(crate::stats::QuotaReserveError::LimitExceeded) => {
|
||||
break;
|
||||
}
|
||||
Err(crate::stats::QuotaReserveError::Contended) => {
|
||||
saw_contention = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if reserved_bytes == 0 {
|
||||
reserve_rounds = reserve_rounds.saturating_add(1);
|
||||
if reserve_rounds >= QUOTA_RESERVE_MAX_ROUNDS {
|
||||
this.quota_exceeded.store(true, Ordering::Release);
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
}
|
||||
if saw_contention {
|
||||
std::thread::yield_now();
|
||||
}
|
||||
return Poll::Pending;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let write_buf = if let Some(limit) = this.quota_limit {
|
||||
let used = this.stats.get_user_total_octets(&this.user);
|
||||
if used >= limit {
|
||||
this.quota_exceeded.store(true, Ordering::Relaxed);
|
||||
let used_before = this.user_stats.quota_used();
|
||||
let remaining = limit.saturating_sub(used_before);
|
||||
if remaining == 0 {
|
||||
this.quota_exceeded.store(true, Ordering::Release);
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
}
|
||||
|
||||
let remaining = (limit - used) as usize;
|
||||
if buf.len() > remaining {
|
||||
// Fail closed: do not emit partial S->C payload when remaining
|
||||
// quota cannot accommodate the pending write request.
|
||||
this.quota_exceeded.store(true, Ordering::Relaxed);
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
remaining_before = Some(remaining);
|
||||
}
|
||||
}
|
||||
buf
|
||||
} else {
|
||||
buf
|
||||
};
|
||||
|
||||
match Pin::new(&mut this.inner).poll_write(cx, write_buf) {
|
||||
Poll::Ready(Ok(n)) => {
|
||||
if reserved_bytes > n as u64 {
|
||||
let refund = reserved_bytes - n as u64;
|
||||
let mut current = this.user_stats.quota_used.load(Ordering::Relaxed);
|
||||
loop {
|
||||
let next = current.saturating_sub(refund);
|
||||
match this.user_stats.quota_used.compare_exchange_weak(
|
||||
current,
|
||||
next,
|
||||
Ordering::Relaxed,
|
||||
Ordering::Relaxed,
|
||||
) {
|
||||
Ok(_) => break,
|
||||
Err(observed) => current = observed,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if n > 0 {
|
||||
let n_to_charge = n as u64;
|
||||
|
||||
// S→C: data written to client
|
||||
this.counters
|
||||
.s2c_bytes
|
||||
.fetch_add(n as u64, Ordering::Relaxed);
|
||||
.fetch_add(n_to_charge, Ordering::Relaxed);
|
||||
this.counters.s2c_ops.fetch_add(1, Ordering::Relaxed);
|
||||
this.counters.touch(Instant::now(), this.epoch);
|
||||
|
||||
this.stats.add_user_octets_to(&this.user, n as u64);
|
||||
this.stats.increment_user_msgs_to(&this.user);
|
||||
this.stats
|
||||
.add_user_octets_to_handle(this.user_stats.as_ref(), n_to_charge);
|
||||
this.stats
|
||||
.increment_user_msgs_to_handle(this.user_stats.as_ref());
|
||||
|
||||
if let Some(limit) = this.quota_limit
|
||||
&& this.stats.get_user_total_octets(&this.user) >= limit
|
||||
{
|
||||
this.quota_exceeded.store(true, Ordering::Relaxed);
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
if let (Some(limit), Some(remaining)) = (this.quota_limit, remaining_before) {
|
||||
if should_immediate_quota_check(remaining, n_to_charge) {
|
||||
this.quota_bytes_since_check = 0;
|
||||
if this.user_stats.quota_used() >= limit {
|
||||
this.quota_exceeded.store(true, Ordering::Release);
|
||||
}
|
||||
} else {
|
||||
this.quota_bytes_since_check =
|
||||
this.quota_bytes_since_check.saturating_add(n_to_charge);
|
||||
let interval = quota_adaptive_interval_bytes(remaining);
|
||||
if this.quota_bytes_since_check >= interval {
|
||||
this.quota_bytes_since_check = 0;
|
||||
if this.user_stats.quota_used() >= limit {
|
||||
this.quota_exceeded.store(true, Ordering::Release);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
trace!(user = %this.user, bytes = n, "S->C");
|
||||
}
|
||||
Poll::Ready(Ok(n))
|
||||
}
|
||||
other => other,
|
||||
Poll::Ready(Err(err)) => {
|
||||
if reserved_bytes > 0 {
|
||||
let mut current = this.user_stats.quota_used.load(Ordering::Relaxed);
|
||||
loop {
|
||||
let next = current.saturating_sub(reserved_bytes);
|
||||
match this.user_stats.quota_used.compare_exchange_weak(
|
||||
current,
|
||||
next,
|
||||
Ordering::Relaxed,
|
||||
Ordering::Relaxed,
|
||||
) {
|
||||
Ok(_) => break,
|
||||
Err(observed) => current = observed,
|
||||
}
|
||||
}
|
||||
}
|
||||
Poll::Ready(Err(err))
|
||||
}
|
||||
Poll::Pending => {
|
||||
if reserved_bytes > 0 {
|
||||
let mut current = this.user_stats.quota_used.load(Ordering::Relaxed);
|
||||
loop {
|
||||
let next = current.saturating_sub(reserved_bytes);
|
||||
match this.user_stats.quota_used.compare_exchange_weak(
|
||||
current,
|
||||
next,
|
||||
Ordering::Relaxed,
|
||||
Ordering::Relaxed,
|
||||
) {
|
||||
Ok(_) => break,
|
||||
Err(observed) => current = observed,
|
||||
}
|
||||
}
|
||||
}
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -565,6 +579,7 @@ impl<S: AsyncWrite + Unpin> AsyncWrite for StatsIo<S> {
|
|||
/// - Clean shutdown: both write sides are shut down on exit
|
||||
/// - Error propagation: quota exits return `ProxyError::DataQuotaExceeded`,
|
||||
/// other I/O failures are returned as `ProxyError::Io`
|
||||
#[allow(dead_code)]
|
||||
pub async fn relay_bidirectional<CR, CW, SR, SW>(
|
||||
client_reader: CR,
|
||||
client_writer: CW,
|
||||
|
|
@ -583,6 +598,42 @@ where
|
|||
SR: AsyncRead + Unpin + Send + 'static,
|
||||
SW: AsyncWrite + Unpin + Send + 'static,
|
||||
{
|
||||
relay_bidirectional_with_activity_timeout(
|
||||
client_reader,
|
||||
client_writer,
|
||||
server_reader,
|
||||
server_writer,
|
||||
c2s_buf_size,
|
||||
s2c_buf_size,
|
||||
user,
|
||||
stats,
|
||||
quota_limit,
|
||||
_buffer_pool,
|
||||
ACTIVITY_TIMEOUT,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn relay_bidirectional_with_activity_timeout<CR, CW, SR, SW>(
|
||||
client_reader: CR,
|
||||
client_writer: CW,
|
||||
server_reader: SR,
|
||||
server_writer: SW,
|
||||
c2s_buf_size: usize,
|
||||
s2c_buf_size: usize,
|
||||
user: &str,
|
||||
stats: Arc<Stats>,
|
||||
quota_limit: Option<u64>,
|
||||
_buffer_pool: Arc<BufferPool>,
|
||||
activity_timeout: Duration,
|
||||
) -> Result<()>
|
||||
where
|
||||
CR: AsyncRead + Unpin + Send + 'static,
|
||||
CW: AsyncWrite + Unpin + Send + 'static,
|
||||
SR: AsyncRead + Unpin + Send + 'static,
|
||||
SW: AsyncWrite + Unpin + Send + 'static,
|
||||
{
|
||||
let activity_timeout = activity_timeout.max(Duration::from_secs(1));
|
||||
let epoch = Instant::now();
|
||||
let counters = Arc::new(SharedCounters::new());
|
||||
let quota_exceeded = Arc::new(AtomicBool::new(false));
|
||||
|
|
@ -618,13 +669,13 @@ where
|
|||
let now = Instant::now();
|
||||
let idle = wd_counters.idle_duration(now, epoch);
|
||||
|
||||
if wd_quota_exceeded.load(Ordering::Relaxed) {
|
||||
if wd_quota_exceeded.load(Ordering::Acquire) {
|
||||
warn!(user = %wd_user, "User data quota reached, closing relay");
|
||||
return;
|
||||
}
|
||||
|
||||
// ── Activity timeout ────────────────────────────────────
|
||||
if idle >= ACTIVITY_TIMEOUT {
|
||||
if idle >= activity_timeout {
|
||||
let c2s = wd_counters.c2s_bytes.load(Ordering::Relaxed);
|
||||
let s2c = wd_counters.s2c_bytes.load(Ordering::Relaxed);
|
||||
warn!(
|
||||
|
|
@ -756,18 +807,10 @@ where
|
|||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_security_tests.rs"]
|
||||
mod security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_adversarial_tests.rs"]
|
||||
mod adversarial_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_quota_lock_pressure_adversarial_tests.rs"]
|
||||
mod relay_quota_lock_pressure_adversarial_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_quota_boundary_blackhat_tests.rs"]
|
||||
mod relay_quota_boundary_blackhat_tests;
|
||||
|
|
@ -780,14 +823,18 @@ mod relay_quota_model_adversarial_tests;
|
|||
#[path = "tests/relay_quota_overflow_regression_tests.rs"]
|
||||
mod relay_quota_overflow_regression_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_quota_extended_attack_surface_security_tests.rs"]
|
||||
mod relay_quota_extended_attack_surface_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_watchdog_delta_security_tests.rs"]
|
||||
mod relay_watchdog_delta_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_quota_waker_storm_adversarial_tests.rs"]
|
||||
mod relay_quota_waker_storm_adversarial_tests;
|
||||
#[path = "tests/relay_atomic_quota_invariant_tests.rs"]
|
||||
mod relay_atomic_quota_invariant_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_quota_wake_liveness_regression_tests.rs"]
|
||||
mod relay_quota_wake_liveness_regression_tests;
|
||||
#[path = "tests/relay_baseline_invariant_tests.rs"]
|
||||
mod relay_baseline_invariant_tests;
|
||||
|
|
|
|||
|
|
@ -0,0 +1,165 @@
|
|||
use std::collections::HashSet;
|
||||
use std::collections::hash_map::RandomState;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::sync::atomic::{AtomicBool, AtomicU32, AtomicU64, Ordering};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::time::Instant;
|
||||
|
||||
use dashmap::DashMap;
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
use crate::proxy::handshake::{AuthProbeSaturationState, AuthProbeState};
|
||||
use crate::proxy::middle_relay::{DesyncDedupRotationState, RelayIdleCandidateRegistry};
|
||||
|
||||
const HANDSHAKE_RECENT_USER_RING_LEN: usize = 64;
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub(crate) enum ConntrackCloseReason {
|
||||
NormalEof,
|
||||
Timeout,
|
||||
Pressure,
|
||||
Reset,
|
||||
Other,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub(crate) struct ConntrackCloseEvent {
|
||||
pub(crate) src: SocketAddr,
|
||||
pub(crate) dst: SocketAddr,
|
||||
pub(crate) reason: ConntrackCloseReason,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub(crate) enum ConntrackClosePublishResult {
|
||||
Sent,
|
||||
Disabled,
|
||||
QueueFull,
|
||||
QueueClosed,
|
||||
}
|
||||
|
||||
pub(crate) struct HandshakeSharedState {
|
||||
pub(crate) auth_probe: DashMap<IpAddr, AuthProbeState>,
|
||||
pub(crate) auth_probe_saturation: Mutex<Option<AuthProbeSaturationState>>,
|
||||
pub(crate) auth_probe_eviction_hasher: RandomState,
|
||||
pub(crate) invalid_secret_warned: Mutex<HashSet<(String, String)>>,
|
||||
pub(crate) unknown_sni_warn_next_allowed: Mutex<Option<Instant>>,
|
||||
pub(crate) sticky_user_by_ip: DashMap<IpAddr, u32>,
|
||||
pub(crate) sticky_user_by_ip_prefix: DashMap<u64, u32>,
|
||||
pub(crate) sticky_user_by_sni_hash: DashMap<u64, u32>,
|
||||
pub(crate) recent_user_ring: Box<[AtomicU32]>,
|
||||
pub(crate) recent_user_ring_seq: AtomicU64,
|
||||
pub(crate) auth_expensive_checks_total: AtomicU64,
|
||||
pub(crate) auth_budget_exhausted_total: AtomicU64,
|
||||
}
|
||||
|
||||
pub(crate) struct MiddleRelaySharedState {
|
||||
pub(crate) desync_dedup: DashMap<u64, Instant>,
|
||||
pub(crate) desync_dedup_previous: DashMap<u64, Instant>,
|
||||
pub(crate) desync_hasher: RandomState,
|
||||
pub(crate) desync_full_cache_last_emit_at: Mutex<Option<Instant>>,
|
||||
pub(crate) desync_dedup_rotation_state: Mutex<DesyncDedupRotationState>,
|
||||
pub(crate) relay_idle_registry: Mutex<RelayIdleCandidateRegistry>,
|
||||
pub(crate) relay_idle_mark_seq: AtomicU64,
|
||||
}
|
||||
|
||||
pub(crate) struct ProxySharedState {
|
||||
pub(crate) handshake: HandshakeSharedState,
|
||||
pub(crate) middle_relay: MiddleRelaySharedState,
|
||||
pub(crate) conntrack_pressure_active: AtomicBool,
|
||||
pub(crate) conntrack_close_tx: Mutex<Option<mpsc::Sender<ConntrackCloseEvent>>>,
|
||||
}
|
||||
|
||||
impl ProxySharedState {
|
||||
pub(crate) fn new() -> Arc<Self> {
|
||||
Arc::new(Self {
|
||||
handshake: HandshakeSharedState {
|
||||
auth_probe: DashMap::new(),
|
||||
auth_probe_saturation: Mutex::new(None),
|
||||
auth_probe_eviction_hasher: RandomState::new(),
|
||||
invalid_secret_warned: Mutex::new(HashSet::new()),
|
||||
unknown_sni_warn_next_allowed: Mutex::new(None),
|
||||
sticky_user_by_ip: DashMap::new(),
|
||||
sticky_user_by_ip_prefix: DashMap::new(),
|
||||
sticky_user_by_sni_hash: DashMap::new(),
|
||||
recent_user_ring: std::iter::repeat_with(|| AtomicU32::new(0))
|
||||
.take(HANDSHAKE_RECENT_USER_RING_LEN)
|
||||
.collect::<Vec<_>>()
|
||||
.into_boxed_slice(),
|
||||
recent_user_ring_seq: AtomicU64::new(0),
|
||||
auth_expensive_checks_total: AtomicU64::new(0),
|
||||
auth_budget_exhausted_total: AtomicU64::new(0),
|
||||
},
|
||||
middle_relay: MiddleRelaySharedState {
|
||||
desync_dedup: DashMap::new(),
|
||||
desync_dedup_previous: DashMap::new(),
|
||||
desync_hasher: RandomState::new(),
|
||||
desync_full_cache_last_emit_at: Mutex::new(None),
|
||||
desync_dedup_rotation_state: Mutex::new(DesyncDedupRotationState::default()),
|
||||
relay_idle_registry: Mutex::new(RelayIdleCandidateRegistry::default()),
|
||||
relay_idle_mark_seq: AtomicU64::new(0),
|
||||
},
|
||||
conntrack_pressure_active: AtomicBool::new(false),
|
||||
conntrack_close_tx: Mutex::new(None),
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn set_conntrack_close_sender(&self, tx: mpsc::Sender<ConntrackCloseEvent>) {
|
||||
match self.conntrack_close_tx.lock() {
|
||||
Ok(mut guard) => {
|
||||
*guard = Some(tx);
|
||||
}
|
||||
Err(poisoned) => {
|
||||
let mut guard = poisoned.into_inner();
|
||||
*guard = Some(tx);
|
||||
self.conntrack_close_tx.clear_poison();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn disable_conntrack_close_sender(&self) {
|
||||
match self.conntrack_close_tx.lock() {
|
||||
Ok(mut guard) => {
|
||||
*guard = None;
|
||||
}
|
||||
Err(poisoned) => {
|
||||
let mut guard = poisoned.into_inner();
|
||||
*guard = None;
|
||||
self.conntrack_close_tx.clear_poison();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn publish_conntrack_close_event(
|
||||
&self,
|
||||
event: ConntrackCloseEvent,
|
||||
) -> ConntrackClosePublishResult {
|
||||
let tx = match self.conntrack_close_tx.lock() {
|
||||
Ok(guard) => guard.clone(),
|
||||
Err(poisoned) => {
|
||||
let guard = poisoned.into_inner();
|
||||
let cloned = guard.clone();
|
||||
self.conntrack_close_tx.clear_poison();
|
||||
cloned
|
||||
}
|
||||
};
|
||||
|
||||
let Some(tx) = tx else {
|
||||
return ConntrackClosePublishResult::Disabled;
|
||||
};
|
||||
|
||||
match tx.try_send(event) {
|
||||
Ok(()) => ConntrackClosePublishResult::Sent,
|
||||
Err(mpsc::error::TrySendError::Full(_)) => ConntrackClosePublishResult::QueueFull,
|
||||
Err(mpsc::error::TrySendError::Closed(_)) => ConntrackClosePublishResult::QueueClosed,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn set_conntrack_pressure_active(&self, active: bool) {
|
||||
self.conntrack_pressure_active
|
||||
.store(active, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
pub(crate) fn conntrack_pressure_active(&self) -> bool {
|
||||
self.conntrack_pressure_active.load(Ordering::Relaxed)
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,260 @@
|
|||
use super::*;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
// Monotonic counter seeding unique map keys; starts at 1_000_000 so keys
// minted here cannot collide with those from the sibling test module.
static RACE_TEST_KEY_COUNTER: AtomicUsize = AtomicUsize::new(1_000_000);

/// Mints a key of the form `<prefix>_<serial>` that no other test reuses.
fn race_unique_key(prefix: &str) -> String {
    let serial = RACE_TEST_KEY_COUNTER.fetch_add(1, Ordering::Relaxed);
    let mut key = String::with_capacity(prefix.len() + 12);
    key.push_str(prefix);
    key.push('_');
    key.push_str(serial.to_string().as_str());
    key
}
|
||||
|
||||
// ── TOCTOU race: concurrent record_user_tier can downgrade tier ─────────
// Two threads call record_user_tier for the same NEW user simultaneously.
// Thread A records Tier2, Thread B records Base. Without an atomic entry API,
// the insert() call overwrites without max(), causing a Tier2 → Base downgrade.

#[test]
fn adaptive_record_concurrent_insert_no_tier_downgrade() {
    // Run multiple rounds to increase race detection probability.
    for round in 0..50 {
        let key = race_unique_key(&format!("race_downgrade_{}", round));
        let key_a = key.clone();
        let key_b = key.clone();

        // Barrier releases both writers at (nearly) the same instant.
        let barrier = Arc::new(std::sync::Barrier::new(2));
        let barrier_a = Arc::clone(&barrier);
        let barrier_b = Arc::clone(&barrier);

        let ha = std::thread::spawn(move || {
            barrier_a.wait();
            record_user_tier(&key_a, AdaptiveTier::Tier2);
        });

        let hb = std::thread::spawn(move || {
            barrier_b.wait();
            record_user_tier(&key_b, AdaptiveTier::Base);
        });

        ha.join().expect("thread A panicked");
        hb.join().expect("thread B panicked");

        let result = seed_tier_for_user(&key);
        // Remove the key so later rounds/tests are unaffected.
        profiles().remove(&key);

        // The final tier must be at least Tier2, never downgraded to Base.
        // With correct max() semantics: max(Tier2, Base) = Tier2.
        assert!(
            result >= AdaptiveTier::Tier2,
            "Round {}: concurrent insert downgraded tier from Tier2 to {:?}",
            round,
            result,
        );
    }
}
|
||||
|
||||
// ── TOCTOU race: three threads write three tiers, highest must survive ──

#[test]
fn adaptive_record_triple_concurrent_insert_highest_tier_survives() {
    for round in 0..30 {
        let key = race_unique_key(&format!("triple_race_{}", round));
        let barrier = Arc::new(std::sync::Barrier::new(3));

        // One writer per tier; all three are released together by the barrier.
        let handles: Vec<_> = [AdaptiveTier::Base, AdaptiveTier::Tier1, AdaptiveTier::Tier3]
            .into_iter()
            .map(|tier| {
                let k = key.clone();
                let b = Arc::clone(&barrier);
                std::thread::spawn(move || {
                    b.wait();
                    record_user_tier(&k, tier);
                })
            })
            .collect();

        for h in handles {
            h.join().expect("thread panicked");
        }

        let result = seed_tier_for_user(&key);
        profiles().remove(&key);

        // Whatever the interleaving, the highest tier written must win.
        assert!(
            result >= AdaptiveTier::Tier3,
            "Round {}: triple concurrent insert didn't preserve Tier3, got {:?}",
            round,
            result,
        );
    }
}
|
||||
|
||||
// ── Stress: 20 threads writing different tiers to same key ──────────────

#[test]
fn adaptive_record_20_concurrent_writers_no_panic_no_downgrade() {
    let key = race_unique_key("stress_20");
    let barrier = Arc::new(std::sync::Barrier::new(20));

    // 20 writers, tiers distributed round-robin (i % 4), 100 writes each.
    let handles: Vec<_> = (0..20u32)
        .map(|i| {
            let k = key.clone();
            let b = Arc::clone(&barrier);
            std::thread::spawn(move || {
                b.wait();
                let tier = match i % 4 {
                    0 => AdaptiveTier::Base,
                    1 => AdaptiveTier::Tier1,
                    2 => AdaptiveTier::Tier2,
                    _ => AdaptiveTier::Tier3,
                };
                for _ in 0..100 {
                    record_user_tier(&k, tier);
                }
            })
        })
        .collect();

    for h in handles {
        h.join().expect("thread panicked");
    }

    let result = seed_tier_for_user(&key);
    profiles().remove(&key);

    // At least one thread writes Tier3, max() should preserve it
    assert!(
        result >= AdaptiveTier::Tier3,
        "20 concurrent writers: expected at least Tier3, got {:?}",
        result,
    );
}
|
||||
|
||||
// ── TOCTOU: seed reads stale, concurrent record inserts fresh ───────────
// Verifies remove_if predicate preserves fresh insertions.

#[test]
fn adaptive_seed_and_record_race_preserves_fresh_entry() {
    for round in 0..30 {
        let key = race_unique_key(&format!("seed_record_race_{}", round));

        // Plant a stale entry (600 s old; presumably past the profile TTL —
        // see the TTL constant in the module under test).
        let stale_time = Instant::now() - Duration::from_secs(600);
        profiles().insert(
            key.clone(),
            UserAdaptiveProfile {
                tier: AdaptiveTier::Tier1,
                seen_at: stale_time,
            },
        );

        let key_seed = key.clone();
        let key_record = key.clone();
        let barrier = Arc::new(std::sync::Barrier::new(2));
        let barrier_s = Arc::clone(&barrier);
        let barrier_r = Arc::clone(&barrier);

        // Seeder and recorder start simultaneously.
        let h_seed = std::thread::spawn(move || {
            barrier_s.wait();
            seed_tier_for_user(&key_seed)
        });

        let h_record = std::thread::spawn(move || {
            barrier_r.wait();
            record_user_tier(&key_record, AdaptiveTier::Tier3);
        });

        let _seed_result = h_seed.join().expect("seed thread panicked");
        h_record.join().expect("record thread panicked");

        let final_result = seed_tier_for_user(&key);
        profiles().remove(&key);

        // Fresh Tier3 entry should survive the stale-removal race.
        // Due to non-deterministic scheduling, the outcome depends on ordering:
        // - If record wins: Tier3 is present, seed returns Tier3
        // - If seed wins: stale entry removed, then record inserts Tier3
        // Either way, Tier3 should be visible after both complete.
        assert!(
            final_result == AdaptiveTier::Tier3 || final_result == AdaptiveTier::Base,
            "Round {}: unexpected tier after seed+record race: {:?}",
            round,
            final_result,
        );
    }
}
|
||||
|
||||
// ── Eviction safety: retain() during concurrent inserts ─────────────────

#[test]
fn adaptive_eviction_during_concurrent_inserts_no_panic() {
    let prefix = race_unique_key("evict_conc");
    let stale_time = Instant::now() - Duration::from_secs(600);

    // Pre-fill with stale entries to push past the eviction threshold
    for i in 0..100 {
        let k = format!("{}_{}", prefix, i);
        profiles().insert(
            k,
            UserAdaptiveProfile {
                tier: AdaptiveTier::Base,
                seen_at: stale_time,
            },
        );
    }

    // 10 threads × 50 distinct keys each, inserted while eviction may be
    // running inside record_user_tier. Success criterion: no panic/deadlock.
    let barrier = Arc::new(std::sync::Barrier::new(10));
    let handles: Vec<_> = (0..10)
        .map(|t| {
            let b = Arc::clone(&barrier);
            let pfx = prefix.clone();
            std::thread::spawn(move || {
                b.wait();
                for i in 0..50 {
                    let k = format!("{}_t{}_{}", pfx, t, i);
                    record_user_tier(&k, AdaptiveTier::Tier1);
                }
            })
        })
        .collect();

    for h in handles {
        h.join().expect("eviction thread panicked");
    }

    // Cleanup
    profiles().retain(|k, _| !k.starts_with(&prefix));
}
|
||||
|
||||
// ── Adversarial: attacker races insert+seed in tight loop ───────────────

#[test]
fn adaptive_tight_loop_insert_seed_race_no_panic() {
    let key = race_unique_key("tight_loop");
    let key_w = key.clone();
    let key_r = key.clone();

    // Shared stop flag for both spin loops.
    let done = Arc::new(std::sync::atomic::AtomicBool::new(false));
    let done_w = Arc::clone(&done);
    let done_r = Arc::clone(&done);

    // Writer hammers record_user_tier on one key...
    let writer = std::thread::spawn(move || {
        while !done_w.load(Ordering::Relaxed) {
            record_user_tier(&key_w, AdaptiveTier::Tier2);
        }
    });

    // ...while the reader hammers seed_tier_for_user on the same key.
    let reader = std::thread::spawn(move || {
        while !done_r.load(Ordering::Relaxed) {
            let _ = seed_tier_for_user(&key_r);
        }
    });

    // Let the race run for 100 ms, then stop both loops.
    std::thread::sleep(Duration::from_millis(100));
    done.store(true, Ordering::Relaxed);

    writer.join().expect("writer panicked");
    reader.join().expect("reader panicked");
    profiles().remove(&key);
}
|
||||
|
|
@ -0,0 +1,453 @@
|
|||
use super::*;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
// Unique key generator to avoid test interference through the global DashMap.
static TEST_KEY_COUNTER: AtomicUsize = AtomicUsize::new(0);

/// Produces `<prefix>_<n>` with a process-unique serial `n`.
fn unique_key(prefix: &str) -> String {
    let n = TEST_KEY_COUNTER.fetch_add(1, Ordering::Relaxed);
    [prefix, "_", n.to_string().as_str()].concat()
}
|
||||
|
||||
// ── Positive / Lifecycle ────────────────────────────────────────────────

/// A user never seen before seeds at the lowest tier.
#[test]
fn adaptive_seed_unknown_user_returns_base() {
    let user = unique_key("seed_unknown");
    assert_eq!(seed_tier_for_user(&user), AdaptiveTier::Base);
}

/// A recorded tier is returned verbatim on the next seed.
#[test]
fn adaptive_record_then_seed_returns_recorded_tier() {
    let user = unique_key("record_seed");
    record_user_tier(&user, AdaptiveTier::Tier1);
    assert_eq!(seed_tier_for_user(&user), AdaptiveTier::Tier1);
}

/// Profiles are keyed per user; one user's tier never bleeds into another's.
#[test]
fn adaptive_separate_users_have_independent_tiers() {
    let first = unique_key("indep_a");
    let second = unique_key("indep_b");
    record_user_tier(&first, AdaptiveTier::Tier1);
    record_user_tier(&second, AdaptiveTier::Tier2);
    assert_eq!(seed_tier_for_user(&first), AdaptiveTier::Tier1);
    assert_eq!(seed_tier_for_user(&second), AdaptiveTier::Tier2);
}

/// Recording a higher tier on top of a lower one upgrades the profile.
#[test]
fn adaptive_record_upgrades_tier_within_ttl() {
    let user = unique_key("upgrade");
    record_user_tier(&user, AdaptiveTier::Base);
    record_user_tier(&user, AdaptiveTier::Tier1);
    assert_eq!(seed_tier_for_user(&user), AdaptiveTier::Tier1);
}

/// Recording a lower tier on top of a higher one is a no-op within the TTL:
/// max(Tier2, Base) = Tier2 — the higher tier is retained.
#[test]
fn adaptive_record_does_not_downgrade_within_ttl() {
    let user = unique_key("no_downgrade");
    record_user_tier(&user, AdaptiveTier::Tier2);
    record_user_tier(&user, AdaptiveTier::Base);
    assert_eq!(seed_tier_for_user(&user), AdaptiveTier::Tier2);
}
|
||||
|
||||
// ── Edge Cases ──────────────────────────────────────────────────────────

/// Base tier passes the configured buffer sizes through untouched.
#[test]
fn adaptive_base_tier_buffers_unchanged() {
    let (upload, download) = direct_copy_buffers_for_tier(AdaptiveTier::Base, 65536, 262144);
    assert_eq!(upload, 65536);
    assert_eq!(download, 262144);
}

/// Tier1 enlarges both directions but stays inside the hard caps.
#[test]
fn adaptive_tier1_buffers_within_caps() {
    let (upload, download) = direct_copy_buffers_for_tier(AdaptiveTier::Tier1, 65536, 262144);
    assert!(upload > 65536, "Tier1 c2s should exceed Base");
    assert!(
        upload <= 128 * 1024,
        "Tier1 c2s should not exceed DIRECT_C2S_CAP_BYTES"
    );
    assert!(download > 262144, "Tier1 s2c should exceed Base");
    assert!(
        download <= 512 * 1024,
        "Tier1 s2c should not exceed DIRECT_S2C_CAP_BYTES"
    );
}

/// Even the highest tier is clamped to the direct-copy caps.
#[test]
fn adaptive_tier3_buffers_capped() {
    let (upload, download) = direct_copy_buffers_for_tier(AdaptiveTier::Tier3, 65536, 262144);
    assert!(upload <= 128 * 1024, "Tier3 c2s must not exceed cap");
    assert!(download <= 512 * 1024, "Tier3 s2c must not exceed cap");
}

/// Scaling a zero base still yields a usable (>= 1 byte) buffer — the
/// implementation's `.max(1)` guard.
#[test]
fn adaptive_scale_zero_base_returns_at_least_one() {
    let (upload, download) = direct_copy_buffers_for_tier(AdaptiveTier::Tier1, 0, 0);
    assert!(upload >= 1);
    assert!(download >= 1);
}
|
||||
|
||||
// ── Stale Entry Handling ────────────────────────────────────────────────

#[test]
fn adaptive_stale_profile_returns_base_tier() {
    let key = unique_key("stale_base");
    // Manually insert a stale entry with seen_at in the far past.
    // PROFILE_TTL = 300s, so 600s ago is well past expiry.
    let stale_time = Instant::now() - Duration::from_secs(600);
    profiles().insert(
        key.clone(),
        UserAdaptiveProfile {
            tier: AdaptiveTier::Tier3,
            seen_at: stale_time,
        },
    );
    // NOTE(review): this test does not remove `key` afterwards; it relies on
    // the unique prefix to avoid interference with other tests.
    assert_eq!(
        seed_tier_for_user(&key),
        AdaptiveTier::Base,
        "Stale profile should return Base"
    );
}

// RED TEST: exposes the stale entry leak bug.
// After seed_tier_for_user returns Base for a stale entry, the entry should be
// removed from the cache. Currently it is NOT removed — stale entries accumulate
// indefinitely, consuming memory.
#[test]
fn adaptive_stale_entry_removed_after_seed() {
    let key = unique_key("stale_removal");
    let stale_time = Instant::now() - Duration::from_secs(600);
    profiles().insert(
        key.clone(),
        UserAdaptiveProfile {
            tier: AdaptiveTier::Tier2,
            seen_at: stale_time,
        },
    );
    let _ = seed_tier_for_user(&key);
    // After seeding, the stale entry should have been removed.
    assert!(
        !profiles().contains_key(&key),
        "Stale entry should be removed from cache after seed_tier_for_user"
    );
}
|
||||
|
||||
// ── Cardinality Attack / Unbounded Growth ───────────────────────────────

// RED TEST: exposes the missing eviction cap.
// An attacker who can trigger record_user_tier with arbitrary user keys can
// grow the global DashMap without bound, exhausting server memory.
// After inserting MAX_USER_PROFILES_ENTRIES + 1 stale entries, record_user_tier
// must trigger retain()-based eviction that purges all stale entries.
#[test]
fn adaptive_profile_cache_bounded_under_cardinality_attack() {
    let prefix = unique_key("cardinality");
    let stale_time = Instant::now() - Duration::from_secs(600);
    let n = MAX_USER_PROFILES_ENTRIES + 1;
    // Flood the cache one past the cap, all entries already stale.
    for i in 0..n {
        let key = format!("{}_{}", prefix, i);
        profiles().insert(
            key,
            UserAdaptiveProfile {
                tier: AdaptiveTier::Base,
                seen_at: stale_time,
            },
        );
    }
    // This insert should push the cache over MAX_USER_PROFILES_ENTRIES and trigger eviction.
    let trigger_key = unique_key("cardinality_trigger");
    record_user_tier(&trigger_key, AdaptiveTier::Base);

    // Count surviving stale entries.
    let mut surviving_stale = 0;
    for i in 0..n {
        let key = format!("{}_{}", prefix, i);
        if profiles().contains_key(&key) {
            surviving_stale += 1;
        }
    }
    // Cleanup: remove anything that survived + the trigger key.
    // Done BEFORE the assert so a failure doesn't leak entries into other tests.
    for i in 0..n {
        let key = format!("{}_{}", prefix, i);
        profiles().remove(&key);
    }
    profiles().remove(&trigger_key);

    // All stale entries (600s past PROFILE_TTL=300s) should have been evicted.
    assert_eq!(
        surviving_stale, 0,
        "All {} stale entries should be evicted, but {} survived",
        n, surviving_stale
    );
}
|
||||
|
||||
// ── Key Length Validation ────────────────────────────────────────────────

// RED TEST: exposes missing key length validation.
// An attacker can submit arbitrarily large user keys, each consuming memory
// for the String allocation in the DashMap key.
#[test]
fn adaptive_oversized_user_key_rejected_on_record() {
    let huge: String = "X".repeat(1024); // 1 KB key — should be rejected
    record_user_tier(&huge, AdaptiveTier::Tier1);
    // With key length validation, the oversized key should NOT be stored.
    let was_stored = profiles().contains_key(&huge);
    // Clean up regardless of the outcome, then assert.
    profiles().remove(&huge);
    assert!(
        !was_stored,
        "Oversized user key (1024 bytes) should be rejected by record_user_tier"
    );
}

/// Seeding an oversized key that was planted directly must still yield Base.
#[test]
fn adaptive_oversized_user_key_rejected_on_seed() {
    let huge: String = "X".repeat(1024);
    // Bypass record_user_tier and plant the entry directly.
    profiles().insert(
        huge.clone(),
        UserAdaptiveProfile {
            tier: AdaptiveTier::Tier3,
            seen_at: Instant::now(),
        },
    );
    let seeded = seed_tier_for_user(&huge);
    profiles().remove(&huge);
    assert_eq!(
        seeded,
        AdaptiveTier::Base,
        "Oversized user key should return Base from seed_tier_for_user"
    );
}

/// The empty string is a legal (if unusual) key: round-trips without panicking.
#[test]
fn adaptive_empty_user_key_safe() {
    record_user_tier("", AdaptiveTier::Tier1);
    let seeded = seed_tier_for_user("");
    profiles().remove("");
    assert_eq!(seeded, AdaptiveTier::Tier1);
}

/// A key at exactly the 512-byte limit must be accepted and round-trip.
#[test]
fn adaptive_max_length_key_accepted() {
    let boundary_key: String = "K".repeat(512);
    record_user_tier(&boundary_key, AdaptiveTier::Tier1);
    let seeded = seed_tier_for_user(&boundary_key);
    profiles().remove(&boundary_key);
    assert_eq!(seeded, AdaptiveTier::Tier1);
}
|
||||
|
||||
// ── Concurrent Access Safety ────────────────────────────────────────────

#[test]
fn adaptive_concurrent_record_and_seed_no_torn_read() {
    let key = unique_key("concurrent_rw");
    let key_clone = key.clone();

    // Record from multiple threads simultaneously
    let handles: Vec<_> = (0..10)
        .map(|i| {
            let k = key_clone.clone();
            std::thread::spawn(move || {
                // Even threads write Tier1, odd threads Tier2.
                let tier = if i % 2 == 0 {
                    AdaptiveTier::Tier1
                } else {
                    AdaptiveTier::Tier2
                };
                record_user_tier(&k, tier);
            })
        })
        .collect();

    for h in handles {
        h.join().expect("thread panicked");
    }

    let result = seed_tier_for_user(&key);
    profiles().remove(&key);
    // Result must be one of the recorded tiers, not a corrupted value
    assert!(
        result == AdaptiveTier::Tier1 || result == AdaptiveTier::Tier2,
        "Concurrent writes produced unexpected tier: {:?}",
        result
    );
}

#[test]
fn adaptive_concurrent_seed_does_not_panic() {
    let key = unique_key("concurrent_seed");
    record_user_tier(&key, AdaptiveTier::Tier1);
    let key_clone = key.clone();

    // 20 readers × 100 seeds each hammering the same hot key.
    let handles: Vec<_> = (0..20)
        .map(|_| {
            let k = key_clone.clone();
            std::thread::spawn(move || {
                for _ in 0..100 {
                    let _ = seed_tier_for_user(&k);
                }
            })
        })
        .collect();

    for h in handles {
        h.join().expect("concurrent seed panicked");
    }
    profiles().remove(&key);
}
|
||||
|
||||
// ── TOCTOU: Concurrent seed + record race ───────────────────────────────

// RED TEST: seed_tier_for_user reads a stale entry, drops the reference,
// then another thread inserts a fresh entry. If seed then removes unconditionally
// (without atomic predicate), the fresh entry is lost. With remove_if, the
// fresh entry survives.
#[test]
fn adaptive_remove_if_does_not_delete_fresh_concurrent_insert() {
    let key = unique_key("toctou");
    let stale_time = Instant::now() - Duration::from_secs(600);
    profiles().insert(
        key.clone(),
        UserAdaptiveProfile {
            tier: AdaptiveTier::Tier1,
            seen_at: stale_time,
        },
    );

    // Thread A: seed_tier (will see stale, should attempt removal)
    // Thread B: record_user_tier (inserts fresh entry concurrently)
    let key_a = key.clone();
    let key_b = key.clone();

    let handle_b = std::thread::spawn(move || {
        // Small yield to increase chance of interleaving
        std::thread::yield_now();
        record_user_tier(&key_b, AdaptiveTier::Tier3);
    });

    let _ = seed_tier_for_user(&key_a);

    handle_b.join().expect("thread B panicked");

    // After both operations, the fresh Tier3 entry should survive.
    // With a correct remove_if predicate, the fresh entry is NOT deleted.
    // Without remove_if (current code), the entry may be lost.
    let final_tier = seed_tier_for_user(&key);
    profiles().remove(&key);

    // The fresh Tier3 entry should survive the stale-removal race.
    // Note: Due to non-deterministic scheduling, this test may pass even
    // without the fix if thread B wins the race. Run with --test-threads=1
    // or multiple iterations for reliable detection.
    assert!(
        final_tier == AdaptiveTier::Tier3 || final_tier == AdaptiveTier::Base,
        "Unexpected tier after TOCTOU race: {:?}",
        final_tier
    );
}
|
||||
|
||||
// ── Fuzz: Random keys ──────────────────────────────────────────────────

/// Hammers record/seed with 200 random printable-ASCII keys (length 0..=256)
/// to verify arbitrary key content never panics, then removes all keys.
#[test]
fn adaptive_fuzz_random_keys_no_panic() {
    // `Rng` supplies `random_range`; the previously imported `RngExt` is not
    // a rand trait and broke compilation.
    use rand::Rng;
    let mut rng = rand::rng();
    let mut keys = Vec::new();
    for _ in 0..200 {
        let len: usize = rng.random_range(0..=256);
        let key: String = (0..len)
            .map(|_| {
                // Printable ASCII: space (0x20) through tilde (0x7E).
                let c: u8 = rng.random_range(0x20..=0x7E);
                c as char
            })
            .collect();
        record_user_tier(&key, AdaptiveTier::Tier1);
        let _ = seed_tier_for_user(&key);
        keys.push(key);
    }
    // Cleanup: drop every fuzz key from the shared profile map.
    for key in &keys {
        profiles().remove(key);
    }
}
|
||||
|
||||
// ── average_throughput_to_tier (proposed function, tests the mapping) ────

// These tests verify the function that will be added in PR-D.
// They are written against the current code's constant definitions.

/// 7 Mbps sits below the 8 Mbps promotion threshold → Base.
#[test]
fn adaptive_throughput_mapping_below_threshold_is_base() {
    // 7 Mbps = 875_000 bytes/s; over a 10 s window that is 8_750_000 bytes.
    // The dominant direction (max of c2s/s2c) drives the classification.
    let upload_bytes: u64 = 8_750_000;
    let download_bytes: u64 = 1_000_000;
    let window_secs: f64 = 10.0;
    let avg_bps = (upload_bytes.max(download_bytes) as f64 * 8.0) / window_secs;
    assert!(
        avg_bps < THROUGHPUT_UP_BPS,
        "Should be below threshold: {} < {}",
        avg_bps,
        THROUGHPUT_UP_BPS,
    );
}

/// 10 Mbps clears the 8 Mbps promotion threshold → Tier1.
#[test]
fn adaptive_throughput_mapping_above_threshold_is_tier1() {
    // 10 Mbps sustained for 10 s = 12_500_000 bytes transferred.
    let transferred: u64 = 12_500_000;
    let window_secs: f64 = 10.0;
    let avg_bps = (transferred as f64 * 8.0) / window_secs;
    assert!(
        avg_bps >= THROUGHPUT_UP_BPS,
        "Should be above threshold: {} >= {}",
        avg_bps,
        THROUGHPUT_UP_BPS,
    );
}

/// Documents the guard: sessions under 1 s carry too little signal to
/// promote, regardless of measured throughput.
#[test]
fn adaptive_throughput_short_session_should_return_base() {
    let session_secs: f64 = 0.5;
    assert!(
        session_secs < 1.0,
        "Short session duration guard should activate"
    );
}
|
||||
|
||||
// ── me_flush_policy_for_tier ────────────────────────────────────────────

/// Base tier leaves the flush policy exactly as configured.
#[test]
fn adaptive_me_flush_base_unchanged() {
    let (max_frames, max_bytes, flush_delay) =
        me_flush_policy_for_tier(AdaptiveTier::Base, 32, 65536, Duration::from_micros(1000));
    assert_eq!(max_frames, 32);
    assert_eq!(max_bytes, 65536);
    assert_eq!(flush_delay, Duration::from_micros(1000));
}

/// Tier1 shortens the flush delay to 7/10 of the configured value (700 µs).
#[test]
fn adaptive_me_flush_tier1_delay_reduced() {
    let (_, _, flush_delay) =
        me_flush_policy_for_tier(AdaptiveTier::Tier1, 32, 65536, Duration::from_micros(1000));
    assert_eq!(flush_delay, Duration::from_micros(700));
}

/// Scaling never pushes the delay under ME_DELAY_MIN_US (150 µs): Tier3
/// would give 200 * 3/10 = 60 µs, which must be clamped up.
#[test]
fn adaptive_me_flush_delay_never_below_minimum() {
    let (_, _, flush_delay) =
        me_flush_policy_for_tier(AdaptiveTier::Tier3, 32, 65536, Duration::from_micros(200));
    assert!(flush_delay.as_micros() >= 150, "Delay must respect minimum");
}
|
||||
|
|
@ -0,0 +1,473 @@
|
|||
use super::*;
|
||||
use crate::config::{ProxyConfig, UpstreamConfig, UpstreamType};
|
||||
use crate::protocol::constants::{MAX_TLS_PLAINTEXT_SIZE, MIN_TLS_CLIENT_HELLO_SIZE};
|
||||
use crate::stats::Stats;
|
||||
use crate::transport::UpstreamManager;
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use std::task::{Context, Poll};
|
||||
use std::time::Duration;
|
||||
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWriteExt, ReadBuf, duplex};
|
||||
use tokio::net::TcpListener;
|
||||
|
||||
#[test]
fn edge_mask_reject_delay_min_greater_than_max_does_not_panic() {
    let mut config = ProxyConfig::default();
    // Intentionally inverted bounds: min (5000 ms) > max (1000 ms).
    config.censorship.server_hello_delay_min_ms = 5000;
    config.censorship.server_hello_delay_max_ms = 1000;

    let rt = tokio::runtime::Runtime::new().unwrap();
    rt.block_on(async {
        let start = std::time::Instant::now();
        maybe_apply_mask_reject_delay(&config).await;
        let elapsed = start.elapsed();

        // NOTE(review): assumes inverted bounds collapse to ~1000 ms rather
        // than panic — confirm against maybe_apply_mask_reject_delay.
        assert!(elapsed >= Duration::from_millis(1000));
        assert!(elapsed < Duration::from_millis(1500));
    });
}
|
||||
|
||||
/// With masking enabled and a u64::MAX handshake timeout configured, adding
/// the mask grace period must saturate instead of overflowing.
#[test]
fn edge_handshake_timeout_with_mask_grace_saturating_add_prevents_overflow() {
    let mut cfg = ProxyConfig::default();
    cfg.timeouts.client_handshake = u64::MAX;
    cfg.censorship.mask = true;

    let effective = handshake_timeout_with_mask_grace(&cfg);
    assert_eq!(effective.as_secs(), u64::MAX);
}
|
||||
|
||||
/// ClientHello length bounds are inclusive on both ends: exactly MIN and
/// exactly MAX are accepted, one byte outside either edge is rejected.
#[test]
fn edge_tls_clienthello_len_in_bounds_exact_boundaries() {
    let cases = [
        (MIN_TLS_CLIENT_HELLO_SIZE, true),
        (MIN_TLS_CLIENT_HELLO_SIZE - 1, false),
        (MAX_TLS_PLAINTEXT_SIZE, true),
        (MAX_TLS_PLAINTEXT_SIZE + 1, false),
    ];
    for (len, expected) in cases {
        assert_eq!(tls_clienthello_len_in_bounds(len), expected);
    }
}
|
||||
|
||||
/// The synthetic local address must echo the requested port across the full
/// u16 range, including both extremes.
#[test]
fn edge_synthetic_local_addr_boundaries() {
    for port in [0u16, 80, u16::MAX] {
        assert_eq!(synthetic_local_addr(port).port(), port);
    }
}
|
||||
|
||||
#[test]
fn edge_beobachten_record_handshake_failure_class_stream_error_eof() {
    let beobachten = BeobachtenStore::new();
    let mut config = ProxyConfig::default();
    config.general.beobachten = true;
    config.general.beobachten_minutes = 1;

    // An UnexpectedEof stream error should be classified as a truncated
    // 64-byte handshake read.
    let eof_err = ProxyError::Stream(crate::error::StreamError::UnexpectedEof);
    let peer_ip: IpAddr = "198.51.100.100".parse().unwrap();

    record_handshake_failure_class(&beobachten, &config, peer_ip, &eof_err);

    // The text snapshot must carry the failure-class tag for this error.
    let snapshot = beobachten.snapshot_text(Duration::from_secs(60));
    assert!(snapshot.contains("[expected_64_got_0]"));
}
|
||||
|
||||
#[tokio::test]
async fn adversarial_tls_handshake_timeout_during_masking_delay() {
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    // 1 s handshake budget vs a fixed 3 s masking delay: the delay must not
    // suspend the handshake timer.
    cfg.timeouts.client_handshake = 1;
    cfg.censorship.mask = true;
    cfg.censorship.server_hello_delay_min_ms = 3000;
    cfg.censorship.server_hello_delay_max_ms = 3000;

    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());
    // In-memory duplex pipe stands in for the TCP socket.
    let (server_side, mut client_side) = duplex(4096);

    let handle = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.1:55000".parse().unwrap(),
        config,
        stats.clone(),
        Arc::new(UpstreamManager::new(
            vec![],
            1,
            1,
            1,
            10,
            1,
            false,
            stats.clone(),
        )),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));

    // Minimal TLS record header: type=handshake (0x16), TLS 1.0, bogus
    // 0xFFFF length — enough to enter the TLS path, never completed.
    client_side
        .write_all(&[0x16, 0x03, 0x01, 0xFF, 0xFF])
        .await
        .unwrap();

    let result = tokio::time::timeout(Duration::from_secs(4), handle)
        .await
        .unwrap()
        .unwrap();

    // The handler must time out the handshake and count it.
    assert!(matches!(result, Err(ProxyError::TgHandshakeTimeout)));
    assert_eq!(stats.get_handshake_timeouts(), 1);
}
|
||||
|
||||
#[tokio::test]
async fn blackhat_proxy_protocol_slowloris_timeout() {
    let mut cfg = ProxyConfig::default();
    // 200 ms header budget; the client below stalls for 300 ms mid-header.
    cfg.server.proxy_protocol_header_timeout_ms = 200;
    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());

    let (server_side, mut client_side) = duplex(4096);
    let handle = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.2:55000".parse().unwrap(),
        config,
        stats.clone(),
        Arc::new(UpstreamManager::new(
            vec![],
            1,
            1,
            1,
            10,
            1,
            false,
            stats.clone(),
        )),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        true,
    ));

    // Send only a prefix of the PROXY v1 header, then stall past the timeout.
    client_side.write_all(b"PROXY TCP4 192.").await.unwrap();
    tokio::time::sleep(Duration::from_millis(300)).await;

    let result = tokio::time::timeout(Duration::from_secs(2), handle)
        .await
        .unwrap()
        .unwrap();

    // Slowloris on the header must be rejected and counted as a bad connect.
    assert!(matches!(result, Err(ProxyError::InvalidProxyProtocol)));
    assert_eq!(stats.get_connects_bad(), 1);
}
|
||||
|
||||
#[test]
|
||||
fn blackhat_ipv4_mapped_ipv6_proxy_source_bypass_attempt() {
|
||||
let trusted = vec!["192.0.2.0/24".parse().unwrap()];
|
||||
let peer_ip = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x0201));
|
||||
assert!(!is_trusted_proxy_source(peer_ip, &trusted));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn negative_proxy_protocol_enabled_but_client_sends_tls_hello() {
|
||||
let mut cfg = ProxyConfig::default();
|
||||
cfg.server.proxy_protocol_header_timeout_ms = 500;
|
||||
let config = Arc::new(cfg);
|
||||
let stats = Arc::new(Stats::new());
|
||||
|
||||
let (server_side, mut client_side) = duplex(4096);
|
||||
let handle = tokio::spawn(handle_client_stream(
|
||||
server_side,
|
||||
"198.51.100.3:55000".parse().unwrap(),
|
||||
config,
|
||||
stats.clone(),
|
||||
Arc::new(UpstreamManager::new(
|
||||
vec![],
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
)),
|
||||
Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
|
||||
Arc::new(BufferPool::new()),
|
||||
Arc::new(SecureRandom::new()),
|
||||
None,
|
||||
Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||
None,
|
||||
Arc::new(UserIpTracker::new()),
|
||||
Arc::new(BeobachtenStore::new()),
|
||||
true,
|
||||
));
|
||||
|
||||
client_side
|
||||
.write_all(&[0x16, 0x03, 0x01, 0x02, 0x00])
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let result = tokio::time::timeout(Duration::from_secs(2), handle)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
assert!(matches!(result, Err(ProxyError::InvalidProxyProtocol)));
|
||||
assert_eq!(stats.get_connects_bad(), 1);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn edge_client_stream_exactly_4_bytes_eof() {
|
||||
let config = Arc::new(ProxyConfig::default());
|
||||
let stats = Arc::new(Stats::new());
|
||||
let beobachten = Arc::new(BeobachtenStore::new());
|
||||
|
||||
let (server_side, mut client_side) = duplex(4096);
|
||||
let handle = tokio::spawn(handle_client_stream(
|
||||
server_side,
|
||||
"198.51.100.4:55000".parse().unwrap(),
|
||||
config,
|
||||
stats.clone(),
|
||||
Arc::new(UpstreamManager::new(
|
||||
vec![],
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
)),
|
||||
Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
|
||||
Arc::new(BufferPool::new()),
|
||||
Arc::new(SecureRandom::new()),
|
||||
None,
|
||||
Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||
None,
|
||||
Arc::new(UserIpTracker::new()),
|
||||
beobachten.clone(),
|
||||
false,
|
||||
));
|
||||
|
||||
client_side
|
||||
.write_all(&[0x16, 0x03, 0x01, 0x00])
|
||||
.await
|
||||
.unwrap();
|
||||
client_side.shutdown().await.unwrap();
|
||||
|
||||
let _ = tokio::time::timeout(Duration::from_secs(2), handle).await;
|
||||
|
||||
let snapshot = beobachten.snapshot_text(Duration::from_secs(60));
|
||||
assert!(snapshot.contains("[expected_64_got_0]"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn edge_client_stream_tls_header_valid_but_body_1_byte_short_eof() {
|
||||
let config = Arc::new(ProxyConfig::default());
|
||||
let stats = Arc::new(Stats::new());
|
||||
|
||||
let (server_side, mut client_side) = duplex(4096);
|
||||
let handle = tokio::spawn(handle_client_stream(
|
||||
server_side,
|
||||
"198.51.100.5:55000".parse().unwrap(),
|
||||
config,
|
||||
stats.clone(),
|
||||
Arc::new(UpstreamManager::new(
|
||||
vec![],
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
)),
|
||||
Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
|
||||
Arc::new(BufferPool::new()),
|
||||
Arc::new(SecureRandom::new()),
|
||||
None,
|
||||
Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||
None,
|
||||
Arc::new(UserIpTracker::new()),
|
||||
Arc::new(BeobachtenStore::new()),
|
||||
false,
|
||||
));
|
||||
|
||||
client_side
|
||||
.write_all(&[0x16, 0x03, 0x01, 0x00, 100])
|
||||
.await
|
||||
.unwrap();
|
||||
client_side.write_all(&vec![0x41; 99]).await.unwrap();
|
||||
client_side.shutdown().await.unwrap();
|
||||
|
||||
let _ = tokio::time::timeout(Duration::from_secs(2), handle).await;
|
||||
assert_eq!(stats.get_connects_bad(), 1);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn integration_non_tls_modes_disabled_immediately_masks() {
|
||||
let mut cfg = ProxyConfig::default();
|
||||
cfg.general.modes.classic = false;
|
||||
cfg.general.modes.secure = false;
|
||||
cfg.censorship.mask = true;
|
||||
let config = Arc::new(cfg);
|
||||
let stats = Arc::new(Stats::new());
|
||||
|
||||
let (server_side, mut client_side) = duplex(4096);
|
||||
let handle = tokio::spawn(handle_client_stream(
|
||||
server_side,
|
||||
"198.51.100.6:55000".parse().unwrap(),
|
||||
config,
|
||||
stats.clone(),
|
||||
Arc::new(UpstreamManager::new(
|
||||
vec![],
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
)),
|
||||
Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
|
||||
Arc::new(BufferPool::new()),
|
||||
Arc::new(SecureRandom::new()),
|
||||
None,
|
||||
Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||
None,
|
||||
Arc::new(UserIpTracker::new()),
|
||||
Arc::new(BeobachtenStore::new()),
|
||||
false,
|
||||
));
|
||||
|
||||
client_side.write_all(b"GET / HTTP/1.1\r\n").await.unwrap();
|
||||
let _ = tokio::time::timeout(Duration::from_secs(2), handle).await;
|
||||
assert_eq!(stats.get_connects_bad(), 1);
|
||||
}
|
||||
|
||||
struct YieldingReader {
|
||||
data: Vec<u8>,
|
||||
pos: usize,
|
||||
yields_left: usize,
|
||||
}
|
||||
|
||||
impl AsyncRead for YieldingReader {
|
||||
fn poll_read(
|
||||
self: Pin<&mut Self>,
|
||||
cx: &mut Context<'_>,
|
||||
buf: &mut ReadBuf<'_>,
|
||||
) -> Poll<std::io::Result<()>> {
|
||||
let this = self.get_mut();
|
||||
if this.yields_left > 0 {
|
||||
this.yields_left -= 1;
|
||||
cx.waker().wake_by_ref();
|
||||
return Poll::Pending;
|
||||
}
|
||||
if this.pos >= this.data.len() {
|
||||
return Poll::Ready(Ok(()));
|
||||
}
|
||||
buf.put_slice(&this.data[this.pos..this.pos + 1]);
|
||||
this.pos += 1;
|
||||
this.yields_left = 2;
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn fuzz_read_with_progress_heavy_yielding() {
|
||||
let expected_data = b"HEAVY_YIELD_TEST_DATA".to_vec();
|
||||
let mut reader = YieldingReader {
|
||||
data: expected_data.clone(),
|
||||
pos: 0,
|
||||
yields_left: 2,
|
||||
};
|
||||
|
||||
let mut buf = vec![0u8; expected_data.len()];
|
||||
let read_bytes = read_with_progress(&mut reader, &mut buf).await.unwrap();
|
||||
|
||||
assert_eq!(read_bytes, expected_data.len());
|
||||
assert_eq!(buf, expected_data);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn edge_wrap_tls_application_record_exactly_u16_max() {
|
||||
let payload = vec![0u8; 65535];
|
||||
let wrapped = wrap_tls_application_record(&payload);
|
||||
assert_eq!(wrapped.len(), 65540);
|
||||
assert_eq!(wrapped[0], TLS_RECORD_APPLICATION);
|
||||
assert_eq!(&wrapped[3..5], &65535u16.to_be_bytes());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn fuzz_wrap_tls_application_record_lengths() {
|
||||
let lengths = [0, 1, 65534, 65535, 65536, 131070, 131071, 131072];
|
||||
for len in lengths {
|
||||
let payload = vec![0u8; len];
|
||||
let wrapped = wrap_tls_application_record(&payload);
|
||||
let expected_chunks = len.div_ceil(65535).max(1);
|
||||
assert_eq!(wrapped.len(), len + 5 * expected_chunks);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn stress_user_connection_reservation_concurrent_same_ip_exhaustion() {
|
||||
let user = "stress-same-ip-user";
|
||||
let mut config = ProxyConfig::default();
|
||||
config.access.user_max_tcp_conns.insert(user.to_string(), 5);
|
||||
|
||||
let config = Arc::new(config);
|
||||
let stats = Arc::new(Stats::new());
|
||||
let ip_tracker = Arc::new(UserIpTracker::new());
|
||||
ip_tracker.set_user_limit(user, 10).await;
|
||||
|
||||
let peer = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(198, 51, 100, 77)), 55000);
|
||||
|
||||
let mut tasks = tokio::task::JoinSet::new();
|
||||
let mut reservations = Vec::new();
|
||||
|
||||
for _ in 0..10 {
|
||||
let config = config.clone();
|
||||
let stats = stats.clone();
|
||||
let ip_tracker = ip_tracker.clone();
|
||||
tasks.spawn(async move {
|
||||
RunningClientHandler::acquire_user_connection_reservation_static(
|
||||
user, &config, stats, peer, ip_tracker,
|
||||
)
|
||||
.await
|
||||
});
|
||||
}
|
||||
|
||||
let mut successes = 0;
|
||||
let mut failures = 0;
|
||||
|
||||
while let Some(res) = tasks.join_next().await {
|
||||
match res.unwrap() {
|
||||
Ok(r) => {
|
||||
successes += 1;
|
||||
reservations.push(r);
|
||||
}
|
||||
Err(_) => failures += 1,
|
||||
}
|
||||
}
|
||||
|
||||
assert_eq!(successes, 5);
|
||||
assert_eq!(failures, 5);
|
||||
assert_eq!(stats.get_user_curr_connects(user), 5);
|
||||
assert_eq!(ip_tracker.get_active_ip_count(user).await, 1);
|
||||
|
||||
for reservation in reservations {
|
||||
reservation.release().await;
|
||||
}
|
||||
|
||||
assert_eq!(stats.get_user_curr_connects(user), 0);
|
||||
assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
|
||||
}
|
||||
|
|
@ -0,0 +1,224 @@
|
|||
use super::*;
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::protocol::constants::MIN_TLS_CLIENT_HELLO_SIZE;
|
||||
use crate::stats::Stats;
|
||||
use crate::transport::UpstreamManager;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::io::{AsyncWriteExt, duplex};
|
||||
|
||||
fn preload_user_quota(stats: &Stats, user: &str, bytes: u64) {
|
||||
let user_stats = stats.get_or_create_user_stats_handle(user);
|
||||
stats.quota_charge_post_write(user_stats.as_ref(), bytes);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invariant_wrap_tls_application_record_exact_multiples() {
|
||||
let chunk_size = u16::MAX as usize;
|
||||
let payload = vec![0xAA; chunk_size * 2];
|
||||
|
||||
let wrapped = wrap_tls_application_record(&payload);
|
||||
|
||||
assert_eq!(wrapped.len(), 2 * (5 + chunk_size));
|
||||
assert_eq!(wrapped[0], TLS_RECORD_APPLICATION);
|
||||
assert_eq!(&wrapped[3..5], &65535u16.to_be_bytes());
|
||||
|
||||
let second_header_idx = 5 + chunk_size;
|
||||
assert_eq!(wrapped[second_header_idx], TLS_RECORD_APPLICATION);
|
||||
assert_eq!(
|
||||
&wrapped[second_header_idx + 3..second_header_idx + 5],
|
||||
&65535u16.to_be_bytes()
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn invariant_tls_clienthello_truncation_exact_boundary_triggers_masking() {
|
||||
let config = Arc::new(ProxyConfig::default());
|
||||
let stats = Arc::new(Stats::new());
|
||||
|
||||
let (server_side, mut client_side) = duplex(4096);
|
||||
let handler = tokio::spawn(handle_client_stream(
|
||||
server_side,
|
||||
"198.51.100.20:55000".parse().unwrap(),
|
||||
config,
|
||||
stats.clone(),
|
||||
Arc::new(UpstreamManager::new(
|
||||
vec![],
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
)),
|
||||
Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
|
||||
Arc::new(BufferPool::new()),
|
||||
Arc::new(SecureRandom::new()),
|
||||
None,
|
||||
Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||
None,
|
||||
Arc::new(UserIpTracker::new()),
|
||||
Arc::new(BeobachtenStore::new()),
|
||||
false,
|
||||
));
|
||||
|
||||
let claimed_len = MIN_TLS_CLIENT_HELLO_SIZE as u16;
|
||||
let mut header = vec![0x16, 0x03, 0x01];
|
||||
header.extend_from_slice(&claimed_len.to_be_bytes());
|
||||
|
||||
client_side.write_all(&header).await.unwrap();
|
||||
client_side
|
||||
.write_all(&vec![0x42; MIN_TLS_CLIENT_HELLO_SIZE - 1])
|
||||
.await
|
||||
.unwrap();
|
||||
client_side.shutdown().await.unwrap();
|
||||
|
||||
let _ = tokio::time::timeout(Duration::from_secs(2), handler)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(stats.get_connects_bad(), 1);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn invariant_acquire_reservation_ip_limit_rollback() {
|
||||
let user = "rollback-test-user";
|
||||
let mut config = ProxyConfig::default();
|
||||
config
|
||||
.access
|
||||
.user_max_tcp_conns
|
||||
.insert(user.to_string(), 10);
|
||||
|
||||
let stats = Arc::new(Stats::new());
|
||||
let ip_tracker = Arc::new(UserIpTracker::new());
|
||||
ip_tracker.set_user_limit(user, 1).await;
|
||||
|
||||
let peer_a = "198.51.100.21:55000".parse().unwrap();
|
||||
let _res_a = RunningClientHandler::acquire_user_connection_reservation_static(
|
||||
user,
|
||||
&config,
|
||||
stats.clone(),
|
||||
peer_a,
|
||||
ip_tracker.clone(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(stats.get_user_curr_connects(user), 1);
|
||||
|
||||
let peer_b = "203.0.113.22:55000".parse().unwrap();
|
||||
let res_b = RunningClientHandler::acquire_user_connection_reservation_static(
|
||||
user,
|
||||
&config,
|
||||
stats.clone(),
|
||||
peer_b,
|
||||
ip_tracker.clone(),
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(matches!(
|
||||
res_b,
|
||||
Err(ProxyError::ConnectionLimitExceeded { .. })
|
||||
));
|
||||
assert_eq!(stats.get_user_curr_connects(user), 1);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn invariant_quota_exact_boundary_inclusive() {
|
||||
let user = "quota-strict-user";
|
||||
let mut config = ProxyConfig::default();
|
||||
config.access.user_data_quota.insert(user.to_string(), 1000);
|
||||
|
||||
let stats = Arc::new(Stats::new());
|
||||
let ip_tracker = Arc::new(UserIpTracker::new());
|
||||
let peer = "198.51.100.23:55000".parse().unwrap();
|
||||
|
||||
preload_user_quota(stats.as_ref(), user, 999);
|
||||
let res1 = RunningClientHandler::acquire_user_connection_reservation_static(
|
||||
user,
|
||||
&config,
|
||||
stats.clone(),
|
||||
peer,
|
||||
ip_tracker.clone(),
|
||||
)
|
||||
.await;
|
||||
assert!(res1.is_ok());
|
||||
res1.unwrap().release().await;
|
||||
|
||||
preload_user_quota(stats.as_ref(), user, 1);
|
||||
let res2 = RunningClientHandler::acquire_user_connection_reservation_static(
|
||||
user,
|
||||
&config,
|
||||
stats.clone(),
|
||||
peer,
|
||||
ip_tracker.clone(),
|
||||
)
|
||||
.await;
|
||||
assert!(matches!(res2, Err(ProxyError::DataQuotaExceeded { .. })));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn invariant_direct_mode_partial_header_eof_is_error_not_bad_connect() {
|
||||
let mut cfg = ProxyConfig::default();
|
||||
cfg.general.beobachten = true;
|
||||
cfg.general.beobachten_minutes = 1;
|
||||
|
||||
let config = Arc::new(cfg);
|
||||
let stats = Arc::new(Stats::new());
|
||||
let beobachten = Arc::new(BeobachtenStore::new());
|
||||
|
||||
let (server_side, mut client_side) = duplex(4096);
|
||||
let handler = tokio::spawn(handle_client_stream(
|
||||
server_side,
|
||||
"198.51.100.25:55000".parse().unwrap(),
|
||||
config,
|
||||
stats.clone(),
|
||||
Arc::new(UpstreamManager::new(
|
||||
vec![],
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
)),
|
||||
Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
|
||||
Arc::new(BufferPool::new()),
|
||||
Arc::new(SecureRandom::new()),
|
||||
None,
|
||||
Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||
None,
|
||||
Arc::new(UserIpTracker::new()),
|
||||
beobachten.clone(),
|
||||
false,
|
||||
));
|
||||
|
||||
client_side.write_all(&[0xEF, 0xEF, 0xEF]).await.unwrap();
|
||||
client_side.shutdown().await.unwrap();
|
||||
|
||||
let result = tokio::time::timeout(Duration::from_secs(2), handler)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
assert!(result.is_err());
|
||||
assert_eq!(stats.get_connects_bad(), 0);
|
||||
let snapshot = beobachten.snapshot_text(Duration::from_secs(60));
|
||||
assert!(snapshot.contains("[expected_64_got_0]"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn invariant_route_mode_snapshot_picks_up_latest_mode() {
|
||||
let route_runtime = Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct));
|
||||
assert!(matches!(
|
||||
route_runtime.snapshot().mode,
|
||||
RelayRouteMode::Direct
|
||||
));
|
||||
|
||||
route_runtime.set_mode(RelayRouteMode::Middle);
|
||||
assert!(matches!(
|
||||
route_runtime.snapshot().mode,
|
||||
RelayRouteMode::Middle
|
||||
));
|
||||
}
|
||||
|
|
@ -40,6 +40,7 @@ fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats,
|
||||
|
|
|
|||
|
|
@ -36,6 +36,7 @@ fn build_harness(config: ProxyConfig) -> PipelineHarness {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats,
|
||||
|
|
|
|||
|
|
@ -0,0 +1,101 @@
|
|||
use super::*;
|
||||
use crate::config::{UpstreamConfig, UpstreamType};
|
||||
use std::sync::Arc;
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt, duplex};
|
||||
use tokio::net::TcpListener;
|
||||
use tokio::time::Duration;
|
||||
|
||||
fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
||||
Arc::new(UpstreamManager::new(
|
||||
vec![UpstreamConfig {
|
||||
upstream_type: UpstreamType::Direct {
|
||||
interface: None,
|
||||
bind_addresses: None,
|
||||
},
|
||||
weight: 1,
|
||||
enabled: true,
|
||||
scopes: String::new(),
|
||||
selected_scope: String::new(),
|
||||
}],
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats,
|
||||
))
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn fragmented_connect_probe_is_classified_as_http_via_prefetch_window() {
|
||||
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||
let backend_addr = listener.local_addr().unwrap();
|
||||
|
||||
let accept_task = tokio::spawn(async move {
|
||||
let (mut stream, _) = listener.accept().await.unwrap();
|
||||
let mut got = Vec::new();
|
||||
stream.read_to_end(&mut got).await.unwrap();
|
||||
got
|
||||
});
|
||||
|
||||
let mut cfg = ProxyConfig::default();
|
||||
cfg.general.beobachten = true;
|
||||
cfg.general.beobachten_minutes = 1;
|
||||
cfg.censorship.mask = true;
|
||||
cfg.censorship.mask_unix_sock = None;
|
||||
cfg.censorship.mask_host = Some("127.0.0.1".to_string());
|
||||
cfg.censorship.mask_port = backend_addr.port();
|
||||
cfg.general.modes.classic = false;
|
||||
cfg.general.modes.secure = false;
|
||||
|
||||
let config = Arc::new(cfg);
|
||||
let stats = Arc::new(Stats::new());
|
||||
let beobachten = Arc::new(BeobachtenStore::new());
|
||||
|
||||
let (server_side, mut client_side) = duplex(4096);
|
||||
let peer: SocketAddr = "198.51.100.251:57501".parse().unwrap();
|
||||
|
||||
let handler = tokio::spawn(handle_client_stream(
|
||||
server_side,
|
||||
peer,
|
||||
config,
|
||||
stats.clone(),
|
||||
new_upstream_manager(stats),
|
||||
Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
|
||||
Arc::new(BufferPool::new()),
|
||||
Arc::new(SecureRandom::new()),
|
||||
None,
|
||||
Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||
None,
|
||||
Arc::new(UserIpTracker::new()),
|
||||
beobachten.clone(),
|
||||
false,
|
||||
));
|
||||
|
||||
client_side.write_all(b"CONNE").await.unwrap();
|
||||
client_side
|
||||
.write_all(b"CT example.org:443 HTTP/1.1\r\nHost: example.org\r\n\r\n")
|
||||
.await
|
||||
.unwrap();
|
||||
client_side.shutdown().await.unwrap();
|
||||
|
||||
let forwarded = tokio::time::timeout(Duration::from_secs(3), accept_task)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert!(
|
||||
forwarded.starts_with(b"CONNECT example.org:443 HTTP/1.1"),
|
||||
"mask backend must receive the full fragmented CONNECT probe"
|
||||
);
|
||||
|
||||
let result = tokio::time::timeout(Duration::from_secs(3), handler)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert!(result.is_ok());
|
||||
|
||||
let snapshot = beobachten.snapshot_text(Duration::from_secs(60));
|
||||
assert!(snapshot.contains("[HTTP]"));
|
||||
assert!(snapshot.contains("198.51.100.251-1"));
|
||||
}
|
||||
|
|
@ -34,6 +34,7 @@ fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats,
|
||||
|
|
|
|||
|
|
@ -0,0 +1,123 @@
|
|||
use super::*;
|
||||
use crate::config::{UpstreamConfig, UpstreamType};
|
||||
use std::sync::Arc;
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt, duplex};
|
||||
use tokio::net::TcpListener;
|
||||
use tokio::time::{Duration, sleep};
|
||||
|
||||
fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
||||
Arc::new(UpstreamManager::new(
|
||||
vec![UpstreamConfig {
|
||||
upstream_type: UpstreamType::Direct {
|
||||
interface: None,
|
||||
bind_addresses: None,
|
||||
},
|
||||
weight: 1,
|
||||
enabled: true,
|
||||
scopes: String::new(),
|
||||
selected_scope: String::new(),
|
||||
}],
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats,
|
||||
))
|
||||
}
|
||||
|
||||
async fn run_http2_fragment_case(split_at: usize, delay_ms: u64, peer: SocketAddr) {
|
||||
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||
let backend_addr = listener.local_addr().unwrap();
|
||||
let preface = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n".to_vec();
|
||||
|
||||
let accept_task = tokio::spawn(async move {
|
||||
let (mut stream, _) = listener.accept().await.unwrap();
|
||||
let mut got = Vec::new();
|
||||
stream.read_to_end(&mut got).await.unwrap();
|
||||
got
|
||||
});
|
||||
|
||||
let mut cfg = ProxyConfig::default();
|
||||
cfg.general.beobachten = true;
|
||||
cfg.general.beobachten_minutes = 1;
|
||||
cfg.censorship.mask = true;
|
||||
cfg.censorship.mask_unix_sock = None;
|
||||
cfg.censorship.mask_host = Some("127.0.0.1".to_string());
|
||||
cfg.censorship.mask_port = backend_addr.port();
|
||||
cfg.general.modes.classic = false;
|
||||
cfg.general.modes.secure = false;
|
||||
|
||||
let config = Arc::new(cfg);
|
||||
let stats = Arc::new(Stats::new());
|
||||
let beobachten = Arc::new(BeobachtenStore::new());
|
||||
|
||||
let (server_side, mut client_side) = duplex(4096);
|
||||
let handler = tokio::spawn(handle_client_stream(
|
||||
server_side,
|
||||
peer,
|
||||
config,
|
||||
stats.clone(),
|
||||
new_upstream_manager(stats),
|
||||
Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
|
||||
Arc::new(BufferPool::new()),
|
||||
Arc::new(SecureRandom::new()),
|
||||
None,
|
||||
Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||
None,
|
||||
Arc::new(UserIpTracker::new()),
|
||||
beobachten.clone(),
|
||||
false,
|
||||
));
|
||||
|
||||
let first = split_at.min(preface.len());
|
||||
client_side.write_all(&preface[..first]).await.unwrap();
|
||||
if first < preface.len() {
|
||||
sleep(Duration::from_millis(delay_ms)).await;
|
||||
client_side.write_all(&preface[first..]).await.unwrap();
|
||||
}
|
||||
client_side.shutdown().await.unwrap();
|
||||
|
||||
let forwarded = tokio::time::timeout(Duration::from_secs(3), accept_task)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert!(
|
||||
forwarded.starts_with(&preface),
|
||||
"mask backend must receive an intact HTTP/2 preface prefix"
|
||||
);
|
||||
|
||||
let result = tokio::time::timeout(Duration::from_secs(3), handler)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert!(result.is_ok());
|
||||
|
||||
let snapshot = beobachten.snapshot_text(Duration::from_secs(60));
|
||||
assert!(snapshot.contains("[HTTP]"));
|
||||
assert!(snapshot.contains(&format!("{}-1", peer.ip())));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn http2_preface_fragmentation_matrix_is_classified_and_forwarded() {
|
||||
let cases = [(2usize, 0u64), (3, 0), (4, 0), (2, 7), (3, 7), (8, 1)];
|
||||
|
||||
for (i, (split_at, delay_ms)) in cases.into_iter().enumerate() {
|
||||
let peer: SocketAddr = format!("198.51.100.{}:58{}", 140 + i, 100 + i)
|
||||
.parse()
|
||||
.unwrap();
|
||||
run_http2_fragment_case(split_at, delay_ms, peer).await;
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn http2_preface_splitpoint_light_fuzz_classifies_http() {
|
||||
for split_at in 2usize..=12 {
|
||||
let delay_ms = if split_at % 3 == 0 { 7 } else { 1 };
|
||||
let peer: SocketAddr = format!("198.51.101.{}:59{}", split_at, 10 + split_at)
|
||||
.parse()
|
||||
.unwrap();
|
||||
run_http2_fragment_case(split_at, delay_ms, peer).await;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,151 @@
|
|||
use super::*;
|
||||
use crate::config::{UpstreamConfig, UpstreamType};
|
||||
use std::sync::Arc;
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt, duplex};
|
||||
use tokio::net::TcpListener;
|
||||
use tokio::time::{Duration, sleep};
|
||||
|
||||
fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
||||
Arc::new(UpstreamManager::new(
|
||||
vec![UpstreamConfig {
|
||||
upstream_type: UpstreamType::Direct {
|
||||
interface: None,
|
||||
bind_addresses: None,
|
||||
},
|
||||
weight: 1,
|
||||
enabled: true,
|
||||
scopes: String::new(),
|
||||
selected_scope: String::new(),
|
||||
}],
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats,
|
||||
))
|
||||
}
|
||||
|
||||
async fn run_pipeline_prefetch_case(
|
||||
prefetch_timeout_ms: u64,
|
||||
delayed_tail_ms: u64,
|
||||
peer: SocketAddr,
|
||||
) -> (Vec<u8>, String) {
|
||||
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||
let backend_addr = listener.local_addr().unwrap();
|
||||
|
||||
let accept_task = tokio::spawn(async move {
|
||||
let (mut stream, _) = listener.accept().await.unwrap();
|
||||
let mut got = Vec::new();
|
||||
stream.read_to_end(&mut got).await.unwrap();
|
||||
got
|
||||
});
|
||||
|
||||
let mut cfg = ProxyConfig::default();
|
||||
cfg.general.beobachten = true;
|
||||
cfg.general.beobachten_minutes = 1;
|
||||
cfg.censorship.mask = true;
|
||||
cfg.censorship.mask_unix_sock = None;
|
||||
cfg.censorship.mask_host = Some("127.0.0.1".to_string());
|
||||
cfg.censorship.mask_port = backend_addr.port();
|
||||
cfg.censorship.mask_classifier_prefetch_timeout_ms = prefetch_timeout_ms;
|
||||
cfg.general.modes.classic = false;
|
||||
cfg.general.modes.secure = false;
|
||||
|
||||
let config = Arc::new(cfg);
|
||||
let stats = Arc::new(Stats::new());
|
||||
let beobachten = Arc::new(BeobachtenStore::new());
|
||||
|
||||
let (server_side, mut client_side) = duplex(4096);
|
||||
let handler = tokio::spawn(handle_client_stream(
|
||||
server_side,
|
||||
peer,
|
||||
config,
|
||||
stats.clone(),
|
||||
new_upstream_manager(stats),
|
||||
Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
|
||||
Arc::new(BufferPool::new()),
|
||||
Arc::new(SecureRandom::new()),
|
||||
None,
|
||||
Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||
None,
|
||||
Arc::new(UserIpTracker::new()),
|
||||
beobachten.clone(),
|
||||
false,
|
||||
));
|
||||
|
||||
client_side.write_all(b"C").await.unwrap();
|
||||
sleep(Duration::from_millis(delayed_tail_ms)).await;
|
||||
|
||||
client_side
|
||||
.write_all(b"ONNECT example.org:443 HTTP/1.1\r\nHost: example.org\r\n\r\n")
|
||||
.await
|
||||
.unwrap();
|
||||
client_side.shutdown().await.unwrap();
|
||||
|
||||
let forwarded = tokio::time::timeout(Duration::from_secs(3), accept_task)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
let result = tokio::time::timeout(Duration::from_secs(3), handler)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert!(result.is_ok());
|
||||
|
||||
let snapshot = beobachten.snapshot_text(Duration::from_secs(60));
|
||||
(forwarded, snapshot)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn tdd_pipeline_prefetch_5ms_misses_15ms_tail_and_classifies_as_port_scanner() {
|
||||
let peer: SocketAddr = "198.51.100.171:58071".parse().unwrap();
|
||||
let (forwarded, snapshot) = run_pipeline_prefetch_case(5, 15, peer).await;
|
||||
|
||||
assert!(
|
||||
forwarded.starts_with(b"CONNECT"),
|
||||
"mask backend must still receive full payload bytes in-order"
|
||||
);
|
||||
assert!(
|
||||
snapshot.contains("[HTTP]") || snapshot.contains("[port-scanner]"),
|
||||
"unexpected classifier snapshot for 5ms delayed-tail case: {snapshot}"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn tdd_pipeline_prefetch_20ms_recovers_15ms_tail_and_classifies_as_http() {
|
||||
let peer: SocketAddr = "198.51.100.172:58072".parse().unwrap();
|
||||
let (forwarded, snapshot) = run_pipeline_prefetch_case(20, 15, peer).await;
|
||||
|
||||
assert!(
|
||||
forwarded.starts_with(b"CONNECT"),
|
||||
"mask backend must receive full CONNECT payload"
|
||||
);
|
||||
assert!(
|
||||
snapshot.contains("[HTTP]"),
|
||||
"20ms budget should recover delayed fragmented prefix and classify as HTTP"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn matrix_pipeline_prefetch_budget_behavior_5_20_50ms() {
|
||||
let peer5: SocketAddr = "198.51.100.173:58073".parse().unwrap();
|
||||
let peer20: SocketAddr = "198.51.100.174:58074".parse().unwrap();
|
||||
let peer50: SocketAddr = "198.51.100.175:58075".parse().unwrap();
|
||||
|
||||
let (_, snap5) = run_pipeline_prefetch_case(5, 35, peer5).await;
|
||||
let (_, snap20) = run_pipeline_prefetch_case(20, 35, peer20).await;
|
||||
let (_, snap50) = run_pipeline_prefetch_case(50, 35, peer50).await;
|
||||
|
||||
assert!(
|
||||
snap5.contains("[HTTP]") || snap5.contains("[port-scanner]"),
|
||||
"unexpected 5ms snapshot: {snap5}"
|
||||
);
|
||||
assert!(
|
||||
snap20.contains("[HTTP]") || snap20.contains("[port-scanner]"),
|
||||
"unexpected 20ms snapshot: {snap20}"
|
||||
);
|
||||
assert!(snap50.contains("[HTTP]"));
|
||||
}
|
||||
|
|
@ -0,0 +1,88 @@
|
|||
use super::*;
|
||||
use tokio::io::{AsyncWriteExt, duplex};
|
||||
use tokio::time::{Duration, sleep};
|
||||
|
||||
#[test]
|
||||
fn prefetch_timeout_budget_reads_from_config() {
|
||||
let mut cfg = ProxyConfig::default();
|
||||
assert_eq!(
|
||||
mask_classifier_prefetch_timeout(&cfg),
|
||||
Duration::from_millis(5),
|
||||
"default prefetch timeout budget must remain 5ms"
|
||||
);
|
||||
|
||||
cfg.censorship.mask_classifier_prefetch_timeout_ms = 20;
|
||||
assert_eq!(
|
||||
mask_classifier_prefetch_timeout(&cfg),
|
||||
Duration::from_millis(20),
|
||||
"runtime prefetch timeout budget must follow configured value"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn configured_prefetch_budget_20ms_recovers_tail_delayed_15ms() {
|
||||
let (mut reader, mut writer) = duplex(1024);
|
||||
|
||||
let writer_task = tokio::spawn(async move {
|
||||
sleep(Duration::from_millis(15)).await;
|
||||
writer
|
||||
.write_all(b"ONNECT example.org:443 HTTP/1.1\r\n")
|
||||
.await
|
||||
.expect("tail bytes must be writable");
|
||||
writer
|
||||
.shutdown()
|
||||
.await
|
||||
.expect("writer shutdown must succeed");
|
||||
});
|
||||
|
||||
let mut initial_data = b"C".to_vec();
|
||||
extend_masking_initial_window_with_timeout(
|
||||
&mut reader,
|
||||
&mut initial_data,
|
||||
Duration::from_millis(20),
|
||||
)
|
||||
.await;
|
||||
|
||||
writer_task
|
||||
.await
|
||||
.expect("writer task must not panic in runtime timeout test");
|
||||
|
||||
assert!(
|
||||
initial_data.starts_with(b"CONNECT"),
|
||||
"20ms configured prefetch budget should recover 15ms delayed CONNECT tail"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn configured_prefetch_budget_5ms_misses_tail_delayed_15ms() {
|
||||
let (mut reader, mut writer) = duplex(1024);
|
||||
|
||||
let writer_task = tokio::spawn(async move {
|
||||
sleep(Duration::from_millis(15)).await;
|
||||
writer
|
||||
.write_all(b"ONNECT example.org:443 HTTP/1.1\r\n")
|
||||
.await
|
||||
.expect("tail bytes must be writable");
|
||||
writer
|
||||
.shutdown()
|
||||
.await
|
||||
.expect("writer shutdown must succeed");
|
||||
});
|
||||
|
||||
let mut initial_data = b"C".to_vec();
|
||||
extend_masking_initial_window_with_timeout(
|
||||
&mut reader,
|
||||
&mut initial_data,
|
||||
Duration::from_millis(5),
|
||||
)
|
||||
.await;
|
||||
|
||||
writer_task
|
||||
.await
|
||||
.expect("writer task must not panic in runtime timeout test");
|
||||
|
||||
assert!(
|
||||
!initial_data.starts_with(b"CONNECT"),
|
||||
"5ms configured prefetch budget should miss 15ms delayed CONNECT tail"
|
||||
);
|
||||
}
|
||||
|
|
@ -0,0 +1,265 @@
|
|||
use super::*;
|
||||
use crate::config::{UpstreamConfig, UpstreamType};
|
||||
use crate::crypto::sha256_hmac;
|
||||
use crate::protocol::constants::{HANDSHAKE_LEN, TLS_VERSION};
|
||||
use crate::protocol::tls;
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt, duplex};
|
||||
use tokio::net::TcpListener;
|
||||
|
||||
struct PipelineHarness {
|
||||
config: Arc<ProxyConfig>,
|
||||
stats: Arc<Stats>,
|
||||
upstream_manager: Arc<UpstreamManager>,
|
||||
replay_checker: Arc<ReplayChecker>,
|
||||
buffer_pool: Arc<BufferPool>,
|
||||
rng: Arc<SecureRandom>,
|
||||
route_runtime: Arc<RouteRuntimeController>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
beobachten: Arc<BeobachtenStore>,
|
||||
}
|
||||
|
||||
fn build_harness(secret_hex: &str, mask_port: u16) -> PipelineHarness {
|
||||
let mut cfg = ProxyConfig::default();
|
||||
cfg.general.beobachten = false;
|
||||
cfg.censorship.mask = true;
|
||||
cfg.censorship.mask_unix_sock = None;
|
||||
cfg.censorship.mask_host = Some("127.0.0.1".to_string());
|
||||
cfg.censorship.mask_port = mask_port;
|
||||
cfg.censorship.mask_proxy_protocol = 0;
|
||||
cfg.access.ignore_time_skew = true;
|
||||
cfg.access
|
||||
.users
|
||||
.insert("user".to_string(), secret_hex.to_string());
|
||||
|
||||
let config = Arc::new(cfg);
|
||||
let stats = Arc::new(Stats::new());
|
||||
let upstream_manager = Arc::new(UpstreamManager::new(
|
||||
vec![UpstreamConfig {
|
||||
upstream_type: UpstreamType::Direct {
|
||||
interface: None,
|
||||
bind_addresses: None,
|
||||
},
|
||||
weight: 1,
|
||||
enabled: true,
|
||||
scopes: String::new(),
|
||||
selected_scope: String::new(),
|
||||
}],
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
));
|
||||
|
||||
PipelineHarness {
|
||||
config,
|
||||
stats,
|
||||
upstream_manager,
|
||||
replay_checker: Arc::new(ReplayChecker::new(256, Duration::from_secs(60))),
|
||||
buffer_pool: Arc::new(BufferPool::new()),
|
||||
rng: Arc::new(SecureRandom::new()),
|
||||
route_runtime: Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||
ip_tracker: Arc::new(UserIpTracker::new()),
|
||||
beobachten: Arc::new(BeobachtenStore::new()),
|
||||
}
|
||||
}
|
||||
|
||||
fn make_valid_tls_client_hello(secret: &[u8], timestamp: u32, tls_len: usize, fill: u8) -> Vec<u8> {
|
||||
let total_len = 5 + tls_len;
|
||||
let mut handshake = vec![fill; total_len];
|
||||
|
||||
handshake[0] = 0x16;
|
||||
handshake[1] = 0x03;
|
||||
handshake[2] = 0x01;
|
||||
handshake[3..5].copy_from_slice(&(tls_len as u16).to_be_bytes());
|
||||
|
||||
let session_id_len: usize = 32;
|
||||
handshake[tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN] = session_id_len as u8;
|
||||
|
||||
handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN].fill(0);
|
||||
let computed = sha256_hmac(secret, &handshake);
|
||||
let mut digest = computed;
|
||||
let ts = timestamp.to_le_bytes();
|
||||
for i in 0..4 {
|
||||
digest[28 + i] ^= ts[i];
|
||||
}
|
||||
handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN]
|
||||
.copy_from_slice(&digest);
|
||||
|
||||
handshake
|
||||
}
|
||||
|
||||
fn wrap_tls_application_data(payload: &[u8]) -> Vec<u8> {
|
||||
let mut record = Vec::with_capacity(5 + payload.len());
|
||||
record.push(0x17);
|
||||
record.extend_from_slice(&TLS_VERSION);
|
||||
record.extend_from_slice(&(payload.len() as u16).to_be_bytes());
|
||||
record.extend_from_slice(payload);
|
||||
record
|
||||
}
|
||||
|
||||
async fn read_and_discard_tls_record_body<T>(stream: &mut T, header: [u8; 5])
|
||||
where
|
||||
T: tokio::io::AsyncRead + Unpin,
|
||||
{
|
||||
let len = u16::from_be_bytes([header[3], header[4]]) as usize;
|
||||
let mut body = vec![0u8; len];
|
||||
stream.read_exact(&mut body).await.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_initial_data_prefetch_gate_is_fail_closed() {
|
||||
assert!(
|
||||
!should_prefetch_mask_classifier_window(&[]),
|
||||
"empty initial_data must not trigger classifier prefetch"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn blackhat_empty_initial_data_prefetch_must_not_consume_fallback_payload() {
|
||||
let payload = b"\x17\x03\x03\x00\x10coalesced-tail-bytes".to_vec();
|
||||
let (mut reader, mut writer) = duplex(1024);
|
||||
|
||||
writer.write_all(&payload).await.unwrap();
|
||||
writer.shutdown().await.unwrap();
|
||||
|
||||
let mut initial_data = Vec::new();
|
||||
extend_masking_initial_window(&mut reader, &mut initial_data).await;
|
||||
|
||||
assert!(
|
||||
initial_data.is_empty(),
|
||||
"empty initial_data must remain empty after prefetch stage"
|
||||
);
|
||||
|
||||
let mut remaining = Vec::new();
|
||||
reader.read_to_end(&mut remaining).await.unwrap();
|
||||
assert_eq!(
|
||||
remaining, payload,
|
||||
"prefetch stage must not consume fallback payload when initial_data is empty"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn positive_fragmented_http_prefix_still_prefetches_within_window() {
|
||||
let (mut reader, mut writer) = duplex(1024);
|
||||
writer
|
||||
.write_all(b"NECT example.org:443 HTTP/1.1\r\n")
|
||||
.await
|
||||
.unwrap();
|
||||
writer.shutdown().await.unwrap();
|
||||
|
||||
let mut initial_data = b"CON".to_vec();
|
||||
extend_masking_initial_window(&mut reader, &mut initial_data).await;
|
||||
|
||||
assert!(
|
||||
initial_data.starts_with(b"CONNECT"),
|
||||
"fragmented HTTP method prefix should still be recoverable by prefetch"
|
||||
);
|
||||
assert!(
|
||||
initial_data.len() <= 16,
|
||||
"prefetch window must remain bounded"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn light_fuzz_empty_initial_data_never_prefetches_any_bytes() {
|
||||
let mut seed = 0xD15C_A11E_2026_0322u64;
|
||||
|
||||
for _ in 0..128 {
|
||||
seed ^= seed << 7;
|
||||
seed ^= seed >> 9;
|
||||
seed ^= seed << 8;
|
||||
|
||||
let len = ((seed & 0x3f) as usize).saturating_add(1);
|
||||
let mut payload = vec![0u8; len];
|
||||
for (idx, byte) in payload.iter_mut().enumerate() {
|
||||
*byte = (seed as u8).wrapping_add(idx as u8).wrapping_mul(17);
|
||||
}
|
||||
|
||||
let (mut reader, mut writer) = duplex(1024);
|
||||
writer.write_all(&payload).await.unwrap();
|
||||
writer.shutdown().await.unwrap();
|
||||
|
||||
let mut initial_data = Vec::new();
|
||||
extend_masking_initial_window(&mut reader, &mut initial_data).await;
|
||||
assert!(initial_data.is_empty());
|
||||
|
||||
let mut remaining = Vec::new();
|
||||
reader.read_to_end(&mut remaining).await.unwrap();
|
||||
assert_eq!(remaining, payload);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn blackhat_integration_empty_initial_data_path_is_byte_exact_and_eof_clean() {
|
||||
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||
let backend_addr = listener.local_addr().unwrap();
|
||||
|
||||
let secret = [0xD3u8; 16];
|
||||
let client_hello = make_valid_tls_client_hello(&secret, 411, 600, 0x2B);
|
||||
let mut invalid_payload = vec![0u8; HANDSHAKE_LEN];
|
||||
invalid_payload[0] = 0xFF;
|
||||
let invalid_mtproto_record = wrap_tls_application_data(&invalid_payload);
|
||||
let trailing_record = wrap_tls_application_data(b"empty-prefetch-invariant");
|
||||
let expected = trailing_record.clone();
|
||||
|
||||
let accept_task = tokio::spawn(async move {
|
||||
let (mut stream, _) = listener.accept().await.unwrap();
|
||||
|
||||
let mut got = vec![0u8; expected.len()];
|
||||
stream.read_exact(&mut got).await.unwrap();
|
||||
assert_eq!(got, expected);
|
||||
|
||||
let mut one = [0u8; 1];
|
||||
let n = stream.read(&mut one).await.unwrap();
|
||||
assert_eq!(
|
||||
n, 0,
|
||||
"fallback stream must not append synthetic bytes on empty initial_data path"
|
||||
);
|
||||
});
|
||||
|
||||
let harness = build_harness("d3d3d3d3d3d3d3d3d3d3d3d3d3d3d3d3", backend_addr.port());
|
||||
let (server_side, mut client_side) = duplex(131072);
|
||||
|
||||
let handler = tokio::spawn(handle_client_stream(
|
||||
server_side,
|
||||
"198.51.100.245:56145".parse().unwrap(),
|
||||
harness.config,
|
||||
harness.stats,
|
||||
harness.upstream_manager,
|
||||
harness.replay_checker,
|
||||
harness.buffer_pool,
|
||||
harness.rng,
|
||||
None,
|
||||
harness.route_runtime,
|
||||
None,
|
||||
harness.ip_tracker,
|
||||
harness.beobachten,
|
||||
false,
|
||||
));
|
||||
|
||||
client_side.write_all(&client_hello).await.unwrap();
|
||||
let mut head = [0u8; 5];
|
||||
client_side.read_exact(&mut head).await.unwrap();
|
||||
assert_eq!(head[0], 0x16);
|
||||
read_and_discard_tls_record_body(&mut client_side, head).await;
|
||||
|
||||
client_side
|
||||
.write_all(&invalid_mtproto_record)
|
||||
.await
|
||||
.unwrap();
|
||||
client_side.write_all(&trailing_record).await.unwrap();
|
||||
client_side.shutdown().await.unwrap();
|
||||
|
||||
tokio::time::timeout(Duration::from_secs(3), accept_task)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
let _ = tokio::time::timeout(Duration::from_secs(3), handler)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue