mirror of https://github.com/telemt/telemt.git
Compare commits
159 Commits
| Author | SHA1 | Date |
|---|---|---|
|
|
e630ea0045 | |
|
|
4574e423c6 | |
|
|
5f5582865e | |
|
|
1f54e4a203 | |
|
|
defa37da05 | |
|
|
5fd058b6fd | |
|
|
977ee53b72 | |
|
|
5b11522620 | |
|
|
8fe6fcb7eb | |
|
|
486e439ae6 | |
|
|
8e7b27a16d | |
|
|
7f0057acd7 | |
|
|
7fe38f1b9f | |
|
|
c2f16a343a | |
|
|
6ea867ce36 | |
|
|
bb6237151c | |
|
|
a9f695623d | |
|
|
5c29870632 | |
|
|
f6704d7d65 | |
|
|
3d20002e56 | |
|
|
8fcd0fa950 | |
|
|
645e968778 | |
|
|
b46216d357 | |
|
|
8ac1a0017d | |
|
|
3df274caa6 | |
|
|
780546a680 | |
|
|
729ffa0fcd | |
|
|
e594d6f079 | |
|
|
ecd6a19246 | |
|
|
2df6b8704d | |
|
|
5f5a046710 | |
|
|
2dc81ad0e0 | |
|
|
d8d8534cf8 | |
|
|
6c850e4150 | |
|
|
b8cf596e7d | |
|
|
5bf56b6dd8 | |
|
|
65da1f91ec | |
|
|
f3e9d00132 | |
|
|
dee6e13fef | |
|
|
07d774a82a | |
|
|
618bc7e0b6 | |
|
|
d06ac222d6 | |
|
|
567453e0f8 | |
|
|
cba837745b | |
|
|
876c8f1612 | |
|
|
ac8ad864be | |
|
|
fe56dc7c1a | |
|
|
96ae01078c | |
|
|
3b9919fa4d | |
|
|
6c4a3b59f9 | |
|
|
01c3d0a707 | |
|
|
fbee4631d6 | |
|
|
d0b52ea299 | |
|
|
677195e587 | |
|
|
a383efcb21 | |
|
|
cb5753f77c | |
|
|
7a075b2ffe | |
|
|
7de822dd15 | |
|
|
1bbf4584a6 | |
|
|
70479c4094 | |
|
|
b94746a6e0 | |
|
|
ceae1564af | |
|
|
7ce5fc66db | |
|
|
41493462a1 | |
|
|
6ee4d4648c | |
|
|
97f6649584 | |
|
|
dc6b6d3f9d | |
|
|
1c3e0d4e46 | |
|
|
0b78583cf5 | |
|
|
28d318d724 | |
|
|
70c2f0f045 | |
|
|
b9b1271f14 | |
|
|
3c734bd811 | |
|
|
6391df0583 | |
|
|
6a781c8bc3 | |
|
|
138652af8e | |
|
|
59157d31a6 | |
|
|
8bab3f70e1 | |
|
|
41d786cc11 | |
|
|
c43de1bd2a | |
|
|
101efe45b7 | |
|
|
11df61c6ac | |
|
|
08684bcbd2 | |
|
|
744fb4425f | |
|
|
80cb1bc221 | |
|
|
8461556b02 | |
|
|
cfd516edf3 | |
|
|
803c2c0492 | |
|
|
b762bd029f | |
|
|
761679d306 | |
|
|
41668b153d | |
|
|
1d2f88ad29 | |
|
|
80917f5abc | |
|
|
dc61d300ab | |
|
|
ae16080de5 | |
|
|
b8ca1fc166 | |
|
|
f9986944df | |
|
|
cb877c2bc3 | |
|
|
4426082c17 | |
|
|
22097f8c7c | |
|
|
1450af60a0 | |
|
|
f1cc8d65f2 | |
|
|
ec7e808daf | |
|
|
e4b7e23e76 | |
|
|
8b92b80b4a | |
|
|
f7868aa00f | |
|
|
655a08fa5c | |
|
|
8bc432db49 | |
|
|
a40d6929e5 | |
|
|
8db566dbe9 | |
|
|
bb71de0230 | |
|
|
62a258f8e3 | |
|
|
c868eaae74 | |
|
|
8e1860f912 | |
|
|
814bef9d99 | |
|
|
3ceda15073 | |
|
|
a3a6ea2880 | |
|
|
24156b5067 | |
|
|
a1dfa5b11d | |
|
|
800356c751 | |
|
|
1546b012a6 | |
|
|
e6b77af931 | |
|
|
8cfaab9320 | |
|
|
2d69b9d0ae | |
|
|
41c2b4de65 | |
|
|
0a5e8a09fd | |
|
|
2f9fddfa6f | |
|
|
6f4356f72a | |
|
|
0c3c9009a9 | |
|
|
0475844701 | |
|
|
1abf9bd05c | |
|
|
6f17d4d231 | |
|
|
bf30e93284 | |
|
|
91be148b72 | |
|
|
e46d2cfc52 | |
|
|
d4cda6d546 | |
|
|
e35d69c61f | |
|
|
a353a94175 | |
|
|
b856250b2c | |
|
|
97d1476ded | |
|
|
cde14fc1bf | |
|
|
5723d50d0b | |
|
|
3eb384e02a | |
|
|
c960e0e245 | |
|
|
6fc188f0c4 | |
|
|
5c9fea5850 | |
|
|
3011a9ef6d | |
|
|
7b570be5b3 | |
|
|
0461bc65c6 | |
|
|
ead23608f0 | |
|
|
cf82b637d2 | |
|
|
2e8bfa1101 | |
|
|
d091b0b251 | |
|
|
56fc6c4896 | |
|
|
95685adba7 | |
|
|
909714af31 | |
|
|
dc2b4395bd | |
|
|
39875afbff | |
|
|
2ea7813ed4 |
|
|
@ -7,7 +7,16 @@ queries:
|
|||
- uses: security-and-quality
|
||||
- uses: ./.github/codeql/queries
|
||||
|
||||
paths-ignore:
|
||||
- "**/tests/**"
|
||||
- "**/test/**"
|
||||
- "**/*_test.rs"
|
||||
- "**/*/tests.rs"
|
||||
query-filters:
|
||||
- exclude:
|
||||
tags:
|
||||
- test
|
||||
|
||||
- exclude:
|
||||
id:
|
||||
- rust/unwrap-on-option
|
||||
|
|
|
|||
|
|
@ -0,0 +1,126 @@
|
|||
# Architecture Directives
|
||||
|
||||
> Companion to `Agents.md`. These are **activation directives**, not tutorials.
|
||||
> You already know these patterns — apply them. When making any structural or
|
||||
> design decision, run the relevant section below as a checklist.
|
||||
|
||||
---
|
||||
|
||||
## 1. Active Principles (always on)
|
||||
|
||||
Apply these on every non-trivial change. No exceptions.
|
||||
|
||||
- **SRP** — one reason to change per component. If you can't name the responsibility in one noun phrase, split it.
|
||||
- **OCP** — extend by adding, not by modifying. New variants/impls over patching existing logic.
|
||||
- **ISP** — traits stay minimal. More than ~5 methods is a split signal.
|
||||
- **DIP** — high-level modules depend on traits, not concrete types. Infrastructure implements domain traits; it does not own domain logic.
|
||||
- **DRY** — one authoritative source per piece of knowledge. Copies are bugs that haven't diverged yet.
|
||||
- **YAGNI** — generic parameters, extension hooks, and pluggable strategies require an *existing* concrete use case, not a hypothetical one.
|
||||
- **KISS** — two equivalent designs: choose the one with fewer concepts. Justify complexity; never assume it.
|
||||
|
||||
---
|
||||
|
||||
## 2. Layered Architecture
|
||||
|
||||
Dependencies point **inward only**: `Presentation → Application → Domain ← Infrastructure`.
|
||||
|
||||
- Domain layer: zero I/O. No network, no filesystem, no async runtime imports.
|
||||
- Infrastructure: implements domain traits at the boundary. Never leaks SDK/wire types inward.
|
||||
- Anti-Corruption Layer (ACL): all third-party and external-protocol types are translated here. If the external format changes, only the ACL changes.
|
||||
- Presentation: translates wire/HTTP representations to domain types and back. Nothing else.
|
||||
|
||||
---
|
||||
|
||||
## 3. Design Pattern Selection
|
||||
|
||||
Apply the right pattern. Do not invent a new abstraction when a named pattern fits.
|
||||
|
||||
| Situation | Pattern to apply |
|
||||
|---|---|
|
||||
| Struct with 3+ optional/dependent fields | **Builder** — `build()` returns `Result`, never panics |
|
||||
| Cross-cutting behavior (logging, retry, metrics) on a trait impl | **Decorator** — implements same trait, delegates all calls |
|
||||
| Subsystem with multiple internal components | **Façade** — single public entry point, internals are `pub(crate)` |
|
||||
| Swappable algorithm or policy | **Strategy** — trait injection; generics for compile-time, `dyn` for runtime |
|
||||
| Component notifying decoupled consumers | **Observer** — typed channels (`broadcast`, `watch`), not callback `Vec<Box<dyn Fn>>` |
|
||||
| Exclusive mutable state serving concurrent callers | **Actor** — `mpsc` command channel + `oneshot` reply; no lock needed on state |
|
||||
| Finite state with invalid transition prevention | **Typestate** — distinct types per state; invalid ops are compile errors |
|
||||
| Fixed process skeleton with overridable steps | **Template Method** — defaulted trait method calls required hooks |
|
||||
| Request pipeline with independent handlers | **Chain/Middleware** — generic compile-time chain for hot paths, `dyn` for runtime assembly |
|
||||
| Hiding a concrete type behind a trait | **Factory Function** — returns `Box<dyn Trait>` or `impl Trait` |
|
||||
|
||||
---
|
||||
|
||||
## 4. Data Modeling Rules
|
||||
|
||||
- **Make illegal states unrepresentable.** Type system enforces invariants; runtime validation is a second line, not the first.
|
||||
- **Newtype every primitive** that carries domain meaning. `SessionId(u64)` ≠ `UserId(u64)` — the compiler enforces it.
|
||||
- **Enums over booleans** for any parameter or field with two or more named states.
|
||||
- **Typed error enums** with named variants carrying full diagnostic context. `anyhow` is application-layer only; never in library code.
|
||||
- **Domain types carry no I/O concerns.** No `serde`, no codec, no DB derives on domain structs. Conversions via `From`/`TryFrom` at layer boundaries.
|
||||
|
||||
---
|
||||
|
||||
## 5. Concurrency Rules
|
||||
|
||||
- Prefer message-passing over shared memory. Shared state is a fallback.
|
||||
- All channels must be **bounded**. Document the bound's rationale inline.
|
||||
- Never hold a lock across an `await` unless atomicity explicitly requires it — document why.
|
||||
- Document lock acquisition order wherever two locks are taken together.
|
||||
- Every `async fn` is cancellation-safe unless explicitly documented otherwise. Mutate shared state *after* the `await` that may be cancelled, not before.
|
||||
- High-read/low-write state: use `arc-swap` or `watch` for lock-free reads.
|
||||
|
||||
---
|
||||
|
||||
## 6. Error Handling Rules
|
||||
|
||||
- Errors translated at every layer boundary — low-level errors never surface unmodified.
|
||||
- Add context at the propagation site: what operation failed and where.
|
||||
- No `unwrap()`/`expect()` in production paths without a comment proving `None`/`Err` is impossible.
|
||||
- Panics are only permitted in: tests, startup/init unrecoverable failure, and `unreachable!()` with an invariant comment.
|
||||
|
||||
---
|
||||
|
||||
## 7. API Design Rules
|
||||
|
||||
- **CQS**: functions that return data must not mutate; functions that mutate return only `Result`.
|
||||
- **Least surprise**: a function does exactly what its name implies. Side effects are documented.
|
||||
- **Idempotency**: `close()`, `shutdown()`, `unregister()` called twice must not panic or error.
|
||||
- **Fallibility at the type level**: failure → `Result<T, E>`. No sentinel values.
|
||||
- **Minimal public surface**: default to `pub(crate)`. Mark `pub` only deliberate API. Re-export through a single surface in `mod.rs`.
|
||||
|
||||
---
|
||||
|
||||
## 8. Performance Rules (hot paths)
|
||||
|
||||
- Annotate hot-path functions with `// HOT PATH: <throughput requirement>`.
|
||||
- Zero allocations per operation in hot paths after initialization. Preallocate in constructors, reuse buffers.
|
||||
- Pass `&[u8]` / `Bytes` slices — not `Vec<u8>`. Use `BytesMut` for reusable mutable buffers.
|
||||
- No `String` formatting in hot paths. No logging without a rate-limit or sampling gate.
|
||||
- Any allocation in a hot path gets a comment: `// ALLOC: <reason and size>`.
|
||||
|
||||
---
|
||||
|
||||
## 9. Testing Rules
|
||||
|
||||
- Bug fixes require a regression test that is **red before the fix, green after**. Name it after the bug.
|
||||
- Property tests for: codec round-trips, state machine invariants, cryptographic protocol correctness.
|
||||
- No shared mutable state between tests. Each test constructs its own environment.
|
||||
- Test doubles hierarchy (simplest first): Fake → Stub → Spy → Mock. Mocks couple to implementation, not behavior — use sparingly.
|
||||
|
||||
---
|
||||
|
||||
## 10. Pre-Change Checklist
|
||||
|
||||
Run this before proposing or implementing any structural decision:
|
||||
|
||||
- [ ] Responsibility nameable in one noun phrase?
|
||||
- [ ] Layer dependencies point inward only?
|
||||
- [ ] Invalid states unrepresentable in the type system?
|
||||
- [ ] State transitions gated through a single interface?
|
||||
- [ ] All channels bounded?
|
||||
- [ ] No locks held across `await` (or documented)?
|
||||
- [ ] Errors typed and translated at layer boundaries?
|
||||
- [ ] No panics in production paths without invariant proof?
|
||||
- [ ] Hot paths annotated and allocation-free?
|
||||
- [ ] Public surface minimal — only deliberate API marked `pub`?
|
||||
- [ ] Correct pattern chosen from Section 3 table?
|
||||
|
|
@ -0,0 +1,39 @@
|
|||
name: Build
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "*" ]
|
||||
pull_request:
|
||||
branches: [ "*" ]
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install latest stable Rust toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
|
||||
- name: Cache cargo registry & build artifacts
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-cargo-
|
||||
|
||||
- name: Build Release
|
||||
run: cargo build --release --verbose
|
||||
|
|
@ -5,35 +5,87 @@ on:
|
|||
tags:
|
||||
- '[0-9]+.[0-9]+.[0-9]+'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tag:
|
||||
description: 'Release tag (example: 3.3.15)'
|
||||
required: true
|
||||
type: string
|
||||
|
||||
concurrency:
|
||||
group: release-${{ github.ref }}
|
||||
group: release-${{ github.ref_name }}-${{ github.event.inputs.tag || 'auto' }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
BINARY_NAME: telemt
|
||||
|
||||
jobs:
|
||||
# ==========================
|
||||
# GNU / glibc
|
||||
# ==========================
|
||||
build-gnu:
|
||||
name: GNU ${{ matrix.target }}
|
||||
prepare:
|
||||
name: Prepare
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
outputs:
|
||||
version: ${{ steps.vars.outputs.version }}
|
||||
prerelease: ${{ steps.vars.outputs.prerelease }}
|
||||
|
||||
steps:
|
||||
- name: Resolve version
|
||||
id: vars
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
if [ "${GITHUB_EVENT_NAME}" = "workflow_dispatch" ]; then
|
||||
VERSION="${{ github.event.inputs.tag }}"
|
||||
else
|
||||
VERSION="${GITHUB_REF#refs/tags/}"
|
||||
fi
|
||||
|
||||
VERSION="${VERSION#refs/tags/}"
|
||||
|
||||
if [ -z "${VERSION}" ]; then
|
||||
echo "Release version is empty" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ "${VERSION}" == *-* ]]; then
|
||||
PRERELEASE=true
|
||||
else
|
||||
PRERELEASE=false
|
||||
fi
|
||||
|
||||
echo "version=${VERSION}" >> "${GITHUB_OUTPUT}"
|
||||
echo "prerelease=${PRERELEASE}" >> "${GITHUB_OUTPUT}"
|
||||
|
||||
# ==========================
|
||||
# GNU / glibc
|
||||
# ==========================
|
||||
build-gnu:
|
||||
name: GNU ${{ matrix.asset }}
|
||||
runs-on: ubuntu-latest
|
||||
needs: prepare
|
||||
|
||||
container:
|
||||
image: rust:slim-bookworm
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- target: x86_64-unknown-linux-gnu
|
||||
asset: telemt-x86_64-linux-gnu
|
||||
cpu: baseline
|
||||
|
||||
- target: x86_64-unknown-linux-gnu
|
||||
asset: telemt-x86_64-v3-linux-gnu
|
||||
cpu: v3
|
||||
|
||||
- target: aarch64-unknown-linux-gnu
|
||||
asset: telemt-aarch64-linux-gnu
|
||||
cpu: generic
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
|
@ -47,8 +99,8 @@ jobs:
|
|||
|
||||
- name: Install deps
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y \
|
||||
apt-get update
|
||||
apt-get install -y \
|
||||
build-essential \
|
||||
clang \
|
||||
lld \
|
||||
|
|
@ -59,53 +111,65 @@ jobs:
|
|||
- uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
/usr/local/cargo/registry
|
||||
/usr/local/cargo/git
|
||||
target
|
||||
key: gnu-${{ matrix.target }}-${{ hashFiles('**/Cargo.lock') }}
|
||||
key: gnu-${{ matrix.asset }}-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
gnu-${{ matrix.asset }}-
|
||||
gnu-
|
||||
|
||||
- name: Build
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
if [ "${{ matrix.target }}" = "aarch64-unknown-linux-gnu" ]; then
|
||||
export CC=aarch64-linux-gnu-gcc
|
||||
export CXX=aarch64-linux-gnu-g++
|
||||
export CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc
|
||||
export CXX_aarch64_unknown_linux_gnu=aarch64-linux-gnu-g++
|
||||
export RUSTFLAGS="-C linker=aarch64-linux-gnu-gcc"
|
||||
export RUSTFLAGS="-C linker=aarch64-linux-gnu-gcc -C lto=fat -C panic=abort"
|
||||
else
|
||||
export CC=clang
|
||||
export CXX=clang++
|
||||
export CC_x86_64_unknown_linux_gnu=clang
|
||||
export CXX_x86_64_unknown_linux_gnu=clang++
|
||||
export RUSTFLAGS="-C linker=clang -C link-arg=-fuse-ld=lld"
|
||||
|
||||
if [ "${{ matrix.cpu }}" = "v3" ]; then
|
||||
CPU_FLAGS="-C target-cpu=x86-64-v3"
|
||||
else
|
||||
CPU_FLAGS="-C target-cpu=x86-64"
|
||||
fi
|
||||
|
||||
export RUSTFLAGS="-C linker=clang -C link-arg=-fuse-ld=lld -C lto=fat -C panic=abort ${CPU_FLAGS}"
|
||||
fi
|
||||
|
||||
cargo build --release --target ${{ matrix.target }}
|
||||
cargo build --release --target ${{ matrix.target }} -j "$(nproc)"
|
||||
|
||||
- name: Package
|
||||
shell: bash
|
||||
run: |
|
||||
mkdir -p dist
|
||||
BIN=target/${{ matrix.target }}/release/${{ env.BINARY_NAME }}
|
||||
set -euo pipefail
|
||||
|
||||
cp "$BIN" dist/${{ env.BINARY_NAME }}-${{ matrix.target }}
|
||||
mkdir -p dist
|
||||
cp "target/${{ matrix.target }}/release/${{ env.BINARY_NAME }}" dist/telemt
|
||||
|
||||
cd dist
|
||||
tar -czf ${{ matrix.asset }}.tar.gz ${{ env.BINARY_NAME }}-${{ matrix.target }}
|
||||
sha256sum ${{ matrix.asset }}.tar.gz > ${{ matrix.asset }}.sha256
|
||||
tar -czf "${{ matrix.asset }}.tar.gz" \
|
||||
--owner=0 --group=0 --numeric-owner \
|
||||
telemt
|
||||
|
||||
sha256sum "${{ matrix.asset }}.tar.gz" > "${{ matrix.asset }}.tar.gz.sha256"
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.asset }}
|
||||
path: |
|
||||
dist/${{ matrix.asset }}.tar.gz
|
||||
dist/${{ matrix.asset }}.sha256
|
||||
path: dist/*
|
||||
|
||||
# ==========================
|
||||
# MUSL
|
||||
# ==========================
|
||||
# ==========================
|
||||
# MUSL
|
||||
# ==========================
|
||||
build-musl:
|
||||
name: MUSL ${{ matrix.target }}
|
||||
name: MUSL ${{ matrix.asset }}
|
||||
runs-on: ubuntu-latest
|
||||
needs: prepare
|
||||
|
||||
container:
|
||||
image: rust:slim-bookworm
|
||||
|
|
@ -116,8 +180,15 @@ jobs:
|
|||
include:
|
||||
- target: x86_64-unknown-linux-musl
|
||||
asset: telemt-x86_64-linux-musl
|
||||
cpu: baseline
|
||||
|
||||
- target: x86_64-unknown-linux-musl
|
||||
asset: telemt-x86_64-v3-linux-musl
|
||||
cpu: v3
|
||||
|
||||
- target: aarch64-unknown-linux-musl
|
||||
asset: telemt-aarch64-linux-musl
|
||||
cpu: generic
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
|
@ -138,30 +209,29 @@ jobs:
|
|||
|
||||
- name: Install aarch64 musl toolchain
|
||||
if: matrix.target == 'aarch64-unknown-linux-musl'
|
||||
shell: bash
|
||||
run: |
|
||||
set -e
|
||||
set -euo pipefail
|
||||
|
||||
TOOLCHAIN_DIR="$HOME/.musl-aarch64"
|
||||
ARCHIVE="aarch64-linux-musl-cross.tgz"
|
||||
URL="https://github.com/telemt/telemt/releases/download/toolchains/$ARCHIVE"
|
||||
URL="https://github.com/telemt/telemt/releases/download/toolchains/${ARCHIVE}"
|
||||
|
||||
if [ -x "$TOOLCHAIN_DIR/bin/aarch64-linux-musl-gcc" ]; then
|
||||
echo "✅ MUSL toolchain already installed"
|
||||
if [ -x "${TOOLCHAIN_DIR}/bin/aarch64-linux-musl-gcc" ]; then
|
||||
echo "MUSL toolchain cached"
|
||||
else
|
||||
echo "⬇️ Downloading musl toolchain from Telemt GitHub Releases..."
|
||||
|
||||
curl -fL \
|
||||
--retry 5 \
|
||||
--retry-delay 3 \
|
||||
--connect-timeout 10 \
|
||||
--max-time 120 \
|
||||
-o "$ARCHIVE" "$URL"
|
||||
-o "${ARCHIVE}" "${URL}"
|
||||
|
||||
mkdir -p "$TOOLCHAIN_DIR"
|
||||
tar -xzf "$ARCHIVE" --strip-components=1 -C "$TOOLCHAIN_DIR"
|
||||
mkdir -p "${TOOLCHAIN_DIR}"
|
||||
tar -xzf "${ARCHIVE}" --strip-components=1 -C "${TOOLCHAIN_DIR}"
|
||||
fi
|
||||
|
||||
echo "$TOOLCHAIN_DIR/bin" >> $GITHUB_PATH
|
||||
echo "${TOOLCHAIN_DIR}/bin" >> "${GITHUB_PATH}"
|
||||
|
||||
- name: Add rust target
|
||||
run: rustup target add ${{ matrix.target }}
|
||||
|
|
@ -172,96 +242,62 @@ jobs:
|
|||
/usr/local/cargo/registry
|
||||
/usr/local/cargo/git
|
||||
target
|
||||
key: musl-${{ matrix.target }}-${{ hashFiles('**/Cargo.lock') }}
|
||||
key: musl-${{ matrix.asset }}-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
musl-${{ matrix.asset }}-
|
||||
musl-
|
||||
|
||||
- name: Build
|
||||
shell: bash
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
if [ "${{ matrix.target }}" = "aarch64-unknown-linux-musl" ]; then
|
||||
export CC=aarch64-linux-musl-gcc
|
||||
export CC_aarch64_unknown_linux_musl=aarch64-linux-musl-gcc
|
||||
export RUSTFLAGS="-C target-feature=+crt-static -C linker=aarch64-linux-musl-gcc"
|
||||
export RUSTFLAGS="-C target-feature=+crt-static -C linker=aarch64-linux-musl-gcc -C lto=fat -C panic=abort"
|
||||
else
|
||||
export CC=musl-gcc
|
||||
export CC_x86_64_unknown_linux_musl=musl-gcc
|
||||
export RUSTFLAGS="-C target-feature=+crt-static"
|
||||
|
||||
if [ "${{ matrix.cpu }}" = "v3" ]; then
|
||||
CPU_FLAGS="-C target-cpu=x86-64-v3"
|
||||
else
|
||||
CPU_FLAGS="-C target-cpu=x86-64"
|
||||
fi
|
||||
|
||||
export RUSTFLAGS="-C target-feature=+crt-static -C lto=fat -C panic=abort ${CPU_FLAGS}"
|
||||
fi
|
||||
|
||||
cargo build --release --target ${{ matrix.target }}
|
||||
cargo build --release --target ${{ matrix.target }} -j "$(nproc)"
|
||||
|
||||
- name: Package
|
||||
shell: bash
|
||||
run: |
|
||||
mkdir -p dist
|
||||
BIN=target/${{ matrix.target }}/release/${{ env.BINARY_NAME }}
|
||||
set -euo pipefail
|
||||
|
||||
cp "$BIN" dist/${{ env.BINARY_NAME }}-${{ matrix.target }}
|
||||
mkdir -p dist
|
||||
cp "target/${{ matrix.target }}/release/${{ env.BINARY_NAME }}" dist/telemt
|
||||
|
||||
cd dist
|
||||
tar -czf ${{ matrix.asset }}.tar.gz ${{ env.BINARY_NAME }}-${{ matrix.target }}
|
||||
sha256sum ${{ matrix.asset }}.tar.gz > ${{ matrix.asset }}.sha256
|
||||
tar -czf "${{ matrix.asset }}.tar.gz" \
|
||||
--owner=0 --group=0 --numeric-owner \
|
||||
telemt
|
||||
|
||||
sha256sum "${{ matrix.asset }}.tar.gz" > "${{ matrix.asset }}.tar.gz.sha256"
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.asset }}
|
||||
path: |
|
||||
dist/${{ matrix.asset }}.tar.gz
|
||||
dist/${{ matrix.asset }}.sha256
|
||||
path: dist/*
|
||||
|
||||
# ==========================
|
||||
# Docker
|
||||
# ==========================
|
||||
docker:
|
||||
name: Docker
|
||||
runs-on: ubuntu-latest
|
||||
needs: [build-gnu, build-musl]
|
||||
continue-on-error: true
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: artifacts
|
||||
|
||||
- name: Extract binaries
|
||||
run: |
|
||||
mkdir dist
|
||||
find artifacts -name "*.tar.gz" -exec tar -xzf {} -C dist \;
|
||||
|
||||
cp dist/telemt-x86_64-unknown-linux-musl dist/telemt || true
|
||||
|
||||
- uses: docker/setup-qemu-action@v3
|
||||
- uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Login to GHCR
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract version
|
||||
id: vars
|
||||
run: echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Build & Push
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: |
|
||||
ghcr.io/${{ github.repository }}:${{ steps.vars.outputs.VERSION }}
|
||||
ghcr.io/${{ github.repository }}:latest
|
||||
build-args: |
|
||||
BINARY=dist/telemt
|
||||
|
||||
# ==========================
|
||||
# Release
|
||||
# ==========================
|
||||
# ==========================
|
||||
# Release
|
||||
# ==========================
|
||||
release:
|
||||
name: Release
|
||||
runs-on: ubuntu-latest
|
||||
needs: [build-gnu, build-musl]
|
||||
needs: [prepare, build-gnu, build-musl]
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
|
@ -272,14 +308,98 @@ jobs:
|
|||
path: artifacts
|
||||
|
||||
- name: Flatten artifacts
|
||||
shell: bash
|
||||
run: |
|
||||
mkdir dist
|
||||
set -euo pipefail
|
||||
mkdir -p dist
|
||||
find artifacts -type f -exec cp {} dist/ \;
|
||||
|
||||
- name: Create Release
|
||||
- name: Create GitHub Release
|
||||
uses: softprops/action-gh-release@v2
|
||||
with:
|
||||
tag_name: ${{ needs.prepare.outputs.version }}
|
||||
target_commitish: ${{ github.sha }}
|
||||
files: dist/*
|
||||
generate_release_notes: true
|
||||
draft: false
|
||||
prerelease: ${{ contains(github.ref, '-rc') || contains(github.ref, '-beta') || contains(github.ref, '-alpha') }}
|
||||
prerelease: ${{ needs.prepare.outputs.prerelease == 'true' }}
|
||||
overwrite_files: true
|
||||
|
||||
# ==========================
|
||||
# Docker
|
||||
# ==========================
|
||||
docker:
|
||||
name: Docker
|
||||
runs-on: ubuntu-latest
|
||||
needs: [prepare, release]
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: docker/setup-qemu-action@v3
|
||||
|
||||
- uses: docker/setup-buildx-action@v3
|
||||
|
||||
- uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Probe release assets
|
||||
shell: bash
|
||||
env:
|
||||
VERSION: ${{ needs.prepare.outputs.version }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
for asset in \
|
||||
telemt-x86_64-linux-musl.tar.gz \
|
||||
telemt-x86_64-linux-musl.tar.gz.sha256 \
|
||||
telemt-aarch64-linux-musl.tar.gz \
|
||||
telemt-aarch64-linux-musl.tar.gz.sha256
|
||||
do
|
||||
curl -fsIL \
|
||||
--retry 10 \
|
||||
--retry-delay 3 \
|
||||
"https://github.com/${GITHUB_REPOSITORY}/releases/download/${VERSION}/${asset}" \
|
||||
> /dev/null
|
||||
done
|
||||
|
||||
- name: Compute image tags
|
||||
id: meta
|
||||
shell: bash
|
||||
env:
|
||||
VERSION: ${{ needs.prepare.outputs.version }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
IMAGE="$(echo "ghcr.io/${GITHUB_REPOSITORY}" | tr '[:upper:]' '[:lower:]')"
|
||||
TAGS="${IMAGE}:${VERSION}"
|
||||
|
||||
if [[ "${VERSION}" != *-* ]]; then
|
||||
TAGS="${TAGS}"$'\n'"${IMAGE}:latest"
|
||||
fi
|
||||
|
||||
{
|
||||
echo "tags<<EOF"
|
||||
printf '%s\n' "${TAGS}"
|
||||
echo "EOF"
|
||||
} >> "${GITHUB_OUTPUT}"
|
||||
|
||||
- name: Build & Push
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
pull: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
build-args: |
|
||||
TELEMT_REPOSITORY=${{ github.repository }}
|
||||
TELEMT_VERSION=${{ needs.prepare.outputs.version }}
|
||||
cache-from: type=gha
|
||||
cache-to: type=gha,mode=max
|
||||
|
|
|
|||
|
|
@ -1,66 +0,0 @@
|
|||
name: Rust
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "*" ]
|
||||
pull_request:
|
||||
branches: [ "*" ]
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
actions: write
|
||||
checks: write
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Install latest stable Rust toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
components: rustfmt, clippy
|
||||
|
||||
- name: Cache cargo registry & build artifacts
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-cargo-
|
||||
|
||||
- name: Build Release
|
||||
run: cargo build --release --verbose
|
||||
|
||||
- name: Run tests
|
||||
run: cargo test --verbose
|
||||
|
||||
- name: Stress quota-lock suites (PR only)
|
||||
if: github.event_name == 'pull_request'
|
||||
env:
|
||||
RUST_TEST_THREADS: 16
|
||||
run: |
|
||||
set -euo pipefail
|
||||
for i in $(seq 1 12); do
|
||||
echo "[quota-lock-stress] iteration ${i}/12"
|
||||
cargo test quota_lock_ --bin telemt -- --nocapture --test-threads 16
|
||||
cargo test relay_quota_wake --bin telemt -- --nocapture --test-threads 16
|
||||
done
|
||||
|
||||
# clippy dont fail on warnings because of active development of telemt
|
||||
# and many warnings
|
||||
- name: Run clippy
|
||||
run: cargo clippy -- --cap-lints warn
|
||||
|
||||
- name: Check for unused dependencies
|
||||
run: cargo udeps || true
|
||||
|
|
@ -0,0 +1,139 @@
|
|||
name: Check
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ "*" ]
|
||||
pull_request:
|
||||
branches: [ "*" ]
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
concurrency:
|
||||
group: test-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
# ==========================
|
||||
# Formatting
|
||||
# ==========================
|
||||
fmt:
|
||||
name: Fmt
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
components: rustfmt
|
||||
|
||||
- run: cargo fmt -- --check
|
||||
|
||||
# ==========================
|
||||
# Tests
|
||||
# ==========================
|
||||
test:
|
||||
name: Test
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
actions: write
|
||||
checks: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/bin
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-cargo-nextest-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-cargo-nextest-
|
||||
${{ runner.os }}-cargo-
|
||||
|
||||
- name: Install cargo-nextest
|
||||
run: cargo install --locked cargo-nextest || true
|
||||
|
||||
- name: Run tests with nextest
|
||||
run: cargo nextest run -j "$(nproc)"
|
||||
|
||||
# ==========================
|
||||
# Clippy
|
||||
# ==========================
|
||||
clippy:
|
||||
name: Clippy
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
checks: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
components: clippy
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-cargo-clippy-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-cargo-clippy-
|
||||
${{ runner.os }}-cargo-
|
||||
|
||||
- name: Run clippy
|
||||
run: cargo clippy -j "$(nproc)" -- --cap-lints warn
|
||||
|
||||
# ==========================
|
||||
# Udeps
|
||||
# ==========================
|
||||
udeps:
|
||||
name: Udeps
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
with:
|
||||
components: rust-src
|
||||
|
||||
- name: Cache cargo
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/bin
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-cargo-udeps-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-cargo-udeps-
|
||||
${{ runner.os }}-cargo-
|
||||
|
||||
- name: Install cargo-udeps
|
||||
run: cargo install --locked cargo-udeps || true
|
||||
|
||||
- name: Run udeps
|
||||
run: cargo udeps -j "$(nproc)" || true
|
||||
|
|
@ -1,58 +0,0 @@
|
|||
# Architect Mode Rules for Telemt
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
```mermaid
|
||||
graph TB
|
||||
subgraph Entry
|
||||
Client[Clients] --> Listener[TCP/Unix Listener]
|
||||
end
|
||||
|
||||
subgraph Proxy Layer
|
||||
Listener --> ClientHandler[ClientHandler]
|
||||
ClientHandler --> Handshake[Handshake Validator]
|
||||
Handshake --> |Valid| Relay[Relay Layer]
|
||||
Handshake --> |Invalid| Masking[Masking/TLS Fronting]
|
||||
end
|
||||
|
||||
subgraph Transport
|
||||
Relay --> MiddleProxy[Middle-End Proxy Pool]
|
||||
Relay --> DirectRelay[Direct DC Relay]
|
||||
MiddleProxy --> TelegramDC[Telegram DCs]
|
||||
DirectRelay --> TelegramDC
|
||||
end
|
||||
```
|
||||
|
||||
## Module Dependencies
|
||||
- [`src/main.rs`](src/main.rs) - Entry point, spawns all async tasks
|
||||
- [`src/config/`](src/config/) - Configuration loading with auto-migration
|
||||
- [`src/error.rs`](src/error.rs) - Error types, must be used by all modules
|
||||
- [`src/crypto/`](src/crypto/) - AES, SHA, random number generation
|
||||
- [`src/protocol/`](src/protocol/) - MTProto constants, frame encoding, obfuscation
|
||||
- [`src/stream/`](src/stream/) - Stream wrappers, buffer pool, frame codecs
|
||||
- [`src/proxy/`](src/proxy/) - Client handling, handshake, relay logic
|
||||
- [`src/transport/`](src/transport/) - Upstream management, middle-proxy, SOCKS support
|
||||
- [`src/stats/`](src/stats/) - Statistics and replay protection
|
||||
- [`src/ip_tracker.rs`](src/ip_tracker.rs) - Per-user IP tracking
|
||||
|
||||
## Key Architectural Constraints
|
||||
|
||||
### Middle-End Proxy Mode
|
||||
- Requires public IP on interface OR 1:1 NAT with STUN probing
|
||||
- Uses separate `proxy-secret` from Telegram (NOT user secrets)
|
||||
- Falls back to direct mode automatically on STUN mismatch
|
||||
|
||||
### TLS Fronting
|
||||
- Invalid handshakes are transparently proxied to `mask_host`
|
||||
- This is critical for DPI evasion - do not change this behavior
|
||||
- `mask_unix_sock` and `mask_host` are mutually exclusive
|
||||
|
||||
### Stream Architecture
|
||||
- Buffer pool is shared globally via Arc - prevents allocation storms
|
||||
- Frame codecs implement tokio-util Encoder/Decoder traits
|
||||
- State machine in [`src/stream/state.rs`](src/stream/state.rs) manages stream transitions
|
||||
|
||||
### Configuration Migration
|
||||
- [`ProxyConfig::load()`](src/config/mod.rs:641) mutates config in-place
|
||||
- New fields must have sensible defaults
|
||||
- DC203 override is auto-injected for CDN/media support
|
||||
|
|
@ -1,23 +0,0 @@
|
|||
# Code Mode Rules for Telemt
|
||||
|
||||
## Error Handling
|
||||
- Always use [`ProxyError`](src/error.rs:168) from [`src/error.rs`](src/error.rs) for proxy operations
|
||||
- [`HandshakeResult<T,R,W>`](src/error.rs:292) returns streams on bad client - these MUST be returned for masking, never dropped
|
||||
- Use [`Recoverable`](src/error.rs:110) trait to check if errors are retryable
|
||||
|
||||
## Configuration Changes
|
||||
- [`ProxyConfig::load()`](src/config/mod.rs:641) auto-mutates config - new fields should have defaults
|
||||
- DC203 override is auto-injected if missing - do not remove this behavior
|
||||
- When adding config fields, add migration logic in [`ProxyConfig::load()`](src/config/mod.rs:641)
|
||||
|
||||
## Crypto Code
|
||||
- [`SecureRandom`](src/crypto/random.rs) from [`src/crypto/random.rs`](src/crypto/random.rs) must be used for all crypto operations
|
||||
- Never use `rand::thread_rng()` directly - use the shared `Arc<SecureRandom>`
|
||||
|
||||
## Stream Handling
|
||||
- Buffer pool [`BufferPool`](src/stream/buffer_pool.rs) is shared via Arc - always use it instead of allocating
|
||||
- Frame codecs in [`src/stream/frame_codec.rs`](src/stream/frame_codec.rs) implement tokio-util's Encoder/Decoder traits
|
||||
|
||||
## Testing
|
||||
- Tests are inline in modules using `#[cfg(test)]`
|
||||
- Use `cargo test --lib <module_name>` to run tests for specific modules
|
||||
|
|
@ -1,27 +0,0 @@
|
|||
# Debug Mode Rules for Telemt
|
||||
|
||||
## Logging
|
||||
- `RUST_LOG` environment variable takes absolute priority over all config log levels
|
||||
- Log levels: `trace`, `debug`, `info`, `warn`, `error`
|
||||
- Use `RUST_LOG=debug cargo run` for detailed operational logs
|
||||
- Use `RUST_LOG=trace cargo run` for full protocol-level debugging
|
||||
|
||||
## Middle-End Proxy Debugging
|
||||
- Set `ME_DIAG=1` environment variable for high-precision cryptography diagnostics
|
||||
- STUN probe results are logged at startup - check for mismatch between local and reflected IP
|
||||
- If Middle-End fails, check `proxy_secret_path` points to valid file from https://core.telegram.org/getProxySecret
|
||||
|
||||
## Connection Issues
|
||||
- DC connectivity is logged at startup with RTT measurements
|
||||
- If DC ping fails, check `dc_overrides` for custom addresses
|
||||
- Use `prefer_ipv6=false` in config if IPv6 is unreliable
|
||||
|
||||
## TLS Fronting Issues
|
||||
- Invalid handshakes are proxied to `mask_host` - check this host is reachable
|
||||
- `mask_unix_sock` and `mask_host` are mutually exclusive - only one can be set
|
||||
- If `mask_unix_sock` is set, socket must exist before connections arrive
|
||||
|
||||
## Common Errors
|
||||
- `ReplayAttack` - client replayed a handshake nonce, potential attack
|
||||
- `TimeSkew` - client clock is off, can disable with `ignore_time_skew=true`
|
||||
- `TgHandshakeTimeout` - upstream DC connection failed, check network
|
||||
|
|
@ -1,8 +1,8 @@
|
|||
# Code of Conduct
|
||||
|
||||
## 1. Purpose
|
||||
## Purpose
|
||||
|
||||
Telemt exists to solve technical problems.
|
||||
**Telemt exists to solve technical problems.**
|
||||
|
||||
Telemt is open to contributors who want to learn, improve and build meaningful systems together.
|
||||
|
||||
|
|
@ -18,27 +18,34 @@ Technology has consequences. Responsibility is inherent.
|
|||
|
||||
---
|
||||
|
||||
## 2. Principles
|
||||
## Principles
|
||||
|
||||
* **Technical over emotional**
|
||||
|
||||
Arguments are grounded in data, logs, reproducible cases, or clear reasoning.
|
||||
|
||||
* **Clarity over noise**
|
||||
|
||||
Communication is structured, concise, and relevant.
|
||||
|
||||
* **Openness with standards**
|
||||
|
||||
Participation is open. The work remains disciplined.
|
||||
|
||||
* **Independence of judgment**
|
||||
|
||||
Claims are evaluated on technical merit, not affiliation or posture.
|
||||
|
||||
* **Responsibility over capability**
|
||||
|
||||
Capability does not justify careless use.
|
||||
|
||||
* **Cooperation over friction**
|
||||
|
||||
Progress depends on coordination, mutual support, and honest review.
|
||||
|
||||
* **Good intent, rigorous method**
|
||||
|
||||
Assume good intent, but require rigor.
|
||||
|
||||
> **Aussagen gelten nach ihrer Begründung.**
|
||||
|
|
@ -47,7 +54,7 @@ Technology has consequences. Responsibility is inherent.
|
|||
|
||||
---
|
||||
|
||||
## 3. Expected Behavior
|
||||
## Expected Behavior
|
||||
|
||||
Participants are expected to:
|
||||
|
||||
|
|
@ -69,7 +76,7 @@ New contributors are welcome. They are expected to grow into these standards. Ex
|
|||
|
||||
---
|
||||
|
||||
## 4. Unacceptable Behavior
|
||||
## Unacceptable Behavior
|
||||
|
||||
The following is not allowed:
|
||||
|
||||
|
|
@ -89,7 +96,7 @@ Such discussions may be closed, removed, or redirected.
|
|||
|
||||
---
|
||||
|
||||
## 5. Security and Misuse
|
||||
## Security and Misuse
|
||||
|
||||
Telemt is intended for responsible use.
|
||||
|
||||
|
|
@ -109,15 +116,13 @@ Security is both technical and behavioral.
|
|||
|
||||
Telemt is open to contributors of different backgrounds, experience levels, and working styles.
|
||||
|
||||
Standards are public, legible, and applied to the work itself.
|
||||
|
||||
Questions are welcome. Careful disagreement is welcome. Honest correction is welcome.
|
||||
|
||||
Gatekeeping by obscurity, status signaling, or hostility is not.
|
||||
- Standards are public, legible, and applied to the work itself.
|
||||
- Questions are welcome. Careful disagreement is welcome. Honest correction is welcome.
|
||||
- Gatekeeping by obscurity, status signaling, or hostility is not.
|
||||
|
||||
---
|
||||
|
||||
## 7. Scope
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies to all official spaces:
|
||||
|
||||
|
|
@ -127,16 +132,19 @@ This Code of Conduct applies to all official spaces:
|
|||
|
||||
---
|
||||
|
||||
## 8. Maintainer Stewardship
|
||||
## Maintainer Stewardship
|
||||
|
||||
Maintainers are responsible for final decisions in matters of conduct, scope, and direction.
|
||||
|
||||
This responsibility is stewardship: preserving continuity, protecting signal, maintaining standards, and keeping Telemt workable for others.
|
||||
This responsibility is stewardship:
|
||||
- preserving continuity,
|
||||
- protecting signal,
|
||||
- maintaining standards,
|
||||
- keeping Telemt workable for others.
|
||||
|
||||
Judgment should be exercised with restraint, consistency, and institutional responsibility.
|
||||
|
||||
Not every decision requires extended debate.
|
||||
Not every intervention requires public explanation.
|
||||
- Not every decision requires extended debate.
|
||||
- Not every intervention requires public explanation.
|
||||
|
||||
All decisions are expected to serve the durability, clarity, and integrity of Telemt.
|
||||
|
||||
|
|
@ -146,7 +154,7 @@ All decisions are expected to serve the durability, clarity, and integrity of Te
|
|||
|
||||
---
|
||||
|
||||
## 9. Enforcement
|
||||
## Enforcement
|
||||
|
||||
Maintainers may act to preserve the integrity of Telemt, including by:
|
||||
|
||||
|
|
@ -156,44 +164,40 @@ Maintainers may act to preserve the integrity of Telemt, including by:
|
|||
* Restricting or banning participants
|
||||
|
||||
Actions are taken to maintain function, continuity, and signal quality.
|
||||
|
||||
Where possible, correction is preferred to exclusion.
|
||||
|
||||
Where necessary, exclusion is preferred to decay.
|
||||
- Where possible, correction is preferred to exclusion.
|
||||
- Where necessary, exclusion is preferred to decay.
|
||||
|
||||
---
|
||||
|
||||
## 10. Final
|
||||
## Final
|
||||
|
||||
Telemt is built on discipline, structure, and shared intent.
|
||||
- Signal over noise.
|
||||
- Facts over opinion.
|
||||
- Systems over rhetoric.
|
||||
|
||||
Signal over noise.
|
||||
Facts over opinion.
|
||||
Systems over rhetoric.
|
||||
- Work is collective.
|
||||
- Outcomes are shared.
|
||||
- Responsibility is distributed.
|
||||
|
||||
Work is collective.
|
||||
Outcomes are shared.
|
||||
Responsibility is distributed.
|
||||
|
||||
Precision is learned.
|
||||
Rigor is expected.
|
||||
Help is part of the work.
|
||||
- Precision is learned.
|
||||
- Rigor is expected.
|
||||
- Help is part of the work.
|
||||
|
||||
> **Ordnung ist Voraussetzung der Freiheit.**
|
||||
|
||||
If you contribute — contribute with care.
|
||||
If you speak — speak with substance.
|
||||
If you engage — engage constructively.
|
||||
- If you contribute — contribute with care.
|
||||
- If you speak — speak with substance.
|
||||
- If you engage — engage constructively.
|
||||
|
||||
---
|
||||
|
||||
## 11. After All
|
||||
## After All
|
||||
|
||||
Systems outlive intentions.
|
||||
|
||||
What is built will be used.
|
||||
What is released will propagate.
|
||||
What is maintained will define the future state.
|
||||
- What is built will be used.
|
||||
- What is released will propagate.
|
||||
- What is maintained will define the future state.
|
||||
|
||||
There is no neutral infrastructure, only infrastructure shaped well or poorly.
|
||||
|
||||
|
|
@ -201,8 +205,8 @@ There is no neutral infrastructure, only infrastructure shaped well or poorly.
|
|||
|
||||
> Every system carries responsibility.
|
||||
|
||||
Stability requires discipline.
|
||||
Freedom requires structure.
|
||||
Trust requires honesty.
|
||||
- Stability requires discipline.
|
||||
- Freedom requires structure.
|
||||
- Trust requires honesty.
|
||||
|
||||
In the end, the system reflects its contributors.
|
||||
In the end: the system reflects its contributors.
|
||||
|
|
|
|||
|
|
@ -1,19 +1,82 @@
|
|||
# Issues - Rules
|
||||
# Issues
|
||||
## Warnung
|
||||
Before opening Issue, if it is more question than problem or bug - ask about that [in our chat](https://t.me/telemtrs)
|
||||
|
||||
## What it is not
|
||||
- NOT Question and Answer
|
||||
- NOT Helpdesk
|
||||
|
||||
# Pull Requests - Rules
|
||||
***Each of your Issues triggers attempts to reproduce problems and analyze them, which are done manually by people***
|
||||
|
||||
---
|
||||
|
||||
# Pull Requests
|
||||
|
||||
## General
|
||||
- ONLY signed and verified commits
|
||||
- ONLY from your name
|
||||
- DO NOT commit with `codex` or `claude` as author/commiter
|
||||
- DO NOT commit with `codex`, `claude`, or other AI tools as author/committer
|
||||
- PREFER `flow` branch for development, not `main`
|
||||
|
||||
## AI
|
||||
We are not against modern tools, like AI, where you act as a principal or architect, but we consider it important:
|
||||
---
|
||||
|
||||
- you really understand what you're doing
|
||||
- you understand the relationships and dependencies of the components being modified
|
||||
- you understand the architecture of Telegram MTProto, MTProxy, Middle-End KDF at least generically
|
||||
- you DO NOT commit for the sake of commits, but to help the community, core-developers and ordinary users
|
||||
## Definition of Ready (MANDATORY)
|
||||
|
||||
A Pull Request WILL be ignored or closed if:
|
||||
|
||||
- it does NOT build
|
||||
- it does NOT pass tests
|
||||
- it does NOT follow formatting rules
|
||||
- it contains unrelated or excessive changes
|
||||
- the author cannot clearly explain the change
|
||||
|
||||
---
|
||||
|
||||
## Blessed Principles
|
||||
- PR must build
|
||||
- PR must pass tests
|
||||
- PR must be understood by author
|
||||
|
||||
---
|
||||
|
||||
## AI Usage Policy
|
||||
|
||||
AI tools (Claude, ChatGPT, Codex, DeepSeek, etc.) are allowed as **assistants**, NOT as decision-makers.
|
||||
|
||||
By submitting a PR, you confirm that:
|
||||
|
||||
- you fully understand the code you submit
|
||||
- you verified correctness manually
|
||||
- you reviewed architecture and dependencies
|
||||
- you take full responsibility for the change
|
||||
|
||||
AI-generated code is treated as **draft** and must be validated like any other external contribution.
|
||||
|
||||
PRs that look like unverified AI dumps WILL be closed
|
||||
|
||||
---
|
||||
|
||||
## Maintainer Policy
|
||||
|
||||
Maintainers reserve the right to:
|
||||
|
||||
- close PRs that do not meet basic quality requirements
|
||||
- request explanations before review
|
||||
- ignore low-effort contributions
|
||||
|
||||
Respect the reviewers time
|
||||
|
||||
---
|
||||
|
||||
## Enforcement
|
||||
|
||||
Pull Requests that violate project standards may be closed without review.
|
||||
|
||||
This includes (but is not limited to):
|
||||
|
||||
- non-building code
|
||||
- failing tests
|
||||
- unverified or low-effort changes
|
||||
- inability to explain the change
|
||||
|
||||
These actions follow the Code of Conduct and are intended to preserve signal, quality, and Telemt's integrity
|
||||
|
|
@ -183,9 +183,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "aws-lc-sys"
|
||||
version = "0.39.0"
|
||||
version = "0.39.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "1fa7e52a4c5c547c741610a2c6f123f3881e409b714cd27e6798ef020c514f0a"
|
||||
checksum = "83a25cf98105baa966497416dbd42565ce3a8cf8dbfd59803ec9ad46f3126399"
|
||||
dependencies = [
|
||||
"cc",
|
||||
"cmake",
|
||||
|
|
@ -234,16 +234,16 @@ checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af"
|
|||
|
||||
[[package]]
|
||||
name = "blake3"
|
||||
version = "1.8.3"
|
||||
version = "1.8.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2468ef7d57b3fb7e16b576e8377cdbde2320c60e1491e961d11da40fc4f02a2d"
|
||||
checksum = "4d2d5991425dfd0785aed03aedcf0b321d61975c9b5b3689c774a2610ae0b51e"
|
||||
dependencies = [
|
||||
"arrayref",
|
||||
"arrayvec",
|
||||
"cc",
|
||||
"cfg-if",
|
||||
"constant_time_eq",
|
||||
"cpufeatures 0.2.17",
|
||||
"cpufeatures 0.3.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
|
@ -299,9 +299,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "cc"
|
||||
version = "1.2.57"
|
||||
version = "1.2.58"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7a0dd1ca384932ff3641c8718a02769f1698e7563dc6974ffd03346116310423"
|
||||
checksum = "e1e928d4b69e3077709075a938a05ffbedfa53a84c8f766efbf8220bb1ff60e1"
|
||||
dependencies = [
|
||||
"find-msvc-tools",
|
||||
"jobserver",
|
||||
|
|
@ -441,9 +441,9 @@ checksum = "c8d4a3bb8b1e0c1050499d1815f5ab16d04f0959b233085fb31653fbfc9d98f9"
|
|||
|
||||
[[package]]
|
||||
name = "cmake"
|
||||
version = "0.1.57"
|
||||
version = "0.1.58"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d"
|
||||
checksum = "c0f78a02292a74a88ac736019ab962ece0bc380e3f977bf72e376c5d78ff0678"
|
||||
dependencies = [
|
||||
"cc",
|
||||
]
|
||||
|
|
@ -1191,9 +1191,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
|
|||
|
||||
[[package]]
|
||||
name = "hyper"
|
||||
version = "1.8.1"
|
||||
version = "1.9.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11"
|
||||
checksum = "6299f016b246a94207e63da54dbe807655bf9e00044f73ded42c3ac5305fbcca"
|
||||
dependencies = [
|
||||
"atomic-waker",
|
||||
"bytes",
|
||||
|
|
@ -1206,7 +1206,6 @@ dependencies = [
|
|||
"httpdate",
|
||||
"itoa",
|
||||
"pin-project-lite",
|
||||
"pin-utils",
|
||||
"smallvec",
|
||||
"tokio",
|
||||
"want",
|
||||
|
|
@ -1245,7 +1244,7 @@ dependencies = [
|
|||
"libc",
|
||||
"percent-encoding",
|
||||
"pin-project-lite",
|
||||
"socket2 0.6.3",
|
||||
"socket2",
|
||||
"tokio",
|
||||
"tower-service",
|
||||
"tracing",
|
||||
|
|
@ -1277,12 +1276,13 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "icu_collections"
|
||||
version = "2.1.1"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43"
|
||||
checksum = "2984d1cd16c883d7935b9e07e44071dca8d917fd52ecc02c04d5fa0b5a3f191c"
|
||||
dependencies = [
|
||||
"displaydoc",
|
||||
"potential_utf",
|
||||
"utf8_iter",
|
||||
"yoke",
|
||||
"zerofrom",
|
||||
"zerovec",
|
||||
|
|
@ -1290,9 +1290,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "icu_locale_core"
|
||||
version = "2.1.1"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6"
|
||||
checksum = "92219b62b3e2b4d88ac5119f8904c10f8f61bf7e95b640d25ba3075e6cac2c29"
|
||||
dependencies = [
|
||||
"displaydoc",
|
||||
"litemap",
|
||||
|
|
@ -1303,9 +1303,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "icu_normalizer"
|
||||
version = "2.1.1"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599"
|
||||
checksum = "c56e5ee99d6e3d33bd91c5d85458b6005a22140021cc324cea84dd0e72cff3b4"
|
||||
dependencies = [
|
||||
"icu_collections",
|
||||
"icu_normalizer_data",
|
||||
|
|
@ -1317,15 +1317,15 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "icu_normalizer_data"
|
||||
version = "2.1.1"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a"
|
||||
checksum = "da3be0ae77ea334f4da67c12f149704f19f81d1adf7c51cf482943e84a2bad38"
|
||||
|
||||
[[package]]
|
||||
name = "icu_properties"
|
||||
version = "2.1.2"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec"
|
||||
checksum = "bee3b67d0ea5c2cca5003417989af8996f8604e34fb9ddf96208a033901e70de"
|
||||
dependencies = [
|
||||
"icu_collections",
|
||||
"icu_locale_core",
|
||||
|
|
@ -1337,15 +1337,15 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "icu_properties_data"
|
||||
version = "2.1.2"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af"
|
||||
checksum = "8e2bbb201e0c04f7b4b3e14382af113e17ba4f63e2c9d2ee626b720cbce54a14"
|
||||
|
||||
[[package]]
|
||||
name = "icu_provider"
|
||||
version = "2.1.1"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614"
|
||||
checksum = "139c4cf31c8b5f33d7e199446eff9c1e02decfc2f0eec2c8d71f65befa45b421"
|
||||
dependencies = [
|
||||
"displaydoc",
|
||||
"icu_locale_core",
|
||||
|
|
@ -1427,14 +1427,15 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "ipconfig"
|
||||
version = "0.3.2"
|
||||
version = "0.3.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f"
|
||||
checksum = "4d40460c0ce33d6ce4b0630ad68ff63d6661961c48b6dba35e5a4d81cfb48222"
|
||||
dependencies = [
|
||||
"socket2 0.5.10",
|
||||
"socket2",
|
||||
"widestring",
|
||||
"windows-sys 0.48.0",
|
||||
"winreg",
|
||||
"windows-registry",
|
||||
"windows-result",
|
||||
"windows-sys 0.61.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
|
|
@ -1454,9 +1455,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "iri-string"
|
||||
version = "0.7.10"
|
||||
version = "0.7.12"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a"
|
||||
checksum = "25e659a4bb38e810ebc252e53b5814ff908a8c58c2a9ce2fae1bbec24cbf4e20"
|
||||
dependencies = [
|
||||
"memchr",
|
||||
"serde",
|
||||
|
|
@ -1486,7 +1487,7 @@ dependencies = [
|
|||
"cesu8",
|
||||
"cfg-if",
|
||||
"combine",
|
||||
"jni-sys",
|
||||
"jni-sys 0.3.1",
|
||||
"log",
|
||||
"thiserror 1.0.69",
|
||||
"walkdir",
|
||||
|
|
@ -1495,9 +1496,31 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "jni-sys"
|
||||
version = "0.3.0"
|
||||
version = "0.3.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130"
|
||||
checksum = "41a652e1f9b6e0275df1f15b32661cf0d4b78d4d87ddec5e0c3c20f097433258"
|
||||
dependencies = [
|
||||
"jni-sys 0.4.1",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "jni-sys"
|
||||
version = "0.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c6377a88cb3910bee9b0fa88d4f42e1d2da8e79915598f65fb0c7ee14c878af2"
|
||||
dependencies = [
|
||||
"jni-sys-macros",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "jni-sys-macros"
|
||||
version = "0.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "38c0b942f458fe50cdac086d2f946512305e5631e720728f2a61aabcd47a6264"
|
||||
dependencies = [
|
||||
"quote",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "jobserver"
|
||||
|
|
@ -1511,10 +1534,12 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "js-sys"
|
||||
version = "0.3.91"
|
||||
version = "0.3.94"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b49715b7073f385ba4bc528e5747d02e66cb39c6146efb66b781f131f0fb399c"
|
||||
checksum = "2e04e2ef80ce82e13552136fabeef8a5ed1f985a96805761cbb9a2c34e7664d9"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"futures-util",
|
||||
"once_cell",
|
||||
"wasm-bindgen",
|
||||
]
|
||||
|
|
@ -1553,9 +1578,9 @@ checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2"
|
|||
|
||||
[[package]]
|
||||
name = "libc"
|
||||
version = "0.2.183"
|
||||
version = "0.2.184"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b5b646652bf6661599e1da8901b3b9522896f01e736bad5f723fe7a3a27f899d"
|
||||
checksum = "48f5d2a454e16a5ea0f4ced81bd44e4cfc7bd3a507b61887c99fd3538b28e4af"
|
||||
|
||||
[[package]]
|
||||
name = "linux-raw-sys"
|
||||
|
|
@ -1565,9 +1590,9 @@ checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53"
|
|||
|
||||
[[package]]
|
||||
name = "litemap"
|
||||
version = "0.8.1"
|
||||
version = "0.8.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77"
|
||||
checksum = "92daf443525c4cce67b150400bc2316076100ce0b3686209eb8cf3c31612e6f0"
|
||||
|
||||
[[package]]
|
||||
name = "lock_api"
|
||||
|
|
@ -1647,9 +1672,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
|
|||
|
||||
[[package]]
|
||||
name = "mio"
|
||||
version = "1.1.1"
|
||||
version = "1.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc"
|
||||
checksum = "50b7e5b27aa02a74bac8c3f23f448f8d87ff11f92d3aac1a6ed369ee08cc56c1"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"log",
|
||||
|
|
@ -1659,9 +1684,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "moka"
|
||||
version = "0.12.14"
|
||||
version = "0.12.15"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "85f8024e1c8e71c778968af91d43700ce1d11b219d127d79fb2934153b82b42b"
|
||||
checksum = "957228ad12042ee839f93c8f257b62b4c0ab5eaae1d4fa60de53b27c9d7c5046"
|
||||
dependencies = [
|
||||
"crossbeam-channel",
|
||||
"crossbeam-epoch",
|
||||
|
|
@ -1745,9 +1770,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "num-conv"
|
||||
version = "0.2.0"
|
||||
version = "0.2.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050"
|
||||
checksum = "c6673768db2d862beb9b39a78fdcb1a69439615d5794a1be50caa9bc92c81967"
|
||||
|
||||
[[package]]
|
||||
name = "num-integer"
|
||||
|
|
@ -1869,12 +1894,6 @@ version = "0.2.17"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a89322df9ebe1c1578d689c92318e070967d1042b512afbe49518723f4e6d5cd"
|
||||
|
||||
[[package]]
|
||||
name = "pin-utils"
|
||||
version = "0.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184"
|
||||
|
||||
[[package]]
|
||||
name = "pkcs8"
|
||||
version = "0.10.2"
|
||||
|
|
@ -1944,9 +1963,9 @@ checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49"
|
|||
|
||||
[[package]]
|
||||
name = "potential_utf"
|
||||
version = "0.1.4"
|
||||
version = "0.1.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77"
|
||||
checksum = "0103b1cef7ec0cf76490e969665504990193874ea05c85ff9bab8b911d0a0564"
|
||||
dependencies = [
|
||||
"zerovec",
|
||||
]
|
||||
|
|
@ -1987,9 +2006,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "proptest"
|
||||
version = "1.10.0"
|
||||
version = "1.11.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "37566cb3fdacef14c0737f9546df7cfeadbfbc9fef10991038bf5015d0c80532"
|
||||
checksum = "4b45fcc2344c680f5025fe57779faef368840d0bd1f42f216291f0dc4ace4744"
|
||||
dependencies = [
|
||||
"bit-set",
|
||||
"bit-vec",
|
||||
|
|
@ -2023,7 +2042,7 @@ dependencies = [
|
|||
"quinn-udp",
|
||||
"rustc-hash",
|
||||
"rustls",
|
||||
"socket2 0.6.3",
|
||||
"socket2",
|
||||
"thiserror 2.0.18",
|
||||
"tokio",
|
||||
"tracing",
|
||||
|
|
@ -2061,7 +2080,7 @@ dependencies = [
|
|||
"cfg_aliases",
|
||||
"libc",
|
||||
"once_cell",
|
||||
"socket2 0.6.3",
|
||||
"socket2",
|
||||
"tracing",
|
||||
"windows-sys 0.60.2",
|
||||
]
|
||||
|
|
@ -2279,9 +2298,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "rustc-hash"
|
||||
version = "2.1.1"
|
||||
version = "2.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d"
|
||||
checksum = "94300abf3f1ae2e2b8ffb7b58043de3d399c73fa6f4b73826402a5c457614dbe"
|
||||
|
||||
[[package]]
|
||||
name = "rustc_version"
|
||||
|
|
@ -2533,9 +2552,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "serde_spanned"
|
||||
version = "1.0.4"
|
||||
version = "1.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f8bbf91e5a4d6315eee45e704372590b30e260ee83af6639d64557f51b067776"
|
||||
checksum = "6662b5879511e06e8999a8a235d848113e942c9124f211511b16466ee2995f26"
|
||||
dependencies = [
|
||||
"serde_core",
|
||||
]
|
||||
|
|
@ -2603,7 +2622,7 @@ dependencies = [
|
|||
"serde_json",
|
||||
"serde_urlencoded",
|
||||
"shadowsocks-crypto",
|
||||
"socket2 0.6.3",
|
||||
"socket2",
|
||||
"spin",
|
||||
"thiserror 2.0.18",
|
||||
"tokio",
|
||||
|
|
@ -2675,16 +2694,6 @@ version = "1.15.1"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03"
|
||||
|
||||
[[package]]
|
||||
name = "socket2"
|
||||
version = "0.5.10"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "socket2"
|
||||
version = "0.6.3"
|
||||
|
|
@ -2771,7 +2780,7 @@ checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417"
|
|||
|
||||
[[package]]
|
||||
name = "telemt"
|
||||
version = "3.3.29"
|
||||
version = "3.3.38"
|
||||
dependencies = [
|
||||
"aes",
|
||||
"anyhow",
|
||||
|
|
@ -2812,7 +2821,7 @@ dependencies = [
|
|||
"sha1",
|
||||
"sha2",
|
||||
"shadowsocks",
|
||||
"socket2 0.6.3",
|
||||
"socket2",
|
||||
"static_assertions",
|
||||
"subtle",
|
||||
"thiserror 2.0.18",
|
||||
|
|
@ -2822,6 +2831,7 @@ dependencies = [
|
|||
"tokio-util",
|
||||
"toml",
|
||||
"tracing",
|
||||
"tracing-appender",
|
||||
"tracing-subscriber",
|
||||
"url",
|
||||
"webpki-roots",
|
||||
|
|
@ -2925,9 +2935,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "tinystr"
|
||||
version = "0.8.2"
|
||||
version = "0.8.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869"
|
||||
checksum = "c8323304221c2a851516f22236c5722a72eaa19749016521d6dff0824447d96d"
|
||||
dependencies = [
|
||||
"displaydoc",
|
||||
"zerovec",
|
||||
|
|
@ -2970,7 +2980,7 @@ dependencies = [
|
|||
"parking_lot",
|
||||
"pin-project-lite",
|
||||
"signal-hook-registry",
|
||||
"socket2 0.6.3",
|
||||
"socket2",
|
||||
"tokio-macros",
|
||||
"tracing",
|
||||
"windows-sys 0.61.2",
|
||||
|
|
@ -3031,7 +3041,7 @@ dependencies = [
|
|||
"log",
|
||||
"once_cell",
|
||||
"pin-project",
|
||||
"socket2 0.6.3",
|
||||
"socket2",
|
||||
"tokio",
|
||||
"windows-sys 0.60.2",
|
||||
]
|
||||
|
|
@ -3055,9 +3065,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "toml"
|
||||
version = "1.0.7+spec-1.1.0"
|
||||
version = "1.1.2+spec-1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dd28d57d8a6f6e458bc0b8784f8fdcc4b99a437936056fa122cb234f18656a96"
|
||||
checksum = "81f3d15e84cbcd896376e6730314d59fb5a87f31e4b038454184435cd57defee"
|
||||
dependencies = [
|
||||
"indexmap",
|
||||
"serde_core",
|
||||
|
|
@ -3070,27 +3080,27 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "toml_datetime"
|
||||
version = "1.0.1+spec-1.1.0"
|
||||
version = "1.1.1+spec-1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9b320e741db58cac564e26c607d3cc1fdc4a88fd36c879568c07856ed83ff3e9"
|
||||
checksum = "3165f65f62e28e0115a00b2ebdd37eb6f3b641855f9d636d3cd4103767159ad7"
|
||||
dependencies = [
|
||||
"serde_core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml_parser"
|
||||
version = "1.0.10+spec-1.1.0"
|
||||
version = "1.1.2+spec-1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7df25b4befd31c4816df190124375d5a20c6b6921e2cad937316de3fccd63420"
|
||||
checksum = "a2abe9b86193656635d2411dc43050282ca48aa31c2451210f4202550afb7526"
|
||||
dependencies = [
|
||||
"winnow",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml_writer"
|
||||
version = "1.0.7+spec-1.1.0"
|
||||
version = "1.1.1+spec-1.1.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f17aaa1c6e3dc22b1da4b6bba97d066e354c7945cac2f7852d4e4e7ca7a6b56d"
|
||||
checksum = "756daf9b1013ebe47a8776667b466417e2d4c5679d441c26230efd9ef78692db"
|
||||
|
||||
[[package]]
|
||||
name = "tower"
|
||||
|
|
@ -3148,6 +3158,18 @@ dependencies = [
|
|||
"tracing-core",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-appender"
|
||||
version = "0.2.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "786d480bce6247ab75f005b14ae1624ad978d3029d9113f0a22fa1ac773faeaf"
|
||||
dependencies = [
|
||||
"crossbeam-channel",
|
||||
"thiserror 2.0.18",
|
||||
"time",
|
||||
"tracing-subscriber",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "tracing-attributes"
|
||||
version = "0.1.31"
|
||||
|
|
@ -3275,9 +3297,9 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be"
|
|||
|
||||
[[package]]
|
||||
name = "uuid"
|
||||
version = "1.22.0"
|
||||
version = "1.23.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a68d3c8f01c0cfa54a75291d83601161799e4a89a39e0929f4b0354d88757a37"
|
||||
checksum = "5ac8b6f42ead25368cf5b098aeb3dc8a1a2c05a3eee8a9a1a68c640edbfc79d9"
|
||||
dependencies = [
|
||||
"getrandom 0.4.2",
|
||||
"js-sys",
|
||||
|
|
@ -3350,9 +3372,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "wasm-bindgen"
|
||||
version = "0.2.114"
|
||||
version = "0.2.117"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6532f9a5c1ece3798cb1c2cfdba640b9b3ba884f5db45973a6f442510a87d38e"
|
||||
checksum = "0551fc1bb415591e3372d0bc4780db7e587d84e2a7e79da121051c5c4b89d0b0"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"once_cell",
|
||||
|
|
@ -3363,23 +3385,19 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-futures"
|
||||
version = "0.4.64"
|
||||
version = "0.4.67"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e9c5522b3a28661442748e09d40924dfb9ca614b21c00d3fd135720e48b67db8"
|
||||
checksum = "03623de6905b7206edd0a75f69f747f134b7f0a2323392d664448bf2d3c5d87e"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"futures-util",
|
||||
"js-sys",
|
||||
"once_cell",
|
||||
"wasm-bindgen",
|
||||
"web-sys",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-macro"
|
||||
version = "0.2.114"
|
||||
version = "0.2.117"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "18a2d50fcf105fb33bb15f00e7a77b772945a2ee45dcf454961fd843e74c18e6"
|
||||
checksum = "7fbdf9a35adf44786aecd5ff89b4563a90325f9da0923236f6104e603c7e86be"
|
||||
dependencies = [
|
||||
"quote",
|
||||
"wasm-bindgen-macro-support",
|
||||
|
|
@ -3387,9 +3405,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-macro-support"
|
||||
version = "0.2.114"
|
||||
version = "0.2.117"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "03ce4caeaac547cdf713d280eda22a730824dd11e6b8c3ca9e42247b25c631e3"
|
||||
checksum = "dca9693ef2bab6d4e6707234500350d8dad079eb508dca05530c85dc3a529ff2"
|
||||
dependencies = [
|
||||
"bumpalo",
|
||||
"proc-macro2",
|
||||
|
|
@ -3400,9 +3418,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "wasm-bindgen-shared"
|
||||
version = "0.2.114"
|
||||
version = "0.2.117"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "75a326b8c223ee17883a4251907455a2431acc2791c98c26279376490c378c16"
|
||||
checksum = "39129a682a6d2d841b6c429d0c51e5cb0ed1a03829d8b3d1e69a011e62cb3d3b"
|
||||
dependencies = [
|
||||
"unicode-ident",
|
||||
]
|
||||
|
|
@ -3443,9 +3461,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "web-sys"
|
||||
version = "0.3.91"
|
||||
version = "0.3.94"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "854ba17bb104abfb26ba36da9729addc7ce7f06f5c0f90f3c391f8461cca21f9"
|
||||
checksum = "cd70027e39b12f0849461e08ffc50b9cd7688d942c1c8e3c7b22273236b4dd0a"
|
||||
dependencies = [
|
||||
"js-sys",
|
||||
"wasm-bindgen",
|
||||
|
|
@ -3557,6 +3575,17 @@ version = "0.2.1"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5"
|
||||
|
||||
[[package]]
|
||||
name = "windows-registry"
|
||||
version = "0.6.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "02752bf7fbdcce7f2a27a742f798510f3e5ad88dbe84871e5168e2120c3d5720"
|
||||
dependencies = [
|
||||
"windows-link",
|
||||
"windows-result",
|
||||
"windows-strings",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-result"
|
||||
version = "0.4.1"
|
||||
|
|
@ -3584,15 +3613,6 @@ dependencies = [
|
|||
"windows-targets 0.42.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-sys"
|
||||
version = "0.48.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9"
|
||||
dependencies = [
|
||||
"windows-targets 0.48.5",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-sys"
|
||||
version = "0.52.0"
|
||||
|
|
@ -3635,21 +3655,6 @@ dependencies = [
|
|||
"windows_x86_64_msvc 0.42.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-targets"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c"
|
||||
dependencies = [
|
||||
"windows_aarch64_gnullvm 0.48.5",
|
||||
"windows_aarch64_msvc 0.48.5",
|
||||
"windows_i686_gnu 0.48.5",
|
||||
"windows_i686_msvc 0.48.5",
|
||||
"windows_x86_64_gnu 0.48.5",
|
||||
"windows_x86_64_gnullvm 0.48.5",
|
||||
"windows_x86_64_msvc 0.48.5",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "windows-targets"
|
||||
version = "0.52.6"
|
||||
|
|
@ -3689,12 +3694,6 @@ version = "0.42.2"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_gnullvm"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_gnullvm"
|
||||
version = "0.52.6"
|
||||
|
|
@ -3713,12 +3712,6 @@ version = "0.42.2"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_msvc"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc"
|
||||
|
||||
[[package]]
|
||||
name = "windows_aarch64_msvc"
|
||||
version = "0.52.6"
|
||||
|
|
@ -3737,12 +3730,6 @@ version = "0.42.2"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnu"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_gnu"
|
||||
version = "0.52.6"
|
||||
|
|
@ -3773,12 +3760,6 @@ version = "0.42.2"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_msvc"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406"
|
||||
|
||||
[[package]]
|
||||
name = "windows_i686_msvc"
|
||||
version = "0.52.6"
|
||||
|
|
@ -3797,12 +3778,6 @@ version = "0.42.2"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnu"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnu"
|
||||
version = "0.52.6"
|
||||
|
|
@ -3821,12 +3796,6 @@ version = "0.42.2"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnullvm"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_gnullvm"
|
||||
version = "0.52.6"
|
||||
|
|
@ -3845,12 +3814,6 @@ version = "0.42.2"
|
|||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_msvc"
|
||||
version = "0.48.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538"
|
||||
|
||||
[[package]]
|
||||
name = "windows_x86_64_msvc"
|
||||
version = "0.52.6"
|
||||
|
|
@ -3865,19 +3828,9 @@ checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650"
|
|||
|
||||
[[package]]
|
||||
name = "winnow"
|
||||
version = "1.0.0"
|
||||
version = "1.0.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a90e88e4667264a994d34e6d1ab2d26d398dcdca8b7f52bec8668957517fc7d8"
|
||||
|
||||
[[package]]
|
||||
name = "winreg"
|
||||
version = "0.50.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1"
|
||||
dependencies = [
|
||||
"cfg-if",
|
||||
"windows-sys 0.48.0",
|
||||
]
|
||||
checksum = "09dac053f1cd375980747450bfc7250c264eaae0583872e845c0c7cd578872b5"
|
||||
|
||||
[[package]]
|
||||
name = "wit-bindgen"
|
||||
|
|
@ -4004,9 +3957,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "yoke"
|
||||
version = "0.8.1"
|
||||
version = "0.8.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954"
|
||||
checksum = "abe8c5fda708d9ca3df187cae8bfb9ceda00dd96231bed36e445a1a48e66f9ca"
|
||||
dependencies = [
|
||||
"stable_deref_trait",
|
||||
"yoke-derive",
|
||||
|
|
@ -4015,9 +3968,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "yoke-derive"
|
||||
version = "0.8.1"
|
||||
version = "0.8.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d"
|
||||
checksum = "de844c262c8848816172cef550288e7dc6c7b7814b4ee56b3e1553f275f1858e"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
|
|
@ -4027,18 +3980,18 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "zerocopy"
|
||||
version = "0.8.47"
|
||||
version = "0.8.48"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "efbb2a062be311f2ba113ce66f697a4dc589f85e78a4aea276200804cea0ed87"
|
||||
checksum = "eed437bf9d6692032087e337407a86f04cd8d6a16a37199ed57949d415bd68e9"
|
||||
dependencies = [
|
||||
"zerocopy-derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zerocopy-derive"
|
||||
version = "0.8.47"
|
||||
version = "0.8.48"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0e8bc7269b54418e7aeeef514aa68f8690b8c0489a06b0136e5f57c4c5ccab89"
|
||||
checksum = "70e3cd084b1788766f53af483dd21f93881ff30d7320490ec3ef7526d203bad4"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
|
|
@ -4047,18 +4000,18 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "zerofrom"
|
||||
version = "0.1.6"
|
||||
version = "0.1.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5"
|
||||
checksum = "69faa1f2a1ea75661980b013019ed6687ed0e83d069bc1114e2cc74c6c04c4df"
|
||||
dependencies = [
|
||||
"zerofrom-derive",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "zerofrom-derive"
|
||||
version = "0.1.6"
|
||||
version = "0.1.7"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502"
|
||||
checksum = "11532158c46691caf0f2593ea8358fed6bbf68a0315e80aae9bd41fbade684a1"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
|
|
@ -4088,9 +4041,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "zerotrie"
|
||||
version = "0.2.3"
|
||||
version = "0.2.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851"
|
||||
checksum = "0f9152d31db0792fa83f70fb2f83148effb5c1f5b8c7686c3459e361d9bc20bf"
|
||||
dependencies = [
|
||||
"displaydoc",
|
||||
"yoke",
|
||||
|
|
@ -4099,9 +4052,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "zerovec"
|
||||
version = "0.11.5"
|
||||
version = "0.11.6"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002"
|
||||
checksum = "90f911cbc359ab6af17377d242225f4d75119aec87ea711a880987b18cd7b239"
|
||||
dependencies = [
|
||||
"yoke",
|
||||
"zerofrom",
|
||||
|
|
@ -4110,9 +4063,9 @@ dependencies = [
|
|||
|
||||
[[package]]
|
||||
name = "zerovec-derive"
|
||||
version = "0.11.2"
|
||||
version = "0.11.3"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3"
|
||||
checksum = "625dc425cab0dca6dc3c3319506e6593dcb08a9f387ea3b284dbd52a92c40555"
|
||||
dependencies = [
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
|
|
|
|||
28
Cargo.toml
28
Cargo.toml
|
|
@ -1,8 +1,11 @@
|
|||
[package]
|
||||
name = "telemt"
|
||||
version = "3.3.29"
|
||||
version = "3.3.38"
|
||||
edition = "2024"
|
||||
|
||||
[features]
|
||||
redteam_offline_expected_fail = []
|
||||
|
||||
[dependencies]
|
||||
# C
|
||||
libc = "0.2"
|
||||
|
|
@ -27,7 +30,13 @@ static_assertions = "1.1"
|
|||
|
||||
# Network
|
||||
socket2 = { version = "0.6", features = ["all"] }
|
||||
nix = { version = "0.31", default-features = false, features = ["net", "fs"] }
|
||||
nix = { version = "0.31", default-features = false, features = [
|
||||
"net",
|
||||
"user",
|
||||
"process",
|
||||
"fs",
|
||||
"signal",
|
||||
] }
|
||||
shadowsocks = { version = "1.24", features = ["aead-cipher-2022"] }
|
||||
|
||||
# Serialization
|
||||
|
|
@ -41,6 +50,7 @@ bytes = "1.9"
|
|||
thiserror = "2.0"
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
tracing-appender = "0.2"
|
||||
parking_lot = "0.12"
|
||||
dashmap = "6.1"
|
||||
arc-swap = "1.7"
|
||||
|
|
@ -65,8 +75,14 @@ hyper = { version = "1", features = ["server", "http1"] }
|
|||
hyper-util = { version = "0.1", features = ["tokio", "server-auto"] }
|
||||
http-body-util = "0.1"
|
||||
httpdate = "1.0"
|
||||
tokio-rustls = { version = "0.26", default-features = false, features = ["tls12"] }
|
||||
rustls = { version = "0.23", default-features = false, features = ["std", "tls12", "ring"] }
|
||||
tokio-rustls = { version = "0.26", default-features = false, features = [
|
||||
"tls12",
|
||||
] }
|
||||
rustls = { version = "0.23", default-features = false, features = [
|
||||
"std",
|
||||
"tls12",
|
||||
"ring",
|
||||
] }
|
||||
webpki-roots = "1.0"
|
||||
|
||||
[dev-dependencies]
|
||||
|
|
@ -80,4 +96,6 @@ name = "crypto_bench"
|
|||
harness = false
|
||||
|
||||
[profile.release]
|
||||
lto = "thin"
|
||||
lto = "fat"
|
||||
codegen-units = 1
|
||||
|
||||
|
|
|
|||
123
Dockerfile
123
Dockerfile
|
|
@ -1,97 +1,98 @@
|
|||
# syntax=docker/dockerfile:1
|
||||
|
||||
# ==========================
|
||||
# Stage 1: Build
|
||||
# ==========================
|
||||
FROM rust:1.88-slim-bookworm AS builder
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
pkg-config \
|
||||
ca-certificates \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /build
|
||||
|
||||
# Depcache
|
||||
COPY Cargo.toml Cargo.lock* ./
|
||||
RUN mkdir src && echo 'fn main() {}' > src/main.rs && \
|
||||
cargo build --release 2>/dev/null || true && \
|
||||
rm -rf src
|
||||
|
||||
# Build
|
||||
COPY . .
|
||||
RUN cargo build --release && strip target/release/telemt
|
||||
ARG TELEMT_REPOSITORY=telemt/telemt
|
||||
ARG TELEMT_VERSION=latest
|
||||
|
||||
# ==========================
|
||||
# Stage 2: Compress (strip + UPX)
|
||||
# Minimal Image
|
||||
# ==========================
|
||||
FROM debian:12-slim AS minimal
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
upx \
|
||||
binutils \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
ARG TARGETARCH
|
||||
ARG TELEMT_REPOSITORY
|
||||
ARG TELEMT_VERSION
|
||||
|
||||
COPY --from=builder /build/target/release/telemt /telemt
|
||||
RUN set -eux; \
|
||||
apt-get update; \
|
||||
apt-get install -y --no-install-recommends \
|
||||
binutils \
|
||||
ca-certificates \
|
||||
curl \
|
||||
tar; \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN strip /telemt || true
|
||||
RUN upx --best --lzma /telemt || true
|
||||
RUN set -eux; \
|
||||
case "${TARGETARCH}" in \
|
||||
amd64) ASSET="telemt-x86_64-linux-musl.tar.gz" ;; \
|
||||
arm64) ASSET="telemt-aarch64-linux-musl.tar.gz" ;; \
|
||||
*) echo "Unsupported TARGETARCH: ${TARGETARCH}" >&2; exit 1 ;; \
|
||||
esac; \
|
||||
VERSION="${TELEMT_VERSION#refs/tags/}"; \
|
||||
if [ -z "${VERSION}" ] || [ "${VERSION}" = "latest" ]; then \
|
||||
BASE_URL="https://github.com/${TELEMT_REPOSITORY}/releases/latest/download"; \
|
||||
else \
|
||||
BASE_URL="https://github.com/${TELEMT_REPOSITORY}/releases/download/${VERSION}"; \
|
||||
fi; \
|
||||
curl -fL \
|
||||
--retry 5 \
|
||||
--retry-delay 3 \
|
||||
--connect-timeout 10 \
|
||||
--max-time 120 \
|
||||
-o "/tmp/${ASSET}" \
|
||||
"${BASE_URL}/${ASSET}"; \
|
||||
curl -fL \
|
||||
--retry 5 \
|
||||
--retry-delay 3 \
|
||||
--connect-timeout 10 \
|
||||
--max-time 120 \
|
||||
-o "/tmp/${ASSET}.sha256" \
|
||||
"${BASE_URL}/${ASSET}.sha256"; \
|
||||
cd /tmp; \
|
||||
sha256sum -c "${ASSET}.sha256"; \
|
||||
tar -xzf "${ASSET}" -C /tmp; \
|
||||
test -f /tmp/telemt; \
|
||||
install -m 0755 /tmp/telemt /telemt; \
|
||||
strip --strip-unneeded /telemt || true; \
|
||||
rm -f "/tmp/${ASSET}" "/tmp/${ASSET}.sha256" /tmp/telemt
|
||||
|
||||
# ==========================
|
||||
# Stage 3: Debug base
|
||||
# Debug Image
|
||||
# ==========================
|
||||
FROM debian:12-slim AS debug-base
|
||||
FROM debian:12-slim AS debug
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
ca-certificates \
|
||||
tzdata \
|
||||
curl \
|
||||
iproute2 \
|
||||
busybox \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# ==========================
|
||||
# Stage 4: Debug image
|
||||
# ==========================
|
||||
FROM debug-base AS debug
|
||||
RUN set -eux; \
|
||||
apt-get update; \
|
||||
apt-get install -y --no-install-recommends \
|
||||
ca-certificates \
|
||||
tzdata \
|
||||
curl \
|
||||
iproute2 \
|
||||
busybox; \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY --from=minimal /telemt /app/telemt
|
||||
COPY config.toml /app/config.toml
|
||||
|
||||
USER root
|
||||
|
||||
EXPOSE 443
|
||||
EXPOSE 9090
|
||||
EXPOSE 9091
|
||||
EXPOSE 443 9090 9091
|
||||
|
||||
ENTRYPOINT ["/app/telemt"]
|
||||
CMD ["config.toml"]
|
||||
|
||||
# ==========================
|
||||
# Stage 5: Production (distroless)
|
||||
# Production Distroless on MUSL
|
||||
# ==========================
|
||||
FROM gcr.io/distroless/base-debian12 AS prod
|
||||
FROM gcr.io/distroless/static-debian12 AS prod
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY --from=minimal /telemt /app/telemt
|
||||
COPY config.toml /app/config.toml
|
||||
|
||||
# TLS + timezone + shell
|
||||
COPY --from=debug-base /etc/ssl/certs /etc/ssl/certs
|
||||
COPY --from=debug-base /usr/share/zoneinfo /usr/share/zoneinfo
|
||||
COPY --from=debug-base /bin/busybox /bin/busybox
|
||||
|
||||
RUN ["/bin/busybox", "--install", "-s", "/bin"]
|
||||
|
||||
# distroless user
|
||||
USER nonroot:nonroot
|
||||
|
||||
EXPOSE 443
|
||||
EXPOSE 9090
|
||||
EXPOSE 9091
|
||||
EXPOSE 443 9090 9091
|
||||
|
||||
ENTRYPOINT ["/app/telemt"]
|
||||
CMD ["config.toml"]
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
67
README.md
67
README.md
|
|
@ -2,6 +2,11 @@
|
|||
|
||||
***Löst Probleme, bevor andere überhaupt wissen, dass sie existieren*** / ***It solves problems before others even realize they exist***
|
||||
|
||||
### [**Telemt Chat in Telegram**](https://t.me/telemtrs)
|
||||
#### Fixed TLS ClientHello is now available in Telegram Desktop starting from version 6.7.2: to work with EE-MTProxy, please update your client;
|
||||
#### Fixed TLS ClientHello for Telegram Android Client is available in [our chat](https://t.me/telemtrs/30234/36441); official releases for Android and iOS are "work in progress";
|
||||
|
||||
|
||||
**Telemt** is a fast, secure, and feature-rich server written in Rust: it fully implements the official Telegram proxy algo and adds many production-ready improvements such as:
|
||||
- [ME Pool + Reader/Writer + Registry + Refill + Adaptive Floor + Trio-State + Generation Lifecycle](https://github.com/telemt/telemt/blob/main/docs/model/MODEL.en.md)
|
||||
- [Full-covered API w/ management](https://github.com/telemt/telemt/blob/main/docs/API.md)
|
||||
|
|
@ -9,60 +14,6 @@
|
|||
- Prometheus-format Metrics
|
||||
- TLS-Fronting and TCP-Splicing for masking from "prying" eyes
|
||||
|
||||
[**Telemt Chat in Telegram**](https://t.me/telemtrs)
|
||||
|
||||
## NEWS and EMERGENCY
|
||||
### ✈️ Telemt 3 is released!
|
||||
<table>
|
||||
<tr>
|
||||
<td width="50%" valign="top">
|
||||
|
||||
### 🇷🇺 RU
|
||||
|
||||
#### О релизах
|
||||
|
||||
[3.3.27](https://github.com/telemt/telemt/releases/tag/3.3.27) даёт баланс стабильности и передового функционала, а так же последние исправления по безопасности и багам
|
||||
|
||||
Будем рады вашему фидбеку и предложениям по улучшению — особенно в части **API**, **статистики**, **UX**
|
||||
|
||||
---
|
||||
|
||||
Если у вас есть компетенции в:
|
||||
|
||||
- Асинхронных сетевых приложениях
|
||||
- Анализе трафика
|
||||
- Реверс-инжиниринге
|
||||
- Сетевых расследованиях
|
||||
|
||||
Мы открыты к архитектурным предложениям, идеям и pull requests
|
||||
</td>
|
||||
<td width="50%" valign="top">
|
||||
|
||||
### 🇬🇧 EN
|
||||
|
||||
#### About releases
|
||||
|
||||
[3.3.27](https://github.com/telemt/telemt/releases/tag/3.3.27) provides a balance of stability and advanced functionality, as well as the latest security and bug fixes
|
||||
|
||||
We are looking forward to your feedback and improvement proposals — especially regarding **API**, **statistics**, **UX**
|
||||
|
||||
---
|
||||
|
||||
If you have expertise in:
|
||||
|
||||
- Asynchronous network applications
|
||||
- Traffic analysis
|
||||
- Reverse engineering
|
||||
- Network forensics
|
||||
|
||||
We welcome ideas, architectural feedback, and pull requests.
|
||||
</td>
|
||||
</tr>
|
||||
</table>
|
||||
|
||||
# Features
|
||||
💥 The configuration structure has changed since version 1.1.0.0. change it in your environment!
|
||||
|
||||
⚓ Our implementation of **TLS-fronting** is one of the most deeply debugged, focused, advanced and *almost* **"behaviorally consistent to real"**: we are confident we have it right - [see evidence on our validation and traces](#recognizability-for-dpi-and-crawler)
|
||||
|
||||
⚓ Our ***Middle-End Pool*** is fastest by design in standard scenarios, compared to other implementations of connecting to the Middle-End Proxy: non dramatically, but usual
|
||||
|
|
@ -103,8 +54,12 @@ We welcome ideas, architectural feedback, and pull requests.
|
|||
- [FAQ EN](docs/FAQ.en.md)
|
||||
|
||||
### Recognizability for DPI and crawler
|
||||
Since version 1.1.0.0, we have debugged masking perfectly: for all clients without "presenting" a key,
|
||||
we transparently direct traffic to the target host!
|
||||
|
||||
On April 1, 2026, we became aware of a method for detecting MTProxy Fake-TLS,
|
||||
based on the ECH extension and the ordering of cipher suites,
|
||||
as well as an overall unique JA3/JA4 fingerprint
|
||||
that does not occur in modern browsers:
|
||||
we have already submitted initial changes to the Telegram Desktop developers and are working on updates for other clients.
|
||||
|
||||
- We consider this a breakthrough aspect, which has no stable analogues today
|
||||
- Based on this: if `telemt` configured correctly, **TLS mode is completely identical to real-life handshake + communication** with a specified host
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -1,107 +1,122 @@
|
|||
## How to set up "proxy sponsor" channel and statistics via @MTProxybot bot
|
||||
## How to set up a "proxy sponsor" channel and statistics via the @MTProxybot
|
||||
|
||||
1. Go to @MTProxybot bot.
|
||||
2. Enter the command `/newproxy`
|
||||
3. Send the server IP and port. For example: 1.2.3.4:443
|
||||
4. Open the config `nano /etc/telemt.toml`.
|
||||
5. Copy and send the user secret from the [access.users] section to the bot.
|
||||
6. Copy the tag received from the bot. For example 1234567890abcdef1234567890abcdef.
|
||||
1. Go to the @MTProxybot.
|
||||
2. Enter the `/newproxy` command.
|
||||
3. Send your server's IP address and port. For example: `1.2.3.4:443`.
|
||||
4. Open the configuration file: `nano /etc/telemt/telemt.toml`.
|
||||
5. Copy and send the user secret from the `[access.users]` section to the bot.
|
||||
6. Copy the tag provided by the bot. For example: `1234567890abcdef1234567890abcdef`.
|
||||
> [!WARNING]
|
||||
> The link provided by the bot will not work. Do not copy or use it!
|
||||
7. Uncomment the ad_tag parameter and enter the tag received from the bot.
|
||||
8. Uncomment/add the parameter `use_middle_proxy = true`.
|
||||
7. Uncomment the `ad_tag` parameter and enter the tag received from the bot.
|
||||
8. Uncomment or add the `use_middle_proxy = true` parameter.
|
||||
|
||||
Config example:
|
||||
Configuration example:
|
||||
```toml
|
||||
[general]
|
||||
ad_tag = "1234567890abcdef1234567890abcdef"
|
||||
use_middle_proxy = true
|
||||
```
|
||||
9. Save the config. Ctrl+S -> Ctrl+X.
|
||||
10. Restart telemt `systemctl restart telemt`.
|
||||
11. In the bot, send the command /myproxies and select the added server.
|
||||
9. Save the changes (in nano: Ctrl+S -> Ctrl+X).
|
||||
10. Restart the telemt service: `systemctl restart telemt`.
|
||||
11. Send the `/myproxies` command to the bot and select the added server.
|
||||
12. Click the "Set promotion" button.
|
||||
13. Send a **public link** to the channel. Private channels cannot be added!
|
||||
14. Wait approximately 1 hour for the information to update on Telegram servers.
|
||||
14. Wait for about 1 hour for the information to update on Telegram servers.
|
||||
> [!WARNING]
|
||||
> You will not see the "proxy sponsor" if you are already subscribed to the channel.
|
||||
> The sponsored channel will not be displayed to you if you are already subscribed to it.
|
||||
|
||||
**You can also set up different channels for different users.**
|
||||
**You can also configure different sponsored channels for different users:**
|
||||
```toml
|
||||
[access.user_ad_tags]
|
||||
hello = "ad_tag"
|
||||
hello2 = "ad_tag2"
|
||||
```
|
||||
|
||||
## How many people can use 1 link
|
||||
## Why do you need a middle proxy (ME)
|
||||
https://github.com/telemt/telemt/discussions/167
|
||||
|
||||
By default, 1 link can be used by any number of people.
|
||||
You can limit the number of IPs using the proxy.
|
||||
|
||||
## How many people can use one link
|
||||
|
||||
By default, an unlimited number of people can use a single link.
|
||||
However, you can limit the number of unique IP addresses for each user:
|
||||
```toml
|
||||
[access.user_max_unique_ips]
|
||||
hello = 1
|
||||
```
|
||||
This parameter limits how many unique IPs can use 1 link simultaneously. If one user disconnects, a second user can connect. Also, multiple users can sit behind the same IP.
|
||||
This parameter sets the maximum number of unique IP addresses from which a single link can be used simultaneously. If the first user disconnects, a second one can connect. At the same time, multiple users can connect from a single IP address simultaneously (for example, devices on the same Wi-Fi network).
|
||||
|
||||
## How to create multiple different links
|
||||
|
||||
1. Generate the required number of secrets `openssl rand -hex 16`
|
||||
2. Open the config `nano /etc/telemt.toml`
|
||||
3. Add new users.
|
||||
1. Generate the required number of secrets using the command: `openssl rand -hex 16`.
|
||||
2. Open the configuration file: `nano /etc/telemt/telemt.toml`.
|
||||
3. Add new users to the `[access.users]` section:
|
||||
```toml
|
||||
[access.users]
|
||||
user1 = "00000000000000000000000000000001"
|
||||
user2 = "00000000000000000000000000000002"
|
||||
user3 = "00000000000000000000000000000003"
|
||||
```
|
||||
4. Save the config. Ctrl+S -> Ctrl+X. You don't need to restart telemt.
|
||||
5. Get the links via
|
||||
4. Save the configuration (Ctrl+S -> Ctrl+X). There is no need to restart the telemt service.
|
||||
5. Get the ready-to-use links using the command:
|
||||
```bash
|
||||
curl -s http://127.0.0.1:9091/v1/users | jq
|
||||
```
|
||||
|
||||
## "Unknown TLS SNI" error
|
||||
Usually, this error occurs if you have changed the `tls_domain` parameter, but users continue to connect using old links with the previous domain.
|
||||
|
||||
If you need to allow connections with any domains (ignoring SNI mismatches), add the following parameters:
|
||||
```toml
|
||||
[censorship]
|
||||
unknown_sni_action = "mask"
|
||||
```
|
||||
|
||||
## How to view metrics
|
||||
|
||||
1. Open the config `nano /etc/telemt.toml`
|
||||
2. Add the following parameters
|
||||
1. Open the configuration file: `nano /etc/telemt/telemt.toml`.
|
||||
2. Add the following parameters:
|
||||
```toml
|
||||
[server]
|
||||
metrics_port = 9090
|
||||
metrics_whitelist = ["127.0.0.1/32", "::1/128", "0.0.0.0/0"]
|
||||
```
|
||||
3. Save the config. Ctrl+S -> Ctrl+X.
|
||||
4. Metrics are available at SERVER_IP:9090/metrics.
|
||||
3. Save the changes (Ctrl+S -> Ctrl+X).
|
||||
4. After that, metrics will be available at: `SERVER_IP:9090/metrics`.
|
||||
> [!WARNING]
|
||||
> "0.0.0.0/0" in metrics_whitelist opens access from any IP. Replace with your own IP. For example "1.2.3.4"
|
||||
> The value `"0.0.0.0/0"` in `metrics_whitelist` opens access to metrics from any IP address. It is recommended to replace it with your personal IP, for example: `"1.2.3.4/32"`.
|
||||
|
||||
## Additional parameters
|
||||
|
||||
### Domain in link instead of IP
|
||||
To specify a domain in the links, add to the `[general.links]` section of the config file.
|
||||
### Domain in the link instead of IP
|
||||
To display a domain instead of an IP address in the connection links, add the following lines to the configuration file:
|
||||
```toml
|
||||
[general.links]
|
||||
public_host = "proxy.example.com"
|
||||
```
|
||||
|
||||
### Server connection limit
|
||||
Limits the total number of open connections to the server:
|
||||
### Total server connection limit
|
||||
This parameter limits the total number of active connections to the server:
|
||||
```toml
|
||||
[server]
|
||||
max_connections = 10000 # 0 - unlimited, 10000 - default
|
||||
```
|
||||
|
||||
### Upstream Manager
|
||||
To specify an upstream, add to the `[[upstreams]]` section of the config.toml file:
|
||||
#### Binding to IP
|
||||
To configure outbound connections (upstreams), add the corresponding parameters to the `[[upstreams]]` section of the configuration file:
|
||||
|
||||
#### Binding to an outbound IP address
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "direct"
|
||||
weight = 1
|
||||
enabled = true
|
||||
interface = "192.168.1.100" # Change to your outgoing IP
|
||||
interface = "192.168.1.100" # Replace with your outbound IP
|
||||
```
|
||||
#### SOCKS4/5 as Upstream
|
||||
- Without authentication:
|
||||
|
||||
#### Using SOCKS4/5 as an Upstream
|
||||
- Without authorization:
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "socks5" # Specify SOCKS4 or SOCKS5
|
||||
|
|
@ -110,7 +125,7 @@ weight = 1 # Set Weight for Scenarios
|
|||
enabled = true
|
||||
```
|
||||
|
||||
- With authentication:
|
||||
- With authorization:
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "socks5" # Specify SOCKS4 or SOCKS5
|
||||
|
|
@ -121,8 +136,8 @@ weight = 1 # Set Weight for Scenarios
|
|||
enabled = true
|
||||
```
|
||||
|
||||
#### Shadowsocks as Upstream
|
||||
Requires `use_middle_proxy = false`.
|
||||
#### Using Shadowsocks as an Upstream
|
||||
For this method to work, the `use_middle_proxy = false` parameter must be set.
|
||||
|
||||
```toml
|
||||
[general]
|
||||
|
|
|
|||
|
|
@ -1,106 +1,121 @@
|
|||
## Как настроить канал "спонсор прокси" и статистику через бота @MTProxybot
|
||||
|
||||
1. Зайти в бота @MTProxybot.
|
||||
2. Ввести команду `/newproxy`
|
||||
3. Отправить IP и порт сервера. Например: 1.2.3.4:443
|
||||
4. Открыть конфиг `nano /etc/telemt.toml`.
|
||||
5. Скопировать и отправить боту секрет пользователя из раздела [access.users].
|
||||
6. Скопировать полученный tag у бота. Например 1234567890abcdef1234567890abcdef.
|
||||
1. Зайдите в бота @MTProxybot.
|
||||
2. Введите команду `/newproxy`.
|
||||
3. Отправьте IP-адрес и порт сервера. Например: `1.2.3.4:443`.
|
||||
4. Откройте файл конфигурации: `nano /etc/telemt/telemt.toml`.
|
||||
5. Скопируйте и отправьте боту секрет пользователя из раздела `[access.users]`.
|
||||
6. Скопируйте тег (tag), который выдаст бот. Например: `1234567890abcdef1234567890abcdef`.
|
||||
> [!WARNING]
|
||||
> Ссылка, которую выдает бот, не будет работать. Не копируйте и не используйте её!
|
||||
7. Раскомментировать параметр ad_tag и вписать tag, полученный у бота.
|
||||
8. Раскомментировать/добавить параметр use_middle_proxy = true.
|
||||
> Ссылка, которую выдает бот, работать не будет. Не копируйте и не используйте её!
|
||||
7. Раскомментируйте параметр `ad_tag` и впишите тег, полученный от бота.
|
||||
8. Раскомментируйте или добавьте параметр `use_middle_proxy = true`.
|
||||
|
||||
Пример конфига:
|
||||
Пример конфигурации:
|
||||
```toml
|
||||
[general]
|
||||
ad_tag = "1234567890abcdef1234567890abcdef"
|
||||
use_middle_proxy = true
|
||||
```
|
||||
9. Сохранить конфиг. Ctrl+S -> Ctrl+X.
|
||||
10. Перезапустить telemt `systemctl restart telemt`.
|
||||
11. В боте отправить команду /myproxies и выбрать добавленный сервер.
|
||||
12. Нажать кнопку "Set promotion".
|
||||
13. Отправить **публичную ссылку** на канал. Приватный канал добавить нельзя!
|
||||
14. Подождать примерно 1 час, пока информация обновится на серверах Telegram.
|
||||
9. Сохраните изменения (в nano: Ctrl+S -> Ctrl+X).
|
||||
10. Перезапустите службу telemt: `systemctl restart telemt`.
|
||||
11. В боте отправьте команду `/myproxies` и выберите добавленный сервер.
|
||||
12. Нажмите кнопку «Set promotion».
|
||||
13. Отправьте **публичную ссылку** на канал. Приватные каналы добавлять нельзя!
|
||||
14. Подождите примерно 1 час, пока информация обновится на серверах Telegram.
|
||||
> [!WARNING]
|
||||
> У вас не будет отображаться "спонсор прокси" если вы уже подписаны на канал.
|
||||
> Спонсорский канал не будет у вас отображаться, если вы уже на него подписаны.
|
||||
|
||||
**Также вы можете настроить разные каналы для разных пользователей.**
|
||||
**Вы также можете настроить разные спонсорские каналы для разных пользователей:**
|
||||
```toml
|
||||
[access.user_ad_tags]
|
||||
hello = "ad_tag"
|
||||
hello2 = "ad_tag2"
|
||||
```
|
||||
|
||||
## Сколько человек может пользоваться 1 ссылкой
|
||||
## Зачем нужен middle proxy (ME)
|
||||
https://github.com/telemt/telemt/discussions/167
|
||||
|
||||
По умолчанию 1 ссылкой может пользоваться сколько угодно человек.
|
||||
Вы можете ограничить число IP, использующих прокси.
|
||||
|
||||
## Сколько человек может пользоваться одной ссылкой
|
||||
|
||||
По умолчанию одной ссылкой может пользоваться неограниченное число людей.
|
||||
Однако вы можете ограничить количество уникальных IP-адресов для каждого пользователя:
|
||||
```toml
|
||||
[access.user_max_unique_ips]
|
||||
hello = 1
|
||||
```
|
||||
Этот параметр ограничивает, сколько уникальных IP может использовать 1 ссылку одновременно. Если один пользователь отключится, второй сможет подключиться. Также с одного IP может сидеть несколько пользователей.
|
||||
Этот параметр задает максимальное количество уникальных IP-адресов, с которых можно одновременно использовать одну ссылку. Если первый пользователь отключится, второй сможет подключиться. При этом с одного IP-адреса могут подключаться несколько пользователей одновременно (например, устройства в одной Wi-Fi сети).
|
||||
|
||||
## Как сделать несколько разных ссылок
|
||||
## Как создать несколько разных ссылок
|
||||
|
||||
1. Сгенерируйте нужное число секретов `openssl rand -hex 16`
|
||||
2. Открыть конфиг `nano /etc/telemt.toml`
|
||||
3. Добавить новых пользователей.
|
||||
1. Сгенерируйте необходимое количество секретов с помощью команды: `openssl rand -hex 16`.
|
||||
2. Откройте файл конфигурации: `nano /etc/telemt/telemt.toml`.
|
||||
3. Добавьте новых пользователей в секцию `[access.users]`:
|
||||
```toml
|
||||
[access.users]
|
||||
user1 = "00000000000000000000000000000001"
|
||||
user2 = "00000000000000000000000000000002"
|
||||
user3 = "00000000000000000000000000000003"
|
||||
```
|
||||
4. Сохранить конфиг. Ctrl+S -> Ctrl+X. Перезапускать telemt не нужно.
|
||||
5. Получить ссылки через
|
||||
4. Сохраните конфигурацию (Ctrl+S -> Ctrl+X). Перезапускать службу telemt не нужно.
|
||||
5. Получите готовые ссылки с помощью команды:
|
||||
```bash
|
||||
curl -s http://127.0.0.1:9091/v1/users | jq
|
||||
```
|
||||
|
||||
## Ошибка "Unknown TLS SNI"
|
||||
Обычно эта ошибка возникает, если вы изменили параметр `tls_domain`, но пользователи продолжают подключаться по старым ссылкам с прежним доменом.
|
||||
|
||||
Если необходимо разрешить подключение с любыми доменами (игнорируя несовпадения SNI), добавьте следующие параметры:
|
||||
```toml
|
||||
[censorship]
|
||||
unknown_sni_action = "mask"
|
||||
```
|
||||
|
||||
## Как посмотреть метрики
|
||||
|
||||
1. Открыть конфиг `nano /etc/telemt.toml`
|
||||
2. Добавить следующие параметры
|
||||
1. Откройте файл конфигурации: `nano /etc/telemt/telemt.toml`.
|
||||
2. Добавьте следующие параметры:
|
||||
```toml
|
||||
[server]
|
||||
metrics_port = 9090
|
||||
metrics_whitelist = ["127.0.0.1/32", "::1/128", "0.0.0.0/0"]
|
||||
```
|
||||
3. Сохранить конфиг. Ctrl+S -> Ctrl+X.
|
||||
4. Метрики доступны по адресу SERVER_IP:9090/metrics.
|
||||
3. Сохраните изменения (Ctrl+S -> Ctrl+X).
|
||||
4. После этого метрики будут доступны по адресу: `SERVER_IP:9090/metrics`.
|
||||
> [!WARNING]
|
||||
> "0.0.0.0/0" в metrics_whitelist открывает доступ с любого IP. Замените на свой ip. Например "1.2.3.4"
|
||||
> Значение `"0.0.0.0/0"` в `metrics_whitelist` открывает доступ к метрикам с любого IP-адреса. Рекомендуется заменить его на ваш личный IP, например: `"1.2.3.4/32"`.
|
||||
|
||||
## Дополнительные параметры
|
||||
|
||||
### Домен в ссылке вместо IP
|
||||
Чтобы указать домен в ссылках, добавьте в секцию `[general.links]` файла config.
|
||||
Чтобы в ссылках для подключения отображался домен вместо IP-адреса, добавьте следующие строки в файл конфигурации:
|
||||
```toml
|
||||
[general.links]
|
||||
public_host = "proxy.example.com"
|
||||
```
|
||||
|
||||
### Общий лимит подключений к серверу
|
||||
Ограничивает общее число открытых подключений к серверу:
|
||||
Этот параметр ограничивает общее количество активных подключений к серверу:
|
||||
```toml
|
||||
[server]
|
||||
max_connections = 10000 # 0 - unlimited, 10000 - default
|
||||
max_connections = 10000 # 0 - без ограничений, 10000 - по умолчанию
|
||||
```
|
||||
|
||||
### Upstream Manager
|
||||
Чтобы указать апстрим, добавьте в секцию `[[upstreams]]` файла config.toml:
|
||||
#### Привязка к IP
|
||||
Для настройки исходящих подключений (апстримов) добавьте соответствующие параметры в секцию `[[upstreams]]` файла конфигурации:
|
||||
|
||||
#### Привязка к исходящему IP-адресу
|
||||
```toml
|
||||
[[upstreams]]
|
||||
type = "direct"
|
||||
weight = 1
|
||||
enabled = true
|
||||
interface = "192.168.1.100" # Change to your outgoing IP
|
||||
interface = "192.168.1.100" # Замените на ваш исходящий IP
|
||||
```
|
||||
#### SOCKS4/5 как Upstream
|
||||
|
||||
#### Использование SOCKS4/5 в качестве Upstream
|
||||
- Без авторизации:
|
||||
```toml
|
||||
[[upstreams]]
|
||||
|
|
@ -121,8 +136,8 @@ weight = 1 # Set Weight for Scenarios
|
|||
enabled = true
|
||||
```
|
||||
|
||||
#### Shadowsocks как Upstream
|
||||
Требует `use_middle_proxy = false`.
|
||||
#### Использование Shadowsocks в качестве Upstream
|
||||
Для работы этого метода требуется установить параметр `use_middle_proxy = false`.
|
||||
|
||||
```toml
|
||||
[general]
|
||||
|
|
|
|||
|
|
@ -27,12 +27,12 @@ chmod +x /bin/telemt
|
|||
|
||||
**0. Check port and generate secrets**
|
||||
|
||||
The port you have selected for use should be MISSING from the list, when:
|
||||
The port you have selected for use should not be in the list:
|
||||
```bash
|
||||
netstat -lnp
|
||||
```
|
||||
|
||||
Generate 16 bytes/32 characters HEX with OpenSSL or another way:
|
||||
Generate 16 bytes/32 characters in HEX format with OpenSSL or another way:
|
||||
```bash
|
||||
openssl rand -hex 16
|
||||
```
|
||||
|
|
@ -50,7 +50,7 @@ Save the obtained result somewhere. You will need it later!
|
|||
|
||||
**1. Place your config to /etc/telemt/telemt.toml**
|
||||
|
||||
Create config directory:
|
||||
Create the config directory:
|
||||
```bash
|
||||
mkdir /etc/telemt
|
||||
```
|
||||
|
|
@ -59,7 +59,7 @@ Open nano
|
|||
```bash
|
||||
nano /etc/telemt/telemt.toml
|
||||
```
|
||||
paste your config
|
||||
Insert your configuration:
|
||||
|
||||
```toml
|
||||
# === General Settings ===
|
||||
|
|
@ -93,8 +93,9 @@ hello = "00000000000000000000000000000000"
|
|||
then Ctrl+S -> Ctrl+X to save
|
||||
|
||||
> [!WARNING]
|
||||
> Replace the value of the hello parameter with the value you obtained in step 0.
|
||||
> Replace the value of the tls_domain parameter with another website.
|
||||
> Replace the value of the hello parameter with the value you obtained in step 0.
|
||||
> Additionally, change the value of the tls_domain parameter to a different website.
|
||||
> Changing the tls_domain parameter will break all links that use the old domain!
|
||||
|
||||
---
|
||||
|
||||
|
|
@ -105,14 +106,14 @@ useradd -d /opt/telemt -m -r -U telemt
|
|||
chown -R telemt:telemt /etc/telemt
|
||||
```
|
||||
|
||||
**3. Create service on /etc/systemd/system/telemt.service**
|
||||
**3. Create service in /etc/systemd/system/telemt.service**
|
||||
|
||||
Open nano
|
||||
```bash
|
||||
nano /etc/systemd/system/telemt.service
|
||||
```
|
||||
|
||||
paste this Systemd Module
|
||||
Insert this Systemd module:
|
||||
```bash
|
||||
[Unit]
|
||||
Description=Telemt
|
||||
|
|
@ -147,13 +148,16 @@ systemctl daemon-reload
|
|||
|
||||
**6.** For automatic startup at system boot, enter `systemctl enable telemt`
|
||||
|
||||
**7.** To get the link(s), enter
|
||||
**7.** To get the link(s), enter:
|
||||
```bash
|
||||
curl -s http://127.0.0.1:9091/v1/users | jq
|
||||
```
|
||||
|
||||
> Any number of people can use one link.
|
||||
|
||||
> [!WARNING]
|
||||
> Only the command from step 7 can provide a working link. Do not try to create it yourself or copy it from anywhere if you are not sure what you are doing!
|
||||
|
||||
---
|
||||
|
||||
# Telemt via Docker Compose
|
||||
|
|
|
|||
|
|
@ -95,6 +95,7 @@ hello = "00000000000000000000000000000000"
|
|||
> [!WARNING]
|
||||
> Замените значение параметра hello на значение, которое вы получили в пункте 0.
|
||||
> Так же замените значение параметра tls_domain на другой сайт.
|
||||
> Изменение параметра tls_domain сделает нерабочими все ссылки, использующие старый домен!
|
||||
|
||||
---
|
||||
|
||||
|
|
|
|||
|
|
@ -0,0 +1,287 @@
|
|||
<img src="https://gist.githubusercontent.com/avbor/1f8a128e628f47249aae6e058a57610b/raw/19013276c035e91058e0a9799ab145f8e70e3ff5/scheme.svg">
|
||||
|
||||
## Concept
|
||||
- **Server A** (_conditionally Russian Federation_):\
|
||||
Entry point, receives Telegram proxy user traffic via **HAProxy** (port `443`)\
|
||||
and sends it to the tunnel to Server **B**.\
|
||||
Internal IP in the tunnel — `10.10.10.2`\
|
||||
Port for HAProxy clients — `443\tcp`
|
||||
- **Server B** (_conditionally Netherlands_):\
|
||||
Exit point, runs **telemt** and accepts client connections through Server **A**.\
|
||||
The server must have unrestricted access to Telegram servers.\
|
||||
Internal IP in the tunnel — `10.10.10.1`\
|
||||
AmneziaWG port — `8443\udp`\
|
||||
Port for telemt clients — `443\tcp`
|
||||
|
||||
---
|
||||
|
||||
## Step 1. Setting up the AmneziaWG tunnel (A <-> B)
|
||||
[AmneziaWG](https://github.com/amnezia-vpn/amneziawg-linux-kernel-module) must be installed on all servers.\
|
||||
All following commands are given for **Ubuntu 24.04**.\
|
||||
For RHEL-based distributions, installation instructions are available at the link above.
|
||||
|
||||
### Installing AmneziaWG (Servers A and B)
|
||||
The following steps must be performed on each server:
|
||||
|
||||
#### 1. Adding the AmneziaWG repository and installing required packages:
|
||||
```bash
|
||||
sudo apt install -y software-properties-common python3-launchpadlib gnupg2 linux-headers-$(uname -r) && \
|
||||
sudo add-apt-repository ppa:amnezia/ppa && \
|
||||
sudo apt-get install -y amneziawg
|
||||
```
|
||||
|
||||
#### 2. Generating a unique key pair:
|
||||
```bash
|
||||
cd /etc/amnezia/amneziawg && \
|
||||
awg genkey | tee private.key | awg pubkey > public.key
|
||||
```
|
||||
|
||||
As a result, you will get two files in the `/etc/amnezia/amneziawg` folder:\
|
||||
`private.key` - private, and\
|
||||
`public.key` - public server keys
|
||||
|
||||
#### 3. Configuring network interfaces:
|
||||
Obfuscation parameters `S1`, `S2`, `H1`, `H2`, `H3`, `H4` must be strictly identical on both servers.\
|
||||
Parameters `Jc`, `Jmin` and `Jmax` can differ.\
|
||||
Parameters `I1-I5` ([Custom Protocol Signature](https://docs.amnezia.org/documentation/amnezia-wg/)) must be specified on the client side (Server **A**).
|
||||
|
||||
Recommendations for choosing values:
|
||||
|
||||
```text
|
||||
Jc — 1 ≤ Jc ≤ 128; from 4 to 12 inclusive
|
||||
Jmin — Jmax > Jmin < 1280*; recommended 8
|
||||
Jmax — Jmin < Jmax ≤ 1280*; recommended 80
|
||||
S1 — S1 ≤ 1132* (1280* - 148 = 1132); S1 + 56 ≠ S2;
|
||||
recommended range from 15 to 150 inclusive
|
||||
S2 — S2 ≤ 1188* (1280* - 92 = 1188);
|
||||
recommended range from 15 to 150 inclusive
|
||||
H1/H2/H3/H4 — must be unique and differ from each other;
|
||||
recommended range from 5 to 2147483647 inclusive
|
||||
|
||||
* It is assumed that the Internet connection has an MTU of 1280.
|
||||
```
|
||||
|
||||
> [!IMPORTANT]
|
||||
> It is recommended to use your own, unique values.\
|
||||
> You can use the [generator](https://htmlpreview.github.io/?https://gist.githubusercontent.com/avbor/955782b5c37b06240b243aa375baeac5/raw/13f5517ca473b47c412b9a99407066de973732bd/awg-gen.html) to select parameters.
|
||||
|
||||
#### Server B Configuration (Netherlands):
|
||||
|
||||
Create the interface configuration file (`awg0`)
|
||||
```bash
|
||||
nano /etc/amnezia/amneziawg/awg0.conf
|
||||
```
|
||||
|
||||
File content
|
||||
```ini
|
||||
[Interface]
|
||||
Address = 10.10.10.1/24
|
||||
ListenPort = 8443
|
||||
PrivateKey = <PRIVATE_KEY_SERVER_B>
|
||||
SaveConfig = true
|
||||
Jc = 4
|
||||
Jmin = 8
|
||||
Jmax = 80
|
||||
S1 = 29
|
||||
S2 = 15
|
||||
S3 = 18
|
||||
S4 = 0
|
||||
H1 = 2087563914
|
||||
H2 = 188817757
|
||||
H3 = 101784570
|
||||
H4 = 432174303
|
||||
|
||||
[Peer]
|
||||
PublicKey = <PUBLIC_KEY_SERVER_A>
|
||||
AllowedIPs = 10.10.10.2/32
|
||||
```
|
||||
`ListenPort` - the port on which the server will wait for connections, you can choose any free one.\
|
||||
`<PRIVATE_KEY_SERVER_B>` - the content of the `private.key` file from Server **B**.\
|
||||
`<PUBLIC_KEY_SERVER_A>` - the content of the `public.key` file from Server **A**.
|
||||
|
||||
Open the port on the firewall (if enabled):
|
||||
```bash
|
||||
sudo ufw allow from <PUBLIC_IP_SERVER_A> to any port 8443 proto udp
|
||||
```
|
||||
|
||||
`<PUBLIC_IP_SERVER_A>` - the external IP address of Server **A**.
|
||||
|
||||
#### Server A Configuration (Russian Federation):
|
||||
Create the interface configuration file (awg0)
|
||||
|
||||
```bash
|
||||
nano /etc/amnezia/amneziawg/awg0.conf
|
||||
```
|
||||
|
||||
File content
|
||||
```ini
|
||||
[Interface]
|
||||
Address = 10.10.10.2/24
|
||||
PrivateKey = <PRIVATE_KEY_SERVER_A>
|
||||
Jc = 4
|
||||
Jmin = 8
|
||||
Jmax = 80
|
||||
S1 = 29
|
||||
S2 = 15
|
||||
S3 = 18
|
||||
S4 = 0
|
||||
H1 = 2087563914
|
||||
H2 = 188817757
|
||||
H3 = 101784570
|
||||
H4 = 432174303
|
||||
I1 = <b 0xc10000000108981eba846e21f74e00>
|
||||
I2 = <b 0xc20000000108981eba846e21f74e00>
|
||||
I3 = <b 0xc30000000108981eba846e21f74e00>
|
||||
I4 = <b 0x43981eba846e21f74e>
|
||||
I5 = <b 0x43981eba846e21f74e>
|
||||
|
||||
[Peer]
|
||||
PublicKey = <PUBLIC_KEY_SERVER_B>
|
||||
Endpoint = <PUBLIC_IP_SERVER_B>:8443
|
||||
AllowedIPs = 10.10.10.1/32
|
||||
PersistentKeepalive = 25
|
||||
```
|
||||
|
||||
`<PRIVATE_KEY_SERVER_A>` - the content of the `private.key` file from Server **A**.\
|
||||
`<PUBLIC_KEY_SERVER_B>` - the content of the `public.key` file from Server **B**.\
|
||||
`<PUBLIC_IP_SERVER_B>` - the public IP address of Server **B**.
|
||||
|
||||
Enable the tunnel on both servers:
|
||||
```bash
|
||||
sudo systemctl enable --now awg-quick@awg0
|
||||
```
|
||||
|
||||
Make sure Server B is accessible from Server A through the tunnel.
|
||||
```bash
|
||||
ping 10.10.10.1
|
||||
PING 10.10.10.1 (10.10.10.1) 56(84) bytes of data.
|
||||
64 bytes from 10.10.10.1: icmp_seq=1 ttl=64 time=35.1 ms
|
||||
64 bytes from 10.10.10.1: icmp_seq=2 ttl=64 time=35.0 ms
|
||||
64 bytes from 10.10.10.1: icmp_seq=3 ttl=64 time=35.1 ms
|
||||
^C
|
||||
```
|
||||
---
|
||||
|
||||
## Step 2. Installing telemt on Server B (conditionally Netherlands)
|
||||
Installation and configuration are described [here](https://github.com/telemt/telemt/blob/main/docs/QUICK_START_GUIDE.ru.md) or [here](https://gitlab.com/An0nX/telemt-docker#-quick-start-docker-compose).\
|
||||
It is assumed that telemt expects connections on port `443\tcp`.
|
||||
|
||||
In the telemt config, you must enable the `Proxy` protocol and restrict connections to it only through the tunnel.
|
||||
```toml
|
||||
[server]
|
||||
port = 443
|
||||
listen_addr_ipv4 = "10.10.10.1"
|
||||
proxy_protocol = true
|
||||
```
|
||||
|
||||
Also, for correct link generation, specify the FQDN or IP address and port of Server `A`
|
||||
```toml
|
||||
[general.links]
|
||||
show = "*"
|
||||
public_host = "<FQDN_OR_IP_SERVER_A>"
|
||||
public_port = 443
|
||||
```
|
||||
|
||||
Open the port on the firewall (if enabled):
|
||||
```bash
|
||||
sudo ufw allow from 10.10.10.2 to any port 443 proto tcp
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Step 3. Configuring HAProxy on Server A (Russian Federation)
|
||||
Since the version in the standard Ubuntu repository is relatively old, it makes sense to use the official Docker image.\
|
||||
[Instructions](https://docs.docker.com/engine/install/ubuntu/) for installing Docker on Ubuntu.
|
||||
|
||||
> [!WARNING]
|
||||
> By default, regular users do not have rights to use ports < 1024.
|
||||
> Attempts to run HAProxy on port 443 can lead to errors:
|
||||
> ```
|
||||
> [ALERT] (8) : Binding [/usr/local/etc/haproxy/haproxy.cfg:17] for frontend tcp_in_443:
|
||||
> protocol tcpv4: cannot bind socket (Permission denied) for [0.0.0.0:443].
|
||||
> ```
|
||||
> There are two simple ways to bypass this restriction, choose one:
|
||||
> 1. At the OS level, change the net.ipv4.ip_unprivileged_port_start setting to allow users to use all ports:
|
||||
> ```
|
||||
> echo "net.ipv4.ip_unprivileged_port_start = 0" | sudo tee -a /etc/sysctl.conf && sudo sysctl -p
|
||||
> ```
|
||||
> or
|
||||
>
|
||||
> 2. Run HAProxy as root:
|
||||
> Uncomment the `user: "root"` parameter in docker-compose.yaml.
|
||||
|
||||
#### Create a folder for HAProxy:
|
||||
```bash
|
||||
mkdir -p /opt/docker-compose/haproxy && cd $_
|
||||
```
|
||||
|
||||
#### Create the docker-compose.yaml file
|
||||
`nano docker-compose.yaml`
|
||||
|
||||
File content
|
||||
```yaml
|
||||
services:
|
||||
haproxy:
|
||||
image: haproxy:latest
|
||||
container_name: haproxy
|
||||
restart: unless-stopped
|
||||
# user: "root"
|
||||
network_mode: "host"
|
||||
volumes:
|
||||
- ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "1m"
|
||||
max-file: "1"
|
||||
```
|
||||
|
||||
#### Create the haproxy.cfg config file
|
||||
Accept connections on port 443\tcp and send them through the tunnel to Server `B` 10.10.10.1:443
|
||||
|
||||
`nano haproxy.cfg`
|
||||
|
||||
File content
|
||||
|
||||
```haproxy
|
||||
global
|
||||
log stdout format raw local0
|
||||
maxconn 10000
|
||||
|
||||
defaults
|
||||
log global
|
||||
mode tcp
|
||||
option tcplog
|
||||
option clitcpka
|
||||
option srvtcpka
|
||||
timeout connect 5s
|
||||
timeout client 2h
|
||||
timeout server 2h
|
||||
timeout check 5s
|
||||
|
||||
frontend tcp_in_443
|
||||
bind *:443
|
||||
maxconn 8000
|
||||
option tcp-smart-accept
|
||||
default_backend telemt_nodes
|
||||
|
||||
backend telemt_nodes
|
||||
option tcp-smart-connect
|
||||
server server_b 10.10.10.1:443 check inter 5s rise 2 fall 3 send-proxy-v2
|
||||
|
||||
|
||||
```
|
||||
> [!WARNING]
|
||||
> **The file must end with an empty line, otherwise HAProxy will not start!**
|
||||
|
||||
#### Allow port 443\tcp in the firewall (if enabled)
|
||||
```bash
|
||||
sudo ufw allow 443/tcp
|
||||
```
|
||||
|
||||
#### Start the HAProxy container
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
If everything is configured correctly, you can now try connecting Telegram clients using links from the telemt log\api.
|
||||
|
|
@ -0,0 +1,291 @@
|
|||
<img src="https://gist.githubusercontent.com/avbor/1f8a128e628f47249aae6e058a57610b/raw/19013276c035e91058e0a9799ab145f8e70e3ff5/scheme.svg">
|
||||
|
||||
## Концепция
|
||||
- **Сервер A** (_РФ_):\
|
||||
Точка входа, принимает трафик пользователей Telegram-прокси через **HAProxy** (порт `443`)\
|
||||
и отправляет в туннель на Сервер **B**.\
|
||||
Внутренний IP в туннеле — `10.10.10.2`\
|
||||
Порт для клиентов HAProxy — `443\tcp`
|
||||
- **Сервер B** (_условно Нидерланды_):\
|
||||
Точка выхода, на нем работает **telemt** и принимает подключения клиентов через Сервер **A**.\
|
||||
На сервере должен быть неограниченный доступ до серверов Telegram.\
|
||||
Внутренний IP в туннеле — `10.10.10.1`\
|
||||
Порт AmneziaWG — `8443\udp`\
|
||||
Порт для клиентов telemt — `443\tcp`
|
||||
|
||||
---
|
||||
|
||||
## Шаг 1. Настройка туннеля AmneziaWG (A <-> B)
|
||||
|
||||
На всех серверах необходимо установить [amneziawg](https://github.com/amnezia-vpn/amneziawg-linux-kernel-module).\
|
||||
Далее все команды даны для **Ubuntu 24.04**.\
|
||||
Для RHEL-based дистрибутивов инструкция по установке есть по ссылке выше.
|
||||
|
||||
### Установка AmneziaWG (Сервера A и B)
|
||||
На каждом из серверов необходимо выполнить следующие шаги:
|
||||
|
||||
#### 1. Добавление репозитория AmneziaWG и установка необходимых пакетов:
|
||||
```bash
|
||||
sudo apt install -y software-properties-common python3-launchpadlib gnupg2 linux-headers-$(uname -r) && \
|
||||
sudo add-apt-repository ppa:amnezia/ppa && \
|
||||
sudo apt-get install -y amneziawg
|
||||
```
|
||||
|
||||
#### 2. Генерация уникальной пары ключей:
|
||||
```bash
|
||||
cd /etc/amnezia/amneziawg && \
|
||||
awg genkey | tee private.key | awg pubkey > public.key
|
||||
```
|
||||
В результате вы получите в папке `/etc/amnezia/amneziawg` два файла:\
|
||||
`private.key` - приватный и\
|
||||
`public.key` - публичный ключи сервера
|
||||
|
||||
#### 3. Настройка сетевых интерфейсов:
|
||||
|
||||
Параметры обфускации `S1`, `S2`, `H1`, `H2`, `H3`, `H4` должны быть строго идентичными на обоих серверах.\
|
||||
Параметры `Jc`, `Jmin` и `Jmax` могут отличаться.\
|
||||
Параметры `I1-I5` ([Custom Protocol Signature](https://docs.amnezia.org/documentation/amnezia-wg/)) нужно указывать на стороне _клиента_ (Сервер **А**).
|
||||
|
||||
Рекомендации по выбору значений:
|
||||
```text
|
||||
Jc — 1 ≤ Jc ≤ 128; от 4 до 12 включительно
|
||||
Jmin — Jmax > Jmin < 1280*; рекомендовано 8
|
||||
Jmax — Jmin < Jmax ≤ 1280*; рекомендовано 80
|
||||
S1 — S1 ≤ 1132* (1280* - 148 = 1132); S1 + 56 ≠ S2;
|
||||
рекомендованный диапазон от 15 до 150 включительно
|
||||
S2 — S2 ≤ 1188* (1280* - 92 = 1188);
|
||||
рекомендованный диапазон от 15 до 150 включительно
|
||||
H1/H2/H3/H4 — должны быть уникальны и отличаться друг от друга;
|
||||
рекомендованный диапазон от 5 до 2147483647 включительно
|
||||
|
||||
* Предполагается, что подключение к Интернету имеет MTU 1280.
|
||||
```
|
||||
> [!IMPORTANT]
|
||||
> Рекомендуется использовать собственные, уникальные значения.\
|
||||
> Для выбора параметров можете воспользоваться [генератором](https://htmlpreview.github.io/?https://gist.githubusercontent.com/avbor/955782b5c37b06240b243aa375baeac5/raw/13f5517ca473b47c412b9a99407066de973732bd/awg-gen.html).
|
||||
|
||||
#### Конфигурация Сервера B (_Нидерланды_):
|
||||
|
||||
Создаем файл конфигурации интерфейса (`awg0`)
|
||||
```bash
|
||||
nano /etc/amnezia/amneziawg/awg0.conf
|
||||
```
|
||||
|
||||
Содержимое файла
|
||||
```ini
|
||||
[Interface]
|
||||
Address = 10.10.10.1/24
|
||||
ListenPort = 8443
|
||||
PrivateKey = <PRIVATE_KEY_SERVER_B>
|
||||
SaveConfig = true
|
||||
Jc = 4
|
||||
Jmin = 8
|
||||
Jmax = 80
|
||||
S1 = 29
|
||||
S2 = 15
|
||||
S3 = 18
|
||||
S4 = 0
|
||||
H1 = 2087563914
|
||||
H2 = 188817757
|
||||
H3 = 101784570
|
||||
H4 = 432174303
|
||||
|
||||
[Peer]
|
||||
PublicKey = <PUBLIC_KEY_SERVER_A>
|
||||
AllowedIPs = 10.10.10.2/32
|
||||
```
|
||||
|
||||
`ListenPort` - порт, на котором сервер будет ждать подключения, можете выбрать любой свободный.\
|
||||
`<PRIVATE_KEY_SERVER_B>` - содержимое файла `private.key` с сервера **B**.\
|
||||
`<PUBLIC_KEY_SERVER_A>` - содержимое файла `public.key` с сервера **A**.
|
||||
|
||||
Открываем порт на фаерволе (если включен):
|
||||
```bash
|
||||
sudo ufw allow from <PUBLIC_IP_SERVER_A> to any port 8443 proto udp
|
||||
```
|
||||
|
||||
`<PUBLIC_IP_SERVER_A>` - внешний IP адрес Сервера **A**.
|
||||
|
||||
#### Конфигурация Сервера A (_РФ_):
|
||||
|
||||
Создаем файл конфигурации интерфейса (`awg0`)
|
||||
```bash
|
||||
nano /etc/amnezia/amneziawg/awg0.conf
|
||||
```
|
||||
|
||||
Содержимое файла
|
||||
```ini
|
||||
[Interface]
|
||||
Address = 10.10.10.2/24
|
||||
PrivateKey = <PRIVATE_KEY_SERVER_A>
|
||||
Jc = 4
|
||||
Jmin = 8
|
||||
Jmax = 80
|
||||
S1 = 29
|
||||
S2 = 15
|
||||
S3 = 18
|
||||
S4 = 0
|
||||
H1 = 2087563914
|
||||
H2 = 188817757
|
||||
H3 = 101784570
|
||||
H4 = 432174303
|
||||
I1 = <b 0xc10000000108981eba846e21f74e00>
|
||||
I2 = <b 0xc20000000108981eba846e21f74e00>
|
||||
I3 = <b 0xc30000000108981eba846e21f74e00>
|
||||
I4 = <b 0x43981eba846e21f74e>
|
||||
I5 = <b 0x43981eba846e21f74e>
|
||||
|
||||
[Peer]
|
||||
PublicKey = <PUBLIC_KEY_SERVER_B>
|
||||
Endpoint = <PUBLIC_IP_SERVER_B>:8443
|
||||
AllowedIPs = 10.10.10.1/32
|
||||
PersistentKeepalive = 25
|
||||
```
|
||||
|
||||
`<PRIVATE_KEY_SERVER_A>` - содержимое файла `private.key` с сервера **A**.\
|
||||
`<PUBLIC_KEY_SERVER_B>` - содержимое файла `public.key` с сервера **B**.\
|
||||
`<PUBLIC_IP_SERVER_B>` - публичный IP-адрес сервера **B**.
|
||||
|
||||
#### Включаем туннель на обоих серверах:
|
||||
```bash
|
||||
sudo systemctl enable --now awg-quick@awg0
|
||||
```
|
||||
|
||||
Убедитесь, что с Сервера `A` доступен Сервер `B` через туннель.
|
||||
```bash
|
||||
ping 10.10.10.1
|
||||
PING 10.10.10.1 (10.10.10.1) 56(84) bytes of data.
|
||||
64 bytes from 10.10.10.1: icmp_seq=1 ttl=64 time=35.1 ms
|
||||
64 bytes from 10.10.10.1: icmp_seq=2 ttl=64 time=35.0 ms
|
||||
64 bytes from 10.10.10.1: icmp_seq=3 ttl=64 time=35.1 ms
|
||||
^C
|
||||
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Шаг 2. Установка telemt на Сервере B (_условно Нидерланды_)
|
||||
|
||||
Установка и настройка описаны [здесь](https://github.com/telemt/telemt/blob/main/docs/QUICK_START_GUIDE.ru.md) или [здесь](https://gitlab.com/An0nX/telemt-docker#-quick-start-docker-compose).\
|
||||
Подразумевается, что telemt ожидает подключения на порту `443\tcp`.
|
||||
|
||||
В конфиге telemt необходимо включить протокол `Proxy` и ограничить подключения к нему только через туннель.
|
||||
|
||||
```toml
|
||||
[server]
|
||||
port = 443
|
||||
listen_addr_ipv4 = "10.10.10.1"
|
||||
proxy_protocol = true
|
||||
```
|
||||
|
||||
А также, для правильной генерации ссылок, указать FQDN или IP адрес и порт Сервера `A`
|
||||
|
||||
```toml
|
||||
[general.links]
|
||||
show = "*"
|
||||
public_host = "<FQDN_OR_IP_SERVER_A>"
|
||||
public_port = 443
|
||||
```
|
||||
|
||||
Открываем порт на фаерволе (если включен):
|
||||
```bash
|
||||
sudo ufw allow from 10.10.10.2 to any port 443 proto tcp
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Шаг 3. Настройка HAProxy на Сервере A (_РФ_)
|
||||
|
||||
Т.к. в стандартном репозитории Ubuntu версия относительно старая, имеет смысл воспользоваться официальным образом Docker.\
|
||||
[Инструкция](https://docs.docker.com/engine/install/ubuntu/) по установке Docker на Ubuntu.
|
||||
|
||||
> [!WARNING]
|
||||
> По умолчанию у обычных пользователей нет прав на использование портов < 1024.\
|
||||
> Попытки запустить HAProxy на 443 порту могут приводить к ошибкам:
|
||||
> ```
|
||||
> [ALERT] (8) : Binding [/usr/local/etc/haproxy/haproxy.cfg:17] for frontend tcp_in_443:
|
||||
> protocol tcpv4: cannot bind socket (Permission denied) for [0.0.0.0:443].
|
||||
> ```
|
||||
> Есть два простых способа обойти это ограничение, выберите что-то одно:
|
||||
> 1. На уровне ОС изменить настройку net.ipv4.ip_unprivileged_port_start, разрешив пользователям использовать все порты:
|
||||
> ```
|
||||
> echo "net.ipv4.ip_unprivileged_port_start = 0" | sudo tee -a /etc/sysctl.conf && sudo sysctl -p
|
||||
> ```
|
||||
> или
|
||||
>
|
||||
> 2. Запустить HAProxy под root:\
|
||||
> Раскомментируйте в docker-compose.yaml параметр `user: "root"`.
|
||||
|
||||
#### Создаем папку для HAProxy:
|
||||
```bash
|
||||
mkdir -p /opt/docker-compose/haproxy && cd $_
|
||||
```
|
||||
#### Создаем файл docker-compose.yaml
|
||||
|
||||
`nano docker-compose.yaml`
|
||||
|
||||
Содержимое файла
|
||||
```yaml
|
||||
services:
|
||||
haproxy:
|
||||
image: haproxy:latest
|
||||
container_name: haproxy
|
||||
restart: unless-stopped
|
||||
# user: "root"
|
||||
network_mode: "host"
|
||||
volumes:
|
||||
- ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "1m"
|
||||
max-file: "1"
|
||||
```
|
||||
#### Создаем файл конфига haproxy.cfg
|
||||
Принимаем подключения на порту 443\tcp и отправляем их через туннель на Сервер `B` 10.10.10.1:443
|
||||
|
||||
`nano haproxy.cfg`
|
||||
|
||||
Содержимое файла
|
||||
```haproxy
|
||||
global
|
||||
log stdout format raw local0
|
||||
maxconn 10000
|
||||
|
||||
defaults
|
||||
log global
|
||||
mode tcp
|
||||
option tcplog
|
||||
option clitcpka
|
||||
option srvtcpka
|
||||
timeout connect 5s
|
||||
timeout client 2h
|
||||
timeout server 2h
|
||||
timeout check 5s
|
||||
|
||||
frontend tcp_in_443
|
||||
bind *:443
|
||||
maxconn 8000
|
||||
option tcp-smart-accept
|
||||
default_backend telemt_nodes
|
||||
|
||||
backend telemt_nodes
|
||||
option tcp-smart-connect
|
||||
server server_b 10.10.10.1:443 check inter 5s rise 2 fall 3 send-proxy-v2
|
||||
|
||||
|
||||
```
|
||||
>[!WARNING]
|
||||
>**Файл должен заканчиваться пустой строкой, иначе HAProxy не запустится!**
|
||||
|
||||
#### Разрешаем порт 443\tcp в фаерволе (если включен)
|
||||
```bash
|
||||
sudo ufw allow 443/tcp
|
||||
```
|
||||
|
||||
#### Запускаем контейнер HAProxy
|
||||
```bash
|
||||
docker compose up -d
|
||||
```
|
||||
|
||||
Если все настроено верно, то теперь можно пробовать подключить клиентов Telegram с использованием ссылок из лога\api telemt.
|
||||
34
install.sh
34
install.sh
|
|
@ -20,6 +20,13 @@ TARGET_VERSION="${VERSION:-latest}"
|
|||
while [ $# -gt 0 ]; do
|
||||
case "$1" in
|
||||
-h|--help) ACTION="help"; shift ;;
|
||||
-d|--domain)
|
||||
if [ "$#" -lt 2 ] || [ -z "$2" ]; then
|
||||
printf '[ERROR] %s requires a domain argument.\n' "$1" >&2
|
||||
exit 1
|
||||
fi
|
||||
TLS_DOMAIN="$2"
|
||||
shift 2 ;;
|
||||
uninstall|--uninstall)
|
||||
if [ "$ACTION" != "purge" ]; then ACTION="uninstall"; fi
|
||||
shift ;;
|
||||
|
|
@ -52,11 +59,12 @@ cleanup() {
|
|||
trap cleanup EXIT INT TERM
|
||||
|
||||
show_help() {
|
||||
say "Usage: $0 [ <version> | install | uninstall | purge | --help ]"
|
||||
say "Usage: $0 [ <version> | install | uninstall | purge ] [ -d <domain> ] [ --help ]"
|
||||
say " <version> Install specific version (e.g. 3.3.15, default: latest)"
|
||||
say " install Install the latest version"
|
||||
say " uninstall Remove the binary and service (keeps config and user)"
|
||||
say " purge Remove everything including configuration, data, and user"
|
||||
say " -d, --domain Set TLS domain (default: petrovich.ru)"
|
||||
exit 0
|
||||
}
|
||||
|
||||
|
|
@ -192,7 +200,13 @@ verify_install_deps() {
|
|||
detect_arch() {
|
||||
sys_arch="$(uname -m)"
|
||||
case "$sys_arch" in
|
||||
x86_64|amd64) echo "x86_64" ;;
|
||||
x86_64|amd64)
|
||||
if [ -r /proc/cpuinfo ] && grep -q "avx2" /proc/cpuinfo 2>/dev/null && grep -q "bmi2" /proc/cpuinfo 2>/dev/null; then
|
||||
echo "x86_64-v3"
|
||||
else
|
||||
echo "x86_64"
|
||||
fi
|
||||
;;
|
||||
aarch64|arm64) echo "aarch64" ;;
|
||||
*) die "Unsupported architecture: $sys_arch" ;;
|
||||
esac
|
||||
|
|
@ -500,7 +514,21 @@ case "$ACTION" in
|
|||
die "Temp directory is invalid or was not created"
|
||||
fi
|
||||
|
||||
fetch_file "$DL_URL" "${TEMP_DIR}/${FILE_NAME}" || die "Download failed"
|
||||
if ! fetch_file "$DL_URL" "${TEMP_DIR}/${FILE_NAME}"; then
|
||||
if [ "$ARCH" = "x86_64-v3" ]; then
|
||||
say " -> x86_64-v3 build not found, falling back to standard x86_64..."
|
||||
ARCH="x86_64"
|
||||
FILE_NAME="${BIN_NAME}-${ARCH}-linux-${LIBC}.tar.gz"
|
||||
if [ "$TARGET_VERSION" = "latest" ]; then
|
||||
DL_URL="https://github.com/${REPO}/releases/latest/download/${FILE_NAME}"
|
||||
else
|
||||
DL_URL="https://github.com/${REPO}/releases/download/${TARGET_VERSION}/${FILE_NAME}"
|
||||
fi
|
||||
fetch_file "$DL_URL" "${TEMP_DIR}/${FILE_NAME}" || die "Download failed"
|
||||
else
|
||||
die "Download failed"
|
||||
fi
|
||||
fi
|
||||
|
||||
say ">>> Stage 3: Extracting archive"
|
||||
if ! gzip -dc "${TEMP_DIR}/${FILE_NAME}" | tar -xf - -C "$TEMP_DIR" 2>/dev/null; then
|
||||
|
|
|
|||
|
|
@ -37,11 +37,12 @@ mod runtime_watch;
|
|||
mod runtime_zero;
|
||||
mod users;
|
||||
|
||||
use config_store::{current_revision, parse_if_match};
|
||||
use config_store::{current_revision, load_config_from_disk, parse_if_match};
|
||||
use events::ApiEventStore;
|
||||
use http_utils::{error_response, read_json, read_optional_json, success_response};
|
||||
use model::{
|
||||
ApiFailure, CreateUserRequest, HealthData, PatchUserRequest, RotateSecretRequest, SummaryData,
|
||||
ApiFailure, CreateUserRequest, DeleteUserResponse, HealthData, PatchUserRequest,
|
||||
RotateSecretRequest, SummaryData, UserActiveIps,
|
||||
};
|
||||
use runtime_edge::{
|
||||
EdgeConnectionsCacheEntry, build_runtime_connections_summary_data,
|
||||
|
|
@ -362,15 +363,33 @@ async fn handle(
|
|||
);
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/stats/users/active-ips") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let usernames: Vec<_> = cfg.access.users.keys().cloned().collect();
|
||||
let active_ips_map = shared.ip_tracker.get_active_ips_for_users(&usernames).await;
|
||||
let mut data: Vec<UserActiveIps> = active_ips_map
|
||||
.into_iter()
|
||||
.filter(|(_, ips)| !ips.is_empty())
|
||||
.map(|(username, active_ips)| UserActiveIps {
|
||||
username,
|
||||
active_ips,
|
||||
})
|
||||
.collect();
|
||||
data.sort_by(|a, b| a.username.cmp(&b.username));
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/stats/users") | ("GET", "/v1/users") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let disk_cfg = load_config_from_disk(&shared.config_path).await?;
|
||||
let runtime_cfg = config_rx.borrow().clone();
|
||||
let (detected_ip_v4, detected_ip_v6) = shared.detected_link_ips();
|
||||
let users = users_from_config(
|
||||
&cfg,
|
||||
&disk_cfg,
|
||||
&shared.stats,
|
||||
&shared.ip_tracker,
|
||||
detected_ip_v4,
|
||||
detected_ip_v6,
|
||||
Some(runtime_cfg.as_ref()),
|
||||
)
|
||||
.await;
|
||||
Ok(success_response(StatusCode::OK, users, revision))
|
||||
|
|
@ -389,7 +408,7 @@ async fn handle(
|
|||
let expected_revision = parse_if_match(req.headers());
|
||||
let body = read_json::<CreateUserRequest>(req.into_body(), body_limit).await?;
|
||||
let result = create_user(body, expected_revision, &shared).await;
|
||||
let (data, revision) = match result {
|
||||
let (mut data, revision) = match result {
|
||||
Ok(ok) => ok,
|
||||
Err(error) => {
|
||||
shared
|
||||
|
|
@ -398,11 +417,18 @@ async fn handle(
|
|||
return Err(error);
|
||||
}
|
||||
};
|
||||
let runtime_cfg = config_rx.borrow().clone();
|
||||
data.user.in_runtime = runtime_cfg.access.users.contains_key(&data.user.username);
|
||||
shared.runtime_events.record(
|
||||
"api.user.create.ok",
|
||||
format!("username={}", data.user.username),
|
||||
);
|
||||
Ok(success_response(StatusCode::CREATED, data, revision))
|
||||
let status = if data.user.in_runtime {
|
||||
StatusCode::CREATED
|
||||
} else {
|
||||
StatusCode::ACCEPTED
|
||||
};
|
||||
Ok(success_response(status, data, revision))
|
||||
}
|
||||
_ => {
|
||||
if let Some(user) = path.strip_prefix("/v1/users/")
|
||||
|
|
@ -411,13 +437,16 @@ async fn handle(
|
|||
{
|
||||
if method == Method::GET {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let disk_cfg = load_config_from_disk(&shared.config_path).await?;
|
||||
let runtime_cfg = config_rx.borrow().clone();
|
||||
let (detected_ip_v4, detected_ip_v6) = shared.detected_link_ips();
|
||||
let users = users_from_config(
|
||||
&cfg,
|
||||
&disk_cfg,
|
||||
&shared.stats,
|
||||
&shared.ip_tracker,
|
||||
detected_ip_v4,
|
||||
detected_ip_v6,
|
||||
Some(runtime_cfg.as_ref()),
|
||||
)
|
||||
.await;
|
||||
if let Some(user_info) =
|
||||
|
|
@ -445,7 +474,7 @@ async fn handle(
|
|||
let body =
|
||||
read_json::<PatchUserRequest>(req.into_body(), body_limit).await?;
|
||||
let result = patch_user(user, body, expected_revision, &shared).await;
|
||||
let (data, revision) = match result {
|
||||
let (mut data, revision) = match result {
|
||||
Ok(ok) => ok,
|
||||
Err(error) => {
|
||||
shared.runtime_events.record(
|
||||
|
|
@ -455,10 +484,17 @@ async fn handle(
|
|||
return Err(error);
|
||||
}
|
||||
};
|
||||
let runtime_cfg = config_rx.borrow().clone();
|
||||
data.in_runtime = runtime_cfg.access.users.contains_key(&data.username);
|
||||
shared
|
||||
.runtime_events
|
||||
.record("api.user.patch.ok", format!("username={}", data.username));
|
||||
return Ok(success_response(StatusCode::OK, data, revision));
|
||||
let status = if data.in_runtime {
|
||||
StatusCode::OK
|
||||
} else {
|
||||
StatusCode::ACCEPTED
|
||||
};
|
||||
return Ok(success_response(status, data, revision));
|
||||
}
|
||||
if method == Method::DELETE {
|
||||
if api_cfg.read_only {
|
||||
|
|
@ -486,7 +522,18 @@ async fn handle(
|
|||
shared
|
||||
.runtime_events
|
||||
.record("api.user.delete.ok", format!("username={}", deleted_user));
|
||||
return Ok(success_response(StatusCode::OK, deleted_user, revision));
|
||||
let runtime_cfg = config_rx.borrow().clone();
|
||||
let in_runtime = runtime_cfg.access.users.contains_key(&deleted_user);
|
||||
let response = DeleteUserResponse {
|
||||
username: deleted_user,
|
||||
in_runtime,
|
||||
};
|
||||
let status = if response.in_runtime {
|
||||
StatusCode::ACCEPTED
|
||||
} else {
|
||||
StatusCode::OK
|
||||
};
|
||||
return Ok(success_response(status, response, revision));
|
||||
}
|
||||
if method == Method::POST
|
||||
&& let Some(base_user) = user.strip_suffix("/rotate-secret")
|
||||
|
|
@ -514,7 +561,7 @@ async fn handle(
|
|||
&shared,
|
||||
)
|
||||
.await;
|
||||
let (data, revision) = match result {
|
||||
let (mut data, revision) = match result {
|
||||
Ok(ok) => ok,
|
||||
Err(error) => {
|
||||
shared.runtime_events.record(
|
||||
|
|
@ -524,11 +571,19 @@ async fn handle(
|
|||
return Err(error);
|
||||
}
|
||||
};
|
||||
let runtime_cfg = config_rx.borrow().clone();
|
||||
data.user.in_runtime =
|
||||
runtime_cfg.access.users.contains_key(&data.user.username);
|
||||
shared.runtime_events.record(
|
||||
"api.user.rotate_secret.ok",
|
||||
format!("username={}", base_user),
|
||||
);
|
||||
return Ok(success_response(StatusCode::OK, data, revision));
|
||||
let status = if data.user.in_runtime {
|
||||
StatusCode::OK
|
||||
} else {
|
||||
StatusCode::ACCEPTED
|
||||
};
|
||||
return Ok(success_response(status, data, revision));
|
||||
}
|
||||
if method == Method::POST {
|
||||
return Ok(error_response(
|
||||
|
|
|
|||
|
|
@ -81,10 +81,21 @@ pub(super) struct ZeroCoreData {
|
|||
pub(super) connections_total: u64,
|
||||
pub(super) connections_bad_total: u64,
|
||||
pub(super) handshake_timeouts_total: u64,
|
||||
pub(super) accept_permit_timeout_total: u64,
|
||||
pub(super) configured_users: usize,
|
||||
pub(super) telemetry_core_enabled: bool,
|
||||
pub(super) telemetry_user_enabled: bool,
|
||||
pub(super) telemetry_me_level: String,
|
||||
pub(super) conntrack_control_enabled: bool,
|
||||
pub(super) conntrack_control_available: bool,
|
||||
pub(super) conntrack_pressure_active: bool,
|
||||
pub(super) conntrack_event_queue_depth: u64,
|
||||
pub(super) conntrack_rule_apply_ok: bool,
|
||||
pub(super) conntrack_delete_attempt_total: u64,
|
||||
pub(super) conntrack_delete_success_total: u64,
|
||||
pub(super) conntrack_delete_not_found_total: u64,
|
||||
pub(super) conntrack_delete_error_total: u64,
|
||||
pub(super) conntrack_close_event_drop_total: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Clone)]
|
||||
|
|
@ -174,6 +185,24 @@ pub(super) struct ZeroMiddleProxyData {
|
|||
pub(super) route_drop_queue_full_total: u64,
|
||||
pub(super) route_drop_queue_full_base_total: u64,
|
||||
pub(super) route_drop_queue_full_high_total: u64,
|
||||
pub(super) d2c_batches_total: u64,
|
||||
pub(super) d2c_batch_frames_total: u64,
|
||||
pub(super) d2c_batch_bytes_total: u64,
|
||||
pub(super) d2c_flush_reason_queue_drain_total: u64,
|
||||
pub(super) d2c_flush_reason_batch_frames_total: u64,
|
||||
pub(super) d2c_flush_reason_batch_bytes_total: u64,
|
||||
pub(super) d2c_flush_reason_max_delay_total: u64,
|
||||
pub(super) d2c_flush_reason_ack_immediate_total: u64,
|
||||
pub(super) d2c_flush_reason_close_total: u64,
|
||||
pub(super) d2c_data_frames_total: u64,
|
||||
pub(super) d2c_ack_frames_total: u64,
|
||||
pub(super) d2c_payload_bytes_total: u64,
|
||||
pub(super) d2c_write_mode_coalesced_total: u64,
|
||||
pub(super) d2c_write_mode_split_total: u64,
|
||||
pub(super) d2c_quota_reject_pre_write_total: u64,
|
||||
pub(super) d2c_quota_reject_post_write_total: u64,
|
||||
pub(super) d2c_frame_buf_shrink_total: u64,
|
||||
pub(super) d2c_frame_buf_shrink_bytes_total: u64,
|
||||
pub(super) socks_kdf_strict_reject_total: u64,
|
||||
pub(super) socks_kdf_compat_fallback_total: u64,
|
||||
pub(super) endpoint_quarantine_total: u64,
|
||||
|
|
@ -410,6 +439,7 @@ pub(super) struct UserLinks {
|
|||
#[derive(Serialize)]
|
||||
pub(super) struct UserInfo {
|
||||
pub(super) username: String,
|
||||
pub(super) in_runtime: bool,
|
||||
pub(super) user_ad_tag: Option<String>,
|
||||
pub(super) max_tcp_conns: Option<usize>,
|
||||
pub(super) expiration_rfc3339: Option<String>,
|
||||
|
|
@ -424,12 +454,24 @@ pub(super) struct UserInfo {
|
|||
pub(super) links: UserLinks,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct UserActiveIps {
|
||||
pub(super) username: String,
|
||||
pub(super) active_ips: Vec<IpAddr>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct CreateUserResponse {
|
||||
pub(super) user: UserInfo,
|
||||
pub(super) secret: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct DeleteUserResponse {
|
||||
pub(super) username: String,
|
||||
pub(super) in_runtime: bool,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub(super) struct CreateUserRequest {
|
||||
pub(super) username: String,
|
||||
|
|
|
|||
|
|
@ -39,10 +39,21 @@ pub(super) fn build_zero_all_data(stats: &Stats, configured_users: usize) -> Zer
|
|||
connections_total: stats.get_connects_all(),
|
||||
connections_bad_total: stats.get_connects_bad(),
|
||||
handshake_timeouts_total: stats.get_handshake_timeouts(),
|
||||
accept_permit_timeout_total: stats.get_accept_permit_timeout_total(),
|
||||
configured_users,
|
||||
telemetry_core_enabled: telemetry.core_enabled,
|
||||
telemetry_user_enabled: telemetry.user_enabled,
|
||||
telemetry_me_level: telemetry.me_level.to_string(),
|
||||
conntrack_control_enabled: stats.get_conntrack_control_enabled(),
|
||||
conntrack_control_available: stats.get_conntrack_control_available(),
|
||||
conntrack_pressure_active: stats.get_conntrack_pressure_active(),
|
||||
conntrack_event_queue_depth: stats.get_conntrack_event_queue_depth(),
|
||||
conntrack_rule_apply_ok: stats.get_conntrack_rule_apply_ok(),
|
||||
conntrack_delete_attempt_total: stats.get_conntrack_delete_attempt_total(),
|
||||
conntrack_delete_success_total: stats.get_conntrack_delete_success_total(),
|
||||
conntrack_delete_not_found_total: stats.get_conntrack_delete_not_found_total(),
|
||||
conntrack_delete_error_total: stats.get_conntrack_delete_error_total(),
|
||||
conntrack_close_event_drop_total: stats.get_conntrack_close_event_drop_total(),
|
||||
},
|
||||
upstream: build_zero_upstream_data(stats),
|
||||
middle_proxy: ZeroMiddleProxyData {
|
||||
|
|
@ -68,6 +79,25 @@ pub(super) fn build_zero_all_data(stats: &Stats, configured_users: usize) -> Zer
|
|||
route_drop_queue_full_total: stats.get_me_route_drop_queue_full(),
|
||||
route_drop_queue_full_base_total: stats.get_me_route_drop_queue_full_base(),
|
||||
route_drop_queue_full_high_total: stats.get_me_route_drop_queue_full_high(),
|
||||
d2c_batches_total: stats.get_me_d2c_batches_total(),
|
||||
d2c_batch_frames_total: stats.get_me_d2c_batch_frames_total(),
|
||||
d2c_batch_bytes_total: stats.get_me_d2c_batch_bytes_total(),
|
||||
d2c_flush_reason_queue_drain_total: stats.get_me_d2c_flush_reason_queue_drain_total(),
|
||||
d2c_flush_reason_batch_frames_total: stats.get_me_d2c_flush_reason_batch_frames_total(),
|
||||
d2c_flush_reason_batch_bytes_total: stats.get_me_d2c_flush_reason_batch_bytes_total(),
|
||||
d2c_flush_reason_max_delay_total: stats.get_me_d2c_flush_reason_max_delay_total(),
|
||||
d2c_flush_reason_ack_immediate_total: stats
|
||||
.get_me_d2c_flush_reason_ack_immediate_total(),
|
||||
d2c_flush_reason_close_total: stats.get_me_d2c_flush_reason_close_total(),
|
||||
d2c_data_frames_total: stats.get_me_d2c_data_frames_total(),
|
||||
d2c_ack_frames_total: stats.get_me_d2c_ack_frames_total(),
|
||||
d2c_payload_bytes_total: stats.get_me_d2c_payload_bytes_total(),
|
||||
d2c_write_mode_coalesced_total: stats.get_me_d2c_write_mode_coalesced_total(),
|
||||
d2c_write_mode_split_total: stats.get_me_d2c_write_mode_split_total(),
|
||||
d2c_quota_reject_pre_write_total: stats.get_me_d2c_quota_reject_pre_write_total(),
|
||||
d2c_quota_reject_post_write_total: stats.get_me_d2c_quota_reject_post_write_total(),
|
||||
d2c_frame_buf_shrink_total: stats.get_me_d2c_frame_buf_shrink_total(),
|
||||
d2c_frame_buf_shrink_bytes_total: stats.get_me_d2c_frame_buf_shrink_bytes_total(),
|
||||
socks_kdf_strict_reject_total: stats.get_me_socks_kdf_strict_reject(),
|
||||
socks_kdf_compat_fallback_total: stats.get_me_socks_kdf_compat_fallback(),
|
||||
endpoint_quarantine_total: stats.get_me_endpoint_quarantine_total(),
|
||||
|
|
|
|||
|
|
@ -35,11 +35,14 @@ pub(super) struct RuntimeGatesData {
|
|||
pub(super) conditional_cast_enabled: bool,
|
||||
pub(super) me_runtime_ready: bool,
|
||||
pub(super) me2dc_fallback_enabled: bool,
|
||||
pub(super) me2dc_fast_enabled: bool,
|
||||
pub(super) use_middle_proxy: bool,
|
||||
pub(super) route_mode: &'static str,
|
||||
pub(super) reroute_active: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) reroute_to_direct_at_epoch_secs: Option<u64>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) reroute_reason: Option<&'static str>,
|
||||
pub(super) startup_status: &'static str,
|
||||
pub(super) startup_stage: String,
|
||||
pub(super) startup_progress_pct: f64,
|
||||
|
|
@ -47,6 +50,7 @@ pub(super) struct RuntimeGatesData {
|
|||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct EffectiveTimeoutLimits {
|
||||
pub(super) client_first_byte_idle_secs: u64,
|
||||
pub(super) client_handshake_secs: u64,
|
||||
pub(super) tg_connect_secs: u64,
|
||||
pub(super) client_keepalive_secs: u64,
|
||||
|
|
@ -86,6 +90,7 @@ pub(super) struct EffectiveMiddleProxyLimits {
|
|||
pub(super) writer_pick_mode: &'static str,
|
||||
pub(super) writer_pick_sample_size: u8,
|
||||
pub(super) me2dc_fallback: bool,
|
||||
pub(super) me2dc_fast: bool,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
|
|
@ -95,6 +100,11 @@ pub(super) struct EffectiveUserIpPolicyLimits {
|
|||
pub(super) window_secs: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct EffectiveUserTcpPolicyLimits {
|
||||
pub(super) global_each: usize,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct EffectiveLimitsData {
|
||||
pub(super) update_every_secs: u64,
|
||||
|
|
@ -104,6 +114,7 @@ pub(super) struct EffectiveLimitsData {
|
|||
pub(super) upstream: EffectiveUpstreamLimits,
|
||||
pub(super) middle_proxy: EffectiveMiddleProxyLimits,
|
||||
pub(super) user_ip_policy: EffectiveUserIpPolicyLimits,
|
||||
pub(super) user_tcp_policy: EffectiveUserTcpPolicyLimits,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
|
|
@ -169,6 +180,8 @@ pub(super) async fn build_runtime_gates_data(
|
|||
let startup_summary = build_runtime_startup_summary(shared).await;
|
||||
let route_state = shared.route_runtime.snapshot();
|
||||
let route_mode = route_state.mode.as_str();
|
||||
let fast_fallback_enabled =
|
||||
cfg.general.use_middle_proxy && cfg.general.me2dc_fallback && cfg.general.me2dc_fast;
|
||||
let reroute_active = cfg.general.use_middle_proxy
|
||||
&& cfg.general.me2dc_fallback
|
||||
&& matches!(route_state.mode, RelayRouteMode::Direct);
|
||||
|
|
@ -177,6 +190,15 @@ pub(super) async fn build_runtime_gates_data(
|
|||
} else {
|
||||
None
|
||||
};
|
||||
let reroute_reason = if reroute_active {
|
||||
if fast_fallback_enabled {
|
||||
Some("fast_not_ready_fallback")
|
||||
} else {
|
||||
Some("strict_grace_fallback")
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let me_runtime_ready = if !cfg.general.use_middle_proxy {
|
||||
true
|
||||
} else {
|
||||
|
|
@ -194,10 +216,12 @@ pub(super) async fn build_runtime_gates_data(
|
|||
conditional_cast_enabled: cfg.general.use_middle_proxy,
|
||||
me_runtime_ready,
|
||||
me2dc_fallback_enabled: cfg.general.me2dc_fallback,
|
||||
me2dc_fast_enabled: fast_fallback_enabled,
|
||||
use_middle_proxy: cfg.general.use_middle_proxy,
|
||||
route_mode,
|
||||
reroute_active,
|
||||
reroute_to_direct_at_epoch_secs,
|
||||
reroute_reason,
|
||||
startup_status: startup_summary.status,
|
||||
startup_stage: startup_summary.stage,
|
||||
startup_progress_pct: startup_summary.progress_pct,
|
||||
|
|
@ -210,8 +234,9 @@ pub(super) fn build_limits_effective_data(cfg: &ProxyConfig) -> EffectiveLimitsD
|
|||
me_reinit_every_secs: cfg.general.effective_me_reinit_every_secs(),
|
||||
me_pool_force_close_secs: cfg.general.effective_me_pool_force_close_secs(),
|
||||
timeouts: EffectiveTimeoutLimits {
|
||||
client_first_byte_idle_secs: cfg.timeouts.client_first_byte_idle_secs,
|
||||
client_handshake_secs: cfg.timeouts.client_handshake,
|
||||
tg_connect_secs: cfg.timeouts.tg_connect,
|
||||
tg_connect_secs: cfg.general.tg_connect,
|
||||
client_keepalive_secs: cfg.timeouts.client_keepalive,
|
||||
client_ack_secs: cfg.timeouts.client_ack,
|
||||
me_one_retry: cfg.timeouts.me_one_retry,
|
||||
|
|
@ -263,12 +288,16 @@ pub(super) fn build_limits_effective_data(cfg: &ProxyConfig) -> EffectiveLimitsD
|
|||
writer_pick_mode: me_writer_pick_mode_label(cfg.general.me_writer_pick_mode),
|
||||
writer_pick_sample_size: cfg.general.me_writer_pick_sample_size,
|
||||
me2dc_fallback: cfg.general.me2dc_fallback,
|
||||
me2dc_fast: cfg.general.me2dc_fast,
|
||||
},
|
||||
user_ip_policy: EffectiveUserIpPolicyLimits {
|
||||
global_each: cfg.access.user_max_unique_ips_global_each,
|
||||
mode: user_max_unique_ips_mode_label(cfg.access.user_max_unique_ips_mode),
|
||||
window_secs: cfg.access.user_max_unique_ips_window_secs,
|
||||
},
|
||||
user_tcp_policy: EffectiveUserTcpPolicyLimits {
|
||||
global_each: cfg.access.user_max_tcp_conns_global_each,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
117
src/api/users.rs
117
src/api/users.rs
|
|
@ -136,6 +136,7 @@ pub(super) async fn create_user(
|
|||
&shared.ip_tracker,
|
||||
detected_ip_v4,
|
||||
detected_ip_v6,
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
let user = users
|
||||
|
|
@ -143,8 +144,16 @@ pub(super) async fn create_user(
|
|||
.find(|entry| entry.username == body.username)
|
||||
.unwrap_or(UserInfo {
|
||||
username: body.username.clone(),
|
||||
in_runtime: false,
|
||||
user_ad_tag: None,
|
||||
max_tcp_conns: None,
|
||||
max_tcp_conns: cfg
|
||||
.access
|
||||
.user_max_tcp_conns
|
||||
.get(&body.username)
|
||||
.copied()
|
||||
.filter(|limit| *limit > 0)
|
||||
.or((cfg.access.user_max_tcp_conns_global_each > 0)
|
||||
.then_some(cfg.access.user_max_tcp_conns_global_each)),
|
||||
expiration_rfc3339: None,
|
||||
data_quota_bytes: None,
|
||||
max_unique_ips: updated_limit,
|
||||
|
|
@ -236,6 +245,7 @@ pub(super) async fn patch_user(
|
|||
&shared.ip_tracker,
|
||||
detected_ip_v4,
|
||||
detected_ip_v6,
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
let user_info = users
|
||||
|
|
@ -293,6 +303,7 @@ pub(super) async fn rotate_secret(
|
|||
&shared.ip_tracker,
|
||||
detected_ip_v4,
|
||||
detected_ip_v6,
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
let user_info = users
|
||||
|
|
@ -365,6 +376,7 @@ pub(super) async fn users_from_config(
|
|||
ip_tracker: &UserIpTracker,
|
||||
startup_detected_ip_v4: Option<IpAddr>,
|
||||
startup_detected_ip_v6: Option<IpAddr>,
|
||||
runtime_cfg: Option<&ProxyConfig>,
|
||||
) -> Vec<UserInfo> {
|
||||
let mut names = cfg.access.users.keys().cloned().collect::<Vec<_>>();
|
||||
names.sort();
|
||||
|
|
@ -394,8 +406,18 @@ pub(super) async fn users_from_config(
|
|||
tls: Vec::new(),
|
||||
});
|
||||
users.push(UserInfo {
|
||||
in_runtime: runtime_cfg
|
||||
.map(|runtime| runtime.access.users.contains_key(&username))
|
||||
.unwrap_or(false),
|
||||
user_ad_tag: cfg.access.user_ad_tags.get(&username).cloned(),
|
||||
max_tcp_conns: cfg.access.user_max_tcp_conns.get(&username).copied(),
|
||||
max_tcp_conns: cfg
|
||||
.access
|
||||
.user_max_tcp_conns
|
||||
.get(&username)
|
||||
.copied()
|
||||
.filter(|limit| *limit > 0)
|
||||
.or((cfg.access.user_max_tcp_conns_global_each > 0)
|
||||
.then_some(cfg.access.user_max_tcp_conns_global_each)),
|
||||
expiration_rfc3339: cfg
|
||||
.access
|
||||
.user_expirations
|
||||
|
|
@ -572,3 +594,94 @@ fn resolve_tls_domains(cfg: &ProxyConfig) -> Vec<&str> {
|
|||
}
|
||||
domains
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::ip_tracker::UserIpTracker;
|
||||
use crate::stats::Stats;
|
||||
|
||||
#[tokio::test]
|
||||
async fn users_from_config_reports_effective_tcp_limit_with_global_fallback() {
|
||||
let mut cfg = ProxyConfig::default();
|
||||
cfg.access.users.insert(
|
||||
"alice".to_string(),
|
||||
"0123456789abcdef0123456789abcdef".to_string(),
|
||||
);
|
||||
cfg.access.user_max_tcp_conns_global_each = 7;
|
||||
|
||||
let stats = Stats::new();
|
||||
let tracker = UserIpTracker::new();
|
||||
|
||||
let users = users_from_config(&cfg, &stats, &tracker, None, None, None).await;
|
||||
let alice = users
|
||||
.iter()
|
||||
.find(|entry| entry.username == "alice")
|
||||
.expect("alice must be present");
|
||||
assert!(!alice.in_runtime);
|
||||
assert_eq!(alice.max_tcp_conns, Some(7));
|
||||
|
||||
cfg.access.user_max_tcp_conns.insert("alice".to_string(), 5);
|
||||
let users = users_from_config(&cfg, &stats, &tracker, None, None, None).await;
|
||||
let alice = users
|
||||
.iter()
|
||||
.find(|entry| entry.username == "alice")
|
||||
.expect("alice must be present");
|
||||
assert!(!alice.in_runtime);
|
||||
assert_eq!(alice.max_tcp_conns, Some(5));
|
||||
|
||||
cfg.access.user_max_tcp_conns.insert("alice".to_string(), 0);
|
||||
let users = users_from_config(&cfg, &stats, &tracker, None, None, None).await;
|
||||
let alice = users
|
||||
.iter()
|
||||
.find(|entry| entry.username == "alice")
|
||||
.expect("alice must be present");
|
||||
assert!(!alice.in_runtime);
|
||||
assert_eq!(alice.max_tcp_conns, Some(7));
|
||||
|
||||
cfg.access.user_max_tcp_conns_global_each = 0;
|
||||
let users = users_from_config(&cfg, &stats, &tracker, None, None, None).await;
|
||||
let alice = users
|
||||
.iter()
|
||||
.find(|entry| entry.username == "alice")
|
||||
.expect("alice must be present");
|
||||
assert!(!alice.in_runtime);
|
||||
assert_eq!(alice.max_tcp_conns, None);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn users_from_config_marks_runtime_membership_when_snapshot_is_provided() {
|
||||
let mut disk_cfg = ProxyConfig::default();
|
||||
disk_cfg.access.users.insert(
|
||||
"alice".to_string(),
|
||||
"0123456789abcdef0123456789abcdef".to_string(),
|
||||
);
|
||||
disk_cfg.access.users.insert(
|
||||
"bob".to_string(),
|
||||
"fedcba9876543210fedcba9876543210".to_string(),
|
||||
);
|
||||
|
||||
let mut runtime_cfg = ProxyConfig::default();
|
||||
runtime_cfg.access.users.insert(
|
||||
"alice".to_string(),
|
||||
"0123456789abcdef0123456789abcdef".to_string(),
|
||||
);
|
||||
|
||||
let stats = Stats::new();
|
||||
let tracker = UserIpTracker::new();
|
||||
let users =
|
||||
users_from_config(&disk_cfg, &stats, &tracker, None, None, Some(&runtime_cfg)).await;
|
||||
|
||||
let alice = users
|
||||
.iter()
|
||||
.find(|entry| entry.username == "alice")
|
||||
.expect("alice must be present");
|
||||
let bob = users
|
||||
.iter()
|
||||
.find(|entry| entry.username == "bob")
|
||||
.expect("bob must be present");
|
||||
|
||||
assert!(alice.in_runtime);
|
||||
assert!(!bob.in_runtime);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
490
src/cli.rs
490
src/cli.rs
|
|
@ -1,11 +1,270 @@
|
|||
//! CLI commands: --init (fire-and-forget setup)
|
||||
//! CLI commands: --init (fire-and-forget setup), daemon options, subcommands
|
||||
//!
|
||||
//! Subcommands:
|
||||
//! - `start [OPTIONS] [config.toml]` - Start the daemon
|
||||
//! - `stop [--pid-file PATH]` - Stop a running daemon
|
||||
//! - `reload [--pid-file PATH]` - Reload configuration (SIGHUP)
|
||||
//! - `status [--pid-file PATH]` - Check daemon status
|
||||
//! - `run [OPTIONS] [config.toml]` - Run in foreground (default behavior)
|
||||
|
||||
use rand::RngExt;
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::Command;
|
||||
|
||||
#[cfg(unix)]
|
||||
use crate::daemon::{self, DEFAULT_PID_FILE, DaemonOptions};
|
||||
|
||||
/// CLI subcommand to execute.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum Subcommand {
|
||||
/// Run the proxy (default, or explicit `run` subcommand).
|
||||
Run,
|
||||
/// Start as daemon (`start` subcommand).
|
||||
Start,
|
||||
/// Stop a running daemon (`stop` subcommand).
|
||||
Stop,
|
||||
/// Reload configuration (`reload` subcommand).
|
||||
Reload,
|
||||
/// Check daemon status (`status` subcommand).
|
||||
Status,
|
||||
/// Fire-and-forget setup (`--init`).
|
||||
Init,
|
||||
}
|
||||
|
||||
/// Parsed subcommand with its options.
|
||||
#[derive(Debug)]
|
||||
pub struct ParsedCommand {
|
||||
pub subcommand: Subcommand,
|
||||
pub pid_file: PathBuf,
|
||||
pub config_path: String,
|
||||
#[cfg(unix)]
|
||||
pub daemon_opts: DaemonOptions,
|
||||
pub init_opts: Option<InitOptions>,
|
||||
}
|
||||
|
||||
impl Default for ParsedCommand {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
subcommand: Subcommand::Run,
|
||||
#[cfg(unix)]
|
||||
pid_file: PathBuf::from(DEFAULT_PID_FILE),
|
||||
#[cfg(not(unix))]
|
||||
pid_file: PathBuf::from("/var/run/telemt.pid"),
|
||||
config_path: "config.toml".to_string(),
|
||||
#[cfg(unix)]
|
||||
daemon_opts: DaemonOptions::default(),
|
||||
init_opts: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse CLI arguments into a command structure.
|
||||
pub fn parse_command(args: &[String]) -> ParsedCommand {
|
||||
let mut cmd = ParsedCommand::default();
|
||||
|
||||
// Check for --init first (legacy form)
|
||||
if args.iter().any(|a| a == "--init") {
|
||||
cmd.subcommand = Subcommand::Init;
|
||||
cmd.init_opts = parse_init_args(args);
|
||||
return cmd;
|
||||
}
|
||||
|
||||
// Check for subcommand as first argument
|
||||
if let Some(first) = args.first() {
|
||||
match first.as_str() {
|
||||
"start" => {
|
||||
cmd.subcommand = Subcommand::Start;
|
||||
#[cfg(unix)]
|
||||
{
|
||||
cmd.daemon_opts = parse_daemon_args(args);
|
||||
// Force daemonize for start command
|
||||
cmd.daemon_opts.daemonize = true;
|
||||
}
|
||||
}
|
||||
"stop" => {
|
||||
cmd.subcommand = Subcommand::Stop;
|
||||
}
|
||||
"reload" => {
|
||||
cmd.subcommand = Subcommand::Reload;
|
||||
}
|
||||
"status" => {
|
||||
cmd.subcommand = Subcommand::Status;
|
||||
}
|
||||
"run" => {
|
||||
cmd.subcommand = Subcommand::Run;
|
||||
#[cfg(unix)]
|
||||
{
|
||||
cmd.daemon_opts = parse_daemon_args(args);
|
||||
}
|
||||
}
|
||||
_ => {
|
||||
// No subcommand, default to Run
|
||||
#[cfg(unix)]
|
||||
{
|
||||
cmd.daemon_opts = parse_daemon_args(args);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Parse remaining options
|
||||
let mut i = 0;
|
||||
while i < args.len() {
|
||||
match args[i].as_str() {
|
||||
// Skip subcommand names
|
||||
"start" | "stop" | "reload" | "status" | "run" => {}
|
||||
// PID file option (for stop/reload/status)
|
||||
"--pid-file" => {
|
||||
i += 1;
|
||||
if i < args.len() {
|
||||
cmd.pid_file = PathBuf::from(&args[i]);
|
||||
#[cfg(unix)]
|
||||
{
|
||||
cmd.daemon_opts.pid_file = Some(cmd.pid_file.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--pid-file=") => {
|
||||
cmd.pid_file = PathBuf::from(s.trim_start_matches("--pid-file="));
|
||||
#[cfg(unix)]
|
||||
{
|
||||
cmd.daemon_opts.pid_file = Some(cmd.pid_file.clone());
|
||||
}
|
||||
}
|
||||
// Config path (positional, non-flag argument)
|
||||
s if !s.starts_with('-') => {
|
||||
cmd.config_path = s.to_string();
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
|
||||
cmd
|
||||
}
|
||||
|
||||
/// Execute a subcommand that doesn't require starting the server.
|
||||
/// Returns `Some(exit_code)` if the command was handled, `None` if server should start.
|
||||
#[cfg(unix)]
|
||||
pub fn execute_subcommand(cmd: &ParsedCommand) -> Option<i32> {
|
||||
match cmd.subcommand {
|
||||
Subcommand::Stop => Some(cmd_stop(&cmd.pid_file)),
|
||||
Subcommand::Reload => Some(cmd_reload(&cmd.pid_file)),
|
||||
Subcommand::Status => Some(cmd_status(&cmd.pid_file)),
|
||||
Subcommand::Init => {
|
||||
if let Some(opts) = cmd.init_opts.clone() {
|
||||
match run_init(opts) {
|
||||
Ok(()) => Some(0),
|
||||
Err(e) => {
|
||||
eprintln!("[telemt] Init failed: {}", e);
|
||||
Some(1)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
Some(1)
|
||||
}
|
||||
}
|
||||
// Run and Start need the server
|
||||
Subcommand::Run | Subcommand::Start => None,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
pub fn execute_subcommand(cmd: &ParsedCommand) -> Option<i32> {
|
||||
match cmd.subcommand {
|
||||
Subcommand::Stop | Subcommand::Reload | Subcommand::Status => {
|
||||
eprintln!("[telemt] Subcommand not supported on this platform");
|
||||
Some(1)
|
||||
}
|
||||
Subcommand::Init => {
|
||||
if let Some(opts) = cmd.init_opts.clone() {
|
||||
match run_init(opts) {
|
||||
Ok(()) => Some(0),
|
||||
Err(e) => {
|
||||
eprintln!("[telemt] Init failed: {}", e);
|
||||
Some(1)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
Some(1)
|
||||
}
|
||||
}
|
||||
Subcommand::Run | Subcommand::Start => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Stop command: send SIGTERM to the running daemon.
|
||||
#[cfg(unix)]
|
||||
fn cmd_stop(pid_file: &Path) -> i32 {
|
||||
use nix::sys::signal::Signal;
|
||||
|
||||
println!("Stopping telemt daemon...");
|
||||
|
||||
match daemon::signal_pid_file(pid_file, Signal::SIGTERM) {
|
||||
Ok(()) => {
|
||||
println!("Stop signal sent successfully");
|
||||
|
||||
// Wait for process to exit (up to 10 seconds)
|
||||
for _ in 0..20 {
|
||||
std::thread::sleep(std::time::Duration::from_millis(500));
|
||||
if let daemon::DaemonStatus::NotRunning = daemon::check_status(pid_file) {
|
||||
println!("Daemon stopped");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
println!("Daemon may still be shutting down");
|
||||
0
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("Failed to stop daemon: {}", e);
|
||||
1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Reload command: send SIGHUP to trigger config reload.
|
||||
#[cfg(unix)]
|
||||
fn cmd_reload(pid_file: &Path) -> i32 {
|
||||
use nix::sys::signal::Signal;
|
||||
|
||||
println!("Reloading telemt configuration...");
|
||||
|
||||
match daemon::signal_pid_file(pid_file, Signal::SIGHUP) {
|
||||
Ok(()) => {
|
||||
println!("Reload signal sent successfully");
|
||||
0
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("Failed to reload daemon: {}", e);
|
||||
1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Status command: check if daemon is running.
|
||||
#[cfg(unix)]
|
||||
fn cmd_status(pid_file: &Path) -> i32 {
|
||||
match daemon::check_status(pid_file) {
|
||||
daemon::DaemonStatus::Running(pid) => {
|
||||
println!("telemt is running (pid {})", pid);
|
||||
0
|
||||
}
|
||||
daemon::DaemonStatus::Stale(pid) => {
|
||||
println!("telemt is not running (stale pid file, was pid {})", pid);
|
||||
// Clean up stale PID file
|
||||
let _ = std::fs::remove_file(pid_file);
|
||||
1
|
||||
}
|
||||
daemon::DaemonStatus::NotRunning => {
|
||||
println!("telemt is not running");
|
||||
1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Options for the init command
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct InitOptions {
|
||||
pub port: u16,
|
||||
pub domain: String,
|
||||
|
|
@ -15,6 +274,64 @@ pub struct InitOptions {
|
|||
pub no_start: bool,
|
||||
}
|
||||
|
||||
/// Parse daemon-related options from CLI args.
|
||||
#[cfg(unix)]
|
||||
pub fn parse_daemon_args(args: &[String]) -> DaemonOptions {
|
||||
let mut opts = DaemonOptions::default();
|
||||
let mut i = 0;
|
||||
|
||||
while i < args.len() {
|
||||
match args[i].as_str() {
|
||||
"--daemon" | "-d" => {
|
||||
opts.daemonize = true;
|
||||
}
|
||||
"--foreground" | "-f" => {
|
||||
opts.foreground = true;
|
||||
}
|
||||
"--pid-file" => {
|
||||
i += 1;
|
||||
if i < args.len() {
|
||||
opts.pid_file = Some(PathBuf::from(&args[i]));
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--pid-file=") => {
|
||||
opts.pid_file = Some(PathBuf::from(s.trim_start_matches("--pid-file=")));
|
||||
}
|
||||
"--run-as-user" => {
|
||||
i += 1;
|
||||
if i < args.len() {
|
||||
opts.user = Some(args[i].clone());
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--run-as-user=") => {
|
||||
opts.user = Some(s.trim_start_matches("--run-as-user=").to_string());
|
||||
}
|
||||
"--run-as-group" => {
|
||||
i += 1;
|
||||
if i < args.len() {
|
||||
opts.group = Some(args[i].clone());
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--run-as-group=") => {
|
||||
opts.group = Some(s.trim_start_matches("--run-as-group=").to_string());
|
||||
}
|
||||
"--working-dir" => {
|
||||
i += 1;
|
||||
if i < args.len() {
|
||||
opts.working_dir = Some(PathBuf::from(&args[i]));
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--working-dir=") => {
|
||||
opts.working_dir = Some(PathBuf::from(s.trim_start_matches("--working-dir=")));
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
|
||||
opts
|
||||
}
|
||||
|
||||
impl Default for InitOptions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
|
|
@ -84,10 +401,16 @@ pub fn parse_init_args(args: &[String]) -> Option<InitOptions> {
|
|||
|
||||
/// Run the fire-and-forget setup.
|
||||
pub fn run_init(opts: InitOptions) -> Result<(), Box<dyn std::error::Error>> {
|
||||
use crate::service::{self, InitSystem, ServiceOptions};
|
||||
|
||||
eprintln!("[telemt] Fire-and-forget setup");
|
||||
eprintln!();
|
||||
|
||||
// 1. Generate or validate secret
|
||||
// 1. Detect init system
|
||||
let init_system = service::detect_init_system();
|
||||
eprintln!("[+] Detected init system: {}", init_system);
|
||||
|
||||
// 2. Generate or validate secret
|
||||
let secret = match opts.secret {
|
||||
Some(s) => {
|
||||
if s.len() != 32 || !s.chars().all(|c| c.is_ascii_hexdigit()) {
|
||||
|
|
@ -104,72 +427,126 @@ pub fn run_init(opts: InitOptions) -> Result<(), Box<dyn std::error::Error>> {
|
|||
eprintln!("[+] Port: {}", opts.port);
|
||||
eprintln!("[+] Domain: {}", opts.domain);
|
||||
|
||||
// 2. Create config directory
|
||||
// 3. Create config directory
|
||||
fs::create_dir_all(&opts.config_dir)?;
|
||||
let config_path = opts.config_dir.join("config.toml");
|
||||
|
||||
// 3. Write config
|
||||
// 4. Write config
|
||||
let config_content = generate_config(&opts.username, &secret, opts.port, &opts.domain);
|
||||
fs::write(&config_path, &config_content)?;
|
||||
eprintln!("[+] Config written to {}", config_path.display());
|
||||
|
||||
// 4. Write systemd unit
|
||||
// 5. Generate and write service file
|
||||
let exe_path =
|
||||
std::env::current_exe().unwrap_or_else(|_| PathBuf::from("/usr/local/bin/telemt"));
|
||||
|
||||
let unit_path = Path::new("/etc/systemd/system/telemt.service");
|
||||
let unit_content = generate_systemd_unit(&exe_path, &config_path);
|
||||
let service_opts = ServiceOptions {
|
||||
exe_path: &exe_path,
|
||||
config_path: &config_path,
|
||||
user: None, // Let systemd/init handle user
|
||||
group: None,
|
||||
pid_file: "/var/run/telemt.pid",
|
||||
working_dir: Some("/var/lib/telemt"),
|
||||
description: "Telemt MTProxy - Telegram MTProto Proxy",
|
||||
};
|
||||
|
||||
match fs::write(unit_path, &unit_content) {
|
||||
let service_path = service::service_file_path(init_system);
|
||||
let service_content = service::generate_service_file(init_system, &service_opts);
|
||||
|
||||
// Ensure parent directory exists
|
||||
if let Some(parent) = Path::new(service_path).parent() {
|
||||
let _ = fs::create_dir_all(parent);
|
||||
}
|
||||
|
||||
match fs::write(service_path, &service_content) {
|
||||
Ok(()) => {
|
||||
eprintln!("[+] Systemd unit written to {}", unit_path.display());
|
||||
eprintln!("[+] Service file written to {}", service_path);
|
||||
|
||||
// Make script executable for OpenRC/FreeBSD
|
||||
#[cfg(unix)]
|
||||
if init_system == InitSystem::OpenRC || init_system == InitSystem::FreeBSDRc {
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
let mut perms = fs::metadata(service_path)?.permissions();
|
||||
perms.set_mode(0o755);
|
||||
fs::set_permissions(service_path, perms)?;
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("[!] Cannot write systemd unit (run as root?): {}", e);
|
||||
eprintln!("[!] Manual unit file content:");
|
||||
eprintln!("{}", unit_content);
|
||||
eprintln!("[!] Cannot write service file (run as root?): {}", e);
|
||||
eprintln!("[!] Manual service file content:");
|
||||
eprintln!("{}", service_content);
|
||||
|
||||
// Still print links and config
|
||||
// Still print links and installation instructions
|
||||
eprintln!();
|
||||
eprintln!("{}", service::installation_instructions(init_system));
|
||||
print_links(&opts.username, &secret, opts.port, &opts.domain);
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
// 5. Reload systemd
|
||||
run_cmd("systemctl", &["daemon-reload"]);
|
||||
// 6. Install and enable service based on init system
|
||||
match init_system {
|
||||
InitSystem::Systemd => {
|
||||
run_cmd("systemctl", &["daemon-reload"]);
|
||||
run_cmd("systemctl", &["enable", "telemt.service"]);
|
||||
eprintln!("[+] Service enabled");
|
||||
|
||||
// 6. Enable service
|
||||
run_cmd("systemctl", &["enable", "telemt.service"]);
|
||||
eprintln!("[+] Service enabled");
|
||||
if !opts.no_start {
|
||||
run_cmd("systemctl", &["start", "telemt.service"]);
|
||||
eprintln!("[+] Service started");
|
||||
|
||||
// 7. Start service (unless --no-start)
|
||||
if !opts.no_start {
|
||||
run_cmd("systemctl", &["start", "telemt.service"]);
|
||||
eprintln!("[+] Service started");
|
||||
std::thread::sleep(std::time::Duration::from_secs(1));
|
||||
let status = Command::new("systemctl")
|
||||
.args(["is-active", "telemt.service"])
|
||||
.output();
|
||||
|
||||
// Brief delay then check status
|
||||
std::thread::sleep(std::time::Duration::from_secs(1));
|
||||
let status = Command::new("systemctl")
|
||||
.args(["is-active", "telemt.service"])
|
||||
.output();
|
||||
|
||||
match status {
|
||||
Ok(out) if out.status.success() => {
|
||||
eprintln!("[+] Service is running");
|
||||
}
|
||||
_ => {
|
||||
eprintln!("[!] Service may not have started correctly");
|
||||
eprintln!("[!] Check: journalctl -u telemt.service -n 20");
|
||||
match status {
|
||||
Ok(out) if out.status.success() => {
|
||||
eprintln!("[+] Service is running");
|
||||
}
|
||||
_ => {
|
||||
eprintln!("[!] Service may not have started correctly");
|
||||
eprintln!("[!] Check: journalctl -u telemt.service -n 20");
|
||||
}
|
||||
}
|
||||
} else {
|
||||
eprintln!("[+] Service not started (--no-start)");
|
||||
eprintln!("[+] Start manually: systemctl start telemt.service");
|
||||
}
|
||||
}
|
||||
} else {
|
||||
eprintln!("[+] Service not started (--no-start)");
|
||||
eprintln!("[+] Start manually: systemctl start telemt.service");
|
||||
InitSystem::OpenRC => {
|
||||
run_cmd("rc-update", &["add", "telemt", "default"]);
|
||||
eprintln!("[+] Service enabled");
|
||||
|
||||
if !opts.no_start {
|
||||
run_cmd("rc-service", &["telemt", "start"]);
|
||||
eprintln!("[+] Service started");
|
||||
} else {
|
||||
eprintln!("[+] Service not started (--no-start)");
|
||||
eprintln!("[+] Start manually: rc-service telemt start");
|
||||
}
|
||||
}
|
||||
InitSystem::FreeBSDRc => {
|
||||
run_cmd("sysrc", &["telemt_enable=YES"]);
|
||||
eprintln!("[+] Service enabled");
|
||||
|
||||
if !opts.no_start {
|
||||
run_cmd("service", &["telemt", "start"]);
|
||||
eprintln!("[+] Service started");
|
||||
} else {
|
||||
eprintln!("[+] Service not started (--no-start)");
|
||||
eprintln!("[+] Start manually: service telemt start");
|
||||
}
|
||||
}
|
||||
InitSystem::Unknown => {
|
||||
eprintln!("[!] Unknown init system - service file written but not installed");
|
||||
eprintln!("[!] You may need to install it manually");
|
||||
}
|
||||
}
|
||||
|
||||
eprintln!();
|
||||
|
||||
// 8. Print links
|
||||
// 7. Print links
|
||||
print_links(&opts.username, &secret, opts.port, &opts.domain);
|
||||
|
||||
Ok(())
|
||||
|
|
@ -207,6 +584,7 @@ me_pool_drain_soft_evict_cooldown_ms = 1000
|
|||
me_bind_stale_mode = "never"
|
||||
me_pool_min_fresh_ratio = 0.8
|
||||
me_reinit_drain_timeout_secs = 90
|
||||
tg_connect = 10
|
||||
|
||||
[network]
|
||||
ipv4 = true
|
||||
|
|
@ -232,8 +610,8 @@ ip = "0.0.0.0"
|
|||
ip = "::"
|
||||
|
||||
[timeouts]
|
||||
client_handshake = 15
|
||||
tg_connect = 10
|
||||
client_first_byte_idle_secs = 300
|
||||
client_handshake = 60
|
||||
client_keepalive = 60
|
||||
client_ack = 300
|
||||
|
||||
|
|
@ -245,6 +623,7 @@ fake_cert_len = 2048
|
|||
tls_full_cert_ttl_secs = 90
|
||||
|
||||
[access]
|
||||
user_max_tcp_conns_global_each = 0
|
||||
replay_check_len = 65536
|
||||
replay_window_secs = 120
|
||||
ignore_time_skew = false
|
||||
|
|
@ -264,35 +643,6 @@ weight = 10
|
|||
)
|
||||
}
|
||||
|
||||
fn generate_systemd_unit(exe_path: &Path, config_path: &Path) -> String {
|
||||
format!(
|
||||
r#"[Unit]
|
||||
Description=Telemt MTProxy
|
||||
Documentation=https://github.com/telemt/telemt
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
ExecStart={exe} {config}
|
||||
Restart=always
|
||||
RestartSec=5
|
||||
LimitNOFILE=65535
|
||||
# Security hardening
|
||||
NoNewPrivileges=true
|
||||
ProtectSystem=strict
|
||||
ProtectHome=true
|
||||
ReadWritePaths=/etc/telemt
|
||||
PrivateTmp=true
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
"#,
|
||||
exe = exe_path.display(),
|
||||
config = config_path.display(),
|
||||
)
|
||||
}
|
||||
|
||||
fn run_cmd(cmd: &str, args: &[&str]) {
|
||||
match Command::new(cmd).args(args).output() {
|
||||
Ok(output) => {
|
||||
|
|
|
|||
|
|
@ -29,6 +29,8 @@ const DEFAULT_ME_D2C_FLUSH_BATCH_MAX_FRAMES: usize = 32;
|
|||
const DEFAULT_ME_D2C_FLUSH_BATCH_MAX_BYTES: usize = 128 * 1024;
|
||||
const DEFAULT_ME_D2C_FLUSH_BATCH_MAX_DELAY_US: u64 = 500;
|
||||
const DEFAULT_ME_D2C_ACK_FLUSH_IMMEDIATE: bool = true;
|
||||
const DEFAULT_ME_QUOTA_SOFT_OVERSHOOT_BYTES: u64 = 64 * 1024;
|
||||
const DEFAULT_ME_D2C_FRAME_BUF_SHRINK_THRESHOLD_BYTES: usize = 256 * 1024;
|
||||
const DEFAULT_DIRECT_RELAY_COPY_BUF_C2S_BYTES: usize = 64 * 1024;
|
||||
const DEFAULT_DIRECT_RELAY_COPY_BUF_S2C_BYTES: usize = 256 * 1024;
|
||||
const DEFAULT_ME_WRITER_PICK_SAMPLE_SIZE: u8 = 3;
|
||||
|
|
@ -46,6 +48,10 @@ const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_BUDGET_PER_CORE: u16 = 16;
|
|||
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_COOLDOWN_MS: u64 = 1000;
|
||||
const DEFAULT_USER_MAX_UNIQUE_IPS_WINDOW_SECS: u64 = 30;
|
||||
const DEFAULT_ACCEPT_PERMIT_TIMEOUT_MS: u64 = 250;
|
||||
const DEFAULT_CONNTRACK_CONTROL_ENABLED: bool = true;
|
||||
const DEFAULT_CONNTRACK_PRESSURE_HIGH_WATERMARK_PCT: u8 = 85;
|
||||
const DEFAULT_CONNTRACK_PRESSURE_LOW_WATERMARK_PCT: u8 = 70;
|
||||
const DEFAULT_CONNTRACK_DELETE_BUDGET_PER_SEC: u64 = 4096;
|
||||
const DEFAULT_UPSTREAM_CONNECT_RETRY_ATTEMPTS: u32 = 2;
|
||||
const DEFAULT_UPSTREAM_UNHEALTHY_FAIL_THRESHOLD: u32 = 5;
|
||||
const DEFAULT_UPSTREAM_CONNECT_BUDGET_MS: u64 = 3000;
|
||||
|
|
@ -69,6 +75,22 @@ pub(crate) fn default_tls_fetch_scope() -> String {
|
|||
String::new()
|
||||
}
|
||||
|
||||
pub(crate) fn default_tls_fetch_attempt_timeout_ms() -> u64 {
|
||||
5_000
|
||||
}
|
||||
|
||||
pub(crate) fn default_tls_fetch_total_budget_ms() -> u64 {
|
||||
15_000
|
||||
}
|
||||
|
||||
pub(crate) fn default_tls_fetch_strict_route() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_tls_fetch_profile_cache_ttl_secs() -> u64 {
|
||||
600
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_port() -> u16 {
|
||||
443
|
||||
}
|
||||
|
|
@ -78,7 +100,7 @@ pub(crate) fn default_fake_cert_len() -> usize {
|
|||
}
|
||||
|
||||
pub(crate) fn default_tls_front_dir() -> String {
|
||||
"tlsfront".to_string()
|
||||
"/etc/telemt/tlsfront".to_string()
|
||||
}
|
||||
|
||||
pub(crate) fn default_replay_check_len() -> usize {
|
||||
|
|
@ -92,7 +114,11 @@ pub(crate) fn default_replay_window_secs() -> u64 {
|
|||
}
|
||||
|
||||
pub(crate) fn default_handshake_timeout() -> u64 {
|
||||
30
|
||||
60
|
||||
}
|
||||
|
||||
pub(crate) fn default_client_first_byte_idle_secs() -> u64 {
|
||||
300
|
||||
}
|
||||
|
||||
pub(crate) fn default_relay_idle_policy_v2_enabled() -> bool {
|
||||
|
|
@ -183,14 +209,38 @@ pub(crate) fn default_proxy_protocol_header_timeout_ms() -> u64 {
|
|||
500
|
||||
}
|
||||
|
||||
pub(crate) fn default_proxy_protocol_trusted_cidrs() -> Vec<IpNetwork> {
|
||||
vec!["0.0.0.0/0".parse().unwrap(), "::/0".parse().unwrap()]
|
||||
}
|
||||
|
||||
pub(crate) fn default_server_max_connections() -> u32 {
|
||||
10_000
|
||||
}
|
||||
|
||||
pub(crate) fn default_listen_backlog() -> u32 {
|
||||
1024
|
||||
}
|
||||
|
||||
pub(crate) fn default_accept_permit_timeout_ms() -> u64 {
|
||||
DEFAULT_ACCEPT_PERMIT_TIMEOUT_MS
|
||||
}
|
||||
|
||||
pub(crate) fn default_conntrack_control_enabled() -> bool {
|
||||
DEFAULT_CONNTRACK_CONTROL_ENABLED
|
||||
}
|
||||
|
||||
pub(crate) fn default_conntrack_pressure_high_watermark_pct() -> u8 {
|
||||
DEFAULT_CONNTRACK_PRESSURE_HIGH_WATERMARK_PCT
|
||||
}
|
||||
|
||||
pub(crate) fn default_conntrack_pressure_low_watermark_pct() -> u8 {
|
||||
DEFAULT_CONNTRACK_PRESSURE_LOW_WATERMARK_PCT
|
||||
}
|
||||
|
||||
pub(crate) fn default_conntrack_delete_budget_per_sec() -> u64 {
|
||||
DEFAULT_CONNTRACK_DELETE_BUDGET_PER_SEC
|
||||
}
|
||||
|
||||
pub(crate) fn default_prefer_4() -> u8 {
|
||||
4
|
||||
}
|
||||
|
|
@ -251,6 +301,10 @@ pub(crate) fn default_me2dc_fallback() -> bool {
|
|||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_me2dc_fast() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_keepalive_interval() -> u64 {
|
||||
8
|
||||
}
|
||||
|
|
@ -387,6 +441,14 @@ pub(crate) fn default_me_d2c_ack_flush_immediate() -> bool {
|
|||
DEFAULT_ME_D2C_ACK_FLUSH_IMMEDIATE
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_quota_soft_overshoot_bytes() -> u64 {
|
||||
DEFAULT_ME_QUOTA_SOFT_OVERSHOOT_BYTES
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_d2c_frame_buf_shrink_threshold_bytes() -> usize {
|
||||
DEFAULT_ME_D2C_FRAME_BUF_SHRINK_THRESHOLD_BYTES
|
||||
}
|
||||
|
||||
pub(crate) fn default_direct_relay_copy_buf_c2s_bytes() -> usize {
|
||||
DEFAULT_DIRECT_RELAY_COPY_BUF_C2S_BYTES
|
||||
}
|
||||
|
|
@ -496,7 +558,7 @@ pub(crate) fn default_beobachten_flush_secs() -> u64 {
|
|||
}
|
||||
|
||||
pub(crate) fn default_beobachten_file() -> String {
|
||||
"cache/beobachten.txt".to_string()
|
||||
"/etc/telemt/beobachten.txt".to_string()
|
||||
}
|
||||
|
||||
pub(crate) fn default_tls_new_session_tickets() -> u8 {
|
||||
|
|
@ -543,6 +605,20 @@ pub(crate) fn default_mask_shape_above_cap_blur_max_bytes() -> usize {
|
|||
512
|
||||
}
|
||||
|
||||
#[cfg(not(test))]
|
||||
pub(crate) fn default_mask_relay_max_bytes() -> usize {
|
||||
5 * 1024 * 1024
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn default_mask_relay_max_bytes() -> usize {
|
||||
32 * 1024
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_classifier_prefetch_timeout_ms() -> u64 {
|
||||
5
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_timing_normalization_enabled() -> bool {
|
||||
false
|
||||
}
|
||||
|
|
@ -755,6 +831,10 @@ pub(crate) fn default_user_max_unique_ips_window_secs() -> u64 {
|
|||
DEFAULT_USER_MAX_UNIQUE_IPS_WINDOW_SECS
|
||||
}
|
||||
|
||||
pub(crate) fn default_user_max_tcp_conns_global_each() -> usize {
|
||||
0
|
||||
}
|
||||
|
||||
pub(crate) fn default_user_max_unique_ips_global_each() -> usize {
|
||||
0
|
||||
}
|
||||
|
|
|
|||
|
|
@ -106,6 +106,8 @@ pub struct HotFields {
|
|||
pub me_d2c_flush_batch_max_bytes: usize,
|
||||
pub me_d2c_flush_batch_max_delay_us: u64,
|
||||
pub me_d2c_ack_flush_immediate: bool,
|
||||
pub me_quota_soft_overshoot_bytes: u64,
|
||||
pub me_d2c_frame_buf_shrink_threshold_bytes: usize,
|
||||
pub direct_relay_copy_buf_c2s_bytes: usize,
|
||||
pub direct_relay_copy_buf_s2c_bytes: usize,
|
||||
pub me_health_interval_ms_unhealthy: u64,
|
||||
|
|
@ -115,6 +117,7 @@ pub struct HotFields {
|
|||
pub users: std::collections::HashMap<String, String>,
|
||||
pub user_ad_tags: std::collections::HashMap<String, String>,
|
||||
pub user_max_tcp_conns: std::collections::HashMap<String, usize>,
|
||||
pub user_max_tcp_conns_global_each: usize,
|
||||
pub user_expirations: std::collections::HashMap<String, chrono::DateTime<chrono::Utc>>,
|
||||
pub user_data_quota: std::collections::HashMap<String, u64>,
|
||||
pub user_max_unique_ips: std::collections::HashMap<String, usize>,
|
||||
|
|
@ -225,6 +228,10 @@ impl HotFields {
|
|||
me_d2c_flush_batch_max_bytes: cfg.general.me_d2c_flush_batch_max_bytes,
|
||||
me_d2c_flush_batch_max_delay_us: cfg.general.me_d2c_flush_batch_max_delay_us,
|
||||
me_d2c_ack_flush_immediate: cfg.general.me_d2c_ack_flush_immediate,
|
||||
me_quota_soft_overshoot_bytes: cfg.general.me_quota_soft_overshoot_bytes,
|
||||
me_d2c_frame_buf_shrink_threshold_bytes: cfg
|
||||
.general
|
||||
.me_d2c_frame_buf_shrink_threshold_bytes,
|
||||
direct_relay_copy_buf_c2s_bytes: cfg.general.direct_relay_copy_buf_c2s_bytes,
|
||||
direct_relay_copy_buf_s2c_bytes: cfg.general.direct_relay_copy_buf_s2c_bytes,
|
||||
me_health_interval_ms_unhealthy: cfg.general.me_health_interval_ms_unhealthy,
|
||||
|
|
@ -234,6 +241,7 @@ impl HotFields {
|
|||
users: cfg.access.users.clone(),
|
||||
user_ad_tags: cfg.access.user_ad_tags.clone(),
|
||||
user_max_tcp_conns: cfg.access.user_max_tcp_conns.clone(),
|
||||
user_max_tcp_conns_global_each: cfg.access.user_max_tcp_conns_global_each,
|
||||
user_expirations: cfg.access.user_expirations.clone(),
|
||||
user_data_quota: cfg.access.user_data_quota.clone(),
|
||||
user_max_unique_ips: cfg.access.user_max_unique_ips.clone(),
|
||||
|
|
@ -511,6 +519,9 @@ fn overlay_hot_fields(old: &ProxyConfig, new: &ProxyConfig) -> ProxyConfig {
|
|||
cfg.general.me_d2c_flush_batch_max_bytes = new.general.me_d2c_flush_batch_max_bytes;
|
||||
cfg.general.me_d2c_flush_batch_max_delay_us = new.general.me_d2c_flush_batch_max_delay_us;
|
||||
cfg.general.me_d2c_ack_flush_immediate = new.general.me_d2c_ack_flush_immediate;
|
||||
cfg.general.me_quota_soft_overshoot_bytes = new.general.me_quota_soft_overshoot_bytes;
|
||||
cfg.general.me_d2c_frame_buf_shrink_threshold_bytes =
|
||||
new.general.me_d2c_frame_buf_shrink_threshold_bytes;
|
||||
cfg.general.direct_relay_copy_buf_c2s_bytes = new.general.direct_relay_copy_buf_c2s_bytes;
|
||||
cfg.general.direct_relay_copy_buf_s2c_bytes = new.general.direct_relay_copy_buf_s2c_bytes;
|
||||
cfg.general.me_health_interval_ms_unhealthy = new.general.me_health_interval_ms_unhealthy;
|
||||
|
|
@ -521,6 +532,7 @@ fn overlay_hot_fields(old: &ProxyConfig, new: &ProxyConfig) -> ProxyConfig {
|
|||
cfg.access.users = new.access.users.clone();
|
||||
cfg.access.user_ad_tags = new.access.user_ad_tags.clone();
|
||||
cfg.access.user_max_tcp_conns = new.access.user_max_tcp_conns.clone();
|
||||
cfg.access.user_max_tcp_conns_global_each = new.access.user_max_tcp_conns_global_each;
|
||||
cfg.access.user_expirations = new.access.user_expirations.clone();
|
||||
cfg.access.user_data_quota = new.access.user_data_quota.clone();
|
||||
cfg.access.user_max_unique_ips = new.access.user_max_unique_ips.clone();
|
||||
|
|
@ -561,6 +573,7 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
|
|||
}
|
||||
if old.server.proxy_protocol != new.server.proxy_protocol
|
||||
|| !listeners_equal(&old.server.listeners, &new.server.listeners)
|
||||
|| old.server.listen_backlog != new.server.listen_backlog
|
||||
|| old.server.listen_addr_ipv4 != new.server.listen_addr_ipv4
|
||||
|| old.server.listen_addr_ipv6 != new.server.listen_addr_ipv6
|
||||
|| old.server.listen_tcp != new.server.listen_tcp
|
||||
|
|
@ -593,6 +606,9 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
|
|||
|| old.censorship.mask_shape_above_cap_blur != new.censorship.mask_shape_above_cap_blur
|
||||
|| old.censorship.mask_shape_above_cap_blur_max_bytes
|
||||
!= new.censorship.mask_shape_above_cap_blur_max_bytes
|
||||
|| old.censorship.mask_relay_max_bytes != new.censorship.mask_relay_max_bytes
|
||||
|| old.censorship.mask_classifier_prefetch_timeout_ms
|
||||
!= new.censorship.mask_classifier_prefetch_timeout_ms
|
||||
|| old.censorship.mask_timing_normalization_enabled
|
||||
!= new.censorship.mask_timing_normalization_enabled
|
||||
|| old.censorship.mask_timing_normalization_floor_ms
|
||||
|
|
@ -639,6 +655,9 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
|
|||
}
|
||||
if old.general.me_route_no_writer_mode != new.general.me_route_no_writer_mode
|
||||
|| old.general.me_route_no_writer_wait_ms != new.general.me_route_no_writer_wait_ms
|
||||
|| old.general.me_route_hybrid_max_wait_ms != new.general.me_route_hybrid_max_wait_ms
|
||||
|| old.general.me_route_blocking_send_timeout_ms
|
||||
!= new.general.me_route_blocking_send_timeout_ms
|
||||
|| old.general.me_route_inline_recovery_attempts
|
||||
!= new.general.me_route_inline_recovery_attempts
|
||||
|| old.general.me_route_inline_recovery_wait_ms
|
||||
|
|
@ -657,9 +676,11 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
|
|||
warned = true;
|
||||
warn!("config reload: general.me_init_retry_attempts changed; restart required");
|
||||
}
|
||||
if old.general.me2dc_fallback != new.general.me2dc_fallback {
|
||||
if old.general.me2dc_fallback != new.general.me2dc_fallback
|
||||
|| old.general.me2dc_fast != new.general.me2dc_fast
|
||||
{
|
||||
warned = true;
|
||||
warn!("config reload: general.me2dc_fallback changed; restart required");
|
||||
warn!("config reload: general.me2dc_fallback/me2dc_fast changed; restart required");
|
||||
}
|
||||
if old.general.proxy_config_v4_cache_path != new.general.proxy_config_v4_cache_path
|
||||
|| old.general.proxy_config_v6_cache_path != new.general.proxy_config_v6_cache_path
|
||||
|
|
@ -678,6 +699,7 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
|
|||
if old.general.upstream_connect_retry_attempts != new.general.upstream_connect_retry_attempts
|
||||
|| old.general.upstream_connect_retry_backoff_ms
|
||||
!= new.general.upstream_connect_retry_backoff_ms
|
||||
|| old.general.tg_connect != new.general.tg_connect
|
||||
|| old.general.upstream_unhealthy_fail_threshold
|
||||
!= new.general.upstream_unhealthy_fail_threshold
|
||||
|| old.general.upstream_connect_failfast_hard_errors
|
||||
|
|
@ -1030,15 +1052,20 @@ fn log_changes(
|
|||
|| old_hot.me_d2c_flush_batch_max_bytes != new_hot.me_d2c_flush_batch_max_bytes
|
||||
|| old_hot.me_d2c_flush_batch_max_delay_us != new_hot.me_d2c_flush_batch_max_delay_us
|
||||
|| old_hot.me_d2c_ack_flush_immediate != new_hot.me_d2c_ack_flush_immediate
|
||||
|| old_hot.me_quota_soft_overshoot_bytes != new_hot.me_quota_soft_overshoot_bytes
|
||||
|| old_hot.me_d2c_frame_buf_shrink_threshold_bytes
|
||||
!= new_hot.me_d2c_frame_buf_shrink_threshold_bytes
|
||||
|| old_hot.direct_relay_copy_buf_c2s_bytes != new_hot.direct_relay_copy_buf_c2s_bytes
|
||||
|| old_hot.direct_relay_copy_buf_s2c_bytes != new_hot.direct_relay_copy_buf_s2c_bytes
|
||||
{
|
||||
info!(
|
||||
"config reload: relay_tuning: me_d2c_frames={} me_d2c_bytes={} me_d2c_delay_us={} me_ack_flush_immediate={} direct_buf_c2s={} direct_buf_s2c={}",
|
||||
"config reload: relay_tuning: me_d2c_frames={} me_d2c_bytes={} me_d2c_delay_us={} me_ack_flush_immediate={} me_quota_soft_overshoot_bytes={} me_d2c_frame_buf_shrink_threshold_bytes={} direct_buf_c2s={} direct_buf_s2c={}",
|
||||
new_hot.me_d2c_flush_batch_max_frames,
|
||||
new_hot.me_d2c_flush_batch_max_bytes,
|
||||
new_hot.me_d2c_flush_batch_max_delay_us,
|
||||
new_hot.me_d2c_ack_flush_immediate,
|
||||
new_hot.me_quota_soft_overshoot_bytes,
|
||||
new_hot.me_d2c_frame_buf_shrink_threshold_bytes,
|
||||
new_hot.direct_relay_copy_buf_c2s_bytes,
|
||||
new_hot.direct_relay_copy_buf_s2c_bytes,
|
||||
);
|
||||
|
|
@ -1121,6 +1148,12 @@ fn log_changes(
|
|||
new_hot.user_max_tcp_conns.len()
|
||||
);
|
||||
}
|
||||
if old_hot.user_max_tcp_conns_global_each != new_hot.user_max_tcp_conns_global_each {
|
||||
info!(
|
||||
"config reload: user_max_tcp_conns policy global_each={}",
|
||||
new_hot.user_max_tcp_conns_global_each
|
||||
);
|
||||
}
|
||||
if old_hot.user_expirations != new_hot.user_expirations {
|
||||
info!(
|
||||
"config reload: user_expirations updated ({} entries)",
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
#![allow(deprecated)]
|
||||
|
||||
use std::collections::{BTreeSet, HashMap};
|
||||
use std::collections::{BTreeSet, HashMap, HashSet};
|
||||
use std::hash::{DefaultHasher, Hash, Hasher};
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::path::{Path, PathBuf};
|
||||
|
|
@ -346,6 +346,12 @@ impl ProxyConfig {
|
|||
));
|
||||
}
|
||||
|
||||
if config.general.tg_connect == 0 {
|
||||
return Err(ProxyError::Config(
|
||||
"general.tg_connect must be > 0".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.general.upstream_unhealthy_fail_threshold == 0 {
|
||||
return Err(ProxyError::Config(
|
||||
"general.upstream_unhealthy_fail_threshold must be > 0".to_string(),
|
||||
|
|
@ -430,6 +436,24 @@ impl ProxyConfig {
|
|||
));
|
||||
}
|
||||
|
||||
if config.censorship.mask_relay_max_bytes == 0 {
|
||||
return Err(ProxyError::Config(
|
||||
"censorship.mask_relay_max_bytes must be > 0".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.censorship.mask_relay_max_bytes > 67_108_864 {
|
||||
return Err(ProxyError::Config(
|
||||
"censorship.mask_relay_max_bytes must be <= 67108864".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if !(5..=50).contains(&config.censorship.mask_classifier_prefetch_timeout_ms) {
|
||||
return Err(ProxyError::Config(
|
||||
"censorship.mask_classifier_prefetch_timeout_ms must be within [5, 50]".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.censorship.mask_timing_normalization_ceiling_ms
|
||||
< config.censorship.mask_timing_normalization_floor_ms
|
||||
{
|
||||
|
|
@ -533,6 +557,21 @@ impl ProxyConfig {
|
|||
));
|
||||
}
|
||||
|
||||
if config.general.me_quota_soft_overshoot_bytes > 16 * 1024 * 1024 {
|
||||
return Err(ProxyError::Config(
|
||||
"general.me_quota_soft_overshoot_bytes must be within [0, 16777216]".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if !(4096..=16 * 1024 * 1024)
|
||||
.contains(&config.general.me_d2c_frame_buf_shrink_threshold_bytes)
|
||||
{
|
||||
return Err(ProxyError::Config(
|
||||
"general.me_d2c_frame_buf_shrink_threshold_bytes must be within [4096, 16777216]"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if !(4096..=1024 * 1024).contains(&config.general.direct_relay_copy_buf_c2s_bytes) {
|
||||
return Err(ProxyError::Config(
|
||||
"general.direct_relay_copy_buf_c2s_bytes must be within [4096, 1048576]"
|
||||
|
|
@ -883,6 +922,43 @@ impl ProxyConfig {
|
|||
));
|
||||
}
|
||||
|
||||
if config.server.conntrack_control.pressure_high_watermark_pct == 0
|
||||
|| config.server.conntrack_control.pressure_high_watermark_pct > 100
|
||||
{
|
||||
return Err(ProxyError::Config(
|
||||
"server.conntrack_control.pressure_high_watermark_pct must be within [1, 100]"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.server.conntrack_control.pressure_low_watermark_pct
|
||||
>= config.server.conntrack_control.pressure_high_watermark_pct
|
||||
{
|
||||
return Err(ProxyError::Config(
|
||||
"server.conntrack_control.pressure_low_watermark_pct must be < pressure_high_watermark_pct"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.server.conntrack_control.delete_budget_per_sec == 0 {
|
||||
return Err(ProxyError::Config(
|
||||
"server.conntrack_control.delete_budget_per_sec must be > 0".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if matches!(config.server.conntrack_control.mode, ConntrackMode::Hybrid)
|
||||
&& config
|
||||
.server
|
||||
.conntrack_control
|
||||
.hybrid_listener_ips
|
||||
.is_empty()
|
||||
{
|
||||
return Err(ProxyError::Config(
|
||||
"server.conntrack_control.hybrid_listener_ips must be non-empty in mode=hybrid"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.general.effective_me_pool_force_close_secs() > 0
|
||||
&& config.general.effective_me_pool_force_close_secs()
|
||||
< config.general.me_pool_drain_ttl_secs
|
||||
|
|
@ -944,6 +1020,28 @@ impl ProxyConfig {
|
|||
// Normalize optional TLS fetch scope: whitespace-only values disable scoped routing.
|
||||
config.censorship.tls_fetch_scope = config.censorship.tls_fetch_scope.trim().to_string();
|
||||
|
||||
if config.censorship.tls_fetch.profiles.is_empty() {
|
||||
config.censorship.tls_fetch.profiles = TlsFetchConfig::default().profiles;
|
||||
} else {
|
||||
let mut seen = HashSet::new();
|
||||
config
|
||||
.censorship
|
||||
.tls_fetch
|
||||
.profiles
|
||||
.retain(|profile| seen.insert(*profile));
|
||||
}
|
||||
|
||||
if config.censorship.tls_fetch.attempt_timeout_ms == 0 {
|
||||
return Err(ProxyError::Config(
|
||||
"censorship.tls_fetch.attempt_timeout_ms must be > 0".to_string(),
|
||||
));
|
||||
}
|
||||
if config.censorship.tls_fetch.total_budget_ms == 0 {
|
||||
return Err(ProxyError::Config(
|
||||
"censorship.tls_fetch.total_budget_ms must be > 0".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
// Merge primary + extra TLS domains, deduplicate (primary always first).
|
||||
if !config.censorship.tls_domains.is_empty() {
|
||||
let mut all = Vec::with_capacity(1 + config.censorship.tls_domains.len());
|
||||
|
|
@ -1121,6 +1219,10 @@ mod load_security_tests;
|
|||
#[path = "tests/load_mask_shape_security_tests.rs"]
|
||||
mod load_mask_shape_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/load_mask_classifier_prefetch_timeout_security_tests.rs"]
|
||||
mod load_mask_classifier_prefetch_timeout_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
|
@ -1158,6 +1260,7 @@ mod tests {
|
|||
default_me_init_retry_attempts()
|
||||
);
|
||||
assert_eq!(cfg.general.me2dc_fallback, default_me2dc_fallback());
|
||||
assert_eq!(cfg.general.me2dc_fast, default_me2dc_fast());
|
||||
assert_eq!(
|
||||
cfg.general.proxy_config_v4_cache_path,
|
||||
default_proxy_config_v4_cache_path()
|
||||
|
|
@ -1226,6 +1329,11 @@ mod tests {
|
|||
assert_eq!(cfg.general.update_every, default_update_every());
|
||||
assert_eq!(cfg.server.listen_addr_ipv4, default_listen_addr_ipv4());
|
||||
assert_eq!(cfg.server.listen_addr_ipv6, default_listen_addr_ipv6_opt());
|
||||
assert_eq!(
|
||||
cfg.server.proxy_protocol_trusted_cidrs,
|
||||
default_proxy_protocol_trusted_cidrs()
|
||||
);
|
||||
assert_eq!(cfg.censorship.unknown_sni_action, UnknownSniAction::Drop);
|
||||
assert_eq!(cfg.server.api.listen, default_api_listen());
|
||||
assert_eq!(cfg.server.api.whitelist, default_api_whitelist());
|
||||
assert_eq!(
|
||||
|
|
@ -1256,7 +1364,36 @@ mod tests {
|
|||
cfg.server.api.runtime_edge_events_capacity,
|
||||
default_api_runtime_edge_events_capacity()
|
||||
);
|
||||
assert_eq!(
|
||||
cfg.server.conntrack_control.inline_conntrack_control,
|
||||
default_conntrack_control_enabled()
|
||||
);
|
||||
assert_eq!(cfg.server.conntrack_control.mode, ConntrackMode::default());
|
||||
assert_eq!(
|
||||
cfg.server.conntrack_control.backend,
|
||||
ConntrackBackend::default()
|
||||
);
|
||||
assert_eq!(
|
||||
cfg.server.conntrack_control.profile,
|
||||
ConntrackPressureProfile::default()
|
||||
);
|
||||
assert_eq!(
|
||||
cfg.server.conntrack_control.pressure_high_watermark_pct,
|
||||
default_conntrack_pressure_high_watermark_pct()
|
||||
);
|
||||
assert_eq!(
|
||||
cfg.server.conntrack_control.pressure_low_watermark_pct,
|
||||
default_conntrack_pressure_low_watermark_pct()
|
||||
);
|
||||
assert_eq!(
|
||||
cfg.server.conntrack_control.delete_budget_per_sec,
|
||||
default_conntrack_delete_budget_per_sec()
|
||||
);
|
||||
assert_eq!(cfg.access.users, default_access_users());
|
||||
assert_eq!(
|
||||
cfg.access.user_max_tcp_conns_global_each,
|
||||
default_user_max_tcp_conns_global_each()
|
||||
);
|
||||
assert_eq!(
|
||||
cfg.access.user_max_unique_ips_mode,
|
||||
UserMaxUniqueIpsMode::default()
|
||||
|
|
@ -1292,6 +1429,7 @@ mod tests {
|
|||
default_me_init_retry_attempts()
|
||||
);
|
||||
assert_eq!(general.me2dc_fallback, default_me2dc_fallback());
|
||||
assert_eq!(general.me2dc_fast, default_me2dc_fast());
|
||||
assert_eq!(
|
||||
general.proxy_config_v4_cache_path,
|
||||
default_proxy_config_v4_cache_path()
|
||||
|
|
@ -1358,6 +1496,14 @@ mod tests {
|
|||
|
||||
let server = ServerConfig::default();
|
||||
assert_eq!(server.listen_addr_ipv6, Some(default_listen_addr_ipv6()));
|
||||
assert_eq!(
|
||||
server.proxy_protocol_trusted_cidrs,
|
||||
default_proxy_protocol_trusted_cidrs()
|
||||
);
|
||||
assert_eq!(
|
||||
AntiCensorshipConfig::default().unknown_sni_action,
|
||||
UnknownSniAction::Drop
|
||||
);
|
||||
assert_eq!(server.api.listen, default_api_listen());
|
||||
assert_eq!(server.api.whitelist, default_api_whitelist());
|
||||
assert_eq!(
|
||||
|
|
@ -1388,9 +1534,107 @@ mod tests {
|
|||
server.api.runtime_edge_events_capacity,
|
||||
default_api_runtime_edge_events_capacity()
|
||||
);
|
||||
assert_eq!(
|
||||
server.conntrack_control.inline_conntrack_control,
|
||||
default_conntrack_control_enabled()
|
||||
);
|
||||
assert_eq!(server.conntrack_control.mode, ConntrackMode::default());
|
||||
assert_eq!(
|
||||
server.conntrack_control.backend,
|
||||
ConntrackBackend::default()
|
||||
);
|
||||
assert_eq!(
|
||||
server.conntrack_control.profile,
|
||||
ConntrackPressureProfile::default()
|
||||
);
|
||||
assert_eq!(
|
||||
server.conntrack_control.pressure_high_watermark_pct,
|
||||
default_conntrack_pressure_high_watermark_pct()
|
||||
);
|
||||
assert_eq!(
|
||||
server.conntrack_control.pressure_low_watermark_pct,
|
||||
default_conntrack_pressure_low_watermark_pct()
|
||||
);
|
||||
assert_eq!(
|
||||
server.conntrack_control.delete_budget_per_sec,
|
||||
default_conntrack_delete_budget_per_sec()
|
||||
);
|
||||
|
||||
let access = AccessConfig::default();
|
||||
assert_eq!(access.users, default_access_users());
|
||||
assert_eq!(
|
||||
access.user_max_tcp_conns_global_each,
|
||||
default_user_max_tcp_conns_global_each()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn proxy_protocol_trusted_cidrs_missing_uses_trust_all_but_explicit_empty_stays_empty() {
|
||||
let cfg_missing: ProxyConfig = toml::from_str(
|
||||
r#"
|
||||
[server]
|
||||
[general]
|
||||
[network]
|
||||
[access]
|
||||
"#,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
cfg_missing.server.proxy_protocol_trusted_cidrs,
|
||||
default_proxy_protocol_trusted_cidrs()
|
||||
);
|
||||
|
||||
let cfg_explicit_empty: ProxyConfig = toml::from_str(
|
||||
r#"
|
||||
[server]
|
||||
proxy_protocol_trusted_cidrs = []
|
||||
|
||||
[general]
|
||||
[network]
|
||||
[access]
|
||||
"#,
|
||||
)
|
||||
.unwrap();
|
||||
assert!(
|
||||
cfg_explicit_empty
|
||||
.server
|
||||
.proxy_protocol_trusted_cidrs
|
||||
.is_empty()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn unknown_sni_action_parses_and_defaults_to_drop() {
|
||||
let cfg_default: ProxyConfig = toml::from_str(
|
||||
r#"
|
||||
[server]
|
||||
[general]
|
||||
[network]
|
||||
[access]
|
||||
[censorship]
|
||||
"#,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
cfg_default.censorship.unknown_sni_action,
|
||||
UnknownSniAction::Drop
|
||||
);
|
||||
|
||||
let cfg_mask: ProxyConfig = toml::from_str(
|
||||
r#"
|
||||
[server]
|
||||
[general]
|
||||
[network]
|
||||
[access]
|
||||
[censorship]
|
||||
unknown_sni_action = "mask"
|
||||
"#,
|
||||
)
|
||||
.unwrap();
|
||||
assert_eq!(
|
||||
cfg_mask.censorship.unknown_sni_action,
|
||||
UnknownSniAction::Mask
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
|
@ -1764,6 +2008,26 @@ mod tests {
|
|||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tg_connect_zero_is_rejected() {
|
||||
let toml = r#"
|
||||
[general]
|
||||
tg_connect = 0
|
||||
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_tg_connect_zero_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let err = ProxyConfig::load(&path).unwrap_err().to_string();
|
||||
assert!(err.contains("general.tg_connect must be > 0"));
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rpc_proxy_req_every_out_of_range_is_rejected() {
|
||||
let toml = r#"
|
||||
|
|
@ -2227,6 +2491,118 @@ mod tests {
|
|||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn conntrack_pressure_high_watermark_out_of_range_is_rejected() {
|
||||
let toml = r#"
|
||||
[server.conntrack_control]
|
||||
pressure_high_watermark_pct = 0
|
||||
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_conntrack_high_watermark_invalid_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let err = ProxyConfig::load(&path).unwrap_err().to_string();
|
||||
assert!(err.contains(
|
||||
"server.conntrack_control.pressure_high_watermark_pct must be within [1, 100]"
|
||||
));
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn conntrack_pressure_low_watermark_must_be_below_high() {
|
||||
let toml = r#"
|
||||
[server.conntrack_control]
|
||||
pressure_high_watermark_pct = 50
|
||||
pressure_low_watermark_pct = 50
|
||||
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_conntrack_low_watermark_invalid_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let err = ProxyConfig::load(&path).unwrap_err().to_string();
|
||||
assert!(
|
||||
err.contains(
|
||||
"server.conntrack_control.pressure_low_watermark_pct must be < pressure_high_watermark_pct"
|
||||
)
|
||||
);
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn conntrack_delete_budget_zero_is_rejected() {
|
||||
let toml = r#"
|
||||
[server.conntrack_control]
|
||||
delete_budget_per_sec = 0
|
||||
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_conntrack_delete_budget_invalid_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let err = ProxyConfig::load(&path).unwrap_err().to_string();
|
||||
assert!(err.contains("server.conntrack_control.delete_budget_per_sec must be > 0"));
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn conntrack_hybrid_mode_requires_listener_allow_list() {
|
||||
let toml = r#"
|
||||
[server.conntrack_control]
|
||||
mode = "hybrid"
|
||||
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_conntrack_hybrid_requires_ips_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let err = ProxyConfig::load(&path).unwrap_err().to_string();
|
||||
assert!(err.contains(
|
||||
"server.conntrack_control.hybrid_listener_ips must be non-empty in mode=hybrid"
|
||||
));
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn conntrack_profile_is_loaded_from_config() {
|
||||
let toml = r#"
|
||||
[server.conntrack_control]
|
||||
profile = "aggressive"
|
||||
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_conntrack_profile_parse_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let cfg = ProxyConfig::load(&path).unwrap();
|
||||
assert_eq!(
|
||||
cfg.server.conntrack_control.profile,
|
||||
ConntrackPressureProfile::Aggressive
|
||||
);
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn force_close_default_matches_drain_ttl() {
|
||||
let toml = r#"
|
||||
|
|
@ -2340,6 +2716,94 @@ mod tests {
|
|||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tls_fetch_defaults_are_applied() {
|
||||
let toml = r#"
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_tls_fetch_defaults_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let cfg = ProxyConfig::load(&path).unwrap();
|
||||
assert_eq!(
|
||||
cfg.censorship.tls_fetch.profiles,
|
||||
TlsFetchConfig::default().profiles
|
||||
);
|
||||
assert!(cfg.censorship.tls_fetch.strict_route);
|
||||
assert_eq!(cfg.censorship.tls_fetch.attempt_timeout_ms, 5_000);
|
||||
assert_eq!(cfg.censorship.tls_fetch.total_budget_ms, 15_000);
|
||||
assert_eq!(cfg.censorship.tls_fetch.profile_cache_ttl_secs, 600);
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tls_fetch_profiles_are_deduplicated_preserving_order() {
|
||||
let toml = r#"
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
[censorship.tls_fetch]
|
||||
profiles = ["compat_tls12", "modern_chrome_like", "compat_tls12", "legacy_minimal"]
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_tls_fetch_profiles_dedup_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let cfg = ProxyConfig::load(&path).unwrap();
|
||||
assert_eq!(
|
||||
cfg.censorship.tls_fetch.profiles,
|
||||
vec![
|
||||
TlsFetchProfile::CompatTls12,
|
||||
TlsFetchProfile::ModernChromeLike,
|
||||
TlsFetchProfile::LegacyMinimal
|
||||
]
|
||||
);
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tls_fetch_attempt_timeout_zero_is_rejected() {
|
||||
let toml = r#"
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
[censorship.tls_fetch]
|
||||
attempt_timeout_ms = 0
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_tls_fetch_attempt_timeout_zero_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let err = ProxyConfig::load(&path).unwrap_err().to_string();
|
||||
assert!(err.contains("censorship.tls_fetch.attempt_timeout_ms must be > 0"));
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tls_fetch_total_budget_zero_is_rejected() {
|
||||
let toml = r#"
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
[censorship.tls_fetch]
|
||||
total_budget_ms = 0
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_tls_fetch_total_budget_zero_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let err = ProxyConfig::load(&path).unwrap_err().to_string();
|
||||
assert!(err.contains("censorship.tls_fetch.total_budget_ms must be > 0"));
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_ad_tag_is_disabled_during_load() {
|
||||
let toml = r#"
|
||||
|
|
|
|||
|
|
@ -17,6 +17,28 @@ fn remove_temp_config(path: &PathBuf) {
|
|||
let _ = fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn default_timeouts_enable_apple_compatible_handshake_profile() {
|
||||
let cfg = ProxyConfig::default();
|
||||
assert_eq!(cfg.timeouts.client_first_byte_idle_secs, 300);
|
||||
assert_eq!(cfg.timeouts.client_handshake, 60);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_accepts_zero_first_byte_idle_timeout_as_legacy_opt_out() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[timeouts]
|
||||
client_first_byte_idle_secs = 0
|
||||
"#,
|
||||
);
|
||||
|
||||
let cfg = ProxyConfig::load(&path).expect("config with zero first-byte idle timeout must load");
|
||||
assert_eq!(cfg.timeouts.client_first_byte_idle_secs, 0);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_relay_hard_idle_smaller_than_soft_idle_with_clear_error() {
|
||||
let path = write_temp_config(
|
||||
|
|
|
|||
|
|
@ -0,0 +1,76 @@
|
|||
use super::*;
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
fn write_temp_config(contents: &str) -> PathBuf {
|
||||
let nonce = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("system time must be after unix epoch")
|
||||
.as_nanos();
|
||||
let path = std::env::temp_dir().join(format!(
|
||||
"telemt-load-mask-prefetch-timeout-security-{nonce}.toml"
|
||||
));
|
||||
fs::write(&path, contents).expect("temp config write must succeed");
|
||||
path
|
||||
}
|
||||
|
||||
fn remove_temp_config(path: &PathBuf) {
|
||||
let _ = fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_mask_classifier_prefetch_timeout_below_min_bound() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_classifier_prefetch_timeout_ms = 4
|
||||
"#,
|
||||
);
|
||||
|
||||
let err = ProxyConfig::load(&path)
|
||||
.expect_err("prefetch timeout below minimum security bound must be rejected");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("censorship.mask_classifier_prefetch_timeout_ms must be within [5, 50]"),
|
||||
"error must explain timeout bound invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_mask_classifier_prefetch_timeout_above_max_bound() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_classifier_prefetch_timeout_ms = 51
|
||||
"#,
|
||||
);
|
||||
|
||||
let err = ProxyConfig::load(&path)
|
||||
.expect_err("prefetch timeout above max security bound must be rejected");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("censorship.mask_classifier_prefetch_timeout_ms must be within [5, 50]"),
|
||||
"error must explain timeout bound invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_accepts_mask_classifier_prefetch_timeout_within_bounds() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_classifier_prefetch_timeout_ms = 20
|
||||
"#,
|
||||
);
|
||||
|
||||
let cfg =
|
||||
ProxyConfig::load(&path).expect("prefetch timeout within security bounds must be accepted");
|
||||
assert_eq!(cfg.censorship.mask_classifier_prefetch_timeout_ms, 20);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
|
@ -236,3 +236,57 @@ mask_shape_above_cap_blur_max_bytes = 8
|
|||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_zero_mask_relay_max_bytes() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_relay_max_bytes = 0
|
||||
"#,
|
||||
);
|
||||
|
||||
let err = ProxyConfig::load(&path).expect_err("mask_relay_max_bytes must be > 0");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("censorship.mask_relay_max_bytes must be > 0"),
|
||||
"error must explain non-zero relay cap invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_mask_relay_max_bytes_above_upper_bound() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_relay_max_bytes = 67108865
|
||||
"#,
|
||||
);
|
||||
|
||||
let err =
|
||||
ProxyConfig::load(&path).expect_err("mask_relay_max_bytes above hard cap must be rejected");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("censorship.mask_relay_max_bytes must be <= 67108864"),
|
||||
"error must explain relay cap upper bound invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_accepts_valid_mask_relay_max_bytes() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_relay_max_bytes = 8388608
|
||||
"#,
|
||||
);
|
||||
|
||||
let cfg = ProxyConfig::load(&path).expect("valid mask_relay_max_bytes must be accepted");
|
||||
assert_eq!(cfg.censorship.mask_relay_max_bytes, 8_388_608);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -429,6 +429,11 @@ pub struct GeneralConfig {
|
|||
#[serde(default = "default_me2dc_fallback")]
|
||||
pub me2dc_fallback: bool,
|
||||
|
||||
/// Fast ME->Direct fallback mode for new sessions.
|
||||
/// Active only when both `use_middle_proxy=true` and `me2dc_fallback=true`.
|
||||
#[serde(default = "default_me2dc_fast")]
|
||||
pub me2dc_fast: bool,
|
||||
|
||||
/// Enable ME keepalive padding frames.
|
||||
#[serde(default = "default_true")]
|
||||
pub me_keepalive_enabled: bool,
|
||||
|
|
@ -468,7 +473,7 @@ pub struct GeneralConfig {
|
|||
pub me_c2me_send_timeout_ms: u64,
|
||||
|
||||
/// Bounded wait in milliseconds for routing ME DATA to per-connection queue.
|
||||
/// `0` keeps legacy no-wait behavior.
|
||||
/// `0` keeps non-blocking routing; values >0 enable bounded wait for compatibility.
|
||||
#[serde(default = "default_me_reader_route_data_wait_ms")]
|
||||
pub me_reader_route_data_wait_ms: u64,
|
||||
|
||||
|
|
@ -489,6 +494,14 @@ pub struct GeneralConfig {
|
|||
#[serde(default = "default_me_d2c_ack_flush_immediate")]
|
||||
pub me_d2c_ack_flush_immediate: bool,
|
||||
|
||||
/// Additional bytes above strict per-user quota allowed in hot-path soft mode.
|
||||
#[serde(default = "default_me_quota_soft_overshoot_bytes")]
|
||||
pub me_quota_soft_overshoot_bytes: u64,
|
||||
|
||||
/// Shrink threshold for reusable ME->Client frame assembly buffer.
|
||||
#[serde(default = "default_me_d2c_frame_buf_shrink_threshold_bytes")]
|
||||
pub me_d2c_frame_buf_shrink_threshold_bytes: usize,
|
||||
|
||||
/// Copy buffer size for client->DC direction in direct relay.
|
||||
#[serde(default = "default_direct_relay_copy_buf_c2s_bytes")]
|
||||
pub direct_relay_copy_buf_c2s_bytes: usize,
|
||||
|
|
@ -650,6 +663,10 @@ pub struct GeneralConfig {
|
|||
#[serde(default = "default_upstream_connect_budget_ms")]
|
||||
pub upstream_connect_budget_ms: u64,
|
||||
|
||||
/// Per-attempt TCP connect timeout to Telegram DC (seconds).
|
||||
#[serde(default = "default_connect_timeout")]
|
||||
pub tg_connect: u64,
|
||||
|
||||
/// Consecutive failed requests before upstream is marked unhealthy.
|
||||
#[serde(default = "default_upstream_unhealthy_fail_threshold")]
|
||||
pub upstream_unhealthy_fail_threshold: u32,
|
||||
|
|
@ -931,6 +948,7 @@ impl Default for GeneralConfig {
|
|||
middle_proxy_warm_standby: default_middle_proxy_warm_standby(),
|
||||
me_init_retry_attempts: default_me_init_retry_attempts(),
|
||||
me2dc_fallback: default_me2dc_fallback(),
|
||||
me2dc_fast: default_me2dc_fast(),
|
||||
me_keepalive_enabled: default_true(),
|
||||
me_keepalive_interval_secs: default_keepalive_interval(),
|
||||
me_keepalive_jitter_secs: default_keepalive_jitter(),
|
||||
|
|
@ -945,6 +963,9 @@ impl Default for GeneralConfig {
|
|||
me_d2c_flush_batch_max_bytes: default_me_d2c_flush_batch_max_bytes(),
|
||||
me_d2c_flush_batch_max_delay_us: default_me_d2c_flush_batch_max_delay_us(),
|
||||
me_d2c_ack_flush_immediate: default_me_d2c_ack_flush_immediate(),
|
||||
me_quota_soft_overshoot_bytes: default_me_quota_soft_overshoot_bytes(),
|
||||
me_d2c_frame_buf_shrink_threshold_bytes:
|
||||
default_me_d2c_frame_buf_shrink_threshold_bytes(),
|
||||
direct_relay_copy_buf_c2s_bytes: default_direct_relay_copy_buf_c2s_bytes(),
|
||||
direct_relay_copy_buf_s2c_bytes: default_direct_relay_copy_buf_s2c_bytes(),
|
||||
me_warmup_stagger_enabled: default_true(),
|
||||
|
|
@ -990,6 +1011,7 @@ impl Default for GeneralConfig {
|
|||
upstream_connect_retry_attempts: default_upstream_connect_retry_attempts(),
|
||||
upstream_connect_retry_backoff_ms: default_upstream_connect_retry_backoff_ms(),
|
||||
upstream_connect_budget_ms: default_upstream_connect_budget_ms(),
|
||||
tg_connect: default_connect_timeout(),
|
||||
upstream_unhealthy_fail_threshold: default_upstream_unhealthy_fail_threshold(),
|
||||
upstream_connect_failfast_hard_errors: default_upstream_connect_failfast_hard_errors(),
|
||||
stun_iface_mismatch_ignore: false,
|
||||
|
|
@ -1194,6 +1216,118 @@ impl Default for ApiConfig {
|
|||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum ConntrackMode {
|
||||
#[default]
|
||||
Tracked,
|
||||
Notrack,
|
||||
Hybrid,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum ConntrackBackend {
|
||||
#[default]
|
||||
Auto,
|
||||
Nftables,
|
||||
Iptables,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum ConntrackPressureProfile {
|
||||
Conservative,
|
||||
#[default]
|
||||
Balanced,
|
||||
Aggressive,
|
||||
}
|
||||
|
||||
impl ConntrackPressureProfile {
|
||||
pub fn client_first_byte_idle_cap_secs(self) -> u64 {
|
||||
match self {
|
||||
Self::Conservative => 30,
|
||||
Self::Balanced => 20,
|
||||
Self::Aggressive => 10,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn direct_activity_timeout_secs(self) -> u64 {
|
||||
match self {
|
||||
Self::Conservative => 180,
|
||||
Self::Balanced => 120,
|
||||
Self::Aggressive => 60,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn middle_soft_idle_cap_secs(self) -> u64 {
|
||||
match self {
|
||||
Self::Conservative => 60,
|
||||
Self::Balanced => 30,
|
||||
Self::Aggressive => 20,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn middle_hard_idle_cap_secs(self) -> u64 {
|
||||
match self {
|
||||
Self::Conservative => 180,
|
||||
Self::Balanced => 90,
|
||||
Self::Aggressive => 60,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ConntrackControlConfig {
|
||||
/// Enables runtime conntrack-control worker for pressure mitigation.
|
||||
#[serde(default = "default_conntrack_control_enabled")]
|
||||
pub inline_conntrack_control: bool,
|
||||
|
||||
/// Conntrack mode for listener ingress traffic.
|
||||
#[serde(default)]
|
||||
pub mode: ConntrackMode,
|
||||
|
||||
/// Netfilter backend used to reconcile notrack rules.
|
||||
#[serde(default)]
|
||||
pub backend: ConntrackBackend,
|
||||
|
||||
/// Pressure profile for timeout caps under resource saturation.
|
||||
#[serde(default)]
|
||||
pub profile: ConntrackPressureProfile,
|
||||
|
||||
/// Listener IP allow-list for hybrid mode.
|
||||
/// Ignored in tracked/notrack mode.
|
||||
#[serde(default)]
|
||||
pub hybrid_listener_ips: Vec<IpAddr>,
|
||||
|
||||
/// Pressure high watermark as percentage.
|
||||
#[serde(default = "default_conntrack_pressure_high_watermark_pct")]
|
||||
pub pressure_high_watermark_pct: u8,
|
||||
|
||||
/// Pressure low watermark as percentage.
|
||||
#[serde(default = "default_conntrack_pressure_low_watermark_pct")]
|
||||
pub pressure_low_watermark_pct: u8,
|
||||
|
||||
/// Maximum conntrack delete operations per second.
|
||||
#[serde(default = "default_conntrack_delete_budget_per_sec")]
|
||||
pub delete_budget_per_sec: u64,
|
||||
}
|
||||
|
||||
impl Default for ConntrackControlConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
inline_conntrack_control: default_conntrack_control_enabled(),
|
||||
mode: ConntrackMode::default(),
|
||||
backend: ConntrackBackend::default(),
|
||||
profile: ConntrackPressureProfile::default(),
|
||||
hybrid_listener_ips: Vec::new(),
|
||||
pressure_high_watermark_pct: default_conntrack_pressure_high_watermark_pct(),
|
||||
pressure_low_watermark_pct: default_conntrack_pressure_low_watermark_pct(),
|
||||
delete_budget_per_sec: default_conntrack_delete_budget_per_sec(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ServerConfig {
|
||||
#[serde(default = "default_port")]
|
||||
|
|
@ -1229,9 +1363,10 @@ pub struct ServerConfig {
|
|||
|
||||
/// Trusted source CIDRs allowed to send incoming PROXY protocol headers.
|
||||
///
|
||||
/// When non-empty, connections from addresses outside this allowlist are
|
||||
/// rejected before `src_addr` is applied.
|
||||
#[serde(default)]
|
||||
/// If this field is omitted in config, it defaults to trust-all CIDRs
|
||||
/// (`0.0.0.0/0` and `::/0`). If it is explicitly set to an empty list,
|
||||
/// all PROXY protocol headers are rejected.
|
||||
#[serde(default = "default_proxy_protocol_trusted_cidrs")]
|
||||
pub proxy_protocol_trusted_cidrs: Vec<IpNetwork>,
|
||||
|
||||
/// Port for the Prometheus-compatible metrics endpoint.
|
||||
|
|
@ -1254,6 +1389,11 @@ pub struct ServerConfig {
|
|||
#[serde(default)]
|
||||
pub listeners: Vec<ListenerConfig>,
|
||||
|
||||
/// TCP `listen(2)` backlog for client-facing sockets (also used for the metrics HTTP listener).
|
||||
/// The effective queue is capped by the kernel (for example `somaxconn` on Linux).
|
||||
#[serde(default = "default_listen_backlog")]
|
||||
pub listen_backlog: u32,
|
||||
|
||||
/// Maximum number of concurrent client connections.
|
||||
/// 0 means unlimited.
|
||||
#[serde(default = "default_server_max_connections")]
|
||||
|
|
@ -1263,6 +1403,10 @@ pub struct ServerConfig {
|
|||
/// `0` keeps legacy unbounded wait behavior.
|
||||
#[serde(default = "default_accept_permit_timeout_ms")]
|
||||
pub accept_permit_timeout_ms: u64,
|
||||
|
||||
/// Runtime conntrack control and pressure policy.
|
||||
#[serde(default)]
|
||||
pub conntrack_control: ConntrackControlConfig,
|
||||
}
|
||||
|
||||
impl Default for ServerConfig {
|
||||
|
|
@ -1276,20 +1420,28 @@ impl Default for ServerConfig {
|
|||
listen_tcp: None,
|
||||
proxy_protocol: false,
|
||||
proxy_protocol_header_timeout_ms: default_proxy_protocol_header_timeout_ms(),
|
||||
proxy_protocol_trusted_cidrs: Vec::new(),
|
||||
proxy_protocol_trusted_cidrs: default_proxy_protocol_trusted_cidrs(),
|
||||
metrics_port: None,
|
||||
metrics_listen: None,
|
||||
metrics_whitelist: default_metrics_whitelist(),
|
||||
api: ApiConfig::default(),
|
||||
listeners: Vec::new(),
|
||||
listen_backlog: default_listen_backlog(),
|
||||
max_connections: default_server_max_connections(),
|
||||
accept_permit_timeout_ms: default_accept_permit_timeout_ms(),
|
||||
conntrack_control: ConntrackControlConfig::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct TimeoutsConfig {
|
||||
/// Maximum idle wait in seconds for the first client byte before handshake parsing starts.
|
||||
/// `0` disables the separate idle phase and keeps legacy timeout behavior.
|
||||
#[serde(default = "default_client_first_byte_idle_secs")]
|
||||
pub client_first_byte_idle_secs: u64,
|
||||
|
||||
/// Maximum active handshake duration in seconds after the first client byte is received.
|
||||
#[serde(default = "default_handshake_timeout")]
|
||||
pub client_handshake: u64,
|
||||
|
||||
|
|
@ -1311,9 +1463,6 @@ pub struct TimeoutsConfig {
|
|||
#[serde(default = "default_relay_idle_grace_after_downstream_activity_secs")]
|
||||
pub relay_idle_grace_after_downstream_activity_secs: u64,
|
||||
|
||||
#[serde(default = "default_connect_timeout")]
|
||||
pub tg_connect: u64,
|
||||
|
||||
#[serde(default = "default_keepalive")]
|
||||
pub client_keepalive: u64,
|
||||
|
||||
|
|
@ -1332,13 +1481,13 @@ pub struct TimeoutsConfig {
|
|||
impl Default for TimeoutsConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
client_first_byte_idle_secs: default_client_first_byte_idle_secs(),
|
||||
client_handshake: default_handshake_timeout(),
|
||||
relay_idle_policy_v2_enabled: default_relay_idle_policy_v2_enabled(),
|
||||
relay_client_idle_soft_secs: default_relay_client_idle_soft_secs(),
|
||||
relay_client_idle_hard_secs: default_relay_client_idle_hard_secs(),
|
||||
relay_idle_grace_after_downstream_activity_secs:
|
||||
default_relay_idle_grace_after_downstream_activity_secs(),
|
||||
tg_connect: default_connect_timeout(),
|
||||
client_keepalive: default_keepalive(),
|
||||
client_ack: default_ack_timeout(),
|
||||
me_one_retry: default_me_one_retry(),
|
||||
|
|
@ -1347,6 +1496,90 @@ impl Default for TimeoutsConfig {
|
|||
}
|
||||
}
|
||||
|
||||
/// Policy applied to TLS ClientHellos whose SNI is not in the configured set.
///
/// Serialized in lowercase (`drop` / `mask`); defaults to `Drop`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
pub enum UnknownSniAction {
    /// Terminate the connection (default).
    #[default]
    Drop,
    /// Forward to the masking fallback instead of dropping.
    Mask,
}
|
||||
|
||||
/// ClientHello fingerprint profile used for TLS front metadata fetches.
///
/// Serialized in snake_case (e.g. `modern_chrome_like`); see [`TlsFetchConfig::profiles`]
/// for the adaptive fallback order.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum TlsFetchProfile {
    /// Chrome-like modern ClientHello.
    ModernChromeLike,
    /// Firefox-like modern ClientHello.
    ModernFirefoxLike,
    /// TLS 1.2 compatibility profile.
    CompatTls12,
    /// Minimal legacy profile, last-resort fallback.
    LegacyMinimal,
}
|
||||
|
||||
impl TlsFetchProfile {
|
||||
pub fn as_str(self) -> &'static str {
|
||||
match self {
|
||||
TlsFetchProfile::ModernChromeLike => "modern_chrome_like",
|
||||
TlsFetchProfile::ModernFirefoxLike => "modern_firefox_like",
|
||||
TlsFetchProfile::CompatTls12 => "compat_tls12",
|
||||
TlsFetchProfile::LegacyMinimal => "legacy_minimal",
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn default_tls_fetch_profiles() -> Vec<TlsFetchProfile> {
|
||||
vec![
|
||||
TlsFetchProfile::ModernChromeLike,
|
||||
TlsFetchProfile::ModernFirefoxLike,
|
||||
TlsFetchProfile::CompatTls12,
|
||||
TlsFetchProfile::LegacyMinimal,
|
||||
]
|
||||
}
|
||||
|
||||
/// Strategy for TLS front metadata fetches: profile fallback order,
/// per-attempt/total time budgets, and fingerprint tweaks.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct TlsFetchConfig {
    /// Ordered list of ClientHello profiles used for adaptive fallback.
    #[serde(default = "default_tls_fetch_profiles")]
    pub profiles: Vec<TlsFetchProfile>,

    /// When true and upstream route is configured, TLS fetch fails closed on
    /// upstream connect errors and does not fallback to direct TCP.
    #[serde(default = "default_tls_fetch_strict_route")]
    pub strict_route: bool,

    /// Timeout per one profile attempt in milliseconds.
    #[serde(default = "default_tls_fetch_attempt_timeout_ms")]
    pub attempt_timeout_ms: u64,

    /// Total wall-clock budget in milliseconds across all profile attempts.
    #[serde(default = "default_tls_fetch_total_budget_ms")]
    pub total_budget_ms: u64,

    /// Adds GREASE-style values into selected ClientHello extensions.
    /// Defaults to `false` (plain `#[serde(default)]`).
    #[serde(default)]
    pub grease_enabled: bool,

    /// Produces deterministic ClientHello randomness for debugging/tests.
    /// Defaults to `false`.
    #[serde(default)]
    pub deterministic: bool,

    /// TTL for winner-profile cache entries in seconds.
    /// Set to 0 to disable profile cache.
    #[serde(default = "default_tls_fetch_profile_cache_ttl_secs")]
    pub profile_cache_ttl_secs: u64,
}
|
||||
|
||||
impl Default for TlsFetchConfig {
    /// Mirrors the per-field serde defaults so that a programmatically
    /// constructed config matches one deserialized from an empty table.
    fn default() -> Self {
        Self {
            profiles: default_tls_fetch_profiles(),
            strict_route: default_tls_fetch_strict_route(),
            attempt_timeout_ms: default_tls_fetch_attempt_timeout_ms(),
            total_budget_ms: default_tls_fetch_total_budget_ms(),
            // Both booleans use plain #[serde(default)], i.e. false.
            grease_enabled: false,
            deterministic: false,
            profile_cache_ttl_secs: default_tls_fetch_profile_cache_ttl_secs(),
        }
    }
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct AntiCensorshipConfig {
|
||||
#[serde(default = "default_tls_domain")]
|
||||
|
|
@ -1356,11 +1589,19 @@ pub struct AntiCensorshipConfig {
|
|||
#[serde(default)]
|
||||
pub tls_domains: Vec<String>,
|
||||
|
||||
/// Policy for TLS ClientHello with unknown (non-configured) SNI.
|
||||
#[serde(default)]
|
||||
pub unknown_sni_action: UnknownSniAction,
|
||||
|
||||
/// Upstream scope used for TLS front metadata fetches.
|
||||
/// Empty value keeps default upstream routing behavior.
|
||||
#[serde(default = "default_tls_fetch_scope")]
|
||||
pub tls_fetch_scope: String,
|
||||
|
||||
/// Fetch strategy for TLS front metadata bootstrap and periodic refresh.
|
||||
#[serde(default)]
|
||||
pub tls_fetch: TlsFetchConfig,
|
||||
|
||||
#[serde(default = "default_true")]
|
||||
pub mask: bool,
|
||||
|
||||
|
|
@ -1440,6 +1681,14 @@ pub struct AntiCensorshipConfig {
|
|||
#[serde(default = "default_mask_shape_above_cap_blur_max_bytes")]
|
||||
pub mask_shape_above_cap_blur_max_bytes: usize,
|
||||
|
||||
/// Maximum bytes relayed per direction on unauthenticated masking fallback paths.
|
||||
#[serde(default = "default_mask_relay_max_bytes")]
|
||||
pub mask_relay_max_bytes: usize,
|
||||
|
||||
/// Prefetch timeout (ms) for extending fragmented masking classifier window.
|
||||
#[serde(default = "default_mask_classifier_prefetch_timeout_ms")]
|
||||
pub mask_classifier_prefetch_timeout_ms: u64,
|
||||
|
||||
/// Enable outcome-time normalization envelope for masking fallback.
|
||||
#[serde(default = "default_mask_timing_normalization_enabled")]
|
||||
pub mask_timing_normalization_enabled: bool,
|
||||
|
|
@ -1458,7 +1707,9 @@ impl Default for AntiCensorshipConfig {
|
|||
Self {
|
||||
tls_domain: default_tls_domain(),
|
||||
tls_domains: Vec::new(),
|
||||
unknown_sni_action: UnknownSniAction::Drop,
|
||||
tls_fetch_scope: default_tls_fetch_scope(),
|
||||
tls_fetch: TlsFetchConfig::default(),
|
||||
mask: default_true(),
|
||||
mask_host: None,
|
||||
mask_port: default_mask_port(),
|
||||
|
|
@ -1478,6 +1729,8 @@ impl Default for AntiCensorshipConfig {
|
|||
mask_shape_bucket_cap_bytes: default_mask_shape_bucket_cap_bytes(),
|
||||
mask_shape_above_cap_blur: default_mask_shape_above_cap_blur(),
|
||||
mask_shape_above_cap_blur_max_bytes: default_mask_shape_above_cap_blur_max_bytes(),
|
||||
mask_relay_max_bytes: default_mask_relay_max_bytes(),
|
||||
mask_classifier_prefetch_timeout_ms: default_mask_classifier_prefetch_timeout_ms(),
|
||||
mask_timing_normalization_enabled: default_mask_timing_normalization_enabled(),
|
||||
mask_timing_normalization_floor_ms: default_mask_timing_normalization_floor_ms(),
|
||||
mask_timing_normalization_ceiling_ms: default_mask_timing_normalization_ceiling_ms(),
|
||||
|
|
@ -1497,6 +1750,12 @@ pub struct AccessConfig {
|
|||
#[serde(default)]
|
||||
pub user_max_tcp_conns: HashMap<String, usize>,
|
||||
|
||||
/// Global per-user TCP connection limit applied when a user has no
|
||||
/// positive individual override.
|
||||
/// `0` disables the inherited limit.
|
||||
#[serde(default = "default_user_max_tcp_conns_global_each")]
|
||||
pub user_max_tcp_conns_global_each: usize,
|
||||
|
||||
#[serde(default)]
|
||||
pub user_expirations: HashMap<String, DateTime<Utc>>,
|
||||
|
||||
|
|
@ -1533,6 +1792,7 @@ impl Default for AccessConfig {
|
|||
users: default_access_users(),
|
||||
user_ad_tags: HashMap::new(),
|
||||
user_max_tcp_conns: HashMap::new(),
|
||||
user_max_tcp_conns_global_each: default_user_max_tcp_conns_global_each(),
|
||||
user_expirations: HashMap::new(),
|
||||
user_data_quota: HashMap::new(),
|
||||
user_max_unique_ips: HashMap::new(),
|
||||
|
|
|
|||
|
|
@ -0,0 +1,755 @@
|
|||
use std::collections::BTreeSet;
|
||||
use std::net::IpAddr;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use tokio::io::AsyncWriteExt;
|
||||
use tokio::process::Command;
|
||||
use tokio::sync::{mpsc, watch};
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
use crate::config::{ConntrackBackend, ConntrackMode, ProxyConfig};
|
||||
use crate::proxy::middle_relay::note_global_relay_pressure;
|
||||
use crate::proxy::shared_state::{ConntrackCloseEvent, ConntrackCloseReason, ProxySharedState};
|
||||
use crate::stats::Stats;
|
||||
|
||||
/// Capacity of the bounded close-event channel feeding the controller task.
const CONNTRACK_EVENT_QUEUE_CAPACITY: usize = 32_768;
/// Consecutive low-watermark samples required before pressure mode is released
/// (hysteresis, see `update_pressure_state`).
const PRESSURE_RELEASE_TICKS: u8 = 3;
/// Interval between pressure samples and delete-budget refills.
const PRESSURE_SAMPLE_INTERVAL: Duration = Duration::from_secs(1);
|
||||
|
||||
/// Concrete netfilter rule backend selected at runtime (see `pick_backend`).
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum NetfilterBackend {
    /// `nft` binary available; rules applied via an `inet telemt_conntrack` table.
    Nftables,
    /// `iptables`/`ip6tables` binaries; rules applied via a raw-table chain.
    Iptables,
}
|
||||
|
||||
/// One per-second snapshot of resource-pressure signals.
#[derive(Clone, Copy)]
struct PressureSample {
    // Percentage of max_connections in use; None when no limit is configured.
    conn_pct: Option<u8>,
    // Percentage of the NOFILE soft limit in use; None when it can't be read.
    fd_pct: Option<u8>,
    // Accept-permit timeouts observed since the previous sample.
    accept_timeout_delta: u64,
    // Middle-end queue-full events observed since the previous sample.
    me_queue_pressure_delta: u64,
}
|
||||
|
||||
/// Hysteresis state machine for pressure mode.
struct PressureState {
    // True while pressure mode is engaged.
    active: bool,
    // Consecutive samples below the low watermark while active.
    low_streak: u8,
    // Last seen counter totals, used to compute per-sample deltas.
    prev_accept_timeout_total: u64,
    prev_me_queue_pressure_total: u64,
}
|
||||
|
||||
impl PressureState {
    /// Creates an inactive state seeded with current counter totals so the
    /// first sample's deltas reflect only activity after startup.
    fn new(stats: &Stats) -> Self {
        Self {
            active: false,
            low_streak: 0,
            prev_accept_timeout_total: stats.get_accept_permit_timeout_total(),
            prev_me_queue_pressure_total: stats.get_me_c2me_send_full_total(),
        }
    }
}
|
||||
|
||||
/// Spawns the background conntrack controller task.
///
/// On non-Linux targets no worker is spawned: metrics are published to reflect
/// an unavailable feature, the close-event sender is disabled, and a warning is
/// logged when the feature was explicitly enabled in config.
pub(crate) fn spawn_conntrack_controller(
    config_rx: watch::Receiver<Arc<ProxyConfig>>,
    stats: Arc<Stats>,
    shared: Arc<ProxySharedState>,
) {
    if !cfg!(target_os = "linux") {
        let enabled = config_rx
            .borrow()
            .server
            .conntrack_control
            .inline_conntrack_control;
        // Publish "enabled but unavailable" so dashboards are not misleading.
        stats.set_conntrack_control_enabled(enabled);
        stats.set_conntrack_control_available(false);
        stats.set_conntrack_pressure_active(false);
        stats.set_conntrack_event_queue_depth(0);
        stats.set_conntrack_rule_apply_ok(false);
        shared.disable_conntrack_close_sender();
        shared.set_conntrack_pressure_active(false);
        if enabled {
            warn!(
                "conntrack control is configured but unsupported on this OS; disabling runtime worker"
            );
        }
        return;
    }

    // Bounded channel caps memory if the worker falls behind producers.
    let (tx, rx) = mpsc::channel(CONNTRACK_EVENT_QUEUE_CAPACITY);
    shared.set_conntrack_close_sender(tx);
    tokio::spawn(async move {
        run_conntrack_controller(config_rx, stats, shared, rx).await;
    });
}
|
||||
|
||||
/// Main loop of the conntrack controller.
///
/// Reacts to three event sources via `select!`:
/// - config reloads: re-pick backend, reset delete budget, re-apply rules;
/// - close events: under active pressure, delete the matching conntrack entry
///   (rate-limited by a per-second token budget);
/// - a 1s tick: refill the budget, sample pressure signals, and update the
///   pressure state machine.
///
/// Exits when either the config channel or the event channel closes, leaving
/// pressure mode deactivated.
async fn run_conntrack_controller(
    mut config_rx: watch::Receiver<Arc<ProxyConfig>>,
    stats: Arc<Stats>,
    shared: Arc<ProxySharedState>,
    mut close_rx: mpsc::Receiver<ConntrackCloseEvent>,
) {
    let mut cfg = config_rx.borrow().clone();
    let mut pressure_state = PressureState::new(stats.as_ref());
    let mut delete_budget_tokens = cfg.server.conntrack_control.delete_budget_per_sec;
    let mut backend = pick_backend(cfg.server.conntrack_control.backend);

    // Publish initial availability and apply rules before serving events.
    apply_runtime_state(
        stats.as_ref(),
        shared.as_ref(),
        &cfg,
        backend.is_some(),
        false,
    );
    reconcile_rules(&cfg, backend, stats.as_ref()).await;

    loop {
        tokio::select! {
            changed = config_rx.changed() => {
                if changed.is_err() {
                    // Config sender dropped: shut down.
                    break;
                }
                cfg = config_rx.borrow_and_update().clone();
                backend = pick_backend(cfg.server.conntrack_control.backend);
                delete_budget_tokens = cfg.server.conntrack_control.delete_budget_per_sec;
                apply_runtime_state(stats.as_ref(), shared.as_ref(), &cfg, backend.is_some(), pressure_state.active);
                reconcile_rules(&cfg, backend, stats.as_ref()).await;
            }
            event = close_rx.recv() => {
                let Some(event) = event else {
                    // All senders dropped: shut down.
                    break;
                };
                stats.set_conntrack_event_queue_depth(close_rx.len() as u64);
                // Deletes only happen when the feature is on, pressure mode is
                // active, the close reason qualifies, and budget remains.
                if !cfg.server.conntrack_control.inline_conntrack_control {
                    continue;
                }
                if !pressure_state.active {
                    continue;
                }
                if !matches!(event.reason, ConntrackCloseReason::Timeout | ConntrackCloseReason::Pressure | ConntrackCloseReason::Reset) {
                    continue;
                }
                if delete_budget_tokens == 0 {
                    continue;
                }
                stats.increment_conntrack_delete_attempt_total();
                // Every attempt consumes one token regardless of outcome,
                // bounding the rate of conntrack invocations.
                match delete_conntrack_entry(event).await {
                    DeleteOutcome::Deleted => {
                        delete_budget_tokens = delete_budget_tokens.saturating_sub(1);
                        stats.increment_conntrack_delete_success_total();
                    }
                    DeleteOutcome::NotFound => {
                        delete_budget_tokens = delete_budget_tokens.saturating_sub(1);
                        stats.increment_conntrack_delete_not_found_total();
                    }
                    DeleteOutcome::Error => {
                        delete_budget_tokens = delete_budget_tokens.saturating_sub(1);
                        stats.increment_conntrack_delete_error_total();
                    }
                }
            }
            _ = tokio::time::sleep(PRESSURE_SAMPLE_INTERVAL) => {
                // Per-second tick: refill delete budget and resample pressure.
                delete_budget_tokens = cfg.server.conntrack_control.delete_budget_per_sec;
                stats.set_conntrack_event_queue_depth(close_rx.len() as u64);
                let sample = collect_pressure_sample(stats.as_ref(), &cfg, &mut pressure_state);
                update_pressure_state(
                    stats.as_ref(),
                    shared.as_ref(),
                    &cfg,
                    &sample,
                    &mut pressure_state,
                );
                if pressure_state.active {
                    note_global_relay_pressure(shared.as_ref());
                }
            }
        }
    }

    // Leave a clean state behind on shutdown.
    shared.disable_conntrack_close_sender();
    shared.set_conntrack_pressure_active(false);
    stats.set_conntrack_pressure_active(false);
}
|
||||
|
||||
/// Publishes the feature's enabled/available/pressure flags to stats and
/// shared state, warning once per call when the feature is enabled but
/// cannot operate (no backend binaries or missing CAP_NET_ADMIN).
fn apply_runtime_state(
    stats: &Stats,
    shared: &ProxySharedState,
    cfg: &ProxyConfig,
    backend_available: bool,
    pressure_active: bool,
) {
    let enabled = cfg.server.conntrack_control.inline_conntrack_control;
    // Availability requires: feature on, a working backend, and CAP_NET_ADMIN.
    let available = enabled && backend_available && has_cap_net_admin();
    if enabled && !available {
        warn!(
            "conntrack control enabled but unavailable (missing CAP_NET_ADMIN or backend binaries)"
        );
    }
    stats.set_conntrack_control_enabled(enabled);
    stats.set_conntrack_control_available(available);
    // Pressure is only reported while the feature itself is enabled.
    shared.set_conntrack_pressure_active(enabled && pressure_active);
    stats.set_conntrack_pressure_active(enabled && pressure_active);
}
|
||||
|
||||
/// Gathers the per-second pressure signals and advances the delta baselines
/// stored in `state`.
///
/// Percentages are clamped to 100; `conn_pct` is `None` when no connection
/// limit is configured and `fd_pct` is `None` when the FD limit can't be read.
fn collect_pressure_sample(
    stats: &Stats,
    cfg: &ProxyConfig,
    state: &mut PressureState,
) -> PressureSample {
    let current_connections = stats.get_current_connections_total();
    let conn_pct = if cfg.server.max_connections == 0 {
        // 0 means "unlimited": no meaningful percentage.
        None
    } else {
        Some(
            ((current_connections.saturating_mul(100)) / u64::from(cfg.server.max_connections))
                .min(100) as u8,
        )
    };

    let fd_pct = fd_usage_pct();

    // Counters are monotonic; saturating_sub guards against restarts/races.
    let accept_total = stats.get_accept_permit_timeout_total();
    let accept_delta = accept_total.saturating_sub(state.prev_accept_timeout_total);
    state.prev_accept_timeout_total = accept_total;

    let me_total = stats.get_me_c2me_send_full_total();
    let me_delta = me_total.saturating_sub(state.prev_me_queue_pressure_total);
    state.prev_me_queue_pressure_total = me_total;

    PressureSample {
        conn_pct,
        fd_pct,
        accept_timeout_delta: accept_delta,
        me_queue_pressure_delta: me_delta,
    }
}
|
||||
|
||||
/// Advances the pressure hysteresis machine from one sample.
///
/// Activation is immediate when any high signal fires (conn/fd percentage at
/// or above the high watermark, or any accept-timeout / ME-queue-full delta).
/// Release requires `PRESSURE_RELEASE_TICKS` consecutive fully-quiet samples
/// at or below the low watermark. Disabling the feature deactivates at once.
fn update_pressure_state(
    stats: &Stats,
    shared: &ProxySharedState,
    cfg: &ProxyConfig,
    sample: &PressureSample,
    state: &mut PressureState,
) {
    if !cfg.server.conntrack_control.inline_conntrack_control {
        // Feature turned off: force-release if currently active.
        if state.active {
            state.active = false;
            state.low_streak = 0;
            shared.set_conntrack_pressure_active(false);
            stats.set_conntrack_pressure_active(false);
            info!("Conntrack pressure mode deactivated (feature disabled)");
        }
        return;
    }

    let high = cfg.server.conntrack_control.pressure_high_watermark_pct;
    let low = cfg.server.conntrack_control.pressure_low_watermark_pct;

    // Any single high signal is enough to trigger.
    let high_hit = sample.conn_pct.is_some_and(|v| v >= high)
        || sample.fd_pct.is_some_and(|v| v >= high)
        || sample.accept_timeout_delta > 0
        || sample.me_queue_pressure_delta > 0;

    // Release requires ALL signals quiet; absent percentages count as quiet.
    let low_clear = sample.conn_pct.is_none_or(|v| v <= low)
        && sample.fd_pct.is_none_or(|v| v <= low)
        && sample.accept_timeout_delta == 0
        && sample.me_queue_pressure_delta == 0;

    if !state.active && high_hit {
        state.active = true;
        state.low_streak = 0;
        shared.set_conntrack_pressure_active(true);
        stats.set_conntrack_pressure_active(true);
        info!(
            conn_pct = ?sample.conn_pct,
            fd_pct = ?sample.fd_pct,
            accept_timeout_delta = sample.accept_timeout_delta,
            me_queue_pressure_delta = sample.me_queue_pressure_delta,
            "Conntrack pressure mode activated"
        );
        return;
    }

    if state.active && low_clear {
        state.low_streak = state.low_streak.saturating_add(1);
        if state.low_streak >= PRESSURE_RELEASE_TICKS {
            state.active = false;
            state.low_streak = 0;
            shared.set_conntrack_pressure_active(false);
            stats.set_conntrack_pressure_active(false);
            info!("Conntrack pressure mode deactivated");
        }
        return;
    }

    // Neither fresh activation nor a quiet sample: restart the release streak.
    state.low_streak = 0;
}
|
||||
|
||||
/// Brings netfilter rules in line with the current config and records the
/// outcome in `conntrack_rule_apply_ok`.
///
/// Disabled feature => best-effort cleanup of rules from all backends
/// (reported as ok). Missing CAP_NET_ADMIN or no backend => reported as
/// not-ok without touching rules.
async fn reconcile_rules(cfg: &ProxyConfig, backend: Option<NetfilterBackend>, stats: &Stats) {
    if !cfg.server.conntrack_control.inline_conntrack_control {
        clear_notrack_rules_all_backends().await;
        stats.set_conntrack_rule_apply_ok(true);
        return;
    }

    if !has_cap_net_admin() {
        stats.set_conntrack_rule_apply_ok(false);
        return;
    }

    let Some(backend) = backend else {
        stats.set_conntrack_rule_apply_ok(false);
        return;
    };

    let apply_result = match backend {
        NetfilterBackend::Nftables => apply_nft_rules(cfg).await,
        NetfilterBackend::Iptables => apply_iptables_rules(cfg).await,
    };

    if let Err(error) = apply_result {
        warn!(error = %error, "Failed to reconcile conntrack/notrack rules");
        stats.set_conntrack_rule_apply_ok(false);
    } else {
        stats.set_conntrack_rule_apply_ok(true);
    }
}
|
||||
|
||||
fn pick_backend(configured: ConntrackBackend) -> Option<NetfilterBackend> {
|
||||
match configured {
|
||||
ConntrackBackend::Auto => {
|
||||
if command_exists("nft") {
|
||||
Some(NetfilterBackend::Nftables)
|
||||
} else if command_exists("iptables") {
|
||||
Some(NetfilterBackend::Iptables)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
ConntrackBackend::Nftables => command_exists("nft").then_some(NetfilterBackend::Nftables),
|
||||
ConntrackBackend::Iptables => {
|
||||
command_exists("iptables").then_some(NetfilterBackend::Iptables)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true when `binary` resolves to a regular file in any `PATH` entry.
/// (`Path::is_file` already implies existence, so no separate `exists` check.)
fn command_exists(binary: &str) -> bool {
    match std::env::var_os("PATH") {
        None => false,
        Some(path_var) => std::env::split_paths(&path_var)
            .map(|dir| dir.join(binary))
            .any(|candidate| candidate.is_file()),
    }
}
|
||||
|
||||
/// Computes the destination-address targets for notrack rules, split into
/// IPv4 and IPv6 lists.
///
/// A `None` entry means "no daddr match" (wildcard), produced when a listener
/// binds an unspecified address. `Tracked` mode yields no targets; `Notrack`
/// derives them from the configured listeners (or the legacy single-address
/// fields when `listeners` is empty); `Hybrid` uses only the explicitly
/// configured `hybrid_listener_ips`. BTreeSets deduplicate and give a stable
/// order.
fn notrack_targets(cfg: &ProxyConfig) -> (Vec<Option<IpAddr>>, Vec<Option<IpAddr>>) {
    let mode = cfg.server.conntrack_control.mode;
    let mut v4_targets: BTreeSet<Option<IpAddr>> = BTreeSet::new();
    let mut v6_targets: BTreeSet<Option<IpAddr>> = BTreeSet::new();

    match mode {
        ConntrackMode::Tracked => {}
        ConntrackMode::Notrack => {
            if cfg.server.listeners.is_empty() {
                // Legacy single-address config path.
                if let Some(ipv4) = cfg
                    .server
                    .listen_addr_ipv4
                    .as_ref()
                    .and_then(|s| s.parse::<IpAddr>().ok())
                {
                    if ipv4.is_unspecified() {
                        v4_targets.insert(None);
                    } else {
                        v4_targets.insert(Some(ipv4));
                    }
                }
                if let Some(ipv6) = cfg
                    .server
                    .listen_addr_ipv6
                    .as_ref()
                    .and_then(|s| s.parse::<IpAddr>().ok())
                {
                    if ipv6.is_unspecified() {
                        v6_targets.insert(None);
                    } else {
                        v6_targets.insert(Some(ipv6));
                    }
                }
            } else {
                for listener in &cfg.server.listeners {
                    if listener.ip.is_ipv4() {
                        if listener.ip.is_unspecified() {
                            v4_targets.insert(None);
                        } else {
                            v4_targets.insert(Some(listener.ip));
                        }
                    } else if listener.ip.is_unspecified() {
                        v6_targets.insert(None);
                    } else {
                        v6_targets.insert(Some(listener.ip));
                    }
                }
            }
        }
        ConntrackMode::Hybrid => {
            for ip in &cfg.server.conntrack_control.hybrid_listener_ips {
                if ip.is_ipv4() {
                    v4_targets.insert(Some(*ip));
                } else {
                    v6_targets.insert(Some(*ip));
                }
            }
        }
    }

    (
        v4_targets.into_iter().collect(),
        v6_targets.into_iter().collect(),
    )
}
|
||||
|
||||
/// Applies notrack rules via nftables.
///
/// Always deletes the `inet telemt_conntrack` table first (error ignored when
/// it does not exist), then — unless mode is `Tracked` — recreates it with one
/// notrack rule per target in a `prerouting` raw-priority chain, fed to
/// `nft -f -` on stdin.
async fn apply_nft_rules(cfg: &ProxyConfig) -> Result<(), String> {
    // Best-effort removal of any previous table; fails harmlessly if absent.
    let _ = run_command(
        "nft",
        &["delete", "table", "inet", "telemt_conntrack"],
        None,
    )
    .await;
    if matches!(cfg.server.conntrack_control.mode, ConntrackMode::Tracked) {
        return Ok(());
    }

    let (v4_targets, v6_targets) = notrack_targets(cfg);
    let mut rules = Vec::new();
    for ip in v4_targets {
        // None target => wildcard rule without a daddr match.
        let rule = if let Some(ip) = ip {
            format!("tcp dport {} ip daddr {} notrack", cfg.server.port, ip)
        } else {
            format!("tcp dport {} notrack", cfg.server.port)
        };
        rules.push(rule);
    }
    for ip in v6_targets {
        let rule = if let Some(ip) = ip {
            format!("tcp dport {} ip6 daddr {} notrack", cfg.server.port, ip)
        } else {
            format!("tcp dport {} notrack", cfg.server.port)
        };
        rules.push(rule);
    }

    let rule_blob = if rules.is_empty() {
        String::new()
    } else {
        format!("    {}\n", rules.join("\n    "))
    };
    let script = format!(
        "table inet telemt_conntrack {{\n  chain preraw {{\n    type filter hook prerouting priority raw; policy accept;\n{rule_blob}  }}\n}}\n"
    );
    run_command("nft", &["-f", "-"], Some(script)).await
}
|
||||
|
||||
/// Applies notrack rules for both IP families via the iptables backend.
/// Each family is handled by `apply_iptables_rules_for_binary`; a missing
/// binary for one family is tolerated there, other errors propagate.
async fn apply_iptables_rules(cfg: &ProxyConfig) -> Result<(), String> {
    apply_iptables_rules_for_binary("iptables", cfg, true).await?;
    apply_iptables_rules_for_binary("ip6tables", cfg, false).await?;
    Ok(())
}
|
||||
|
||||
/// Rebuilds the `TELEMT_NOTRACK` raw-table chain for one iptables binary.
///
/// Sequence: tear down any old chain (unhook, flush, delete — errors ignored),
/// return early in `Tracked` mode, otherwise recreate the chain, ensure it is
/// hooked at position 1 of raw PREROUTING (idempotent via `-C` check), and
/// append one `CT --notrack` rule per target. A missing binary is a no-op so
/// that e.g. IPv6-less hosts still succeed.
async fn apply_iptables_rules_for_binary(
    binary: &str,
    cfg: &ProxyConfig,
    ipv4: bool,
) -> Result<(), String> {
    if !command_exists(binary) {
        return Ok(());
    }
    let chain = "TELEMT_NOTRACK";
    // Best-effort teardown of any previous state.
    let _ = run_command(
        binary,
        &["-t", "raw", "-D", "PREROUTING", "-j", chain],
        None,
    )
    .await;
    let _ = run_command(binary, &["-t", "raw", "-F", chain], None).await;
    let _ = run_command(binary, &["-t", "raw", "-X", chain], None).await;

    if matches!(cfg.server.conntrack_control.mode, ConntrackMode::Tracked) {
        return Ok(());
    }

    run_command(binary, &["-t", "raw", "-N", chain], None).await?;
    run_command(binary, &["-t", "raw", "-F", chain], None).await?;
    // -C fails when the jump is absent; only then insert it.
    if run_command(
        binary,
        &["-t", "raw", "-C", "PREROUTING", "-j", chain],
        None,
    )
    .await
    .is_err()
    {
        run_command(
            binary,
            &["-t", "raw", "-I", "PREROUTING", "1", "-j", chain],
            None,
        )
        .await?;
    }

    let (v4_targets, v6_targets) = notrack_targets(cfg);
    let selected = if ipv4 { v4_targets } else { v6_targets };
    for ip in selected {
        let mut args = vec![
            "-t".to_string(),
            "raw".to_string(),
            "-A".to_string(),
            chain.to_string(),
            "-p".to_string(),
            "tcp".to_string(),
            "--dport".to_string(),
            cfg.server.port.to_string(),
        ];
        // None target => wildcard rule without a -d match.
        if let Some(ip) = ip {
            args.push("-d".to_string());
            args.push(ip.to_string());
        }
        args.push("-j".to_string());
        args.push("CT".to_string());
        args.push("--notrack".to_string());
        let arg_refs: Vec<&str> = args.iter().map(String::as_str).collect();
        run_command(binary, &arg_refs, None).await?;
    }
    Ok(())
}
|
||||
|
||||
async fn clear_notrack_rules_all_backends() {
|
||||
let _ = run_command(
|
||||
"nft",
|
||||
&["delete", "table", "inet", "telemt_conntrack"],
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
let _ = run_command(
|
||||
"iptables",
|
||||
&["-t", "raw", "-D", "PREROUTING", "-j", "TELEMT_NOTRACK"],
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
let _ = run_command("iptables", &["-t", "raw", "-F", "TELEMT_NOTRACK"], None).await;
|
||||
let _ = run_command("iptables", &["-t", "raw", "-X", "TELEMT_NOTRACK"], None).await;
|
||||
let _ = run_command(
|
||||
"ip6tables",
|
||||
&["-t", "raw", "-D", "PREROUTING", "-j", "TELEMT_NOTRACK"],
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
let _ = run_command("ip6tables", &["-t", "raw", "-F", "TELEMT_NOTRACK"], None).await;
|
||||
let _ = run_command("ip6tables", &["-t", "raw", "-X", "TELEMT_NOTRACK"], None).await;
|
||||
}
|
||||
|
||||
/// Result of one `conntrack -D` invocation (see `delete_conntrack_entry`).
enum DeleteOutcome {
    /// The entry was deleted.
    Deleted,
    /// conntrack reported "0 flow entries have been deleted".
    NotFound,
    /// The binary is missing or the command failed for another reason.
    Error,
}
|
||||
|
||||
/// Deletes the conntrack entry for a closed TCP connection, matching on the
/// full 4-tuple from `event` via `conntrack -D`.
///
/// Distinguishes "nothing matched" (conntrack prints
/// "0 flow entries have been deleted" and exits non-zero) from real errors.
async fn delete_conntrack_entry(event: ConntrackCloseEvent) -> DeleteOutcome {
    if !command_exists("conntrack") {
        return DeleteOutcome::Error;
    }
    let args = vec![
        "-D".to_string(),
        "-p".to_string(),
        "tcp".to_string(),
        "-s".to_string(),
        event.src.ip().to_string(),
        "--sport".to_string(),
        event.src.port().to_string(),
        "-d".to_string(),
        event.dst.ip().to_string(),
        "--dport".to_string(),
        event.dst.port().to_string(),
    ];
    let arg_refs: Vec<&str> = args.iter().map(String::as_str).collect();
    match run_command("conntrack", &arg_refs, None).await {
        Ok(()) => DeleteOutcome::Deleted,
        Err(error) => {
            if error.contains("0 flow entries have been deleted") {
                DeleteOutcome::NotFound
            } else {
                debug!(error = %error, "conntrack delete failed");
                DeleteOutcome::Error
            }
        }
    }
}
|
||||
|
||||
/// Runs `binary` with `args`, optionally feeding `stdin`, and returns `Ok(())`
/// on a zero exit status.
///
/// On failure the error string is the trimmed stderr output when non-empty,
/// otherwise a generic exit-status message (callers match on stderr text, e.g.
/// `delete_conntrack_entry`). Stdout is discarded.
async fn run_command(binary: &str, args: &[&str], stdin: Option<String>) -> Result<(), String> {
    if !command_exists(binary) {
        return Err(format!("{binary} is not available"));
    }
    let mut command = Command::new(binary);
    command.args(args);
    if stdin.is_some() {
        command.stdin(std::process::Stdio::piped());
    }
    command.stdout(std::process::Stdio::null());
    command.stderr(std::process::Stdio::piped());
    let mut child = command
        .spawn()
        .map_err(|e| format!("spawn {binary} failed: {e}"))?;
    if let Some(blob) = stdin
        && let Some(mut writer) = child.stdin.take()
    {
        writer
            .write_all(blob.as_bytes())
            .await
            .map_err(|e| format!("stdin write {binary} failed: {e}"))?;
        // `writer` drops here, closing stdin so the child sees EOF before we wait.
    }
    let output = child
        .wait_with_output()
        .await
        .map_err(|e| format!("wait {binary} failed: {e}"))?;
    if output.status.success() {
        return Ok(());
    }
    let stderr = String::from_utf8_lossy(&output.stderr).trim().to_string();
    Err(if stderr.is_empty() {
        format!("{binary} exited with status {}", output.status)
    } else {
        stderr
    })
}
|
||||
|
||||
/// Returns the process's file-descriptor usage as a percentage (clamped to
/// 100) of the NOFILE soft limit, or `None` when the limit is unavailable,
/// zero, or `/proc/self/fd` cannot be read.
///
/// NOTE(review): the count includes the fd opened by `read_dir` itself —
/// negligible for the watermark comparison this feeds.
fn fd_usage_pct() -> Option<u8> {
    let soft_limit = nofile_soft_limit()?;
    if soft_limit == 0 {
        return None;
    }
    let fd_count = std::fs::read_dir("/proc/self/fd").ok()?.count() as u64;
    Some(((fd_count.saturating_mul(100)) / soft_limit).min(100) as u8)
}
|
||||
|
||||
/// Reads the RLIMIT_NOFILE soft limit via `getrlimit`.
/// Returns `None` on non-Linux targets or when the syscall fails.
fn nofile_soft_limit() -> Option<u64> {
    #[cfg(target_os = "linux")]
    {
        let mut lim = libc::rlimit {
            rlim_cur: 0,
            rlim_max: 0,
        };
        // SAFETY: `lim` is a valid, initialized rlimit struct and the pointer
        // lives for the duration of the call.
        let rc = unsafe { libc::getrlimit(libc::RLIMIT_NOFILE, &mut lim) };
        if rc != 0 {
            return None;
        }
        return Some(lim.rlim_cur);
    }
    #[cfg(not(target_os = "linux"))]
    {
        None
    }
}
|
||||
|
||||
/// Checks whether the process's effective capability set includes
/// CAP_NET_ADMIN by parsing the `CapEff:` line of `/proc/self/status`.
/// Returns false on any read/parse failure and on non-Linux targets.
fn has_cap_net_admin() -> bool {
    #[cfg(target_os = "linux")]
    {
        // Bit index of CAP_NET_ADMIN in the kernel capability bitmap.
        const CAP_NET_ADMIN_BIT: u64 = 12;
        let Ok(status) = std::fs::read_to_string("/proc/self/status") else {
            return false;
        };
        status
            .lines()
            .filter_map(|line| line.strip_prefix("CapEff:"))
            .filter_map(|raw| u64::from_str_radix(raw.trim(), 16).ok())
            .next()
            .is_some_and(|bits| bits & (1u64 << CAP_NET_ADMIN_BIT) != 0)
    }
    #[cfg(not(target_os = "linux"))]
    {
        false
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::config::ProxyConfig;

    /// Any non-zero accept-timeout delta must trigger pressure mode even when
    /// conn/fd percentages are low.
    #[test]
    fn pressure_activates_on_accept_timeout_spike() {
        let stats = Stats::new();
        let shared = ProxySharedState::new();
        let mut cfg = ProxyConfig::default();
        cfg.server.conntrack_control.inline_conntrack_control = true;
        let mut state = PressureState::new(&stats);
        let sample = PressureSample {
            conn_pct: Some(10),
            fd_pct: Some(10),
            accept_timeout_delta: 1,
            me_queue_pressure_delta: 0,
        };

        update_pressure_state(&stats, shared.as_ref(), &cfg, &sample, &mut state);

        assert!(state.active);
        assert!(shared.conntrack_pressure_active());
        assert!(stats.get_conntrack_pressure_active());
    }

    /// Release needs PRESSURE_RELEASE_TICKS (3) consecutive quiet samples:
    /// the first two low samples keep pressure active, the third clears it.
    #[test]
    fn pressure_releases_after_hysteresis_window() {
        let stats = Stats::new();
        let shared = ProxySharedState::new();
        let mut cfg = ProxyConfig::default();
        cfg.server.conntrack_control.inline_conntrack_control = true;
        let mut state = PressureState::new(&stats);

        let high_sample = PressureSample {
            conn_pct: Some(95),
            fd_pct: Some(95),
            accept_timeout_delta: 0,
            me_queue_pressure_delta: 0,
        };
        update_pressure_state(&stats, shared.as_ref(), &cfg, &high_sample, &mut state);
        assert!(state.active);

        let low_sample = PressureSample {
            conn_pct: Some(10),
            fd_pct: Some(10),
            accept_timeout_delta: 0,
            me_queue_pressure_delta: 0,
        };
        update_pressure_state(&stats, shared.as_ref(), &cfg, &low_sample, &mut state);
        assert!(state.active);
        update_pressure_state(&stats, shared.as_ref(), &cfg, &low_sample, &mut state);
        assert!(state.active);
        update_pressure_state(&stats, shared.as_ref(), &cfg, &low_sample, &mut state);

        assert!(!state.active);
        assert!(!shared.conntrack_pressure_active());
        assert!(!stats.get_conntrack_pressure_active());
    }

    /// With the feature disabled, even maximal signals must not activate
    /// pressure mode.
    #[test]
    fn pressure_does_not_activate_when_disabled() {
        let stats = Stats::new();
        let shared = ProxySharedState::new();
        let mut cfg = ProxyConfig::default();
        cfg.server.conntrack_control.inline_conntrack_control = false;
        let mut state = PressureState::new(&stats);
        let sample = PressureSample {
            conn_pct: Some(100),
            fd_pct: Some(100),
            accept_timeout_delta: 10,
            me_queue_pressure_delta: 10,
        };

        update_pressure_state(&stats, shared.as_ref(), &cfg, &sample, &mut state);

        assert!(!state.active);
        assert!(!shared.conntrack_pressure_active());
        assert!(!stats.get_conntrack_pressure_active());
    }
}
|
||||
|
|
@ -0,0 +1,541 @@
|
|||
//! Unix daemon support for telemt.
|
||||
//!
|
||||
//! Provides classic Unix daemonization (double-fork), PID file management,
|
||||
//! and privilege dropping for running telemt as a background service.
|
||||
|
||||
use std::fs::{self, File, OpenOptions};
|
||||
use std::io::{self, Read, Write};
|
||||
use std::os::unix::fs::OpenOptionsExt;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use nix::fcntl::{Flock, FlockArg};
|
||||
use nix::unistd::{self, ForkResult, Gid, Pid, Uid, chdir, close, fork, getpid, setsid};
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
/// Default PID file location.
|
||||
pub const DEFAULT_PID_FILE: &str = "/var/run/telemt.pid";
|
||||
|
||||
/// Daemon configuration options parsed from CLI.
///
/// `foreground` wins over `daemonize` (see [`DaemonOptions::should_daemonize`]),
/// which allows systemd `Type=simple` units to override a config default.
#[derive(Debug, Clone, Default)]
pub struct DaemonOptions {
    /// Run as daemon (fork to background).
    pub daemonize: bool,
    /// Path to PID file. `None` falls back to [`DEFAULT_PID_FILE`].
    pub pid_file: Option<PathBuf>,
    /// User to run as after binding sockets.
    pub user: Option<String>,
    /// Group to run as after binding sockets. When `None` but `user` is set,
    /// the user's primary group is used (see `drop_privileges`).
    pub group: Option<String>,
    /// Working directory for the daemon (default `/`).
    pub working_dir: Option<PathBuf>,
    /// Explicit foreground mode (for systemd Type=simple).
    pub foreground: bool,
}
|
||||
|
||||
impl DaemonOptions {
|
||||
/// Returns the effective PID file path.
|
||||
pub fn pid_file_path(&self) -> &Path {
|
||||
self.pid_file
|
||||
.as_deref()
|
||||
.unwrap_or(Path::new(DEFAULT_PID_FILE))
|
||||
}
|
||||
|
||||
/// Returns true if we should actually daemonize.
|
||||
/// Foreground flag takes precedence.
|
||||
pub fn should_daemonize(&self) -> bool {
|
||||
self.daemonize && !self.foreground
|
||||
}
|
||||
}
|
||||
|
||||
/// Error types for daemon operations.
///
/// Display strings come from the `thiserror` attributes; `#[source]` /
/// `#[from]` preserve the underlying cause for error-chain reporting.
#[derive(Debug, thiserror::Error)]
pub enum DaemonError {
    #[error("fork failed: {0}")]
    ForkFailed(#[source] nix::Error),

    #[error("setsid failed: {0}")]
    SetsidFailed(#[source] nix::Error),

    #[error("chdir failed: {0}")]
    ChdirFailed(#[source] nix::Error),

    #[error("failed to open /dev/null: {0}")]
    DevNullFailed(#[source] io::Error),

    #[error("failed to redirect stdio: {0}")]
    RedirectFailed(#[source] nix::Error),

    // Catch-all for PID file problems; carries a preformatted message.
    #[error("PID file error: {0}")]
    PidFile(String),

    // Carries the PID of the already-running instance.
    #[error("another instance is already running (pid {0})")]
    AlreadyRunning(i32),

    #[error("user '{0}' not found")]
    UserNotFound(String),

    #[error("group '{0}' not found")]
    GroupNotFound(String),

    #[error("failed to set uid/gid: {0}")]
    PrivilegeDrop(#[source] nix::Error),

    #[error("io error: {0}")]
    Io(#[from] io::Error),
}
|
||||
|
||||
/// Result of a successful daemonize() call.
///
/// Both the parent and the final child return from `daemonize()`; the caller
/// must inspect this value and exit promptly in the `Parent` case.
#[derive(Debug)]
pub enum DaemonizeResult {
    /// We are the parent process and should exit.
    Parent,
    /// We are the daemon child process and should continue.
    Child,
}
|
||||
|
||||
/// Performs classic Unix double-fork daemonization.
|
||||
///
|
||||
/// This detaches the process from the controlling terminal:
|
||||
/// 1. First fork - parent exits, child continues
|
||||
/// 2. setsid() - become session leader
|
||||
/// 3. Second fork - ensure we can never acquire a controlling terminal
|
||||
/// 4. chdir("/") - don't hold any directory open
|
||||
/// 5. Redirect stdin/stdout/stderr to /dev/null
|
||||
///
|
||||
/// Returns `DaemonizeResult::Parent` in the original parent (which should exit),
|
||||
/// or `DaemonizeResult::Child` in the final daemon child.
|
||||
pub fn daemonize(working_dir: Option<&Path>) -> Result<DaemonizeResult, DaemonError> {
|
||||
// First fork
|
||||
match unsafe { fork() } {
|
||||
Ok(ForkResult::Parent { .. }) => {
|
||||
// Parent exits
|
||||
return Ok(DaemonizeResult::Parent);
|
||||
}
|
||||
Ok(ForkResult::Child) => {
|
||||
// Child continues
|
||||
}
|
||||
Err(e) => return Err(DaemonError::ForkFailed(e)),
|
||||
}
|
||||
|
||||
// Create new session, become session leader
|
||||
setsid().map_err(DaemonError::SetsidFailed)?;
|
||||
|
||||
// Second fork to ensure we can never acquire a controlling terminal
|
||||
match unsafe { fork() } {
|
||||
Ok(ForkResult::Parent { .. }) => {
|
||||
// Intermediate parent exits
|
||||
std::process::exit(0);
|
||||
}
|
||||
Ok(ForkResult::Child) => {
|
||||
// Final daemon child continues
|
||||
}
|
||||
Err(e) => return Err(DaemonError::ForkFailed(e)),
|
||||
}
|
||||
|
||||
// Change working directory
|
||||
let target_dir = working_dir.unwrap_or(Path::new("/"));
|
||||
chdir(target_dir).map_err(DaemonError::ChdirFailed)?;
|
||||
|
||||
// Redirect stdin, stdout, stderr to /dev/null
|
||||
redirect_stdio_to_devnull()?;
|
||||
|
||||
Ok(DaemonizeResult::Child)
|
||||
}
|
||||
|
||||
/// Redirects stdin, stdout, and stderr to /dev/null.
|
||||
fn redirect_stdio_to_devnull() -> Result<(), DaemonError> {
|
||||
let devnull = File::options()
|
||||
.read(true)
|
||||
.write(true)
|
||||
.open("/dev/null")
|
||||
.map_err(DaemonError::DevNullFailed)?;
|
||||
|
||||
let devnull_fd = std::os::unix::io::AsRawFd::as_raw_fd(&devnull);
|
||||
|
||||
// Use libc::dup2 directly for redirecting standard file descriptors
|
||||
// nix 0.31's dup2 requires OwnedFd which doesn't work well with stdio fds
|
||||
unsafe {
|
||||
// Redirect stdin (fd 0)
|
||||
if libc::dup2(devnull_fd, 0) < 0 {
|
||||
return Err(DaemonError::RedirectFailed(nix::errno::Errno::last()));
|
||||
}
|
||||
// Redirect stdout (fd 1)
|
||||
if libc::dup2(devnull_fd, 1) < 0 {
|
||||
return Err(DaemonError::RedirectFailed(nix::errno::Errno::last()));
|
||||
}
|
||||
// Redirect stderr (fd 2)
|
||||
if libc::dup2(devnull_fd, 2) < 0 {
|
||||
return Err(DaemonError::RedirectFailed(nix::errno::Errno::last()));
|
||||
}
|
||||
}
|
||||
|
||||
// Close original devnull fd if it's not one of the standard fds
|
||||
if devnull_fd > 2 {
|
||||
let _ = close(devnull_fd);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// PID file manager with flock-based locking.
///
/// Mutual exclusion is primarily based on the live-PID check in
/// `check_running()`; the flock serializes concurrent starters during
/// `acquire()`.
pub struct PidFile {
    // Location of the PID file on disk.
    path: PathBuf,
    // Open handle kept for the lifetime of the daemon; None before acquire()
    // and after release().
    file: Option<File>,
    // True once acquire() succeeded; drives Drop-time cleanup.
    locked: bool,
}
|
||||
|
||||
impl PidFile {
|
||||
/// Creates a new PID file manager for the given path.
|
||||
pub fn new<P: AsRef<Path>>(path: P) -> Self {
|
||||
Self {
|
||||
path: path.as_ref().to_path_buf(),
|
||||
file: None,
|
||||
locked: false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Checks if another instance is already running.
|
||||
///
|
||||
/// Returns the PID of the running instance if one exists.
|
||||
pub fn check_running(&self) -> Result<Option<i32>, DaemonError> {
|
||||
if !self.path.exists() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
// Try to read existing PID
|
||||
let mut contents = String::new();
|
||||
File::open(&self.path)
|
||||
.and_then(|mut f| f.read_to_string(&mut contents))
|
||||
.map_err(|e| {
|
||||
DaemonError::PidFile(format!("cannot read {}: {}", self.path.display(), e))
|
||||
})?;
|
||||
|
||||
let pid: i32 = contents
|
||||
.trim()
|
||||
.parse()
|
||||
.map_err(|_| DaemonError::PidFile(format!("invalid PID in {}", self.path.display())))?;
|
||||
|
||||
// Check if process is still running
|
||||
if is_process_running(pid) {
|
||||
Ok(Some(pid))
|
||||
} else {
|
||||
// Stale PID file
|
||||
debug!(pid, path = %self.path.display(), "Removing stale PID file");
|
||||
let _ = fs::remove_file(&self.path);
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
/// Acquires the PID file lock and writes the current PID.
|
||||
///
|
||||
/// Fails if another instance is already running.
|
||||
pub fn acquire(&mut self) -> Result<(), DaemonError> {
|
||||
// Check for running instance first
|
||||
if let Some(pid) = self.check_running()? {
|
||||
return Err(DaemonError::AlreadyRunning(pid));
|
||||
}
|
||||
|
||||
// Ensure parent directory exists
|
||||
if let Some(parent) = self.path.parent() {
|
||||
if !parent.exists() {
|
||||
fs::create_dir_all(parent).map_err(|e| {
|
||||
DaemonError::PidFile(format!(
|
||||
"cannot create directory {}: {}",
|
||||
parent.display(),
|
||||
e
|
||||
))
|
||||
})?;
|
||||
}
|
||||
}
|
||||
|
||||
// Open/create PID file with exclusive lock
|
||||
let file = OpenOptions::new()
|
||||
.write(true)
|
||||
.create(true)
|
||||
.truncate(true)
|
||||
.mode(0o644)
|
||||
.open(&self.path)
|
||||
.map_err(|e| {
|
||||
DaemonError::PidFile(format!("cannot open {}: {}", self.path.display(), e))
|
||||
})?;
|
||||
|
||||
// Try to acquire exclusive lock (non-blocking)
|
||||
let flock = Flock::lock(file, FlockArg::LockExclusiveNonblock).map_err(|(_, errno)| {
|
||||
// Check if another instance grabbed the lock
|
||||
if let Some(pid) = self.check_running().ok().flatten() {
|
||||
DaemonError::AlreadyRunning(pid)
|
||||
} else {
|
||||
DaemonError::PidFile(format!("cannot lock {}: {}", self.path.display(), errno))
|
||||
}
|
||||
})?;
|
||||
|
||||
// Write our PID
|
||||
let pid = getpid();
|
||||
let mut file = flock
|
||||
.unlock()
|
||||
.map_err(|(_, errno)| DaemonError::PidFile(format!("unlock failed: {}", errno)))?;
|
||||
|
||||
writeln!(file, "{}", pid).map_err(|e| {
|
||||
DaemonError::PidFile(format!(
|
||||
"cannot write PID to {}: {}",
|
||||
self.path.display(),
|
||||
e
|
||||
))
|
||||
})?;
|
||||
|
||||
// Re-acquire lock and keep it
|
||||
let flock = Flock::lock(file, FlockArg::LockExclusiveNonblock).map_err(|(_, errno)| {
|
||||
DaemonError::PidFile(format!("cannot re-lock {}: {}", self.path.display(), errno))
|
||||
})?;
|
||||
|
||||
self.file = Some(flock.unlock().map_err(|(_, errno)| {
|
||||
DaemonError::PidFile(format!("unlock for storage failed: {}", errno))
|
||||
})?);
|
||||
self.locked = true;
|
||||
|
||||
info!(pid = pid.as_raw(), path = %self.path.display(), "PID file created");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Releases the PID file lock and removes the file.
|
||||
pub fn release(&mut self) -> Result<(), DaemonError> {
|
||||
if let Some(file) = self.file.take() {
|
||||
drop(file);
|
||||
}
|
||||
self.locked = false;
|
||||
|
||||
if self.path.exists() {
|
||||
fs::remove_file(&self.path).map_err(|e| {
|
||||
DaemonError::PidFile(format!("cannot remove {}: {}", self.path.display(), e))
|
||||
})?;
|
||||
debug!(path = %self.path.display(), "PID file removed");
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns the path to this PID file.
|
||||
#[allow(dead_code)]
|
||||
pub fn path(&self) -> &Path {
|
||||
&self.path
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for PidFile {
|
||||
fn drop(&mut self) {
|
||||
if self.locked {
|
||||
if let Err(e) = self.release() {
|
||||
warn!(error = %e, "Failed to clean up PID file on drop");
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Checks if a process with the given PID is running.
///
/// NOTE(review): `kill` returning EPERM (process exists but belongs to
/// another user) also yields `false` here — acceptable for PID files this
/// process created itself, but worth confirming for the status CLI path.
fn is_process_running(pid: i32) -> bool {
    // kill(pid, 0) checks if process exists without sending a signal
    nix::sys::signal::kill(Pid::from_raw(pid), None).is_ok()
}
|
||||
|
||||
/// Drops privileges to the specified user and group.
///
/// This should be called after binding privileged ports but before
/// entering the main event loop.
///
/// # Errors
/// Fails if a name cannot be resolved or a setgid/setgroups/setuid call is
/// rejected (e.g. when not running as root).
pub fn drop_privileges(user: Option<&str>, group: Option<&str>) -> Result<(), DaemonError> {
    // Look up group first (need to do this while still root)
    let target_gid = if let Some(group_name) = group {
        Some(lookup_group(group_name)?)
    } else if let Some(user_name) = user {
        // If no group specified but user is, use user's primary group
        Some(lookup_user_primary_gid(user_name)?)
    } else {
        None
    };

    // Look up user
    let target_uid = if let Some(user_name) = user {
        Some(lookup_user(user_name)?)
    } else {
        None
    };

    // Drop privileges: set GID first, then UID
    // (Setting UID first would prevent us from setting GID)
    if let Some(gid) = target_gid {
        unistd::setgid(gid).map_err(DaemonError::PrivilegeDrop)?;
        // Also set supplementary groups to just this one — otherwise the
        // process would silently retain root's supplementary memberships.
        unistd::setgroups(&[gid]).map_err(DaemonError::PrivilegeDrop)?;
        info!(gid = gid.as_raw(), "Dropped group privileges");
    }

    if let Some(uid) = target_uid {
        // Irreversible: after this the process can no longer regain root.
        unistd::setuid(uid).map_err(DaemonError::PrivilegeDrop)?;
        info!(uid = uid.as_raw(), "Dropped user privileges");
    }

    Ok(())
}
|
||||
|
||||
/// Looks up a user by name and returns their UID.
///
/// NOTE(review): getpwnam(3) uses static storage and is not thread-safe;
/// assumed to run single-threaded during startup — confirm call sites.
fn lookup_user(name: &str) -> Result<Uid, DaemonError> {
    // A name with an interior NUL can never match a real account, so map the
    // CString failure to "not found".
    let c_name =
        std::ffi::CString::new(name).map_err(|_| DaemonError::UserNotFound(name.to_string()))?;

    // SAFETY: c_name is a valid NUL-terminated string; the returned pointer
    // is only dereferenced after the null check.
    unsafe {
        let pwd = libc::getpwnam(c_name.as_ptr());
        if pwd.is_null() {
            Err(DaemonError::UserNotFound(name.to_string()))
        } else {
            Ok(Uid::from_raw((*pwd).pw_uid))
        }
    }
}
|
||||
|
||||
/// Looks up a user's primary GID by username.
///
/// NOTE(review): getpwnam(3) is not thread-safe (static storage); assumed to
/// run single-threaded during startup — confirm call sites.
fn lookup_user_primary_gid(name: &str) -> Result<Gid, DaemonError> {
    // Interior NUL can never match a real account -> "not found".
    let c_name =
        std::ffi::CString::new(name).map_err(|_| DaemonError::UserNotFound(name.to_string()))?;

    // SAFETY: valid NUL-terminated input; pointer dereferenced only after the
    // null check.
    unsafe {
        let pwd = libc::getpwnam(c_name.as_ptr());
        if pwd.is_null() {
            Err(DaemonError::UserNotFound(name.to_string()))
        } else {
            Ok(Gid::from_raw((*pwd).pw_gid))
        }
    }
}
|
||||
|
||||
/// Looks up a group by name and returns its GID.
///
/// NOTE(review): getgrnam(3) is not thread-safe (static storage); assumed to
/// run single-threaded during startup — confirm call sites.
fn lookup_group(name: &str) -> Result<Gid, DaemonError> {
    // Interior NUL can never match a real group -> "not found".
    let c_name =
        std::ffi::CString::new(name).map_err(|_| DaemonError::GroupNotFound(name.to_string()))?;

    // SAFETY: valid NUL-terminated input; pointer dereferenced only after the
    // null check.
    unsafe {
        let grp = libc::getgrnam(c_name.as_ptr());
        if grp.is_null() {
            Err(DaemonError::GroupNotFound(name.to_string()))
        } else {
            Ok(Gid::from_raw((*grp).gr_gid))
        }
    }
}
|
||||
|
||||
/// Reads PID from a PID file.
|
||||
#[allow(dead_code)]
|
||||
pub fn read_pid_file<P: AsRef<Path>>(path: P) -> Result<i32, DaemonError> {
|
||||
let path = path.as_ref();
|
||||
let mut contents = String::new();
|
||||
File::open(path)
|
||||
.and_then(|mut f| f.read_to_string(&mut contents))
|
||||
.map_err(|e| DaemonError::PidFile(format!("cannot read {}: {}", path.display(), e)))?;
|
||||
|
||||
contents
|
||||
.trim()
|
||||
.parse()
|
||||
.map_err(|_| DaemonError::PidFile(format!("invalid PID in {}", path.display())))
|
||||
}
|
||||
|
||||
/// Sends a signal to the process specified in a PID file.
///
/// NOTE(review): there is an unavoidable TOCTOU window between the liveness
/// probe and the actual kill() — the process may exit (and its PID be
/// reused) in between. Acceptable for operator-driven commands.
#[allow(dead_code)]
pub fn signal_pid_file<P: AsRef<Path>>(
    path: P,
    signal: nix::sys::signal::Signal,
) -> Result<(), DaemonError> {
    let pid = read_pid_file(&path)?;

    // Refuse to signal a PID recorded by a process that is no longer alive.
    if !is_process_running(pid) {
        return Err(DaemonError::PidFile(format!(
            "process {} from {} is not running",
            pid,
            path.as_ref().display()
        )));
    }

    nix::sys::signal::kill(Pid::from_raw(pid), signal)
        .map_err(|e| DaemonError::PidFile(format!("cannot signal process {}: {}", pid, e)))?;

    Ok(())
}
|
||||
|
||||
/// Status of the daemon as derived from its PID file (see [`check_status`]).
#[allow(dead_code)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum DaemonStatus {
    /// Daemon is running with the given PID.
    Running(i32),
    /// PID file exists but process is not running (leftover from a crash).
    Stale(i32),
    /// No PID file exists.
    NotRunning,
}
|
||||
|
||||
/// Checks the daemon status from a PID file.
|
||||
#[allow(dead_code)]
|
||||
pub fn check_status<P: AsRef<Path>>(path: P) -> DaemonStatus {
|
||||
let path = path.as_ref();
|
||||
|
||||
if !path.exists() {
|
||||
return DaemonStatus::NotRunning;
|
||||
}
|
||||
|
||||
match read_pid_file(path) {
|
||||
Ok(pid) => {
|
||||
if is_process_running(pid) {
|
||||
DaemonStatus::Running(pid)
|
||||
} else {
|
||||
DaemonStatus::Stale(pid)
|
||||
}
|
||||
}
|
||||
Err(_) => DaemonStatus::NotRunning,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Builds a per-process temp path so parallel test runs (or multiple
    /// users on one machine) don't collide on a shared, hard-coded /tmp name.
    fn unique_tmp(name: &str) -> PathBuf {
        std::env::temp_dir().join(format!("telemt_test_{}_{}.pid", name, std::process::id()))
    }

    #[test]
    fn test_daemon_options_default() {
        let opts = DaemonOptions::default();
        assert!(!opts.daemonize);
        assert!(!opts.should_daemonize());
        // Default path falls back to the compile-time constant.
        assert_eq!(opts.pid_file_path(), Path::new(DEFAULT_PID_FILE));
    }

    #[test]
    fn test_daemon_options_foreground_overrides() {
        // foreground=true must win even when daemonize is requested.
        let opts = DaemonOptions {
            daemonize: true,
            foreground: true,
            ..Default::default()
        };
        assert!(!opts.should_daemonize());
    }

    #[test]
    fn test_check_status_not_running() {
        // A path that was never created reports NotRunning.
        let path = unique_tmp("nonexistent");
        assert_eq!(check_status(&path), DaemonStatus::NotRunning);
    }

    #[test]
    fn test_pid_file_basic() {
        let path = unique_tmp("pidfile");
        let _ = fs::remove_file(&path);

        let mut pf = PidFile::new(&path);
        assert!(pf.check_running().unwrap().is_none());

        pf.acquire().unwrap();
        assert!(path.exists());

        // The file must contain our own PID.
        let pid = read_pid_file(&path).unwrap();
        assert_eq!(pid, std::process::id() as i32);

        // release() removes the file from disk.
        pf.release().unwrap();
        assert!(!path.exists());
    }
}
|
||||
|
|
@ -216,6 +216,9 @@ pub enum ProxyError {
|
|||
#[error("Invalid proxy protocol header")]
|
||||
InvalidProxyProtocol,
|
||||
|
||||
#[error("Unknown TLS SNI")]
|
||||
UnknownTlsSni,
|
||||
|
||||
#[error("Proxy error: {0}")]
|
||||
Proxy(String),
|
||||
|
||||
|
|
|
|||
|
|
@ -26,6 +26,15 @@ pub struct UserIpTracker {
|
|||
cleanup_drain_lock: Arc<AsyncMutex<()>>,
|
||||
}
|
||||
|
||||
/// Point-in-time memory-footprint snapshot of a [`UserIpTracker`],
/// produced by `UserIpTracker::memory_stats`.
#[derive(Debug, Clone, Copy)]
pub struct UserIpTrackerMemoryStats {
    /// Number of users with at least one active IP entry.
    pub active_users: usize,
    /// Number of users with at least one recently-seen IP entry.
    pub recent_users: usize,
    /// Total active (user, IP) entries across all users.
    pub active_entries: usize,
    /// Total recent (user, IP) entries across all users.
    pub recent_entries: usize,
    /// Number of (user, IP) pairs queued for deferred cleanup.
    pub cleanup_queue_len: usize,
}
|
||||
|
||||
impl UserIpTracker {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
|
|
@ -141,6 +150,13 @@ impl UserIpTracker {
|
|||
|
||||
let mut active_ips = self.active_ips.write().await;
|
||||
let mut recent_ips = self.recent_ips.write().await;
|
||||
let window = *self.limit_window.read().await;
|
||||
let now = Instant::now();
|
||||
|
||||
for user_recent in recent_ips.values_mut() {
|
||||
Self::prune_recent(user_recent, now, window);
|
||||
}
|
||||
|
||||
let mut users =
|
||||
Vec::<String>::with_capacity(active_ips.len().saturating_add(recent_ips.len()));
|
||||
users.extend(active_ips.keys().cloned());
|
||||
|
|
@ -166,6 +182,26 @@ impl UserIpTracker {
|
|||
}
|
||||
}
|
||||
|
||||
    /// Returns a point-in-time snapshot of the tracker's memory footprint
    /// (user/IP map sizes and pending cleanup-queue length).
    pub async fn memory_stats(&self) -> UserIpTrackerMemoryStats {
        // Read the std-mutex queue length before taking the async read
        // locks; a poisoned mutex is recovered rather than propagated, since
        // this is a read-only metrics path.
        let cleanup_queue_len = self
            .cleanup_queue
            .lock()
            .unwrap_or_else(|poisoned| poisoned.into_inner())
            .len();
        let active_ips = self.active_ips.read().await;
        let recent_ips = self.recent_ips.read().await;
        // Entry counts are sums over all per-user IP maps.
        let active_entries = active_ips.values().map(HashMap::len).sum();
        let recent_entries = recent_ips.values().map(HashMap::len).sum();

        UserIpTrackerMemoryStats {
            active_users: active_ips.len(),
            recent_users: recent_ips.len(),
            active_entries,
            recent_entries,
            cleanup_queue_len,
        }
    }
|
||||
|
||||
pub async fn set_limit_policy(&self, mode: UserMaxUniqueIpsMode, window_secs: u64) {
|
||||
{
|
||||
let mut current_mode = self.limit_mode.write().await;
|
||||
|
|
@ -451,6 +487,7 @@ impl Default for UserIpTracker {
|
|||
mod tests {
|
||||
use super::*;
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
|
||||
use std::sync::atomic::Ordering;
|
||||
|
||||
fn test_ipv4(oct1: u8, oct2: u8, oct3: u8, oct4: u8) -> IpAddr {
|
||||
IpAddr::V4(Ipv4Addr::new(oct1, oct2, oct3, oct4))
|
||||
|
|
@ -764,4 +801,54 @@ mod tests {
|
|||
tokio::time::sleep(Duration::from_millis(1100)).await;
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
|
||||
}
|
||||
|
||||
    #[tokio::test]
    async fn test_memory_stats_reports_queue_and_entry_counts() {
        let tracker = UserIpTracker::new();
        tracker.set_user_limit("test_user", 4).await;
        let ip1 = test_ipv4(10, 2, 0, 1);
        let ip2 = test_ipv4(10, 2, 0, 2);

        // Two distinct IPs for one user, plus one queued cleanup entry.
        tracker.check_and_add("test_user", ip1).await.unwrap();
        tracker.check_and_add("test_user", ip2).await.unwrap();
        tracker.enqueue_cleanup("test_user".to_string(), ip1);

        // Snapshot must reflect: one user in both maps, two entries each
        // (check_and_add apparently records into active and recent — confirm),
        // and exactly the one queued cleanup.
        let snapshot = tracker.memory_stats().await;
        assert_eq!(snapshot.active_users, 1);
        assert_eq!(snapshot.recent_users, 1);
        assert_eq!(snapshot.active_entries, 2);
        assert_eq!(snapshot.recent_entries, 2);
        assert_eq!(snapshot.cleanup_queue_len, 1);
    }
|
||||
|
||||
    #[tokio::test]
    async fn test_compact_prunes_stale_recent_entries() {
        let tracker = UserIpTracker::new();
        // 1-second time window so the injected 5s-old entry is stale.
        tracker
            .set_limit_policy(UserMaxUniqueIpsMode::TimeWindow, 1)
            .await;

        // Inject a recent-IPs entry that is already outside the window.
        let stale_user = "stale-user".to_string();
        let stale_ip = test_ipv4(10, 3, 0, 1);
        {
            let mut recent_ips = tracker.recent_ips.write().await;
            recent_ips
                .entry(stale_user.clone())
                .or_insert_with(HashMap::new)
                .insert(stale_ip, Instant::now() - Duration::from_secs(5));
        }

        // Force the compaction epoch back so the next check_and_add triggers
        // a compact pass as a side effect.
        tracker.last_compact_epoch_secs.store(0, Ordering::Relaxed);
        tracker
            .check_and_add("trigger-user", test_ipv4(10, 3, 0, 2))
            .await
            .unwrap();

        // The stale entry must have been pruned by the compact pass.
        let recent_ips = tracker.recent_ips.read().await;
        let stale_exists = recent_ips
            .get(&stale_user)
            .map(|ips| ips.contains_key(&stale_ip))
            .unwrap_or(false);
        assert!(!stale_exists);
    }
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,343 @@
|
|||
//! Logging configuration for telemt.
|
||||
//!
|
||||
//! Supports multiple log destinations:
|
||||
//! - stderr (default, works with systemd journald)
|
||||
//! - syslog (Unix only, for traditional init systems)
|
||||
//! - file (with optional rotation)
|
||||
|
||||
#![allow(dead_code)] // Infrastructure module - used via CLI flags
|
||||
|
||||
use std::path::Path;
|
||||
|
||||
use tracing_subscriber::layer::SubscriberExt;
|
||||
use tracing_subscriber::util::SubscriberInitExt;
|
||||
use tracing_subscriber::{EnvFilter, fmt, reload};
|
||||
|
||||
/// Log destination configuration.
///
/// Selected at startup via CLI flags (see [`parse_log_destination`]).
#[derive(Debug, Clone, Default)]
pub enum LogDestination {
    /// Log to stderr (default, captured by systemd journald).
    #[default]
    Stderr,
    /// Log to syslog (Unix only).
    #[cfg(unix)]
    Syslog,
    /// Log to a file with optional rotation.
    File {
        // For daily rotation, this path's directory and file name are used
        // as the rolling appender's directory and prefix.
        path: String,
        /// Rotate daily if true.
        rotate_daily: bool,
    },
}
|
||||
|
||||
/// Logging options parsed from CLI/config.
///
/// Consumed once by [`init_logging`] at startup.
#[derive(Debug, Clone, Default)]
pub struct LoggingOptions {
    /// Where to send logs.
    pub destination: LogDestination,
    /// Disable ANSI colors (only relevant for the stderr destination).
    pub disable_colors: bool,
}
|
||||
|
||||
/// Guard that must be held to keep file logging active.
/// When dropped, flushes and closes log files.
pub struct LoggingGuard {
    // WorkerGuard of tracing-appender's background writer thread; None for
    // stderr/syslog destinations, which have no background writer.
    _guard: Option<tracing_appender::non_blocking::WorkerGuard>,
}

impl LoggingGuard {
    /// Wraps an optional appender guard (Some only for file logging).
    fn new(guard: Option<tracing_appender::non_blocking::WorkerGuard>) -> Self {
        Self { _guard: guard }
    }

    /// Creates a no-op guard for stderr/syslog logging.
    pub fn noop() -> Self {
        Self { _guard: None }
    }
}
|
||||
|
||||
/// Initialize the tracing subscriber with the specified options.
///
/// Must be called at most once per process (`.init()` sets the global
/// default subscriber).
///
/// Returns a reload handle for dynamic log level changes and a guard
/// that must be kept alive for file logging.
pub fn init_logging(
    opts: &LoggingOptions,
    initial_filter: &str,
) -> (
    reload::Handle<EnvFilter, impl tracing::Subscriber + Send + Sync>,
    LoggingGuard,
) {
    // Reloadable filter layer: the returned handle can swap the EnvFilter at
    // runtime without reinstalling the subscriber.
    let (filter_layer, filter_handle) = reload::Layer::new(EnvFilter::new(initial_filter));

    match &opts.destination {
        LogDestination::Stderr => {
            let fmt_layer = fmt::Layer::default()
                .with_ansi(!opts.disable_colors)
                .with_target(true);

            tracing_subscriber::registry()
                .with(filter_layer)
                .with(fmt_layer)
                .init();

            // No background writer -> nothing to guard.
            (filter_handle, LoggingGuard::noop())
        }

        #[cfg(unix)]
        LogDestination::Syslog => {
            // Use a custom fmt layer that writes to syslog. Level, target and
            // timestamps are stripped since syslog adds its own metadata.
            let fmt_layer = fmt::Layer::default()
                .with_ansi(false)
                .with_target(false)
                .with_level(false)
                .without_time()
                .with_writer(SyslogMakeWriter::new());

            tracing_subscriber::registry()
                .with(filter_layer)
                .with(fmt_layer)
                .init();

            (filter_handle, LoggingGuard::noop())
        }

        LogDestination::File { path, rotate_daily } => {
            let (non_blocking, guard) = if *rotate_daily {
                // Extract directory and filename prefix for the daily roller.
                let path = Path::new(path);
                let dir = path.parent().unwrap_or(Path::new("/var/log"));
                let prefix = path
                    .file_name()
                    .and_then(|s| s.to_str())
                    .unwrap_or("telemt");

                let file_appender = tracing_appender::rolling::daily(dir, prefix);
                tracing_appender::non_blocking(file_appender)
            } else {
                // NOTE(review): expect() aborts startup if the log file
                // cannot be opened; the return type has no error channel, so
                // a failure here is intentionally fatal — confirm.
                let file = std::fs::OpenOptions::new()
                    .create(true)
                    .append(true)
                    .open(path)
                    .expect("Failed to open log file");
                tracing_appender::non_blocking(file)
            };

            let fmt_layer = fmt::Layer::default()
                .with_ansi(false)
                .with_target(true)
                .with_writer(non_blocking);

            tracing_subscriber::registry()
                .with(filter_layer)
                .with(fmt_layer)
                .init();

            // The WorkerGuard must outlive the program's logging; dropping it
            // flushes and stops the background writer.
            (filter_handle, LoggingGuard::new(Some(guard)))
        }
    }
}
|
||||
|
||||
/// Syslog writer for tracing.
///
/// Factory handed to `fmt::Layer::with_writer`; produces one
/// [`SyslogWriter`] per event with the event's mapped priority.
#[cfg(unix)]
#[derive(Clone, Copy)]
struct SyslogMakeWriter;

/// One-shot writer that forwards a formatted event to syslog(3).
#[cfg(unix)]
#[derive(Clone, Copy)]
struct SyslogWriter {
    // syslog priority (LOG_ERR .. LOG_DEBUG) chosen from the event level.
    priority: libc::c_int,
}
|
||||
|
||||
#[cfg(unix)]
impl SyslogMakeWriter {
    /// Returns the writer factory, opening the syslog connection exactly
    /// once per process.
    fn new() -> Self {
        // Open syslog connection on first use
        static INIT: std::sync::Once = std::sync::Once::new();
        INIT.call_once(|| {
            // SAFETY: the ident pointer must stay valid for the lifetime of
            // the syslog connection; a byte-string literal is 'static.
            unsafe {
                // Open syslog with ident "telemt", LOG_PID, LOG_DAEMON facility
                let ident = b"telemt\0".as_ptr() as *const libc::c_char;
                libc::openlog(ident, libc::LOG_PID | libc::LOG_NDELAY, libc::LOG_DAEMON);
            }
        });
        Self
    }
}
|
||||
|
||||
#[cfg(unix)]
|
||||
fn syslog_priority_for_level(level: &tracing::Level) -> libc::c_int {
|
||||
match *level {
|
||||
tracing::Level::ERROR => libc::LOG_ERR,
|
||||
tracing::Level::WARN => libc::LOG_WARNING,
|
||||
tracing::Level::INFO => libc::LOG_INFO,
|
||||
tracing::Level::DEBUG => libc::LOG_DEBUG,
|
||||
tracing::Level::TRACE => libc::LOG_DEBUG,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(unix)]
impl std::io::Write for SyslogWriter {
    /// Forwards one formatted log line to syslog(3) at this writer's priority.
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        // Convert to C string, stripping newlines (syslog frames messages
        // itself).
        let msg = String::from_utf8_lossy(buf);
        let msg = msg.trim_end();

        // Blank writes are swallowed but still reported as fully consumed.
        if msg.is_empty() {
            return Ok(buf.len());
        }

        // Write to syslog. CString::new only fails on an interior NUL byte;
        // NOTE(review): the "(invalid utf8)" fallback text is misleading for
        // that case (lossy conversion already handled bad UTF-8 above).
        let c_msg = std::ffi::CString::new(msg.as_bytes())
            .unwrap_or_else(|_| std::ffi::CString::new("(invalid utf8)").unwrap());

        // SAFETY: "%s" with a valid C string argument; routing the message
        // through a format string protects against '%' sequences in the
        // message itself.
        unsafe {
            libc::syslog(
                self.priority,
                b"%s\0".as_ptr() as *const libc::c_char,
                c_msg.as_ptr(),
            );
        }

        // Always report the full buffer as written.
        Ok(buf.len())
    }

    /// syslog(3) has no user-space buffering to flush.
    fn flush(&mut self) -> std::io::Result<()> {
        Ok(())
    }
}
|
||||
|
||||
#[cfg(unix)]
impl<'a> tracing_subscriber::fmt::MakeWriter<'a> for SyslogMakeWriter {
    type Writer = SyslogWriter;

    /// Fallback writer when no event metadata is available: logs at LOG_INFO.
    fn make_writer(&'a self) -> Self::Writer {
        SyslogWriter {
            priority: libc::LOG_INFO,
        }
    }

    /// Per-event writer: maps the event's tracing level to the matching
    /// syslog priority.
    fn make_writer_for(&'a self, meta: &tracing::Metadata<'_>) -> Self::Writer {
        SyslogWriter {
            priority: syslog_priority_for_level(meta.level()),
        }
    }
}
|
||||
|
||||
/// Parse log destination from CLI arguments.
|
||||
pub fn parse_log_destination(args: &[String]) -> LogDestination {
|
||||
let mut i = 0;
|
||||
while i < args.len() {
|
||||
match args[i].as_str() {
|
||||
#[cfg(unix)]
|
||||
"--syslog" => {
|
||||
return LogDestination::Syslog;
|
||||
}
|
||||
"--log-file" => {
|
||||
i += 1;
|
||||
if i < args.len() {
|
||||
return LogDestination::File {
|
||||
path: args[i].clone(),
|
||||
rotate_daily: false,
|
||||
};
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--log-file=") => {
|
||||
return LogDestination::File {
|
||||
path: s.trim_start_matches("--log-file=").to_string(),
|
||||
rotate_daily: false,
|
||||
};
|
||||
}
|
||||
"--log-file-daily" => {
|
||||
i += 1;
|
||||
if i < args.len() {
|
||||
return LogDestination::File {
|
||||
path: args[i].clone(),
|
||||
rotate_daily: true,
|
||||
};
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--log-file-daily=") => {
|
||||
return LogDestination::File {
|
||||
path: s.trim_start_matches("--log-file-daily=").to_string(),
|
||||
rotate_daily: true,
|
||||
};
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
LogDestination::Stderr
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_parse_log_destination_default() {
        // No flags -> stderr default.
        let args: Vec<String> = vec![];
        assert!(matches!(
            parse_log_destination(&args),
            LogDestination::Stderr
        ));
    }

    #[test]
    fn test_parse_log_destination_file() {
        // Space-separated --log-file <path> form.
        let args = vec!["--log-file".to_string(), "/var/log/telemt.log".to_string()];
        match parse_log_destination(&args) {
            LogDestination::File { path, rotate_daily } => {
                assert_eq!(path, "/var/log/telemt.log");
                assert!(!rotate_daily);
            }
            _ => panic!("Expected File destination"),
        }
    }

    #[test]
    fn test_parse_log_destination_file_daily() {
        // --log-file-daily=<path> form sets the rotation flag.
        let args = vec!["--log-file-daily=/var/log/telemt".to_string()];
        match parse_log_destination(&args) {
            LogDestination::File { path, rotate_daily } => {
                assert_eq!(path, "/var/log/telemt");
                assert!(rotate_daily);
            }
            _ => panic!("Expected File destination"),
        }
    }

    #[cfg(unix)]
    #[test]
    fn test_parse_log_destination_syslog() {
        let args = vec!["--syslog".to_string()];
        assert!(matches!(
            parse_log_destination(&args),
            LogDestination::Syslog
        ));
    }

    #[cfg(unix)]
    #[test]
    fn test_syslog_priority_for_level_mapping() {
        // Exhaustive check of the level -> priority mapping; DEBUG and TRACE
        // both map to LOG_DEBUG.
        assert_eq!(
            syslog_priority_for_level(&tracing::Level::ERROR),
            libc::LOG_ERR
        );
        assert_eq!(
            syslog_priority_for_level(&tracing::Level::WARN),
            libc::LOG_WARNING
        );
        assert_eq!(
            syslog_priority_for_level(&tracing::Level::INFO),
            libc::LOG_INFO
        );
        assert_eq!(
            syslog_priority_for_level(&tracing::Level::DEBUG),
            libc::LOG_DEBUG
        );
        assert_eq!(
            syslog_priority_for_level(&tracing::Level::TRACE),
            libc::LOG_DEBUG
        );
    }
}
|
||||
|
|
@ -21,10 +21,29 @@ pub(crate) async fn configure_admission_gate(
|
|||
if config.general.use_middle_proxy {
|
||||
if let Some(pool) = me_pool.as_ref() {
|
||||
let initial_ready = pool.admission_ready_conditional_cast().await;
|
||||
admission_tx.send_replace(initial_ready);
|
||||
let _ = route_runtime.set_mode(RelayRouteMode::Middle);
|
||||
let mut fallback_enabled = config.general.me2dc_fallback;
|
||||
let mut fast_fallback_enabled = fallback_enabled && config.general.me2dc_fast;
|
||||
let (initial_gate_open, initial_route_mode, initial_fallback_reason) = if initial_ready
|
||||
{
|
||||
(true, RelayRouteMode::Middle, None)
|
||||
} else if fast_fallback_enabled {
|
||||
(
|
||||
true,
|
||||
RelayRouteMode::Direct,
|
||||
Some("fast_not_ready_fallback"),
|
||||
)
|
||||
} else {
|
||||
(false, RelayRouteMode::Middle, None)
|
||||
};
|
||||
admission_tx.send_replace(initial_gate_open);
|
||||
let _ = route_runtime.set_mode(initial_route_mode);
|
||||
if initial_ready {
|
||||
info!("Conditional-admission gate: open / ME pool READY");
|
||||
} else if let Some(reason) = initial_fallback_reason {
|
||||
warn!(
|
||||
fallback_reason = reason,
|
||||
"Conditional-admission gate opened in ME fast fallback mode"
|
||||
);
|
||||
} else {
|
||||
warn!("Conditional-admission gate: closed / ME pool is NOT ready)");
|
||||
}
|
||||
|
|
@ -34,10 +53,9 @@ pub(crate) async fn configure_admission_gate(
|
|||
let route_runtime_gate = route_runtime.clone();
|
||||
let mut config_rx_gate = config_rx.clone();
|
||||
let mut admission_poll_ms = config.general.me_admission_poll_ms.max(1);
|
||||
let mut fallback_enabled = config.general.me2dc_fallback;
|
||||
tokio::spawn(async move {
|
||||
let mut gate_open = initial_ready;
|
||||
let mut route_mode = RelayRouteMode::Middle;
|
||||
let mut gate_open = initial_gate_open;
|
||||
let mut route_mode = initial_route_mode;
|
||||
let mut ready_observed = initial_ready;
|
||||
let mut not_ready_since = if initial_ready {
|
||||
None
|
||||
|
|
@ -53,16 +71,23 @@ pub(crate) async fn configure_admission_gate(
|
|||
let cfg = config_rx_gate.borrow_and_update().clone();
|
||||
admission_poll_ms = cfg.general.me_admission_poll_ms.max(1);
|
||||
fallback_enabled = cfg.general.me2dc_fallback;
|
||||
fast_fallback_enabled = cfg.general.me2dc_fallback && cfg.general.me2dc_fast;
|
||||
continue;
|
||||
}
|
||||
_ = tokio::time::sleep(Duration::from_millis(admission_poll_ms)) => {}
|
||||
}
|
||||
let ready = pool_for_gate.admission_ready_conditional_cast().await;
|
||||
let now = Instant::now();
|
||||
let (next_gate_open, next_route_mode, next_fallback_active) = if ready {
|
||||
let (next_gate_open, next_route_mode, next_fallback_reason) = if ready {
|
||||
ready_observed = true;
|
||||
not_ready_since = None;
|
||||
(true, RelayRouteMode::Middle, false)
|
||||
(true, RelayRouteMode::Middle, None)
|
||||
} else if fast_fallback_enabled {
|
||||
(
|
||||
true,
|
||||
RelayRouteMode::Direct,
|
||||
Some("fast_not_ready_fallback"),
|
||||
)
|
||||
} else {
|
||||
let not_ready_started_at = *not_ready_since.get_or_insert(now);
|
||||
let not_ready_for = now.saturating_duration_since(not_ready_started_at);
|
||||
|
|
@ -72,11 +97,12 @@ pub(crate) async fn configure_admission_gate(
|
|||
STARTUP_FALLBACK_AFTER
|
||||
};
|
||||
if fallback_enabled && not_ready_for > fallback_after {
|
||||
(true, RelayRouteMode::Direct, true)
|
||||
(true, RelayRouteMode::Direct, Some("strict_grace_fallback"))
|
||||
} else {
|
||||
(false, RelayRouteMode::Middle, false)
|
||||
(false, RelayRouteMode::Middle, None)
|
||||
}
|
||||
};
|
||||
let next_fallback_active = next_fallback_reason.is_some();
|
||||
|
||||
if next_route_mode != route_mode {
|
||||
route_mode = next_route_mode;
|
||||
|
|
@ -88,17 +114,28 @@ pub(crate) async fn configure_admission_gate(
|
|||
"Middle-End routing restored for new sessions"
|
||||
);
|
||||
} else {
|
||||
let fallback_after = if ready_observed {
|
||||
RUNTIME_FALLBACK_AFTER
|
||||
let fallback_reason = next_fallback_reason.unwrap_or("unknown");
|
||||
if fallback_reason == "strict_grace_fallback" {
|
||||
let fallback_after = if ready_observed {
|
||||
RUNTIME_FALLBACK_AFTER
|
||||
} else {
|
||||
STARTUP_FALLBACK_AFTER
|
||||
};
|
||||
warn!(
|
||||
target_mode = route_mode.as_str(),
|
||||
cutover_generation = snapshot.generation,
|
||||
grace_secs = fallback_after.as_secs(),
|
||||
fallback_reason,
|
||||
"ME pool stayed not-ready beyond grace; routing new sessions via Direct-DC"
|
||||
);
|
||||
} else {
|
||||
STARTUP_FALLBACK_AFTER
|
||||
};
|
||||
warn!(
|
||||
target_mode = route_mode.as_str(),
|
||||
cutover_generation = snapshot.generation,
|
||||
grace_secs = fallback_after.as_secs(),
|
||||
"ME pool stayed not-ready beyond grace; routing new sessions via Direct-DC"
|
||||
);
|
||||
warn!(
|
||||
target_mode = route_mode.as_str(),
|
||||
cutover_generation = snapshot.generation,
|
||||
fallback_reason,
|
||||
"ME pool not-ready; routing new sessions via Direct-DC (fast mode)"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -108,7 +145,10 @@ pub(crate) async fn configure_admission_gate(
|
|||
admission_tx_gate.send_replace(gate_open);
|
||||
if gate_open {
|
||||
if next_fallback_active {
|
||||
warn!("Conditional-admission gate opened in ME fallback mode");
|
||||
warn!(
|
||||
fallback_reason = next_fallback_reason.unwrap_or("unknown"),
|
||||
"Conditional-admission gate opened in ME fallback mode"
|
||||
);
|
||||
} else {
|
||||
info!("Conditional-admission gate opened / ME pool READY");
|
||||
}
|
||||
|
|
|
|||
|
|
@ -8,31 +8,66 @@ use tracing::{debug, error, info, warn};
|
|||
|
||||
use crate::cli;
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::logging::LogDestination;
|
||||
use crate::transport::UpstreamManager;
|
||||
use crate::transport::middle_proxy::{
|
||||
ProxyConfigData, fetch_proxy_config_with_raw, load_proxy_config_cache, save_proxy_config_cache,
|
||||
ProxyConfigData, fetch_proxy_config_with_raw_via_upstream, load_proxy_config_cache,
|
||||
save_proxy_config_cache,
|
||||
};
|
||||
|
||||
pub(crate) fn resolve_runtime_config_path(
|
||||
config_path_cli: &str,
|
||||
startup_cwd: &std::path::Path,
|
||||
config_path_explicit: bool,
|
||||
) -> PathBuf {
|
||||
let raw = PathBuf::from(config_path_cli);
|
||||
let absolute = if raw.is_absolute() {
|
||||
raw
|
||||
} else {
|
||||
startup_cwd.join(raw)
|
||||
};
|
||||
absolute.canonicalize().unwrap_or(absolute)
|
||||
if config_path_explicit {
|
||||
let raw = PathBuf::from(config_path_cli);
|
||||
let absolute = if raw.is_absolute() {
|
||||
raw
|
||||
} else {
|
||||
startup_cwd.join(raw)
|
||||
};
|
||||
return absolute.canonicalize().unwrap_or(absolute);
|
||||
}
|
||||
|
||||
let etc_telemt = std::path::Path::new("/etc/telemt");
|
||||
let candidates = [
|
||||
startup_cwd.join("config.toml"),
|
||||
startup_cwd.join("telemt.toml"),
|
||||
etc_telemt.join("telemt.toml"),
|
||||
etc_telemt.join("config.toml"),
|
||||
];
|
||||
for candidate in candidates {
|
||||
if candidate.is_file() {
|
||||
return candidate.canonicalize().unwrap_or(candidate);
|
||||
}
|
||||
}
|
||||
|
||||
startup_cwd.join("config.toml")
|
||||
}
|
||||
|
||||
pub(crate) fn parse_cli() -> (String, Option<PathBuf>, bool, Option<String>) {
|
||||
/// Parsed CLI arguments.
|
||||
pub(crate) struct CliArgs {
|
||||
pub config_path: String,
|
||||
pub config_path_explicit: bool,
|
||||
pub data_path: Option<PathBuf>,
|
||||
pub silent: bool,
|
||||
pub log_level: Option<String>,
|
||||
pub log_destination: LogDestination,
|
||||
}
|
||||
|
||||
pub(crate) fn parse_cli() -> CliArgs {
|
||||
let mut config_path = "config.toml".to_string();
|
||||
let mut config_path_explicit = false;
|
||||
let mut data_path: Option<PathBuf> = None;
|
||||
let mut silent = false;
|
||||
let mut log_level: Option<String> = None;
|
||||
|
||||
let args: Vec<String> = std::env::args().skip(1).collect();
|
||||
|
||||
// Parse log destination
|
||||
let log_destination = crate::logging::parse_log_destination(&args);
|
||||
|
||||
// Check for --init first (handled before tokio)
|
||||
if let Some(init_opts) = cli::parse_init_args(&args) {
|
||||
if let Err(e) = cli::run_init(init_opts) {
|
||||
|
|
@ -59,6 +94,20 @@ pub(crate) fn parse_cli() -> (String, Option<PathBuf>, bool, Option<String>) {
|
|||
s.trim_start_matches("--data-path=").to_string(),
|
||||
));
|
||||
}
|
||||
"--working-dir" => {
|
||||
i += 1;
|
||||
if i < args.len() {
|
||||
data_path = Some(PathBuf::from(args[i].clone()));
|
||||
} else {
|
||||
eprintln!("Missing value for --working-dir");
|
||||
std::process::exit(0);
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--working-dir=") => {
|
||||
data_path = Some(PathBuf::from(
|
||||
s.trim_start_matches("--working-dir=").to_string(),
|
||||
));
|
||||
}
|
||||
"--silent" | "-s" => {
|
||||
silent = true;
|
||||
}
|
||||
|
|
@ -72,38 +121,35 @@ pub(crate) fn parse_cli() -> (String, Option<PathBuf>, bool, Option<String>) {
|
|||
log_level = Some(s.trim_start_matches("--log-level=").to_string());
|
||||
}
|
||||
"--help" | "-h" => {
|
||||
eprintln!("Usage: telemt [config.toml] [OPTIONS]");
|
||||
eprintln!();
|
||||
eprintln!("Options:");
|
||||
eprintln!(
|
||||
" --data-path <DIR> Set data directory (absolute path; overrides config value)"
|
||||
);
|
||||
eprintln!(" --silent, -s Suppress info logs");
|
||||
eprintln!(" --log-level <LEVEL> debug|verbose|normal|silent");
|
||||
eprintln!(" --help, -h Show this help");
|
||||
eprintln!();
|
||||
eprintln!("Setup (fire-and-forget):");
|
||||
eprintln!(
|
||||
" --init Generate config, install systemd service, start"
|
||||
);
|
||||
eprintln!(" --port <PORT> Listen port (default: 443)");
|
||||
eprintln!(
|
||||
" --domain <DOMAIN> TLS domain for masking (default: www.google.com)"
|
||||
);
|
||||
eprintln!(
|
||||
" --secret <HEX> 32-char hex secret (auto-generated if omitted)"
|
||||
);
|
||||
eprintln!(" --user <NAME> Username (default: user)");
|
||||
eprintln!(" --config-dir <DIR> Config directory (default: /etc/telemt)");
|
||||
eprintln!(" --no-start Don't start the service after install");
|
||||
print_help();
|
||||
std::process::exit(0);
|
||||
}
|
||||
"--version" | "-V" => {
|
||||
println!("telemt {}", env!("CARGO_PKG_VERSION"));
|
||||
std::process::exit(0);
|
||||
}
|
||||
// Skip daemon-related flags (already parsed)
|
||||
"--daemon" | "-d" | "--foreground" | "-f" => {}
|
||||
s if s.starts_with("--pid-file") => {
|
||||
if !s.contains('=') {
|
||||
i += 1; // skip value
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--run-as-user") => {
|
||||
if !s.contains('=') {
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--run-as-group") => {
|
||||
if !s.contains('=') {
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
s if !s.starts_with('-') => {
|
||||
config_path = s.to_string();
|
||||
if !matches!(s, "run" | "start" | "stop" | "reload" | "status") {
|
||||
config_path = s.to_string();
|
||||
config_path_explicit = true;
|
||||
}
|
||||
}
|
||||
other => {
|
||||
eprintln!("Unknown option: {}", other);
|
||||
|
|
@ -112,7 +158,75 @@ pub(crate) fn parse_cli() -> (String, Option<PathBuf>, bool, Option<String>) {
|
|||
i += 1;
|
||||
}
|
||||
|
||||
(config_path, data_path, silent, log_level)
|
||||
CliArgs {
|
||||
config_path,
|
||||
config_path_explicit,
|
||||
data_path,
|
||||
silent,
|
||||
log_level,
|
||||
log_destination,
|
||||
}
|
||||
}
|
||||
|
||||
fn print_help() {
|
||||
eprintln!("Usage: telemt [COMMAND] [OPTIONS] [config.toml]");
|
||||
eprintln!();
|
||||
eprintln!("Commands:");
|
||||
eprintln!(" run Run in foreground (default if no command given)");
|
||||
#[cfg(unix)]
|
||||
{
|
||||
eprintln!(" start Start as background daemon");
|
||||
eprintln!(" stop Stop a running daemon");
|
||||
eprintln!(" reload Reload configuration (send SIGHUP)");
|
||||
eprintln!(" status Check if daemon is running");
|
||||
}
|
||||
eprintln!();
|
||||
eprintln!("Options:");
|
||||
eprintln!(
|
||||
" --data-path <DIR> Set data directory (absolute path; overrides config value)"
|
||||
);
|
||||
eprintln!(" --working-dir <DIR> Alias for --data-path");
|
||||
eprintln!(" --silent, -s Suppress info logs");
|
||||
eprintln!(" --log-level <LEVEL> debug|verbose|normal|silent");
|
||||
eprintln!(" --help, -h Show this help");
|
||||
eprintln!(" --version, -V Show version");
|
||||
eprintln!();
|
||||
eprintln!("Logging options:");
|
||||
eprintln!(" --log-file <PATH> Log to file (default: stderr)");
|
||||
eprintln!(" --log-file-daily <PATH> Log to file with daily rotation");
|
||||
#[cfg(unix)]
|
||||
eprintln!(" --syslog Log to syslog (Unix only)");
|
||||
eprintln!();
|
||||
#[cfg(unix)]
|
||||
{
|
||||
eprintln!("Daemon options (Unix only):");
|
||||
eprintln!(" --daemon, -d Fork to background (daemonize)");
|
||||
eprintln!(" --foreground, -f Explicit foreground mode (for systemd)");
|
||||
eprintln!(" --pid-file <PATH> PID file path (default: /var/run/telemt.pid)");
|
||||
eprintln!(" --run-as-user <USER> Drop privileges to this user after binding");
|
||||
eprintln!(" --run-as-group <GROUP> Drop privileges to this group after binding");
|
||||
eprintln!(" --working-dir <DIR> Working directory for daemon mode");
|
||||
eprintln!();
|
||||
}
|
||||
eprintln!("Setup (fire-and-forget):");
|
||||
eprintln!(" --init Generate config, install systemd service, start");
|
||||
eprintln!(" --port <PORT> Listen port (default: 443)");
|
||||
eprintln!(" --domain <DOMAIN> TLS domain for masking (default: www.google.com)");
|
||||
eprintln!(" --secret <HEX> 32-char hex secret (auto-generated if omitted)");
|
||||
eprintln!(" --user <NAME> Username (default: user)");
|
||||
eprintln!(" --config-dir <DIR> Config directory (default: /etc/telemt)");
|
||||
eprintln!(" --no-start Don't start the service after install");
|
||||
#[cfg(unix)]
|
||||
{
|
||||
eprintln!();
|
||||
eprintln!("Examples:");
|
||||
eprintln!(" telemt config.toml Run in foreground");
|
||||
eprintln!(" telemt start config.toml Start as daemon");
|
||||
eprintln!(" telemt start --pid-file /tmp/t.pid Start with custom PID file");
|
||||
eprintln!(" telemt stop Stop daemon");
|
||||
eprintln!(" telemt reload Reload configuration");
|
||||
eprintln!(" telemt status Check daemon status");
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
|
|
@ -130,7 +244,7 @@ mod tests {
|
|||
let target = startup_cwd.join("config.toml");
|
||||
std::fs::write(&target, " ").unwrap();
|
||||
|
||||
let resolved = resolve_runtime_config_path("config.toml", &startup_cwd);
|
||||
let resolved = resolve_runtime_config_path("config.toml", &startup_cwd, true);
|
||||
assert_eq!(resolved, target.canonicalize().unwrap());
|
||||
|
||||
let _ = std::fs::remove_file(&target);
|
||||
|
|
@ -146,11 +260,45 @@ mod tests {
|
|||
let startup_cwd = std::env::temp_dir().join(format!("telemt_cfg_path_missing_{nonce}"));
|
||||
std::fs::create_dir_all(&startup_cwd).unwrap();
|
||||
|
||||
let resolved = resolve_runtime_config_path("missing.toml", &startup_cwd);
|
||||
let resolved = resolve_runtime_config_path("missing.toml", &startup_cwd, true);
|
||||
assert_eq!(resolved, startup_cwd.join("missing.toml"));
|
||||
|
||||
let _ = std::fs::remove_dir(&startup_cwd);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resolve_runtime_config_path_uses_startup_candidates_when_not_explicit() {
|
||||
let nonce = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_nanos();
|
||||
let startup_cwd =
|
||||
std::env::temp_dir().join(format!("telemt_cfg_startup_candidates_{nonce}"));
|
||||
std::fs::create_dir_all(&startup_cwd).unwrap();
|
||||
let telemt = startup_cwd.join("telemt.toml");
|
||||
std::fs::write(&telemt, " ").unwrap();
|
||||
|
||||
let resolved = resolve_runtime_config_path("config.toml", &startup_cwd, false);
|
||||
assert_eq!(resolved, telemt.canonicalize().unwrap());
|
||||
|
||||
let _ = std::fs::remove_file(&telemt);
|
||||
let _ = std::fs::remove_dir(&startup_cwd);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resolve_runtime_config_path_defaults_to_startup_config_when_none_found() {
|
||||
let nonce = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_nanos();
|
||||
let startup_cwd = std::env::temp_dir().join(format!("telemt_cfg_startup_default_{nonce}"));
|
||||
std::fs::create_dir_all(&startup_cwd).unwrap();
|
||||
|
||||
let resolved = resolve_runtime_config_path("config.toml", &startup_cwd, false);
|
||||
assert_eq!(resolved, startup_cwd.join("config.toml"));
|
||||
|
||||
let _ = std::fs::remove_dir(&startup_cwd);
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn print_proxy_links(host: &str, port: u16, config: &ProxyConfig) {
|
||||
|
|
@ -288,9 +436,10 @@ pub(crate) async fn load_startup_proxy_config_snapshot(
|
|||
cache_path: Option<&str>,
|
||||
me2dc_fallback: bool,
|
||||
label: &'static str,
|
||||
upstream: Option<std::sync::Arc<UpstreamManager>>,
|
||||
) -> Option<ProxyConfigData> {
|
||||
loop {
|
||||
match fetch_proxy_config_with_raw(url).await {
|
||||
match fetch_proxy_config_with_raw_via_upstream(url, upstream.clone()).await {
|
||||
Ok((cfg, raw)) => {
|
||||
if !cfg.map.is_empty() {
|
||||
if let Some(path) = cache_path
|
||||
|
|
|
|||
|
|
@ -14,6 +14,7 @@ use crate::crypto::SecureRandom;
|
|||
use crate::ip_tracker::UserIpTracker;
|
||||
use crate::proxy::ClientHandler;
|
||||
use crate::proxy::route_mode::{ROUTE_SWITCH_ERROR_MSG, RouteRuntimeController};
|
||||
use crate::proxy::shared_state::ProxySharedState;
|
||||
use crate::startup::{COMPONENT_LISTENERS_BIND, StartupTracker};
|
||||
use crate::stats::beobachten::BeobachtenStore;
|
||||
use crate::stats::{ReplayChecker, Stats};
|
||||
|
|
@ -49,6 +50,7 @@ pub(crate) async fn bind_listeners(
|
|||
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
beobachten: Arc<BeobachtenStore>,
|
||||
shared: Arc<ProxySharedState>,
|
||||
max_connections: Arc<Semaphore>,
|
||||
) -> Result<BoundListeners, Box<dyn Error>> {
|
||||
startup_tracker
|
||||
|
|
@ -72,6 +74,7 @@ pub(crate) async fn bind_listeners(
|
|||
let options = ListenOptions {
|
||||
reuse_port: listener_conf.reuse_allow,
|
||||
ipv6_only: listener_conf.ip.is_ipv6(),
|
||||
backlog: config.server.listen_backlog,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
|
|
@ -223,6 +226,7 @@ pub(crate) async fn bind_listeners(
|
|||
let tls_cache = tls_cache.clone();
|
||||
let ip_tracker = ip_tracker.clone();
|
||||
let beobachten = beobachten.clone();
|
||||
let shared = shared.clone();
|
||||
let max_connections_unix = max_connections.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
|
|
@ -258,6 +262,7 @@ pub(crate) async fn bind_listeners(
|
|||
break;
|
||||
}
|
||||
Err(_) => {
|
||||
stats.increment_accept_permit_timeout_total();
|
||||
debug!(
|
||||
timeout_ms = accept_permit_timeout_ms,
|
||||
"Dropping accepted unix connection: permit wait timeout"
|
||||
|
|
@ -283,11 +288,12 @@ pub(crate) async fn bind_listeners(
|
|||
let tls_cache = tls_cache.clone();
|
||||
let ip_tracker = ip_tracker.clone();
|
||||
let beobachten = beobachten.clone();
|
||||
let shared = shared.clone();
|
||||
let proxy_protocol_enabled = config.server.proxy_protocol;
|
||||
|
||||
tokio::spawn(async move {
|
||||
let _permit = permit;
|
||||
if let Err(e) = crate::proxy::client::handle_client_stream(
|
||||
if let Err(e) = crate::proxy::client::handle_client_stream_with_shared(
|
||||
stream,
|
||||
fake_peer,
|
||||
config,
|
||||
|
|
@ -301,6 +307,7 @@ pub(crate) async fn bind_listeners(
|
|||
tls_cache,
|
||||
ip_tracker,
|
||||
beobachten,
|
||||
shared,
|
||||
proxy_protocol_enabled,
|
||||
)
|
||||
.await
|
||||
|
|
@ -350,6 +357,7 @@ pub(crate) fn spawn_tcp_accept_loops(
|
|||
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
beobachten: Arc<BeobachtenStore>,
|
||||
shared: Arc<ProxySharedState>,
|
||||
max_connections: Arc<Semaphore>,
|
||||
) {
|
||||
for (listener, listener_proxy_protocol) in listeners {
|
||||
|
|
@ -365,6 +373,7 @@ pub(crate) fn spawn_tcp_accept_loops(
|
|||
let tls_cache = tls_cache.clone();
|
||||
let ip_tracker = ip_tracker.clone();
|
||||
let beobachten = beobachten.clone();
|
||||
let shared = shared.clone();
|
||||
let max_connections_tcp = max_connections.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
|
|
@ -399,6 +408,7 @@ pub(crate) fn spawn_tcp_accept_loops(
|
|||
break;
|
||||
}
|
||||
Err(_) => {
|
||||
stats.increment_accept_permit_timeout_total();
|
||||
debug!(
|
||||
peer = %peer_addr,
|
||||
timeout_ms = accept_permit_timeout_ms,
|
||||
|
|
@ -420,13 +430,14 @@ pub(crate) fn spawn_tcp_accept_loops(
|
|||
let tls_cache = tls_cache.clone();
|
||||
let ip_tracker = ip_tracker.clone();
|
||||
let beobachten = beobachten.clone();
|
||||
let shared = shared.clone();
|
||||
let proxy_protocol_enabled = listener_proxy_protocol;
|
||||
let real_peer_report = Arc::new(std::sync::Mutex::new(None));
|
||||
let real_peer_report_for_handler = real_peer_report.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
let _permit = permit;
|
||||
if let Err(e) = ClientHandler::new(
|
||||
if let Err(e) = ClientHandler::new_with_shared(
|
||||
stream,
|
||||
peer_addr,
|
||||
config,
|
||||
|
|
@ -440,6 +451,7 @@ pub(crate) fn spawn_tcp_accept_loops(
|
|||
tls_cache,
|
||||
ip_tracker,
|
||||
beobachten,
|
||||
shared,
|
||||
proxy_protocol_enabled,
|
||||
real_peer_report_for_handler,
|
||||
)
|
||||
|
|
|
|||
|
|
@ -63,9 +63,10 @@ pub(crate) async fn initialize_me_pool(
|
|||
let proxy_secret_path = config.general.proxy_secret_path.as_deref();
|
||||
let pool_size = config.general.middle_proxy_pool_size.max(1);
|
||||
let proxy_secret = loop {
|
||||
match crate::transport::middle_proxy::fetch_proxy_secret(
|
||||
match crate::transport::middle_proxy::fetch_proxy_secret_with_upstream(
|
||||
proxy_secret_path,
|
||||
config.general.proxy_secret_len_max,
|
||||
Some(upstream_manager.clone()),
|
||||
)
|
||||
.await
|
||||
{
|
||||
|
|
@ -129,6 +130,7 @@ pub(crate) async fn initialize_me_pool(
|
|||
config.general.proxy_config_v4_cache_path.as_deref(),
|
||||
me2dc_fallback,
|
||||
"getProxyConfig",
|
||||
Some(upstream_manager.clone()),
|
||||
)
|
||||
.await;
|
||||
if cfg_v4.is_some() {
|
||||
|
|
@ -160,6 +162,7 @@ pub(crate) async fn initialize_me_pool(
|
|||
config.general.proxy_config_v6_cache_path.as_deref(),
|
||||
me2dc_fallback,
|
||||
"getProxyConfigV6",
|
||||
Some(upstream_manager.clone()),
|
||||
)
|
||||
.await;
|
||||
if cfg_v6.is_some() {
|
||||
|
|
@ -274,6 +277,8 @@ pub(crate) async fn initialize_me_pool(
|
|||
config.general.me_warn_rate_limit_ms,
|
||||
config.general.me_route_no_writer_mode,
|
||||
config.general.me_route_no_writer_wait_ms,
|
||||
config.general.me_route_hybrid_max_wait_ms,
|
||||
config.general.me_route_blocking_send_timeout_ms,
|
||||
config.general.me_route_inline_recovery_attempts,
|
||||
config.general.me_route_inline_recovery_wait_ms,
|
||||
);
|
||||
|
|
|
|||
|
|
@ -29,10 +29,12 @@ use tracing_subscriber::{EnvFilter, fmt, prelude::*, reload};
|
|||
|
||||
use crate::api;
|
||||
use crate::config::{LogLevel, ProxyConfig};
|
||||
use crate::conntrack_control;
|
||||
use crate::crypto::SecureRandom;
|
||||
use crate::ip_tracker::UserIpTracker;
|
||||
use crate::network::probe::{decide_network_capabilities, log_probe_result, run_probe};
|
||||
use crate::proxy::route_mode::{RelayRouteMode, RouteRuntimeController};
|
||||
use crate::proxy::shared_state::ProxySharedState;
|
||||
use crate::startup::{
|
||||
COMPONENT_API_BOOTSTRAP, COMPONENT_CONFIG_LOAD, COMPONENT_ME_POOL_CONSTRUCT,
|
||||
COMPONENT_ME_POOL_INIT_STAGE1, COMPONENT_ME_PROXY_CONFIG_V4, COMPONENT_ME_PROXY_CONFIG_V6,
|
||||
|
|
@ -47,8 +49,55 @@ use crate::transport::UpstreamManager;
|
|||
use crate::transport::middle_proxy::MePool;
|
||||
use helpers::{parse_cli, resolve_runtime_config_path};
|
||||
|
||||
#[cfg(unix)]
|
||||
use crate::daemon::{DaemonOptions, PidFile, drop_privileges};
|
||||
|
||||
/// Runs the full telemt runtime startup pipeline and blocks until shutdown.
|
||||
///
|
||||
/// On Unix, daemon options should be handled before calling this function
|
||||
/// (daemonization must happen before tokio runtime starts).
|
||||
#[cfg(unix)]
|
||||
pub async fn run_with_daemon(
|
||||
daemon_opts: DaemonOptions,
|
||||
) -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
run_inner(daemon_opts).await
|
||||
}
|
||||
|
||||
/// Runs the full telemt runtime startup pipeline and blocks until shutdown.
|
||||
///
|
||||
/// This is the main entry point for non-daemon mode or when called as a library.
|
||||
#[allow(dead_code)]
|
||||
pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
#[cfg(unix)]
|
||||
{
|
||||
// Parse CLI to get daemon options even in simple run() path
|
||||
let args: Vec<String> = std::env::args().skip(1).collect();
|
||||
let daemon_opts = crate::cli::parse_daemon_args(&args);
|
||||
run_inner(daemon_opts).await
|
||||
}
|
||||
#[cfg(not(unix))]
|
||||
{
|
||||
run_inner().await
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
async fn run_inner(
|
||||
daemon_opts: DaemonOptions,
|
||||
) -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
// Acquire PID file if daemonizing or if explicitly requested
|
||||
// Keep it alive until shutdown (underscore prefix = intentionally kept for RAII cleanup)
|
||||
let _pid_file = if daemon_opts.daemonize || daemon_opts.pid_file.is_some() {
|
||||
let mut pf = PidFile::new(daemon_opts.pid_file_path());
|
||||
if let Err(e) = pf.acquire() {
|
||||
eprintln!("[telemt] {}", e);
|
||||
std::process::exit(1);
|
||||
}
|
||||
Some(pf)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let process_started_at = Instant::now();
|
||||
let process_started_at_epoch_secs = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
|
|
@ -61,7 +110,13 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
Some("load and validate config".to_string()),
|
||||
)
|
||||
.await;
|
||||
let (config_path_cli, data_path, cli_silent, cli_log_level) = parse_cli();
|
||||
let cli_args = parse_cli();
|
||||
let config_path_cli = cli_args.config_path;
|
||||
let config_path_explicit = cli_args.config_path_explicit;
|
||||
let data_path = cli_args.data_path;
|
||||
let cli_silent = cli_args.silent;
|
||||
let cli_log_level = cli_args.log_level;
|
||||
let log_destination = cli_args.log_destination;
|
||||
let startup_cwd = match std::env::current_dir() {
|
||||
Ok(cwd) => cwd,
|
||||
Err(e) => {
|
||||
|
|
@ -69,7 +124,8 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
std::process::exit(1);
|
||||
}
|
||||
};
|
||||
let config_path = resolve_runtime_config_path(&config_path_cli, &startup_cwd);
|
||||
let mut config_path =
|
||||
resolve_runtime_config_path(&config_path_cli, &startup_cwd, config_path_explicit);
|
||||
|
||||
let mut config = match ProxyConfig::load(&config_path) {
|
||||
Ok(c) => c,
|
||||
|
|
@ -79,11 +135,99 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
std::process::exit(1);
|
||||
} else {
|
||||
let default = ProxyConfig::default();
|
||||
std::fs::write(&config_path, toml::to_string_pretty(&default).unwrap()).unwrap();
|
||||
eprintln!(
|
||||
"[telemt] Created default config at {}",
|
||||
config_path.display()
|
||||
);
|
||||
|
||||
let serialized =
|
||||
match toml::to_string_pretty(&default).or_else(|_| toml::to_string(&default)) {
|
||||
Ok(value) => Some(value),
|
||||
Err(serialize_error) => {
|
||||
eprintln!(
|
||||
"[telemt] Warning: failed to serialize default config: {}",
|
||||
serialize_error
|
||||
);
|
||||
None
|
||||
}
|
||||
};
|
||||
|
||||
if config_path_explicit {
|
||||
if let Some(serialized) = serialized.as_ref() {
|
||||
if let Err(write_error) = std::fs::write(&config_path, serialized) {
|
||||
eprintln!(
|
||||
"[telemt] Error: failed to create explicit config at {}: {}",
|
||||
config_path.display(),
|
||||
write_error
|
||||
);
|
||||
std::process::exit(1);
|
||||
}
|
||||
eprintln!(
|
||||
"[telemt] Created default config at {}",
|
||||
config_path.display()
|
||||
);
|
||||
} else {
|
||||
eprintln!(
|
||||
"[telemt] Warning: running with in-memory default config without writing to disk"
|
||||
);
|
||||
}
|
||||
} else {
|
||||
let system_dir = std::path::Path::new("/etc/telemt");
|
||||
let system_config_path = system_dir.join("telemt.toml");
|
||||
let startup_config_path = startup_cwd.join("config.toml");
|
||||
let mut persisted = false;
|
||||
|
||||
if let Some(serialized) = serialized.as_ref() {
|
||||
match std::fs::create_dir_all(system_dir) {
|
||||
Ok(()) => match std::fs::write(&system_config_path, serialized) {
|
||||
Ok(()) => {
|
||||
config_path = system_config_path;
|
||||
eprintln!(
|
||||
"[telemt] Created default config at {}",
|
||||
config_path.display()
|
||||
);
|
||||
persisted = true;
|
||||
}
|
||||
Err(write_error) => {
|
||||
eprintln!(
|
||||
"[telemt] Warning: failed to write default config at {}: {}",
|
||||
system_config_path.display(),
|
||||
write_error
|
||||
);
|
||||
}
|
||||
},
|
||||
Err(create_error) => {
|
||||
eprintln!(
|
||||
"[telemt] Warning: failed to create {}: {}",
|
||||
system_dir.display(),
|
||||
create_error
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if !persisted {
|
||||
match std::fs::write(&startup_config_path, serialized) {
|
||||
Ok(()) => {
|
||||
config_path = startup_config_path;
|
||||
eprintln!(
|
||||
"[telemt] Created default config at {}",
|
||||
config_path.display()
|
||||
);
|
||||
persisted = true;
|
||||
}
|
||||
Err(write_error) => {
|
||||
eprintln!(
|
||||
"[telemt] Warning: failed to write default config at {}: {}",
|
||||
startup_config_path.display(),
|
||||
write_error
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !persisted {
|
||||
eprintln!(
|
||||
"[telemt] Warning: running with in-memory default config without writing to disk"
|
||||
);
|
||||
}
|
||||
}
|
||||
default
|
||||
}
|
||||
}
|
||||
|
|
@ -115,15 +259,13 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
);
|
||||
std::process::exit(1);
|
||||
}
|
||||
} else {
|
||||
if let Err(e) = std::fs::create_dir_all(data_path) {
|
||||
eprintln!(
|
||||
"[telemt] Can't create data_path {}: {}",
|
||||
data_path.display(),
|
||||
e
|
||||
);
|
||||
std::process::exit(1);
|
||||
}
|
||||
} else if let Err(e) = std::fs::create_dir_all(data_path) {
|
||||
eprintln!(
|
||||
"[telemt] Can't create data_path {}: {}",
|
||||
data_path.display(),
|
||||
e
|
||||
);
|
||||
std::process::exit(1);
|
||||
}
|
||||
|
||||
if let Err(e) = std::env::set_current_dir(data_path) {
|
||||
|
|
@ -161,17 +303,43 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
)
|
||||
.await;
|
||||
|
||||
// Configure color output based on config
|
||||
let fmt_layer = if config.general.disable_colors {
|
||||
fmt::Layer::default().with_ansi(false)
|
||||
} else {
|
||||
fmt::Layer::default().with_ansi(true)
|
||||
};
|
||||
// Initialize logging based on destination
|
||||
let _logging_guard: Option<crate::logging::LoggingGuard>;
|
||||
match log_destination {
|
||||
crate::logging::LogDestination::Stderr => {
|
||||
// Default: log to stderr (works with systemd journald)
|
||||
let fmt_layer = if config.general.disable_colors {
|
||||
fmt::Layer::default().with_ansi(false)
|
||||
} else {
|
||||
fmt::Layer::default().with_ansi(true)
|
||||
};
|
||||
tracing_subscriber::registry()
|
||||
.with(filter_layer)
|
||||
.with(fmt_layer)
|
||||
.init();
|
||||
_logging_guard = None;
|
||||
}
|
||||
#[cfg(unix)]
|
||||
crate::logging::LogDestination::Syslog => {
|
||||
// Syslog: for OpenRC/FreeBSD
|
||||
let logging_opts = crate::logging::LoggingOptions {
|
||||
destination: log_destination,
|
||||
disable_colors: true,
|
||||
};
|
||||
let (_, guard) = crate::logging::init_logging(&logging_opts, "info");
|
||||
_logging_guard = Some(guard);
|
||||
}
|
||||
crate::logging::LogDestination::File { .. } => {
|
||||
// File logging with optional rotation
|
||||
let logging_opts = crate::logging::LoggingOptions {
|
||||
destination: log_destination,
|
||||
disable_colors: true,
|
||||
};
|
||||
let (_, guard) = crate::logging::init_logging(&logging_opts, "info");
|
||||
_logging_guard = Some(guard);
|
||||
}
|
||||
}
|
||||
|
||||
tracing_subscriber::registry()
|
||||
.with(filter_layer)
|
||||
.with(fmt_layer)
|
||||
.init();
|
||||
startup_tracker
|
||||
.complete_component(
|
||||
COMPONENT_TRACING_INIT,
|
||||
|
|
@ -225,6 +393,7 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
config.general.upstream_connect_retry_attempts,
|
||||
config.general.upstream_connect_retry_backoff_ms,
|
||||
config.general.upstream_connect_budget_ms,
|
||||
config.general.tg_connect,
|
||||
config.general.upstream_unhealthy_fail_threshold,
|
||||
config.general.upstream_connect_failfast_hard_errors,
|
||||
stats.clone(),
|
||||
|
|
@ -554,6 +723,12 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
)
|
||||
.await;
|
||||
let _admission_tx_hold = admission_tx;
|
||||
let shared_state = ProxySharedState::new();
|
||||
conntrack_control::spawn_conntrack_controller(
|
||||
config_rx.clone(),
|
||||
stats.clone(),
|
||||
shared_state.clone(),
|
||||
);
|
||||
|
||||
let bound = listeners::bind_listeners(
|
||||
&config,
|
||||
|
|
@ -574,6 +749,7 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
tls_cache.clone(),
|
||||
ip_tracker.clone(),
|
||||
beobachten.clone(),
|
||||
shared_state.clone(),
|
||||
max_connections.clone(),
|
||||
)
|
||||
.await?;
|
||||
|
|
@ -585,6 +761,14 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
std::process::exit(1);
|
||||
}
|
||||
|
||||
// Drop privileges after binding sockets (which may require root for port < 1024)
|
||||
if daemon_opts.user.is_some() || daemon_opts.group.is_some() {
|
||||
if let Err(e) = drop_privileges(daemon_opts.user.as_deref(), daemon_opts.group.as_deref()) {
|
||||
error!(error = %e, "Failed to drop privileges");
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
runtime_tasks::apply_runtime_log_filter(
|
||||
has_rust_log,
|
||||
&effective_log_level,
|
||||
|
|
@ -605,6 +789,9 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
|
||||
runtime_tasks::mark_runtime_ready(&startup_tracker).await;
|
||||
|
||||
// Spawn signal handlers for SIGUSR1/SIGUSR2 (non-shutdown signals)
|
||||
shutdown::spawn_signal_handlers(stats.clone(), process_started_at);
|
||||
|
||||
listeners::spawn_tcp_accept_loops(
|
||||
listeners,
|
||||
config_rx.clone(),
|
||||
|
|
@ -619,10 +806,11 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||
tls_cache.clone(),
|
||||
ip_tracker.clone(),
|
||||
beobachten.clone(),
|
||||
shared_state,
|
||||
max_connections.clone(),
|
||||
);
|
||||
|
||||
shutdown::wait_for_shutdown(process_started_at, me_pool).await;
|
||||
shutdown::wait_for_shutdown(process_started_at, me_pool, stats).await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
|
|
|||
|
|
@ -323,10 +323,12 @@ pub(crate) async fn spawn_metrics_if_configured(
|
|||
let config_rx_metrics = config_rx.clone();
|
||||
let ip_tracker_metrics = ip_tracker.clone();
|
||||
let whitelist = config.server.metrics_whitelist.clone();
|
||||
let listen_backlog = config.server.listen_backlog;
|
||||
tokio::spawn(async move {
|
||||
metrics::serve(
|
||||
port,
|
||||
listen,
|
||||
listen_backlog,
|
||||
stats,
|
||||
beobachten,
|
||||
ip_tracker_metrics,
|
||||
|
|
|
|||
|
|
@ -1,45 +1,206 @@
|
|||
//! Shutdown and signal handling for telemt.
|
||||
//!
|
||||
//! Handles graceful shutdown on various signals:
|
||||
//! - SIGINT (Ctrl+C) / SIGTERM: Graceful shutdown
|
||||
//! - SIGQUIT: Graceful shutdown with stats dump
|
||||
//! - SIGUSR1: Reserved for log rotation (logs acknowledgment)
|
||||
//! - SIGUSR2: Dump runtime status to log
|
||||
//!
|
||||
//! SIGHUP is handled separately in config/hot_reload.rs for config reload.
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
#[cfg(not(unix))]
|
||||
use tokio::signal;
|
||||
use tracing::{error, info, warn};
|
||||
#[cfg(unix)]
|
||||
use tokio::signal::unix::{SignalKind, signal};
|
||||
use tracing::{info, warn};
|
||||
|
||||
use crate::stats::Stats;
|
||||
use crate::transport::middle_proxy::MePool;
|
||||
|
||||
use super::helpers::{format_uptime, unit_label};
|
||||
|
||||
pub(crate) async fn wait_for_shutdown(process_started_at: Instant, me_pool: Option<Arc<MePool>>) {
|
||||
match signal::ctrl_c().await {
|
||||
Ok(()) => {
|
||||
let shutdown_started_at = Instant::now();
|
||||
info!("Shutting down...");
|
||||
let uptime_secs = process_started_at.elapsed().as_secs();
|
||||
info!("Uptime: {}", format_uptime(uptime_secs));
|
||||
if let Some(pool) = &me_pool {
|
||||
match tokio::time::timeout(
|
||||
Duration::from_secs(2),
|
||||
pool.shutdown_send_close_conn_all(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(total) => {
|
||||
info!(
|
||||
close_conn_sent = total,
|
||||
"ME shutdown: RPC_CLOSE_CONN broadcast completed"
|
||||
);
|
||||
}
|
||||
Err(_) => {
|
||||
warn!("ME shutdown: RPC_CLOSE_CONN broadcast timed out");
|
||||
}
|
||||
}
|
||||
}
|
||||
let shutdown_secs = shutdown_started_at.elapsed().as_secs();
|
||||
info!(
|
||||
"Shutdown completed successfully in {} {}.",
|
||||
shutdown_secs,
|
||||
unit_label(shutdown_secs, "second", "seconds")
|
||||
);
|
||||
/// Signal that triggered shutdown.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum ShutdownSignal {
|
||||
/// SIGINT (Ctrl+C)
|
||||
Interrupt,
|
||||
/// SIGTERM
|
||||
Terminate,
|
||||
/// SIGQUIT (with stats dump)
|
||||
Quit,
|
||||
}
|
||||
|
||||
impl std::fmt::Display for ShutdownSignal {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self {
|
||||
ShutdownSignal::Interrupt => write!(f, "SIGINT"),
|
||||
ShutdownSignal::Terminate => write!(f, "SIGTERM"),
|
||||
ShutdownSignal::Quit => write!(f, "SIGQUIT"),
|
||||
}
|
||||
Err(e) => error!("Signal error: {}", e),
|
||||
}
|
||||
}
|
||||
|
||||
/// Waits for a shutdown signal and performs graceful shutdown.
|
||||
pub(crate) async fn wait_for_shutdown(
|
||||
process_started_at: Instant,
|
||||
me_pool: Option<Arc<MePool>>,
|
||||
stats: Arc<Stats>,
|
||||
) {
|
||||
let signal = wait_for_shutdown_signal().await;
|
||||
perform_shutdown(signal, process_started_at, me_pool, &stats).await;
|
||||
}
|
||||
|
||||
/// Waits for any shutdown signal (SIGINT, SIGTERM, SIGQUIT).
|
||||
#[cfg(unix)]
|
||||
async fn wait_for_shutdown_signal() -> ShutdownSignal {
|
||||
let mut sigint = signal(SignalKind::interrupt()).expect("Failed to register SIGINT handler");
|
||||
let mut sigterm = signal(SignalKind::terminate()).expect("Failed to register SIGTERM handler");
|
||||
let mut sigquit = signal(SignalKind::quit()).expect("Failed to register SIGQUIT handler");
|
||||
|
||||
tokio::select! {
|
||||
_ = sigint.recv() => ShutdownSignal::Interrupt,
|
||||
_ = sigterm.recv() => ShutdownSignal::Terminate,
|
||||
_ = sigquit.recv() => ShutdownSignal::Quit,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
async fn wait_for_shutdown_signal() -> ShutdownSignal {
|
||||
signal::ctrl_c().await.expect("Failed to listen for Ctrl+C");
|
||||
ShutdownSignal::Interrupt
|
||||
}
|
||||
|
||||
/// Performs graceful shutdown sequence.
|
||||
async fn perform_shutdown(
|
||||
signal: ShutdownSignal,
|
||||
process_started_at: Instant,
|
||||
me_pool: Option<Arc<MePool>>,
|
||||
stats: &Stats,
|
||||
) {
|
||||
let shutdown_started_at = Instant::now();
|
||||
info!(signal = %signal, "Received shutdown signal");
|
||||
|
||||
// Dump stats if SIGQUIT
|
||||
if signal == ShutdownSignal::Quit {
|
||||
dump_stats(stats, process_started_at);
|
||||
}
|
||||
|
||||
info!("Shutting down...");
|
||||
let uptime_secs = process_started_at.elapsed().as_secs();
|
||||
info!("Uptime: {}", format_uptime(uptime_secs));
|
||||
|
||||
// Graceful ME pool shutdown
|
||||
if let Some(pool) = &me_pool {
|
||||
match tokio::time::timeout(Duration::from_secs(2), pool.shutdown_send_close_conn_all())
|
||||
.await
|
||||
{
|
||||
Ok(total) => {
|
||||
info!(
|
||||
close_conn_sent = total,
|
||||
"ME shutdown: RPC_CLOSE_CONN broadcast completed"
|
||||
);
|
||||
}
|
||||
Err(_) => {
|
||||
warn!("ME shutdown: RPC_CLOSE_CONN broadcast timed out");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let shutdown_secs = shutdown_started_at.elapsed().as_secs();
|
||||
info!(
|
||||
"Shutdown completed successfully in {} {}.",
|
||||
shutdown_secs,
|
||||
unit_label(shutdown_secs, "second", "seconds")
|
||||
);
|
||||
}
|
||||
|
||||
/// Dumps runtime statistics to the log.
|
||||
fn dump_stats(stats: &Stats, process_started_at: Instant) {
|
||||
let uptime_secs = process_started_at.elapsed().as_secs();
|
||||
|
||||
info!("=== Runtime Statistics Dump ===");
|
||||
info!("Uptime: {}", format_uptime(uptime_secs));
|
||||
|
||||
// Connection stats
|
||||
info!(
|
||||
"Connections: total={}, current={} (direct={}, me={}), bad={}",
|
||||
stats.get_connects_all(),
|
||||
stats.get_current_connections_total(),
|
||||
stats.get_current_connections_direct(),
|
||||
stats.get_current_connections_me(),
|
||||
stats.get_connects_bad(),
|
||||
);
|
||||
|
||||
// ME pool stats
|
||||
info!(
|
||||
"ME keepalive: sent={}, pong={}, failed={}, timeout={}",
|
||||
stats.get_me_keepalive_sent(),
|
||||
stats.get_me_keepalive_pong(),
|
||||
stats.get_me_keepalive_failed(),
|
||||
stats.get_me_keepalive_timeout(),
|
||||
);
|
||||
|
||||
// Relay stats
|
||||
info!(
|
||||
"Relay idle: soft_mark={}, hard_close={}, pressure_evict={}",
|
||||
stats.get_relay_idle_soft_mark_total(),
|
||||
stats.get_relay_idle_hard_close_total(),
|
||||
stats.get_relay_pressure_evict_total(),
|
||||
);
|
||||
|
||||
info!("=== End Statistics Dump ===");
|
||||
}
|
||||
|
||||
/// Spawns a background task to handle operational signals (SIGUSR1, SIGUSR2).
|
||||
///
|
||||
/// These signals don't trigger shutdown but perform specific actions:
|
||||
/// - SIGUSR1: Log rotation acknowledgment (for external log rotation tools)
|
||||
/// - SIGUSR2: Dump runtime status to log
|
||||
#[cfg(unix)]
|
||||
pub(crate) fn spawn_signal_handlers(stats: Arc<Stats>, process_started_at: Instant) {
|
||||
tokio::spawn(async move {
|
||||
let mut sigusr1 =
|
||||
signal(SignalKind::user_defined1()).expect("Failed to register SIGUSR1 handler");
|
||||
let mut sigusr2 =
|
||||
signal(SignalKind::user_defined2()).expect("Failed to register SIGUSR2 handler");
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = sigusr1.recv() => {
|
||||
handle_sigusr1();
|
||||
}
|
||||
_ = sigusr2.recv() => {
|
||||
handle_sigusr2(&stats, process_started_at);
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/// No-op on non-Unix platforms.
|
||||
#[cfg(not(unix))]
|
||||
pub(crate) fn spawn_signal_handlers(_stats: Arc<Stats>, _process_started_at: Instant) {
|
||||
// No SIGUSR1/SIGUSR2 on non-Unix
|
||||
}
|
||||
|
||||
/// Handles SIGUSR1 - log rotation signal.
|
||||
///
|
||||
/// This signal is typically sent by logrotate or similar tools after
|
||||
/// rotating log files. Since tracing-subscriber doesn't natively support
|
||||
/// reopening files, we just acknowledge the signal. If file logging is
|
||||
/// added in the future, this would reopen log file handles.
|
||||
#[cfg(unix)]
|
||||
fn handle_sigusr1() {
|
||||
info!("SIGUSR1 received - log rotation acknowledged");
|
||||
// Future: If using file-based logging, reopen file handles here
|
||||
}
|
||||
|
||||
/// Handles SIGUSR2 - dump runtime status.
|
||||
#[cfg(unix)]
|
||||
fn handle_sigusr2(stats: &Stats, process_started_at: Instant) {
|
||||
info!("SIGUSR2 received - dumping runtime status");
|
||||
dump_stats(stats, process_started_at);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -7,6 +7,7 @@ use tracing::warn;
|
|||
use crate::config::ProxyConfig;
|
||||
use crate::startup::{COMPONENT_TLS_FRONT_BOOTSTRAP, StartupTracker};
|
||||
use crate::tls_front::TlsFrontCache;
|
||||
use crate::tls_front::fetcher::TlsFetchStrategy;
|
||||
use crate::transport::UpstreamManager;
|
||||
|
||||
pub(crate) async fn bootstrap_tls_front(
|
||||
|
|
@ -40,7 +41,17 @@ pub(crate) async fn bootstrap_tls_front(
|
|||
let mask_unix_sock = config.censorship.mask_unix_sock.clone();
|
||||
let tls_fetch_scope = (!config.censorship.tls_fetch_scope.is_empty())
|
||||
.then(|| config.censorship.tls_fetch_scope.clone());
|
||||
let fetch_timeout = Duration::from_secs(5);
|
||||
let tls_fetch = config.censorship.tls_fetch.clone();
|
||||
let fetch_strategy = TlsFetchStrategy {
|
||||
profiles: tls_fetch.profiles,
|
||||
strict_route: tls_fetch.strict_route,
|
||||
attempt_timeout: Duration::from_millis(tls_fetch.attempt_timeout_ms.max(1)),
|
||||
total_budget: Duration::from_millis(tls_fetch.total_budget_ms.max(1)),
|
||||
grease_enabled: tls_fetch.grease_enabled,
|
||||
deterministic: tls_fetch.deterministic,
|
||||
profile_cache_ttl: Duration::from_secs(tls_fetch.profile_cache_ttl_secs),
|
||||
};
|
||||
let fetch_timeout = fetch_strategy.total_budget;
|
||||
|
||||
let cache_initial = cache.clone();
|
||||
let domains_initial = tls_domains.to_vec();
|
||||
|
|
@ -48,6 +59,7 @@ pub(crate) async fn bootstrap_tls_front(
|
|||
let unix_sock_initial = mask_unix_sock.clone();
|
||||
let scope_initial = tls_fetch_scope.clone();
|
||||
let upstream_initial = upstream_manager.clone();
|
||||
let strategy_initial = fetch_strategy.clone();
|
||||
tokio::spawn(async move {
|
||||
let mut join = tokio::task::JoinSet::new();
|
||||
for domain in domains_initial {
|
||||
|
|
@ -56,12 +68,13 @@ pub(crate) async fn bootstrap_tls_front(
|
|||
let unix_sock_domain = unix_sock_initial.clone();
|
||||
let scope_domain = scope_initial.clone();
|
||||
let upstream_domain = upstream_initial.clone();
|
||||
let strategy_domain = strategy_initial.clone();
|
||||
join.spawn(async move {
|
||||
match crate::tls_front::fetcher::fetch_real_tls(
|
||||
match crate::tls_front::fetcher::fetch_real_tls_with_strategy(
|
||||
&host_domain,
|
||||
port,
|
||||
&domain,
|
||||
fetch_timeout,
|
||||
&strategy_domain,
|
||||
Some(upstream_domain),
|
||||
scope_domain.as_deref(),
|
||||
proxy_protocol,
|
||||
|
|
@ -107,6 +120,7 @@ pub(crate) async fn bootstrap_tls_front(
|
|||
let unix_sock_refresh = mask_unix_sock.clone();
|
||||
let scope_refresh = tls_fetch_scope.clone();
|
||||
let upstream_refresh = upstream_manager.clone();
|
||||
let strategy_refresh = fetch_strategy.clone();
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
let base_secs = rand::rng().random_range(4 * 3600..=6 * 3600);
|
||||
|
|
@ -120,12 +134,13 @@ pub(crate) async fn bootstrap_tls_front(
|
|||
let unix_sock_domain = unix_sock_refresh.clone();
|
||||
let scope_domain = scope_refresh.clone();
|
||||
let upstream_domain = upstream_refresh.clone();
|
||||
let strategy_domain = strategy_refresh.clone();
|
||||
join.spawn(async move {
|
||||
match crate::tls_front::fetcher::fetch_real_tls(
|
||||
match crate::tls_front::fetcher::fetch_real_tls_with_strategy(
|
||||
&host_domain,
|
||||
port,
|
||||
&domain,
|
||||
fetch_timeout,
|
||||
&strategy_domain,
|
||||
Some(upstream_domain),
|
||||
scope_domain.as_deref(),
|
||||
proxy_protocol,
|
||||
|
|
|
|||
59
src/main.rs
59
src/main.rs
|
|
@ -3,23 +3,28 @@
|
|||
mod api;
|
||||
mod cli;
|
||||
mod config;
|
||||
mod conntrack_control;
|
||||
mod crypto;
|
||||
#[cfg(unix)]
|
||||
mod daemon;
|
||||
mod error;
|
||||
mod ip_tracker;
|
||||
#[cfg(test)]
|
||||
#[path = "tests/ip_tracker_hotpath_adversarial_tests.rs"]
|
||||
mod ip_tracker_hotpath_adversarial_tests;
|
||||
#[cfg(test)]
|
||||
#[path = "tests/ip_tracker_encapsulation_adversarial_tests.rs"]
|
||||
mod ip_tracker_encapsulation_adversarial_tests;
|
||||
#[cfg(test)]
|
||||
#[path = "tests/ip_tracker_hotpath_adversarial_tests.rs"]
|
||||
mod ip_tracker_hotpath_adversarial_tests;
|
||||
#[cfg(test)]
|
||||
#[path = "tests/ip_tracker_regression_tests.rs"]
|
||||
mod ip_tracker_regression_tests;
|
||||
mod logging;
|
||||
mod maestro;
|
||||
mod metrics;
|
||||
mod network;
|
||||
mod protocol;
|
||||
mod proxy;
|
||||
mod service;
|
||||
mod startup;
|
||||
mod stats;
|
||||
mod stream;
|
||||
|
|
@ -27,7 +32,49 @@ mod tls_front;
|
|||
mod transport;
|
||||
mod util;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
maestro::run().await
|
||||
fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
// Install rustls crypto provider early
|
||||
let _ = rustls::crypto::ring::default_provider().install_default();
|
||||
|
||||
let args: Vec<String> = std::env::args().skip(1).collect();
|
||||
let cmd = cli::parse_command(&args);
|
||||
|
||||
// Handle subcommands that don't need the server (stop, reload, status, init)
|
||||
if let Some(exit_code) = cli::execute_subcommand(&cmd) {
|
||||
std::process::exit(exit_code);
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
{
|
||||
let daemon_opts = cmd.daemon_opts;
|
||||
|
||||
// Daemonize BEFORE runtime
|
||||
if daemon_opts.should_daemonize() {
|
||||
match daemon::daemonize(daemon_opts.working_dir.as_deref()) {
|
||||
Ok(daemon::DaemonizeResult::Parent) => {
|
||||
std::process::exit(0);
|
||||
}
|
||||
Ok(daemon::DaemonizeResult::Child) => {
|
||||
// continue
|
||||
}
|
||||
Err(e) => {
|
||||
eprintln!("[telemt] Daemonization failed: {}", e);
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tokio::runtime::Builder::new_multi_thread()
|
||||
.enable_all()
|
||||
.build()?
|
||||
.block_on(maestro::run_with_daemon(daemon_opts))
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
{
|
||||
tokio::runtime::Builder::new_multi_thread()
|
||||
.enable_all()
|
||||
.build()?
|
||||
.block_on(maestro::run())
|
||||
}
|
||||
}
|
||||
|
|
|
|||
783
src/metrics.rs
783
src/metrics.rs
|
|
@ -22,6 +22,7 @@ use crate::transport::{ListenOptions, create_listener};
|
|||
pub async fn serve(
|
||||
port: u16,
|
||||
listen: Option<String>,
|
||||
listen_backlog: u32,
|
||||
stats: Arc<Stats>,
|
||||
beobachten: Arc<BeobachtenStore>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
|
|
@ -40,7 +41,7 @@ pub async fn serve(
|
|||
}
|
||||
};
|
||||
let is_ipv6 = addr.is_ipv6();
|
||||
match bind_metrics_listener(addr, is_ipv6) {
|
||||
match bind_metrics_listener(addr, is_ipv6, listen_backlog) {
|
||||
Ok(listener) => {
|
||||
info!("Metrics endpoint: http://{}/metrics and /beobachten", addr);
|
||||
serve_listener(
|
||||
|
|
@ -60,7 +61,7 @@ pub async fn serve(
|
|||
let mut listener_v6 = None;
|
||||
|
||||
let addr_v4 = SocketAddr::from(([0, 0, 0, 0], port));
|
||||
match bind_metrics_listener(addr_v4, false) {
|
||||
match bind_metrics_listener(addr_v4, false, listen_backlog) {
|
||||
Ok(listener) => {
|
||||
info!(
|
||||
"Metrics endpoint: http://{}/metrics and /beobachten",
|
||||
|
|
@ -74,7 +75,7 @@ pub async fn serve(
|
|||
}
|
||||
|
||||
let addr_v6 = SocketAddr::from(([0, 0, 0, 0, 0, 0, 0, 0], port));
|
||||
match bind_metrics_listener(addr_v6, true) {
|
||||
match bind_metrics_listener(addr_v6, true, listen_backlog) {
|
||||
Ok(listener) => {
|
||||
info!(
|
||||
"Metrics endpoint: http://[::]:{}/metrics and /beobachten",
|
||||
|
|
@ -122,10 +123,15 @@ pub async fn serve(
|
|||
}
|
||||
}
|
||||
|
||||
fn bind_metrics_listener(addr: SocketAddr, ipv6_only: bool) -> std::io::Result<TcpListener> {
|
||||
fn bind_metrics_listener(
|
||||
addr: SocketAddr,
|
||||
ipv6_only: bool,
|
||||
listen_backlog: u32,
|
||||
) -> std::io::Result<TcpListener> {
|
||||
let options = ListenOptions {
|
||||
reuse_port: false,
|
||||
ipv6_only,
|
||||
backlog: listen_backlog,
|
||||
..Default::default()
|
||||
};
|
||||
let socket = create_listener(addr, &options)?;
|
||||
|
|
@ -287,6 +293,27 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
|||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_buffer_pool_buffers_total Snapshot of pooled and allocated buffers"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_buffer_pool_buffers_total gauge");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_buffer_pool_buffers_total{{kind=\"pooled\"}} {}",
|
||||
stats.get_buffer_pool_pooled_gauge()
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_buffer_pool_buffers_total{{kind=\"allocated\"}} {}",
|
||||
stats.get_buffer_pool_allocated_gauge()
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_buffer_pool_buffers_total{{kind=\"in_use\"}} {}",
|
||||
stats.get_buffer_pool_in_use_gauge()
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_connections_total Total accepted connections"
|
||||
|
|
@ -332,6 +359,134 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
|||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_accept_permit_timeout_total Accepted connections dropped due to permit wait timeout"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_accept_permit_timeout_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_accept_permit_timeout_total {}",
|
||||
if core_enabled {
|
||||
stats.get_accept_permit_timeout_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_conntrack_control_state Runtime conntrack control state flags"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_conntrack_control_state gauge");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_conntrack_control_state{{flag=\"enabled\"}} {}",
|
||||
if stats.get_conntrack_control_enabled() {
|
||||
1
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_conntrack_control_state{{flag=\"available\"}} {}",
|
||||
if stats.get_conntrack_control_available() {
|
||||
1
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_conntrack_control_state{{flag=\"pressure_active\"}} {}",
|
||||
if stats.get_conntrack_pressure_active() {
|
||||
1
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_conntrack_control_state{{flag=\"rule_apply_ok\"}} {}",
|
||||
if stats.get_conntrack_rule_apply_ok() {
|
||||
1
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_conntrack_event_queue_depth Pending close events in conntrack control queue"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_conntrack_event_queue_depth gauge");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_conntrack_event_queue_depth {}",
|
||||
stats.get_conntrack_event_queue_depth()
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_conntrack_delete_total Conntrack delete attempts by outcome"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_conntrack_delete_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_conntrack_delete_total{{result=\"attempt\"}} {}",
|
||||
if core_enabled {
|
||||
stats.get_conntrack_delete_attempt_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_conntrack_delete_total{{result=\"success\"}} {}",
|
||||
if core_enabled {
|
||||
stats.get_conntrack_delete_success_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_conntrack_delete_total{{result=\"not_found\"}} {}",
|
||||
if core_enabled {
|
||||
stats.get_conntrack_delete_not_found_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_conntrack_delete_total{{result=\"error\"}} {}",
|
||||
if core_enabled {
|
||||
stats.get_conntrack_delete_error_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_conntrack_close_event_drop_total Dropped conntrack close events due to queue pressure or unavailable sender"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# TYPE telemt_conntrack_close_event_drop_total counter"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_conntrack_close_event_drop_total {}",
|
||||
if core_enabled {
|
||||
stats.get_conntrack_close_event_drop_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_upstream_connect_attempt_total Upstream connect attempts across all requests"
|
||||
|
|
@ -935,6 +1090,492 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
|||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_c2me_enqueue_events_total ME client->ME enqueue outcomes"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_c2me_enqueue_events_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_c2me_enqueue_events_total{{event=\"full\"}} {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_c2me_send_full_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_c2me_enqueue_events_total{{event=\"high_water\"}} {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_c2me_send_high_water_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_c2me_enqueue_events_total{{event=\"timeout\"}} {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_c2me_send_timeout_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_d2c_batches_total Total DC->Client flush batches"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_d2c_batches_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_batches_total {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_d2c_batches_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_d2c_batch_frames_total Total DC->Client frames flushed in batches"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_d2c_batch_frames_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_batch_frames_total {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_d2c_batch_frames_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_d2c_batch_bytes_total Total DC->Client bytes flushed in batches"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_d2c_batch_bytes_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_batch_bytes_total {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_d2c_batch_bytes_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_d2c_flush_reason_total DC->Client flush reasons"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_d2c_flush_reason_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_flush_reason_total{{reason=\"queue_drain\"}} {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_d2c_flush_reason_queue_drain_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_flush_reason_total{{reason=\"batch_frames\"}} {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_d2c_flush_reason_batch_frames_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_flush_reason_total{{reason=\"batch_bytes\"}} {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_d2c_flush_reason_batch_bytes_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_flush_reason_total{{reason=\"max_delay\"}} {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_d2c_flush_reason_max_delay_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_flush_reason_total{{reason=\"ack_immediate\"}} {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_d2c_flush_reason_ack_immediate_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_flush_reason_total{{reason=\"close\"}} {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_d2c_flush_reason_close_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_d2c_data_frames_total DC->Client data frames"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_d2c_data_frames_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_data_frames_total {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_d2c_data_frames_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_d2c_ack_frames_total DC->Client quick-ack frames"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_d2c_ack_frames_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_ack_frames_total {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_d2c_ack_frames_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_d2c_payload_bytes_total DC->Client payload bytes before transport framing"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_d2c_payload_bytes_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_payload_bytes_total {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_d2c_payload_bytes_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_d2c_write_mode_total DC->Client writer mode selection"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_d2c_write_mode_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_write_mode_total{{mode=\"coalesced\"}} {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_d2c_write_mode_coalesced_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_write_mode_total{{mode=\"split\"}} {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_d2c_write_mode_split_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_d2c_quota_reject_total DC->Client quota rejects"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_d2c_quota_reject_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_quota_reject_total{{stage=\"pre_write\"}} {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_d2c_quota_reject_pre_write_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_quota_reject_total{{stage=\"post_write\"}} {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_d2c_quota_reject_post_write_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_d2c_frame_buf_shrink_total DC->Client reusable frame buffer shrink events"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_d2c_frame_buf_shrink_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_frame_buf_shrink_total {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_d2c_frame_buf_shrink_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_d2c_frame_buf_shrink_bytes_total DC->Client reusable frame buffer bytes released"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# TYPE telemt_me_d2c_frame_buf_shrink_bytes_total counter"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_frame_buf_shrink_bytes_total {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_d2c_frame_buf_shrink_bytes_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_d2c_batch_frames_bucket_total DC->Client batch frame count buckets"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# TYPE telemt_me_d2c_batch_frames_bucket_total counter"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_batch_frames_bucket_total{{bucket=\"1\"}} {}",
|
||||
if me_allows_debug {
|
||||
stats.get_me_d2c_batch_frames_bucket_1()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_batch_frames_bucket_total{{bucket=\"2_4\"}} {}",
|
||||
if me_allows_debug {
|
||||
stats.get_me_d2c_batch_frames_bucket_2_4()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_batch_frames_bucket_total{{bucket=\"5_8\"}} {}",
|
||||
if me_allows_debug {
|
||||
stats.get_me_d2c_batch_frames_bucket_5_8()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_batch_frames_bucket_total{{bucket=\"9_16\"}} {}",
|
||||
if me_allows_debug {
|
||||
stats.get_me_d2c_batch_frames_bucket_9_16()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_batch_frames_bucket_total{{bucket=\"17_32\"}} {}",
|
||||
if me_allows_debug {
|
||||
stats.get_me_d2c_batch_frames_bucket_17_32()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_batch_frames_bucket_total{{bucket=\"gt_32\"}} {}",
|
||||
if me_allows_debug {
|
||||
stats.get_me_d2c_batch_frames_bucket_gt_32()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_d2c_batch_bytes_bucket_total DC->Client batch byte size buckets"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_d2c_batch_bytes_bucket_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_batch_bytes_bucket_total{{bucket=\"0_1k\"}} {}",
|
||||
if me_allows_debug {
|
||||
stats.get_me_d2c_batch_bytes_bucket_0_1k()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_batch_bytes_bucket_total{{bucket=\"1k_4k\"}} {}",
|
||||
if me_allows_debug {
|
||||
stats.get_me_d2c_batch_bytes_bucket_1k_4k()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_batch_bytes_bucket_total{{bucket=\"4k_16k\"}} {}",
|
||||
if me_allows_debug {
|
||||
stats.get_me_d2c_batch_bytes_bucket_4k_16k()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_batch_bytes_bucket_total{{bucket=\"16k_64k\"}} {}",
|
||||
if me_allows_debug {
|
||||
stats.get_me_d2c_batch_bytes_bucket_16k_64k()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_batch_bytes_bucket_total{{bucket=\"64k_128k\"}} {}",
|
||||
if me_allows_debug {
|
||||
stats.get_me_d2c_batch_bytes_bucket_64k_128k()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_batch_bytes_bucket_total{{bucket=\"gt_128k\"}} {}",
|
||||
if me_allows_debug {
|
||||
stats.get_me_d2c_batch_bytes_bucket_gt_128k()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_d2c_flush_duration_us_bucket_total DC->Client flush duration buckets"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# TYPE telemt_me_d2c_flush_duration_us_bucket_total counter"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_flush_duration_us_bucket_total{{bucket=\"0_50\"}} {}",
|
||||
if me_allows_debug {
|
||||
stats.get_me_d2c_flush_duration_us_bucket_0_50()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_flush_duration_us_bucket_total{{bucket=\"51_200\"}} {}",
|
||||
if me_allows_debug {
|
||||
stats.get_me_d2c_flush_duration_us_bucket_51_200()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_flush_duration_us_bucket_total{{bucket=\"201_1000\"}} {}",
|
||||
if me_allows_debug {
|
||||
stats.get_me_d2c_flush_duration_us_bucket_201_1000()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_flush_duration_us_bucket_total{{bucket=\"1001_5000\"}} {}",
|
||||
if me_allows_debug {
|
||||
stats.get_me_d2c_flush_duration_us_bucket_1001_5000()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_flush_duration_us_bucket_total{{bucket=\"5001_20000\"}} {}",
|
||||
if me_allows_debug {
|
||||
stats.get_me_d2c_flush_duration_us_bucket_5001_20000()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_flush_duration_us_bucket_total{{bucket=\"gt_20000\"}} {}",
|
||||
if me_allows_debug {
|
||||
stats.get_me_d2c_flush_duration_us_bucket_gt_20000()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_d2c_batch_timeout_armed_total DC->Client max-delay timer armed events"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# TYPE telemt_me_d2c_batch_timeout_armed_total counter"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_batch_timeout_armed_total {}",
|
||||
if me_allows_debug {
|
||||
stats.get_me_d2c_batch_timeout_armed_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_d2c_batch_timeout_fired_total DC->Client max-delay timer fired events"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# TYPE telemt_me_d2c_batch_timeout_fired_total counter"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_d2c_batch_timeout_fired_total {}",
|
||||
if me_allows_debug {
|
||||
stats.get_me_d2c_batch_timeout_fired_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_writer_pick_total ME writer-pick outcomes by mode and result"
|
||||
|
|
@ -1105,6 +1746,40 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
|||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_endpoint_quarantine_unexpected_total ME endpoint quarantines caused by unexpected writer removals"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# TYPE telemt_me_endpoint_quarantine_unexpected_total counter"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_endpoint_quarantine_unexpected_total {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_endpoint_quarantine_unexpected_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_endpoint_quarantine_draining_suppressed_total Draining writer removals that skipped endpoint quarantine"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# TYPE telemt_me_endpoint_quarantine_draining_suppressed_total counter"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_endpoint_quarantine_draining_suppressed_total {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_endpoint_quarantine_draining_suppressed_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
|
|
@ -1865,6 +2540,20 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
|||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_hybrid_timeout_total ME hybrid route timeouts after bounded retry window"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_hybrid_timeout_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_hybrid_timeout_total {}",
|
||||
if me_allows_normal {
|
||||
stats.get_me_hybrid_timeout_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_async_recovery_trigger_total Async ME recovery trigger attempts from route path"
|
||||
|
|
@ -1983,6 +2672,48 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
|||
if user_enabled { 0 } else { 1 }
|
||||
);
|
||||
|
||||
let ip_memory = ip_tracker.memory_stats().await;
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_ip_tracker_users Number of users tracked by IP limiter state"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_ip_tracker_users gauge");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_ip_tracker_users{{scope=\"active\"}} {}",
|
||||
ip_memory.active_users
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_ip_tracker_users{{scope=\"recent\"}} {}",
|
||||
ip_memory.recent_users
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_ip_tracker_entries Number of IP entries tracked by limiter state"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_ip_tracker_entries gauge");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_ip_tracker_entries{{scope=\"active\"}} {}",
|
||||
ip_memory.active_entries
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_ip_tracker_entries{{scope=\"recent\"}} {}",
|
||||
ip_memory.recent_entries
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_ip_tracker_cleanup_queue_len Deferred disconnect cleanup queue length"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_ip_tracker_cleanup_queue_len gauge");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_ip_tracker_cleanup_queue_len {}",
|
||||
ip_memory.cleanup_queue_len
|
||||
);
|
||||
|
||||
if user_enabled {
|
||||
for entry in stats.iter_user_stats() {
|
||||
let user = entry.key();
|
||||
|
|
@ -2145,6 +2876,19 @@ mod tests {
|
|||
stats.increment_relay_idle_hard_close_total();
|
||||
stats.increment_relay_pressure_evict_total();
|
||||
stats.increment_relay_protocol_desync_close_total();
|
||||
stats.increment_me_d2c_batches_total();
|
||||
stats.add_me_d2c_batch_frames_total(3);
|
||||
stats.add_me_d2c_batch_bytes_total(2048);
|
||||
stats.increment_me_d2c_flush_reason(crate::stats::MeD2cFlushReason::AckImmediate);
|
||||
stats.increment_me_d2c_data_frames_total();
|
||||
stats.increment_me_d2c_ack_frames_total();
|
||||
stats.add_me_d2c_payload_bytes_total(1800);
|
||||
stats.increment_me_d2c_write_mode(crate::stats::MeD2cWriteMode::Coalesced);
|
||||
stats.increment_me_d2c_quota_reject_total(crate::stats::MeD2cQuotaRejectStage::PostWrite);
|
||||
stats.observe_me_d2c_frame_buf_shrink(4096);
|
||||
stats.increment_me_endpoint_quarantine_total();
|
||||
stats.increment_me_endpoint_quarantine_unexpected_total();
|
||||
stats.increment_me_endpoint_quarantine_draining_suppressed_total();
|
||||
stats.increment_user_connects("alice");
|
||||
stats.increment_user_curr_connects("alice");
|
||||
stats.add_user_octets_from("alice", 1024);
|
||||
|
|
@ -2184,6 +2928,20 @@ mod tests {
|
|||
assert!(output.contains("telemt_relay_idle_hard_close_total 1"));
|
||||
assert!(output.contains("telemt_relay_pressure_evict_total 1"));
|
||||
assert!(output.contains("telemt_relay_protocol_desync_close_total 1"));
|
||||
assert!(output.contains("telemt_me_d2c_batches_total 1"));
|
||||
assert!(output.contains("telemt_me_d2c_batch_frames_total 3"));
|
||||
assert!(output.contains("telemt_me_d2c_batch_bytes_total 2048"));
|
||||
assert!(output.contains("telemt_me_d2c_flush_reason_total{reason=\"ack_immediate\"} 1"));
|
||||
assert!(output.contains("telemt_me_d2c_data_frames_total 1"));
|
||||
assert!(output.contains("telemt_me_d2c_ack_frames_total 1"));
|
||||
assert!(output.contains("telemt_me_d2c_payload_bytes_total 1800"));
|
||||
assert!(output.contains("telemt_me_d2c_write_mode_total{mode=\"coalesced\"} 1"));
|
||||
assert!(output.contains("telemt_me_d2c_quota_reject_total{stage=\"post_write\"} 1"));
|
||||
assert!(output.contains("telemt_me_d2c_frame_buf_shrink_total 1"));
|
||||
assert!(output.contains("telemt_me_d2c_frame_buf_shrink_bytes_total 4096"));
|
||||
assert!(output.contains("telemt_me_endpoint_quarantine_total 1"));
|
||||
assert!(output.contains("telemt_me_endpoint_quarantine_unexpected_total 1"));
|
||||
assert!(output.contains("telemt_me_endpoint_quarantine_draining_suppressed_total 1"));
|
||||
assert!(output.contains("telemt_user_connections_total{user=\"alice\"} 1"));
|
||||
assert!(output.contains("telemt_user_connections_current{user=\"alice\"} 1"));
|
||||
assert!(output.contains("telemt_user_octets_from_client{user=\"alice\"} 1024"));
|
||||
|
|
@ -2194,6 +2952,9 @@ mod tests {
|
|||
assert!(output.contains("telemt_user_unique_ips_recent_window{user=\"alice\"} 1"));
|
||||
assert!(output.contains("telemt_user_unique_ips_limit{user=\"alice\"} 4"));
|
||||
assert!(output.contains("telemt_user_unique_ips_utilization{user=\"alice\"} 0.250000"));
|
||||
assert!(output.contains("telemt_ip_tracker_users{scope=\"active\"} 1"));
|
||||
assert!(output.contains("telemt_ip_tracker_entries{scope=\"active\"} 1"));
|
||||
assert!(output.contains("telemt_ip_tracker_cleanup_queue_len 0"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
|
|
@ -2245,6 +3006,17 @@ mod tests {
|
|||
assert!(output.contains("# TYPE telemt_relay_idle_hard_close_total counter"));
|
||||
assert!(output.contains("# TYPE telemt_relay_pressure_evict_total counter"));
|
||||
assert!(output.contains("# TYPE telemt_relay_protocol_desync_close_total counter"));
|
||||
assert!(output.contains("# TYPE telemt_me_d2c_batches_total counter"));
|
||||
assert!(output.contains("# TYPE telemt_me_d2c_flush_reason_total counter"));
|
||||
assert!(output.contains("# TYPE telemt_me_d2c_write_mode_total counter"));
|
||||
assert!(output.contains("# TYPE telemt_me_d2c_batch_frames_bucket_total counter"));
|
||||
assert!(output.contains("# TYPE telemt_me_d2c_flush_duration_us_bucket_total counter"));
|
||||
assert!(output.contains("# TYPE telemt_me_endpoint_quarantine_total counter"));
|
||||
assert!(output.contains("# TYPE telemt_me_endpoint_quarantine_unexpected_total counter"));
|
||||
assert!(
|
||||
output
|
||||
.contains("# TYPE telemt_me_endpoint_quarantine_draining_suppressed_total counter")
|
||||
);
|
||||
assert!(output.contains("# TYPE telemt_me_writer_removed_total counter"));
|
||||
assert!(
|
||||
output
|
||||
|
|
@ -2254,6 +3026,9 @@ mod tests {
|
|||
assert!(output.contains("# TYPE telemt_user_unique_ips_recent_window gauge"));
|
||||
assert!(output.contains("# TYPE telemt_user_unique_ips_limit gauge"));
|
||||
assert!(output.contains("# TYPE telemt_user_unique_ips_utilization gauge"));
|
||||
assert!(output.contains("# TYPE telemt_ip_tracker_users gauge"));
|
||||
assert!(output.contains("# TYPE telemt_ip_tracker_entries gauge"));
|
||||
assert!(output.contains("# TYPE telemt_ip_tracker_cleanup_queue_len gauge"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
|
|
|
|||
|
|
@ -24,6 +24,8 @@ const DIRECT_S2C_CAP_BYTES: usize = 512 * 1024;
|
|||
const ME_FRAMES_CAP: usize = 96;
|
||||
const ME_BYTES_CAP: usize = 384 * 1024;
|
||||
const ME_DELAY_MIN_US: u64 = 150;
|
||||
const MAX_USER_PROFILES_ENTRIES: usize = 50_000;
|
||||
const MAX_USER_KEY_BYTES: usize = 512;
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
|
||||
pub enum AdaptiveTier {
|
||||
|
|
@ -234,32 +236,50 @@ fn profiles() -> &'static DashMap<String, UserAdaptiveProfile> {
|
|||
}
|
||||
|
||||
pub fn seed_tier_for_user(user: &str) -> AdaptiveTier {
|
||||
if user.len() > MAX_USER_KEY_BYTES {
|
||||
return AdaptiveTier::Base;
|
||||
}
|
||||
let now = Instant::now();
|
||||
if let Some(entry) = profiles().get(user) {
|
||||
let value = entry.value();
|
||||
if now.duration_since(value.seen_at) <= PROFILE_TTL {
|
||||
let value = *entry.value();
|
||||
drop(entry);
|
||||
if now.saturating_duration_since(value.seen_at) <= PROFILE_TTL {
|
||||
return value.tier;
|
||||
}
|
||||
profiles().remove_if(user, |_, v| {
|
||||
now.saturating_duration_since(v.seen_at) > PROFILE_TTL
|
||||
});
|
||||
}
|
||||
AdaptiveTier::Base
|
||||
}
|
||||
|
||||
pub fn record_user_tier(user: &str, tier: AdaptiveTier) {
|
||||
let now = Instant::now();
|
||||
if let Some(mut entry) = profiles().get_mut(user) {
|
||||
let existing = *entry;
|
||||
let effective = if now.duration_since(existing.seen_at) > PROFILE_TTL {
|
||||
tier
|
||||
} else {
|
||||
max(existing.tier, tier)
|
||||
};
|
||||
*entry = UserAdaptiveProfile {
|
||||
tier: effective,
|
||||
seen_at: now,
|
||||
};
|
||||
if user.len() > MAX_USER_KEY_BYTES {
|
||||
return;
|
||||
}
|
||||
profiles().insert(user.to_string(), UserAdaptiveProfile { tier, seen_at: now });
|
||||
let now = Instant::now();
|
||||
let mut was_vacant = false;
|
||||
match profiles().entry(user.to_string()) {
|
||||
dashmap::mapref::entry::Entry::Occupied(mut entry) => {
|
||||
let existing = *entry.get();
|
||||
let effective = if now.saturating_duration_since(existing.seen_at) > PROFILE_TTL {
|
||||
tier
|
||||
} else {
|
||||
max(existing.tier, tier)
|
||||
};
|
||||
entry.insert(UserAdaptiveProfile {
|
||||
tier: effective,
|
||||
seen_at: now,
|
||||
});
|
||||
}
|
||||
dashmap::mapref::entry::Entry::Vacant(slot) => {
|
||||
slot.insert(UserAdaptiveProfile { tier, seen_at: now });
|
||||
was_vacant = true;
|
||||
}
|
||||
}
|
||||
if was_vacant && profiles().len() > MAX_USER_PROFILES_ENTRIES {
|
||||
profiles().retain(|_, v| now.saturating_duration_since(v.seen_at) <= PROFILE_TTL);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn direct_copy_buffers_for_tier(
|
||||
|
|
@ -310,6 +330,14 @@ fn scale(base: usize, numerator: usize, denominator: usize, cap: usize) -> usize
|
|||
scaled.min(cap).max(1)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/adaptive_buffers_security_tests.rs"]
|
||||
mod adaptive_buffers_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/adaptive_buffers_record_race_security_tests.rs"]
|
||||
mod adaptive_buffers_record_race_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
|
|
|||
|
|
@ -80,11 +80,16 @@ use crate::transport::middle_proxy::MePool;
|
|||
use crate::transport::socket::normalize_ip;
|
||||
use crate::transport::{UpstreamManager, configure_client_socket, parse_proxy_protocol};
|
||||
|
||||
use crate::proxy::direct_relay::handle_via_direct;
|
||||
use crate::proxy::handshake::{HandshakeSuccess, handle_mtproto_handshake, handle_tls_handshake};
|
||||
use crate::proxy::direct_relay::handle_via_direct_with_shared;
|
||||
use crate::proxy::handshake::{
|
||||
HandshakeSuccess, handle_mtproto_handshake_with_shared, handle_tls_handshake_with_shared,
|
||||
};
|
||||
#[cfg(test)]
|
||||
use crate::proxy::handshake::{handle_mtproto_handshake, handle_tls_handshake};
|
||||
use crate::proxy::masking::handle_bad_client;
|
||||
use crate::proxy::middle_relay::handle_via_middle_proxy;
|
||||
use crate::proxy::route_mode::{RelayRouteMode, RouteRuntimeController};
|
||||
use crate::proxy::shared_state::ProxySharedState;
|
||||
|
||||
fn beobachten_ttl(config: &ProxyConfig) -> Duration {
|
||||
const BEOBACHTEN_TTL_MAX_MINUTES: u64 = 24 * 60;
|
||||
|
|
@ -186,6 +191,90 @@ fn handshake_timeout_with_mask_grace(config: &ProxyConfig) -> Duration {
|
|||
}
|
||||
}
|
||||
|
||||
fn effective_client_first_byte_idle_secs(config: &ProxyConfig, shared: &ProxySharedState) -> u64 {
|
||||
let idle_secs = config.timeouts.client_first_byte_idle_secs;
|
||||
if idle_secs == 0 {
|
||||
return 0;
|
||||
}
|
||||
if shared.conntrack_pressure_active() {
|
||||
idle_secs.min(
|
||||
config
|
||||
.server
|
||||
.conntrack_control
|
||||
.profile
|
||||
.client_first_byte_idle_cap_secs(),
|
||||
)
|
||||
} else {
|
||||
idle_secs
|
||||
}
|
||||
}
|
||||
|
||||
const MASK_CLASSIFIER_PREFETCH_WINDOW: usize = 16;
|
||||
#[cfg(test)]
|
||||
const MASK_CLASSIFIER_PREFETCH_TIMEOUT: Duration = Duration::from_millis(5);
|
||||
|
||||
fn mask_classifier_prefetch_timeout(config: &ProxyConfig) -> Duration {
|
||||
Duration::from_millis(config.censorship.mask_classifier_prefetch_timeout_ms)
|
||||
}
|
||||
|
||||
fn should_prefetch_mask_classifier_window(initial_data: &[u8]) -> bool {
|
||||
if initial_data.len() >= MASK_CLASSIFIER_PREFETCH_WINDOW {
|
||||
return false;
|
||||
}
|
||||
|
||||
if initial_data.is_empty() {
|
||||
// Empty initial_data means there is no client probe prefix to refine.
|
||||
// Prefetching in this case can consume fallback relay payload bytes and
|
||||
// accidentally route them through shaping heuristics.
|
||||
return false;
|
||||
}
|
||||
|
||||
if initial_data[0] == 0x16 || initial_data.starts_with(b"SSH-") {
|
||||
return false;
|
||||
}
|
||||
|
||||
initial_data
|
||||
.iter()
|
||||
.all(|b| b.is_ascii_alphabetic() || *b == b' ')
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
async fn extend_masking_initial_window<R>(reader: &mut R, initial_data: &mut Vec<u8>)
|
||||
where
|
||||
R: AsyncRead + Unpin,
|
||||
{
|
||||
extend_masking_initial_window_with_timeout(
|
||||
reader,
|
||||
initial_data,
|
||||
MASK_CLASSIFIER_PREFETCH_TIMEOUT,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
async fn extend_masking_initial_window_with_timeout<R>(
|
||||
reader: &mut R,
|
||||
initial_data: &mut Vec<u8>,
|
||||
prefetch_timeout: Duration,
|
||||
) where
|
||||
R: AsyncRead + Unpin,
|
||||
{
|
||||
if !should_prefetch_mask_classifier_window(initial_data) {
|
||||
return;
|
||||
}
|
||||
|
||||
let need = MASK_CLASSIFIER_PREFETCH_WINDOW.saturating_sub(initial_data.len());
|
||||
if need == 0 {
|
||||
return;
|
||||
}
|
||||
|
||||
let mut extra = [0u8; MASK_CLASSIFIER_PREFETCH_WINDOW];
|
||||
if let Ok(Ok(n)) = timeout(prefetch_timeout, reader.read(&mut extra[..need])).await
|
||||
&& n > 0
|
||||
{
|
||||
initial_data.extend_from_slice(&extra[..n]);
|
||||
}
|
||||
}
|
||||
|
||||
fn masking_outcome<R, W>(
|
||||
reader: R,
|
||||
writer: W,
|
||||
|
|
@ -200,6 +289,15 @@ where
|
|||
W: AsyncWrite + Unpin + Send + 'static,
|
||||
{
|
||||
HandshakeOutcome::NeedsMasking(Box::pin(async move {
|
||||
let mut reader = reader;
|
||||
let mut initial_data = initial_data;
|
||||
extend_masking_initial_window_with_timeout(
|
||||
&mut reader,
|
||||
&mut initial_data,
|
||||
mask_classifier_prefetch_timeout(&config),
|
||||
)
|
||||
.await;
|
||||
|
||||
handle_bad_client(
|
||||
reader,
|
||||
writer,
|
||||
|
|
@ -242,13 +340,20 @@ fn record_handshake_failure_class(
|
|||
record_beobachten_class(beobachten, config, peer_ip, class);
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn increment_bad_on_unknown_tls_sni(stats: &Stats, error: &ProxyError) {
|
||||
if matches!(error, ProxyError::UnknownTlsSni) {
|
||||
stats.increment_connects_bad();
|
||||
}
|
||||
}
|
||||
|
||||
fn is_trusted_proxy_source(peer_ip: IpAddr, trusted: &[IpNetwork]) -> bool {
|
||||
if trusted.is_empty() {
|
||||
static EMPTY_PROXY_TRUST_WARNED: OnceLock<AtomicBool> = OnceLock::new();
|
||||
let warned = EMPTY_PROXY_TRUST_WARNED.get_or_init(|| AtomicBool::new(false));
|
||||
if !warned.swap(true, Ordering::Relaxed) {
|
||||
warn!(
|
||||
"PROXY protocol enabled but server.proxy_protocol_trusted_cidrs is empty; rejecting all PROXY headers by default"
|
||||
"PROXY protocol enabled but server.proxy_protocol_trusted_cidrs is empty; rejecting all PROXY headers"
|
||||
);
|
||||
}
|
||||
return false;
|
||||
|
|
@ -260,7 +365,48 @@ fn synthetic_local_addr(port: u16) -> SocketAddr {
|
|||
SocketAddr::from(([0, 0, 0, 0], port))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub async fn handle_client_stream<S>(
|
||||
stream: S,
|
||||
peer: SocketAddr,
|
||||
config: Arc<ProxyConfig>,
|
||||
stats: Arc<Stats>,
|
||||
upstream_manager: Arc<UpstreamManager>,
|
||||
replay_checker: Arc<ReplayChecker>,
|
||||
buffer_pool: Arc<BufferPool>,
|
||||
rng: Arc<SecureRandom>,
|
||||
me_pool: Option<Arc<MePool>>,
|
||||
route_runtime: Arc<RouteRuntimeController>,
|
||||
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
beobachten: Arc<BeobachtenStore>,
|
||||
proxy_protocol_enabled: bool,
|
||||
) -> Result<()>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin + Send + 'static,
|
||||
{
|
||||
handle_client_stream_with_shared(
|
||||
stream,
|
||||
peer,
|
||||
config,
|
||||
stats,
|
||||
upstream_manager,
|
||||
replay_checker,
|
||||
buffer_pool,
|
||||
rng,
|
||||
me_pool,
|
||||
route_runtime,
|
||||
tls_cache,
|
||||
ip_tracker,
|
||||
beobachten,
|
||||
ProxySharedState::new(),
|
||||
proxy_protocol_enabled,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub async fn handle_client_stream_with_shared<S>(
|
||||
mut stream: S,
|
||||
peer: SocketAddr,
|
||||
config: Arc<ProxyConfig>,
|
||||
|
|
@ -274,6 +420,7 @@ pub async fn handle_client_stream<S>(
|
|||
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
beobachten: Arc<BeobachtenStore>,
|
||||
shared: Arc<ProxySharedState>,
|
||||
proxy_protocol_enabled: bool,
|
||||
) -> Result<()>
|
||||
where
|
||||
|
|
@ -334,16 +481,69 @@ where
|
|||
|
||||
debug!(peer = %real_peer, "New connection (generic stream)");
|
||||
|
||||
let first_byte_idle_secs = effective_client_first_byte_idle_secs(&config, shared.as_ref());
|
||||
let first_byte = if first_byte_idle_secs == 0 {
|
||||
None
|
||||
} else {
|
||||
let idle_timeout = Duration::from_secs(first_byte_idle_secs);
|
||||
let mut first_byte = [0u8; 1];
|
||||
match timeout(idle_timeout, stream.read(&mut first_byte)).await {
|
||||
Ok(Ok(0)) => {
|
||||
debug!(peer = %real_peer, "Connection closed before first client byte");
|
||||
return Ok(());
|
||||
}
|
||||
Ok(Ok(_)) => Some(first_byte[0]),
|
||||
Ok(Err(e))
|
||||
if matches!(
|
||||
e.kind(),
|
||||
std::io::ErrorKind::UnexpectedEof
|
||||
| std::io::ErrorKind::ConnectionReset
|
||||
| std::io::ErrorKind::ConnectionAborted
|
||||
| std::io::ErrorKind::BrokenPipe
|
||||
| std::io::ErrorKind::NotConnected
|
||||
) =>
|
||||
{
|
||||
debug!(
|
||||
peer = %real_peer,
|
||||
error = %e,
|
||||
"Connection closed before first client byte"
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
Ok(Err(e)) => {
|
||||
debug!(
|
||||
peer = %real_peer,
|
||||
error = %e,
|
||||
"Failed while waiting for first client byte"
|
||||
);
|
||||
return Err(ProxyError::Io(e));
|
||||
}
|
||||
Err(_) => {
|
||||
debug!(
|
||||
peer = %real_peer,
|
||||
idle_secs = first_byte_idle_secs,
|
||||
"Closing idle pooled connection before first client byte"
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let handshake_timeout = handshake_timeout_with_mask_grace(&config);
|
||||
let stats_for_timeout = stats.clone();
|
||||
let config_for_timeout = config.clone();
|
||||
let beobachten_for_timeout = beobachten.clone();
|
||||
let peer_for_timeout = real_peer.ip();
|
||||
|
||||
// Phase 1: handshake (with timeout)
|
||||
// Phase 2: active handshake (with timeout after the first client byte)
|
||||
let outcome = match timeout(handshake_timeout, async {
|
||||
let mut first_bytes = [0u8; 5];
|
||||
stream.read_exact(&mut first_bytes).await?;
|
||||
if let Some(first_byte) = first_byte {
|
||||
first_bytes[0] = first_byte;
|
||||
stream.read_exact(&mut first_bytes[1..]).await?;
|
||||
} else {
|
||||
stream.read_exact(&mut first_bytes).await?;
|
||||
}
|
||||
|
||||
let is_tls = tls::is_tls_handshake(&first_bytes[..3]);
|
||||
debug!(peer = %real_peer, is_tls = is_tls, "Handshake type detected");
|
||||
|
|
@ -416,9 +616,10 @@ where
|
|||
|
||||
let (read_half, write_half) = tokio::io::split(stream);
|
||||
|
||||
let (mut tls_reader, tls_writer, tls_user) = match handle_tls_handshake(
|
||||
let (mut tls_reader, tls_writer, tls_user) = match handle_tls_handshake_with_shared(
|
||||
&handshake, read_half, write_half, real_peer,
|
||||
&config, &replay_checker, &rng, tls_cache.clone(),
|
||||
shared.as_ref(),
|
||||
).await {
|
||||
HandshakeResult::Success(result) => result,
|
||||
HandshakeResult::BadClient { reader, writer } => {
|
||||
|
|
@ -433,7 +634,10 @@ where
|
|||
beobachten.clone(),
|
||||
));
|
||||
}
|
||||
HandshakeResult::Error(e) => return Err(e),
|
||||
HandshakeResult::Error(e) => {
|
||||
increment_bad_on_unknown_tls_sni(stats.as_ref(), &e);
|
||||
return Err(e);
|
||||
}
|
||||
};
|
||||
|
||||
debug!(peer = %peer, "Reading MTProto handshake through TLS");
|
||||
|
|
@ -441,9 +645,10 @@ where
|
|||
let mtproto_handshake: [u8; HANDSHAKE_LEN] = mtproto_data[..].try_into()
|
||||
.map_err(|_| ProxyError::InvalidHandshake("Short MTProto handshake".into()))?;
|
||||
|
||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake_with_shared(
|
||||
&mtproto_handshake, tls_reader, tls_writer, real_peer,
|
||||
&config, &replay_checker, true, Some(tls_user.as_str()),
|
||||
shared.as_ref(),
|
||||
).await {
|
||||
HandshakeResult::Success(result) => result,
|
||||
HandshakeResult::BadClient { reader, writer } => {
|
||||
|
|
@ -477,11 +682,12 @@ where
|
|||
};
|
||||
|
||||
Ok(HandshakeOutcome::NeedsRelay(Box::pin(
|
||||
RunningClientHandler::handle_authenticated_static(
|
||||
RunningClientHandler::handle_authenticated_static_with_shared(
|
||||
crypto_reader, crypto_writer, success,
|
||||
upstream_manager, stats, config, buffer_pool, rng, me_pool,
|
||||
route_runtime.clone(),
|
||||
local_addr, real_peer, ip_tracker.clone(),
|
||||
shared.clone(),
|
||||
),
|
||||
)))
|
||||
} else {
|
||||
|
|
@ -507,9 +713,10 @@ where
|
|||
|
||||
let (read_half, write_half) = tokio::io::split(stream);
|
||||
|
||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake_with_shared(
|
||||
&handshake, read_half, write_half, real_peer,
|
||||
&config, &replay_checker, false, None,
|
||||
shared.as_ref(),
|
||||
).await {
|
||||
HandshakeResult::Success(result) => result,
|
||||
HandshakeResult::BadClient { reader, writer } => {
|
||||
|
|
@ -528,7 +735,7 @@ where
|
|||
};
|
||||
|
||||
Ok(HandshakeOutcome::NeedsRelay(Box::pin(
|
||||
RunningClientHandler::handle_authenticated_static(
|
||||
RunningClientHandler::handle_authenticated_static_with_shared(
|
||||
crypto_reader,
|
||||
crypto_writer,
|
||||
success,
|
||||
|
|
@ -542,6 +749,7 @@ where
|
|||
local_addr,
|
||||
real_peer,
|
||||
ip_tracker.clone(),
|
||||
shared.clone(),
|
||||
)
|
||||
)))
|
||||
}
|
||||
|
|
@ -594,10 +802,12 @@ pub struct RunningClientHandler {
|
|||
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
beobachten: Arc<BeobachtenStore>,
|
||||
shared: Arc<ProxySharedState>,
|
||||
proxy_protocol_enabled: bool,
|
||||
}
|
||||
|
||||
impl ClientHandler {
|
||||
#[cfg(test)]
|
||||
pub fn new(
|
||||
stream: TcpStream,
|
||||
peer: SocketAddr,
|
||||
|
|
@ -614,6 +824,45 @@ impl ClientHandler {
|
|||
beobachten: Arc<BeobachtenStore>,
|
||||
proxy_protocol_enabled: bool,
|
||||
real_peer_report: Arc<std::sync::Mutex<Option<SocketAddr>>>,
|
||||
) -> RunningClientHandler {
|
||||
Self::new_with_shared(
|
||||
stream,
|
||||
peer,
|
||||
config,
|
||||
stats,
|
||||
upstream_manager,
|
||||
replay_checker,
|
||||
buffer_pool,
|
||||
rng,
|
||||
me_pool,
|
||||
route_runtime,
|
||||
tls_cache,
|
||||
ip_tracker,
|
||||
beobachten,
|
||||
ProxySharedState::new(),
|
||||
proxy_protocol_enabled,
|
||||
real_peer_report,
|
||||
)
|
||||
}
|
||||
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn new_with_shared(
|
||||
stream: TcpStream,
|
||||
peer: SocketAddr,
|
||||
config: Arc<ProxyConfig>,
|
||||
stats: Arc<Stats>,
|
||||
upstream_manager: Arc<UpstreamManager>,
|
||||
replay_checker: Arc<ReplayChecker>,
|
||||
buffer_pool: Arc<BufferPool>,
|
||||
rng: Arc<SecureRandom>,
|
||||
me_pool: Option<Arc<MePool>>,
|
||||
route_runtime: Arc<RouteRuntimeController>,
|
||||
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
beobachten: Arc<BeobachtenStore>,
|
||||
shared: Arc<ProxySharedState>,
|
||||
proxy_protocol_enabled: bool,
|
||||
real_peer_report: Arc<std::sync::Mutex<Option<SocketAddr>>>,
|
||||
) -> RunningClientHandler {
|
||||
let normalized_peer = normalize_ip(peer);
|
||||
RunningClientHandler {
|
||||
|
|
@ -632,6 +881,7 @@ impl ClientHandler {
|
|||
tls_cache,
|
||||
ip_tracker,
|
||||
beobachten,
|
||||
shared,
|
||||
proxy_protocol_enabled,
|
||||
}
|
||||
}
|
||||
|
|
@ -651,36 +901,9 @@ impl RunningClientHandler {
|
|||
debug!(peer = %peer, error = %e, "Failed to configure client socket");
|
||||
}
|
||||
|
||||
let handshake_timeout = handshake_timeout_with_mask_grace(&self.config);
|
||||
let stats = self.stats.clone();
|
||||
let config_for_timeout = self.config.clone();
|
||||
let beobachten_for_timeout = self.beobachten.clone();
|
||||
let peer_for_timeout = peer.ip();
|
||||
|
||||
// Phase 1: handshake (with timeout)
|
||||
let outcome = match timeout(handshake_timeout, self.do_handshake()).await {
|
||||
Ok(Ok(outcome)) => outcome,
|
||||
Ok(Err(e)) => {
|
||||
debug!(peer = %peer, error = %e, "Handshake failed");
|
||||
record_handshake_failure_class(
|
||||
&beobachten_for_timeout,
|
||||
&config_for_timeout,
|
||||
peer_for_timeout,
|
||||
&e,
|
||||
);
|
||||
return Err(e);
|
||||
}
|
||||
Err(_) => {
|
||||
stats.increment_handshake_timeouts();
|
||||
debug!(peer = %peer, "Handshake timeout");
|
||||
record_beobachten_class(
|
||||
&beobachten_for_timeout,
|
||||
&config_for_timeout,
|
||||
peer_for_timeout,
|
||||
"other",
|
||||
);
|
||||
return Err(ProxyError::TgHandshakeTimeout);
|
||||
}
|
||||
let outcome = match self.do_handshake().await? {
|
||||
Some(outcome) => outcome,
|
||||
None => return Ok(()),
|
||||
};
|
||||
|
||||
// Phase 2: relay (WITHOUT handshake timeout — relay has its own activity timeouts)
|
||||
|
|
@ -689,7 +912,7 @@ impl RunningClientHandler {
|
|||
}
|
||||
}
|
||||
|
||||
async fn do_handshake(mut self) -> Result<HandshakeOutcome> {
|
||||
async fn do_handshake(mut self) -> Result<Option<HandshakeOutcome>> {
|
||||
let mut local_addr = self.stream.local_addr().map_err(ProxyError::Io)?;
|
||||
|
||||
if self.proxy_protocol_enabled {
|
||||
|
|
@ -764,19 +987,109 @@ impl RunningClientHandler {
|
|||
}
|
||||
}
|
||||
|
||||
let mut first_bytes = [0u8; 5];
|
||||
self.stream.read_exact(&mut first_bytes).await?;
|
||||
|
||||
let is_tls = tls::is_tls_handshake(&first_bytes[..3]);
|
||||
let peer = self.peer;
|
||||
|
||||
debug!(peer = %peer, is_tls = is_tls, "Handshake type detected");
|
||||
|
||||
if is_tls {
|
||||
self.handle_tls_client(first_bytes, local_addr).await
|
||||
let first_byte_idle_secs =
|
||||
effective_client_first_byte_idle_secs(&self.config, self.shared.as_ref());
|
||||
let first_byte = if first_byte_idle_secs == 0 {
|
||||
None
|
||||
} else {
|
||||
self.handle_direct_client(first_bytes, local_addr).await
|
||||
}
|
||||
let idle_timeout = Duration::from_secs(first_byte_idle_secs);
|
||||
let mut first_byte = [0u8; 1];
|
||||
match timeout(idle_timeout, self.stream.read(&mut first_byte)).await {
|
||||
Ok(Ok(0)) => {
|
||||
debug!(peer = %self.peer, "Connection closed before first client byte");
|
||||
return Ok(None);
|
||||
}
|
||||
Ok(Ok(_)) => Some(first_byte[0]),
|
||||
Ok(Err(e))
|
||||
if matches!(
|
||||
e.kind(),
|
||||
std::io::ErrorKind::UnexpectedEof
|
||||
| std::io::ErrorKind::ConnectionReset
|
||||
| std::io::ErrorKind::ConnectionAborted
|
||||
| std::io::ErrorKind::BrokenPipe
|
||||
| std::io::ErrorKind::NotConnected
|
||||
) =>
|
||||
{
|
||||
debug!(
|
||||
peer = %self.peer,
|
||||
error = %e,
|
||||
"Connection closed before first client byte"
|
||||
);
|
||||
return Ok(None);
|
||||
}
|
||||
Ok(Err(e)) => {
|
||||
debug!(
|
||||
peer = %self.peer,
|
||||
error = %e,
|
||||
"Failed while waiting for first client byte"
|
||||
);
|
||||
return Err(ProxyError::Io(e));
|
||||
}
|
||||
Err(_) => {
|
||||
debug!(
|
||||
peer = %self.peer,
|
||||
idle_secs = first_byte_idle_secs,
|
||||
"Closing idle pooled connection before first client byte"
|
||||
);
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let handshake_timeout = handshake_timeout_with_mask_grace(&self.config);
|
||||
let stats = self.stats.clone();
|
||||
let config_for_timeout = self.config.clone();
|
||||
let beobachten_for_timeout = self.beobachten.clone();
|
||||
let peer_for_timeout = self.peer.ip();
|
||||
let peer_for_log = self.peer;
|
||||
|
||||
let outcome = match timeout(handshake_timeout, async {
|
||||
let mut first_bytes = [0u8; 5];
|
||||
if let Some(first_byte) = first_byte {
|
||||
first_bytes[0] = first_byte;
|
||||
self.stream.read_exact(&mut first_bytes[1..]).await?;
|
||||
} else {
|
||||
self.stream.read_exact(&mut first_bytes).await?;
|
||||
}
|
||||
|
||||
let is_tls = tls::is_tls_handshake(&first_bytes[..3]);
|
||||
let peer = self.peer;
|
||||
|
||||
debug!(peer = %peer, is_tls = is_tls, "Handshake type detected");
|
||||
|
||||
if is_tls {
|
||||
self.handle_tls_client(first_bytes, local_addr).await
|
||||
} else {
|
||||
self.handle_direct_client(first_bytes, local_addr).await
|
||||
}
|
||||
})
|
||||
.await
|
||||
{
|
||||
Ok(Ok(outcome)) => outcome,
|
||||
Ok(Err(e)) => {
|
||||
debug!(peer = %peer_for_log, error = %e, "Handshake failed");
|
||||
record_handshake_failure_class(
|
||||
&beobachten_for_timeout,
|
||||
&config_for_timeout,
|
||||
peer_for_timeout,
|
||||
&e,
|
||||
);
|
||||
return Err(e);
|
||||
}
|
||||
Err(_) => {
|
||||
stats.increment_handshake_timeouts();
|
||||
debug!(peer = %peer_for_log, "Handshake timeout");
|
||||
record_beobachten_class(
|
||||
&beobachten_for_timeout,
|
||||
&config_for_timeout,
|
||||
peer_for_timeout,
|
||||
"other",
|
||||
);
|
||||
return Err(ProxyError::TgHandshakeTimeout);
|
||||
}
|
||||
};
|
||||
|
||||
Ok(Some(outcome))
|
||||
}
|
||||
|
||||
async fn handle_tls_client(
|
||||
|
|
@ -859,7 +1172,7 @@ impl RunningClientHandler {
|
|||
|
||||
let (read_half, write_half) = self.stream.into_split();
|
||||
|
||||
let (mut tls_reader, tls_writer, tls_user) = match handle_tls_handshake(
|
||||
let (mut tls_reader, tls_writer, tls_user) = match handle_tls_handshake_with_shared(
|
||||
&handshake,
|
||||
read_half,
|
||||
write_half,
|
||||
|
|
@ -868,6 +1181,7 @@ impl RunningClientHandler {
|
|||
&replay_checker,
|
||||
&self.rng,
|
||||
self.tls_cache.clone(),
|
||||
self.shared.as_ref(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
|
|
@ -884,7 +1198,10 @@ impl RunningClientHandler {
|
|||
self.beobachten.clone(),
|
||||
));
|
||||
}
|
||||
HandshakeResult::Error(e) => return Err(e),
|
||||
HandshakeResult::Error(e) => {
|
||||
increment_bad_on_unknown_tls_sni(stats.as_ref(), &e);
|
||||
return Err(e);
|
||||
}
|
||||
};
|
||||
|
||||
debug!(peer = %peer, "Reading MTProto handshake through TLS");
|
||||
|
|
@ -893,7 +1210,7 @@ impl RunningClientHandler {
|
|||
.try_into()
|
||||
.map_err(|_| ProxyError::InvalidHandshake("Short MTProto handshake".into()))?;
|
||||
|
||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake_with_shared(
|
||||
&mtproto_handshake,
|
||||
tls_reader,
|
||||
tls_writer,
|
||||
|
|
@ -902,6 +1219,7 @@ impl RunningClientHandler {
|
|||
&replay_checker,
|
||||
true,
|
||||
Some(tls_user.as_str()),
|
||||
self.shared.as_ref(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
|
|
@ -938,7 +1256,7 @@ impl RunningClientHandler {
|
|||
};
|
||||
|
||||
Ok(HandshakeOutcome::NeedsRelay(Box::pin(
|
||||
Self::handle_authenticated_static(
|
||||
Self::handle_authenticated_static_with_shared(
|
||||
crypto_reader,
|
||||
crypto_writer,
|
||||
success,
|
||||
|
|
@ -952,6 +1270,7 @@ impl RunningClientHandler {
|
|||
local_addr,
|
||||
peer,
|
||||
self.ip_tracker,
|
||||
self.shared,
|
||||
),
|
||||
)))
|
||||
}
|
||||
|
|
@ -990,7 +1309,7 @@ impl RunningClientHandler {
|
|||
|
||||
let (read_half, write_half) = self.stream.into_split();
|
||||
|
||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake_with_shared(
|
||||
&handshake,
|
||||
read_half,
|
||||
write_half,
|
||||
|
|
@ -999,6 +1318,7 @@ impl RunningClientHandler {
|
|||
&replay_checker,
|
||||
false,
|
||||
None,
|
||||
self.shared.as_ref(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
|
|
@ -1019,7 +1339,7 @@ impl RunningClientHandler {
|
|||
};
|
||||
|
||||
Ok(HandshakeOutcome::NeedsRelay(Box::pin(
|
||||
Self::handle_authenticated_static(
|
||||
Self::handle_authenticated_static_with_shared(
|
||||
crypto_reader,
|
||||
crypto_writer,
|
||||
success,
|
||||
|
|
@ -1033,6 +1353,7 @@ impl RunningClientHandler {
|
|||
local_addr,
|
||||
peer,
|
||||
self.ip_tracker,
|
||||
self.shared,
|
||||
),
|
||||
)))
|
||||
}
|
||||
|
|
@ -1041,6 +1362,7 @@ impl RunningClientHandler {
|
|||
/// Two modes:
|
||||
/// - Direct: TCP relay to TG DC (existing behavior)
|
||||
/// - Middle Proxy: RPC multiplex through ME pool (new — supports CDN DCs)
|
||||
#[cfg(test)]
|
||||
async fn handle_authenticated_static<R, W>(
|
||||
client_reader: CryptoReader<R>,
|
||||
client_writer: CryptoWriter<W>,
|
||||
|
|
@ -1056,6 +1378,45 @@ impl RunningClientHandler {
|
|||
peer_addr: SocketAddr,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
) -> Result<()>
|
||||
where
|
||||
R: AsyncRead + Unpin + Send + 'static,
|
||||
W: AsyncWrite + Unpin + Send + 'static,
|
||||
{
|
||||
Self::handle_authenticated_static_with_shared(
|
||||
client_reader,
|
||||
client_writer,
|
||||
success,
|
||||
upstream_manager,
|
||||
stats,
|
||||
config,
|
||||
buffer_pool,
|
||||
rng,
|
||||
me_pool,
|
||||
route_runtime,
|
||||
local_addr,
|
||||
peer_addr,
|
||||
ip_tracker,
|
||||
ProxySharedState::new(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
async fn handle_authenticated_static_with_shared<R, W>(
|
||||
client_reader: CryptoReader<R>,
|
||||
client_writer: CryptoWriter<W>,
|
||||
success: HandshakeSuccess,
|
||||
upstream_manager: Arc<UpstreamManager>,
|
||||
stats: Arc<Stats>,
|
||||
config: Arc<ProxyConfig>,
|
||||
buffer_pool: Arc<BufferPool>,
|
||||
rng: Arc<SecureRandom>,
|
||||
me_pool: Option<Arc<MePool>>,
|
||||
route_runtime: Arc<RouteRuntimeController>,
|
||||
local_addr: SocketAddr,
|
||||
peer_addr: SocketAddr,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
shared: Arc<ProxySharedState>,
|
||||
) -> Result<()>
|
||||
where
|
||||
R: AsyncRead + Unpin + Send + 'static,
|
||||
W: AsyncWrite + Unpin + Send + 'static,
|
||||
|
|
@ -1097,11 +1458,12 @@ impl RunningClientHandler {
|
|||
route_runtime.subscribe(),
|
||||
route_snapshot,
|
||||
session_id,
|
||||
shared.clone(),
|
||||
)
|
||||
.await
|
||||
} else {
|
||||
warn!("use_middle_proxy=true but MePool not initialized, falling back to direct");
|
||||
handle_via_direct(
|
||||
handle_via_direct_with_shared(
|
||||
client_reader,
|
||||
client_writer,
|
||||
success,
|
||||
|
|
@ -1113,12 +1475,14 @@ impl RunningClientHandler {
|
|||
route_runtime.subscribe(),
|
||||
route_snapshot,
|
||||
session_id,
|
||||
local_addr,
|
||||
shared.clone(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
} else {
|
||||
// Direct mode (original behavior)
|
||||
handle_via_direct(
|
||||
handle_via_direct_with_shared(
|
||||
client_reader,
|
||||
client_writer,
|
||||
success,
|
||||
|
|
@ -1130,6 +1494,8 @@ impl RunningClientHandler {
|
|||
route_runtime.subscribe(),
|
||||
route_snapshot,
|
||||
session_id,
|
||||
local_addr,
|
||||
shared.clone(),
|
||||
)
|
||||
.await
|
||||
};
|
||||
|
|
@ -1153,7 +1519,7 @@ impl RunningClientHandler {
|
|||
}
|
||||
|
||||
if let Some(quota) = config.access.user_data_quota.get(user)
|
||||
&& stats.get_user_total_octets(user) >= *quota
|
||||
&& stats.get_user_quota_used(user) >= *quota
|
||||
{
|
||||
return Err(ProxyError::DataQuotaExceeded {
|
||||
user: user.to_string(),
|
||||
|
|
@ -1164,7 +1530,11 @@ impl RunningClientHandler {
|
|||
.access
|
||||
.user_max_tcp_conns
|
||||
.get(user)
|
||||
.map(|v| *v as u64);
|
||||
.copied()
|
||||
.filter(|limit| *limit > 0)
|
||||
.or((config.access.user_max_tcp_conns_global_each > 0)
|
||||
.then_some(config.access.user_max_tcp_conns_global_each))
|
||||
.map(|v| v as u64);
|
||||
if !stats.try_acquire_user_curr_connects(user, limit) {
|
||||
return Err(ProxyError::ConnectionLimitExceeded {
|
||||
user: user.to_string(),
|
||||
|
|
@ -1212,7 +1582,7 @@ impl RunningClientHandler {
|
|||
}
|
||||
|
||||
if let Some(quota) = config.access.user_data_quota.get(user)
|
||||
&& stats.get_user_total_octets(user) >= *quota
|
||||
&& stats.get_user_quota_used(user) >= *quota
|
||||
{
|
||||
return Err(ProxyError::DataQuotaExceeded {
|
||||
user: user.to_string(),
|
||||
|
|
@ -1223,7 +1593,11 @@ impl RunningClientHandler {
|
|||
.access
|
||||
.user_max_tcp_conns
|
||||
.get(user)
|
||||
.map(|v| *v as u64);
|
||||
.copied()
|
||||
.filter(|limit| *limit > 0)
|
||||
.or((config.access.user_max_tcp_conns_global_each > 0)
|
||||
.then_some(config.access.user_max_tcp_conns_global_each))
|
||||
.map(|v| v as u64);
|
||||
if !stats.try_acquire_user_curr_connects(user, limit) {
|
||||
return Err(ProxyError::ConnectionLimitExceeded {
|
||||
user: user.to_string(),
|
||||
|
|
@ -1321,6 +1695,38 @@ mod masking_shape_classifier_fuzz_redteam_expected_fail_tests;
|
|||
#[path = "tests/client_masking_probe_evasion_blackhat_tests.rs"]
|
||||
mod masking_probe_evasion_blackhat_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_masking_fragmented_classifier_security_tests.rs"]
|
||||
mod masking_fragmented_classifier_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_masking_replay_timing_security_tests.rs"]
|
||||
mod masking_replay_timing_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_masking_http2_fragmented_preface_security_tests.rs"]
|
||||
mod masking_http2_fragmented_preface_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_masking_prefetch_invariant_security_tests.rs"]
|
||||
mod masking_prefetch_invariant_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_masking_prefetch_timing_matrix_security_tests.rs"]
|
||||
mod masking_prefetch_timing_matrix_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_masking_prefetch_config_runtime_security_tests.rs"]
|
||||
mod masking_prefetch_config_runtime_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_masking_prefetch_config_pipeline_integration_security_tests.rs"]
|
||||
mod masking_prefetch_config_pipeline_integration_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_masking_prefetch_strict_boundary_security_tests.rs"]
|
||||
mod masking_prefetch_strict_boundary_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_beobachten_ttl_bounds_security_tests.rs"]
|
||||
mod beobachten_ttl_bounds_security_tests;
|
||||
|
|
@ -1328,3 +1734,15 @@ mod beobachten_ttl_bounds_security_tests;
|
|||
#[cfg(test)]
|
||||
#[path = "tests/client_tls_record_wrap_hardening_security_tests.rs"]
|
||||
mod tls_record_wrap_hardening_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_clever_advanced_tests.rs"]
|
||||
mod client_clever_advanced_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_more_advanced_tests.rs"]
|
||||
mod client_more_advanced_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/client_deep_invariants_tests.rs"]
|
||||
mod client_deep_invariants_tests;
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@ use std::net::SocketAddr;
|
|||
use std::path::{Component, Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::sync::{Mutex, OnceLock};
|
||||
use std::time::Duration;
|
||||
|
||||
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadHalf, WriteHalf, split};
|
||||
use tokio::sync::watch;
|
||||
|
|
@ -16,11 +17,13 @@ use crate::crypto::SecureRandom;
|
|||
use crate::error::{ProxyError, Result};
|
||||
use crate::protocol::constants::*;
|
||||
use crate::proxy::handshake::{HandshakeSuccess, encrypt_tg_nonce_with_ciphers, generate_tg_nonce};
|
||||
use crate::proxy::relay::relay_bidirectional;
|
||||
use crate::proxy::route_mode::{
|
||||
ROUTE_SWITCH_ERROR_MSG, RelayRouteMode, RouteCutoverState, affected_cutover_state,
|
||||
cutover_stagger_delay,
|
||||
};
|
||||
use crate::proxy::shared_state::{
|
||||
ConntrackCloseEvent, ConntrackClosePublishResult, ConntrackCloseReason, ProxySharedState,
|
||||
};
|
||||
use crate::stats::Stats;
|
||||
use crate::stream::{BufferPool, CryptoReader, CryptoWriter};
|
||||
use crate::transport::UpstreamManager;
|
||||
|
|
@ -225,7 +228,43 @@ fn unknown_dc_test_lock() -> &'static Mutex<()> {
|
|||
TEST_LOCK.get_or_init(|| Mutex::new(()))
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub(crate) async fn handle_via_direct<R, W>(
|
||||
client_reader: CryptoReader<R>,
|
||||
client_writer: CryptoWriter<W>,
|
||||
success: HandshakeSuccess,
|
||||
upstream_manager: Arc<UpstreamManager>,
|
||||
stats: Arc<Stats>,
|
||||
config: Arc<ProxyConfig>,
|
||||
buffer_pool: Arc<BufferPool>,
|
||||
rng: Arc<SecureRandom>,
|
||||
route_rx: watch::Receiver<RouteCutoverState>,
|
||||
route_snapshot: RouteCutoverState,
|
||||
session_id: u64,
|
||||
) -> Result<()>
|
||||
where
|
||||
R: AsyncRead + Unpin + Send + 'static,
|
||||
W: AsyncWrite + Unpin + Send + 'static,
|
||||
{
|
||||
handle_via_direct_with_shared(
|
||||
client_reader,
|
||||
client_writer,
|
||||
success,
|
||||
upstream_manager,
|
||||
stats,
|
||||
config.clone(),
|
||||
buffer_pool,
|
||||
rng,
|
||||
route_rx,
|
||||
route_snapshot,
|
||||
session_id,
|
||||
SocketAddr::from(([0, 0, 0, 0], config.server.port)),
|
||||
ProxySharedState::new(),
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
pub(crate) async fn handle_via_direct_with_shared<R, W>(
|
||||
client_reader: CryptoReader<R>,
|
||||
client_writer: CryptoWriter<W>,
|
||||
success: HandshakeSuccess,
|
||||
|
|
@ -237,6 +276,8 @@ pub(crate) async fn handle_via_direct<R, W>(
|
|||
mut route_rx: watch::Receiver<RouteCutoverState>,
|
||||
route_snapshot: RouteCutoverState,
|
||||
session_id: u64,
|
||||
local_addr: SocketAddr,
|
||||
shared: Arc<ProxySharedState>,
|
||||
) -> Result<()>
|
||||
where
|
||||
R: AsyncRead + Unpin + Send + 'static,
|
||||
|
|
@ -276,7 +317,19 @@ where
|
|||
stats.increment_user_connects(user);
|
||||
let _direct_connection_lease = stats.acquire_direct_connection_lease();
|
||||
|
||||
let relay_result = relay_bidirectional(
|
||||
let buffer_pool_trim = Arc::clone(&buffer_pool);
|
||||
let relay_activity_timeout = if shared.conntrack_pressure_active() {
|
||||
Duration::from_secs(
|
||||
config
|
||||
.server
|
||||
.conntrack_control
|
||||
.profile
|
||||
.direct_activity_timeout_secs(),
|
||||
)
|
||||
} else {
|
||||
Duration::from_secs(1800)
|
||||
};
|
||||
let relay_result = crate::proxy::relay::relay_bidirectional_with_activity_timeout(
|
||||
client_reader,
|
||||
client_writer,
|
||||
tg_reader,
|
||||
|
|
@ -287,6 +340,7 @@ where
|
|||
Arc::clone(&stats),
|
||||
config.access.user_data_quota.get(user).copied(),
|
||||
buffer_pool,
|
||||
relay_activity_timeout,
|
||||
);
|
||||
tokio::pin!(relay_result);
|
||||
let relay_result = loop {
|
||||
|
|
@ -321,9 +375,59 @@ where
|
|||
Err(e) => debug!(user = %user, error = %e, "Direct relay ended with error"),
|
||||
}
|
||||
|
||||
buffer_pool_trim.trim_to(buffer_pool_trim.max_buffers().min(64));
|
||||
let pool_snapshot = buffer_pool_trim.stats();
|
||||
stats.set_buffer_pool_gauges(
|
||||
pool_snapshot.pooled,
|
||||
pool_snapshot.allocated,
|
||||
pool_snapshot.allocated.saturating_sub(pool_snapshot.pooled),
|
||||
);
|
||||
|
||||
let close_reason = classify_conntrack_close_reason(&relay_result);
|
||||
let publish_result = shared.publish_conntrack_close_event(ConntrackCloseEvent {
|
||||
src: success.peer,
|
||||
dst: local_addr,
|
||||
reason: close_reason,
|
||||
});
|
||||
if !matches!(
|
||||
publish_result,
|
||||
ConntrackClosePublishResult::Sent | ConntrackClosePublishResult::Disabled
|
||||
) {
|
||||
stats.increment_conntrack_close_event_drop_total();
|
||||
}
|
||||
|
||||
relay_result
|
||||
}
|
||||
|
||||
fn classify_conntrack_close_reason(result: &Result<()>) -> ConntrackCloseReason {
|
||||
match result {
|
||||
Ok(()) => ConntrackCloseReason::NormalEof,
|
||||
Err(crate::error::ProxyError::Io(error))
|
||||
if matches!(error.kind(), std::io::ErrorKind::TimedOut) =>
|
||||
{
|
||||
ConntrackCloseReason::Timeout
|
||||
}
|
||||
Err(crate::error::ProxyError::Io(error))
|
||||
if matches!(
|
||||
error.kind(),
|
||||
std::io::ErrorKind::ConnectionReset
|
||||
| std::io::ErrorKind::ConnectionAborted
|
||||
| std::io::ErrorKind::BrokenPipe
|
||||
| std::io::ErrorKind::NotConnected
|
||||
| std::io::ErrorKind::UnexpectedEof
|
||||
) =>
|
||||
{
|
||||
ConntrackCloseReason::Reset
|
||||
}
|
||||
Err(crate::error::ProxyError::Proxy(message))
|
||||
if message.contains("pressure") || message.contains("evicted") =>
|
||||
{
|
||||
ConntrackCloseReason::Pressure
|
||||
}
|
||||
Err(_) => ConntrackCloseReason::Other,
|
||||
}
|
||||
}
|
||||
|
||||
fn get_dc_addr_static(dc_idx: i16, config: &ProxyConfig) -> Result<SocketAddr> {
|
||||
let prefer_v6 = config.network.prefer == 6 && config.network.ipv6.unwrap_or(true);
|
||||
let datacenters = if prefer_v6 {
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -4,14 +4,23 @@ use crate::config::ProxyConfig;
|
|||
use crate::network::dns_overrides::resolve_socket_addr;
|
||||
use crate::stats::beobachten::BeobachtenStore;
|
||||
use crate::transport::proxy_protocol::{ProxyProtocolV1Builder, ProxyProtocolV2Builder};
|
||||
use rand::{Rng, RngExt};
|
||||
use std::net::SocketAddr;
|
||||
#[cfg(unix)]
|
||||
use nix::ifaddrs::getifaddrs;
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, RngExt, SeedableRng};
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::str;
|
||||
use std::time::Duration;
|
||||
#[cfg(test)]
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
#[cfg(unix)]
|
||||
use std::sync::{Mutex, OnceLock};
|
||||
use std::time::{Duration, Instant as StdInstant};
|
||||
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
|
||||
use tokio::net::TcpStream;
|
||||
#[cfg(unix)]
|
||||
use tokio::net::UnixStream;
|
||||
#[cfg(unix)]
|
||||
use tokio::sync::Mutex as AsyncMutex;
|
||||
use tokio::time::{Instant, timeout};
|
||||
use tracing::debug;
|
||||
|
||||
|
|
@ -30,28 +39,55 @@ const MASK_RELAY_IDLE_TIMEOUT: Duration = Duration::from_secs(5);
|
|||
#[cfg(test)]
|
||||
const MASK_RELAY_IDLE_TIMEOUT: Duration = Duration::from_millis(100);
|
||||
const MASK_BUFFER_SIZE: usize = 8192;
|
||||
#[cfg(unix)]
|
||||
#[cfg(not(test))]
|
||||
const LOCAL_INTERFACE_CACHE_TTL: Duration = Duration::from_secs(300);
|
||||
#[cfg(all(unix, test))]
|
||||
const LOCAL_INTERFACE_CACHE_TTL: Duration = Duration::from_secs(1);
|
||||
|
||||
struct CopyOutcome {
|
||||
total: usize,
|
||||
ended_by_eof: bool,
|
||||
}
|
||||
|
||||
async fn copy_with_idle_timeout<R, W>(reader: &mut R, writer: &mut W) -> CopyOutcome
|
||||
async fn copy_with_idle_timeout<R, W>(
|
||||
reader: &mut R,
|
||||
writer: &mut W,
|
||||
byte_cap: usize,
|
||||
shutdown_on_eof: bool,
|
||||
) -> CopyOutcome
|
||||
where
|
||||
R: AsyncRead + Unpin,
|
||||
W: AsyncWrite + Unpin,
|
||||
{
|
||||
let mut buf = [0u8; MASK_BUFFER_SIZE];
|
||||
let mut buf = Box::new([0u8; MASK_BUFFER_SIZE]);
|
||||
let mut total = 0usize;
|
||||
let mut ended_by_eof = false;
|
||||
|
||||
if byte_cap == 0 {
|
||||
return CopyOutcome {
|
||||
total,
|
||||
ended_by_eof,
|
||||
};
|
||||
}
|
||||
|
||||
loop {
|
||||
let read_res = timeout(MASK_RELAY_IDLE_TIMEOUT, reader.read(&mut buf)).await;
|
||||
let remaining_budget = byte_cap.saturating_sub(total);
|
||||
if remaining_budget == 0 {
|
||||
break;
|
||||
}
|
||||
|
||||
let read_len = remaining_budget.min(MASK_BUFFER_SIZE);
|
||||
let read_res = timeout(MASK_RELAY_IDLE_TIMEOUT, reader.read(&mut buf[..read_len])).await;
|
||||
let n = match read_res {
|
||||
Ok(Ok(n)) => n,
|
||||
Ok(Err(_)) | Err(_) => break,
|
||||
};
|
||||
if n == 0 {
|
||||
ended_by_eof = true;
|
||||
if shutdown_on_eof {
|
||||
let _ = timeout(MASK_RELAY_IDLE_TIMEOUT, writer.shutdown()).await;
|
||||
}
|
||||
break;
|
||||
}
|
||||
total = total.saturating_add(n);
|
||||
|
|
@ -68,6 +104,31 @@ where
|
|||
}
|
||||
}
|
||||
|
||||
fn is_http_probe(data: &[u8]) -> bool {
|
||||
// RFC 7540 section 3.5: HTTP/2 client preface starts with "PRI ".
|
||||
const HTTP_METHODS: [&[u8]; 10] = [
|
||||
b"GET ", b"POST", b"HEAD", b"PUT ", b"DELETE", b"OPTIONS", b"CONNECT", b"TRACE", b"PATCH",
|
||||
b"PRI ",
|
||||
];
|
||||
|
||||
if data.is_empty() {
|
||||
return false;
|
||||
}
|
||||
|
||||
let window = &data[..data.len().min(16)];
|
||||
for method in HTTP_METHODS {
|
||||
if data.len() >= method.len() && window.starts_with(method) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if (2..=3).contains(&window.len()) && method.starts_with(window) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
fn next_mask_shape_bucket(total: usize, floor: usize, cap: usize) -> usize {
|
||||
if total == 0 || floor == 0 || cap < floor {
|
||||
return total;
|
||||
|
|
@ -125,6 +186,11 @@ async fn maybe_write_shape_padding<W>(
|
|||
let mut remaining = target_total - total_sent;
|
||||
let mut pad_chunk = [0u8; 1024];
|
||||
let deadline = Instant::now() + MASK_TIMEOUT;
|
||||
// Use a Send RNG so relay futures remain spawn-safe under Tokio.
|
||||
let mut rng = {
|
||||
let mut seed_source = rand::rng();
|
||||
StdRng::from_rng(&mut seed_source)
|
||||
};
|
||||
|
||||
while remaining > 0 {
|
||||
let now = Instant::now();
|
||||
|
|
@ -133,10 +199,7 @@ async fn maybe_write_shape_padding<W>(
|
|||
}
|
||||
|
||||
let write_len = remaining.min(pad_chunk.len());
|
||||
{
|
||||
let mut rng = rand::rng();
|
||||
rng.fill_bytes(&mut pad_chunk[..write_len]);
|
||||
}
|
||||
rng.fill_bytes(&mut pad_chunk[..write_len]);
|
||||
let write_budget = deadline.saturating_duration_since(now);
|
||||
match timeout(write_budget, mask_write.write_all(&pad_chunk[..write_len])).await {
|
||||
Ok(Ok(())) => {}
|
||||
|
|
@ -167,11 +230,11 @@ where
|
|||
}
|
||||
}
|
||||
|
||||
async fn consume_client_data_with_timeout<R>(reader: R)
|
||||
async fn consume_client_data_with_timeout_and_cap<R>(reader: R, byte_cap: usize)
|
||||
where
|
||||
R: AsyncRead + Unpin,
|
||||
{
|
||||
if timeout(MASK_RELAY_TIMEOUT, consume_client_data(reader))
|
||||
if timeout(MASK_RELAY_TIMEOUT, consume_client_data(reader, byte_cap))
|
||||
.await
|
||||
.is_err()
|
||||
{
|
||||
|
|
@ -186,15 +249,63 @@ async fn wait_mask_connect_budget(started: Instant) {
|
|||
}
|
||||
}
|
||||
|
||||
// Log-normal sample bounded to [floor, ceiling]. Median = sqrt(floor * ceiling).
|
||||
// Implements Box-Muller transform for standard normal sampling — no external
|
||||
// dependency on rand_distr (which is incompatible with rand 0.10).
|
||||
// sigma is chosen so ~99% of raw samples land inside [floor, ceiling] before clamp.
|
||||
// When floor > ceiling (misconfiguration), returns ceiling (the smaller value).
|
||||
// When floor == ceiling, returns that value. When both are 0, returns 0.
|
||||
pub(crate) fn sample_lognormal_percentile_bounded(
|
||||
floor: u64,
|
||||
ceiling: u64,
|
||||
rng: &mut impl Rng,
|
||||
) -> u64 {
|
||||
if ceiling == 0 && floor == 0 {
|
||||
return 0;
|
||||
}
|
||||
if floor > ceiling {
|
||||
return ceiling;
|
||||
}
|
||||
if floor == ceiling {
|
||||
return floor;
|
||||
}
|
||||
let floor_f = floor.max(1) as f64;
|
||||
let ceiling_f = ceiling.max(1) as f64;
|
||||
let mu = (floor_f.ln() + ceiling_f.ln()) / 2.0;
|
||||
// 4.65 ≈ 2 * 2.326 (double-sided z-score for 99th percentile)
|
||||
let sigma = ((ceiling_f / floor_f).ln() / 4.65).max(0.01);
|
||||
// Box-Muller transform: two uniform samples → one standard normal sample
|
||||
let u1: f64 = rng.random_range(f64::MIN_POSITIVE..1.0);
|
||||
let u2: f64 = rng.random_range(0.0_f64..std::f64::consts::TAU);
|
||||
let normal_sample = (-2.0_f64 * u1.ln()).sqrt() * u2.cos();
|
||||
let raw = (mu + sigma * normal_sample).exp();
|
||||
if raw.is_finite() {
|
||||
(raw as u64).clamp(floor, ceiling)
|
||||
} else {
|
||||
((floor_f * ceiling_f).sqrt()) as u64
|
||||
}
|
||||
}
|
||||
|
||||
fn mask_outcome_target_budget(config: &ProxyConfig) -> Duration {
|
||||
if config.censorship.mask_timing_normalization_enabled {
|
||||
let floor = config.censorship.mask_timing_normalization_floor_ms;
|
||||
let ceiling = config.censorship.mask_timing_normalization_ceiling_ms;
|
||||
if floor == 0 {
|
||||
if ceiling == 0 {
|
||||
return Duration::from_millis(0);
|
||||
}
|
||||
// floor=0 stays uniform: log-normal cannot model distribution anchored at zero
|
||||
let mut rng = rand::rng();
|
||||
return Duration::from_millis(rng.random_range(0..=ceiling));
|
||||
}
|
||||
if ceiling > floor {
|
||||
let mut rng = rand::rng();
|
||||
return Duration::from_millis(rng.random_range(floor..=ceiling));
|
||||
return Duration::from_millis(sample_lognormal_percentile_bounded(
|
||||
floor, ceiling, &mut rng,
|
||||
));
|
||||
}
|
||||
return Duration::from_millis(floor);
|
||||
// ceiling <= floor: use the larger value (fail-closed: preserve longer delay)
|
||||
return Duration::from_millis(floor.max(ceiling));
|
||||
}
|
||||
|
||||
MASK_TIMEOUT
|
||||
|
|
@ -219,14 +330,7 @@ async fn wait_mask_outcome_budget(started: Instant, config: &ProxyConfig) {
|
|||
/// Detect client type based on initial data
|
||||
fn detect_client_type(data: &[u8]) -> &'static str {
|
||||
// Check for HTTP request
|
||||
if data.len() > 4
|
||||
&& (data.starts_with(b"GET ")
|
||||
|| data.starts_with(b"POST")
|
||||
|| data.starts_with(b"HEAD")
|
||||
|| data.starts_with(b"PUT ")
|
||||
|| data.starts_with(b"DELETE")
|
||||
|| data.starts_with(b"OPTIONS"))
|
||||
{
|
||||
if is_http_probe(data) {
|
||||
return "HTTP";
|
||||
}
|
||||
|
||||
|
|
@ -248,6 +352,247 @@ fn detect_client_type(data: &[u8]) -> &'static str {
|
|||
"unknown"
|
||||
}
|
||||
|
||||
fn parse_mask_host_ip_literal(host: &str) -> Option<IpAddr> {
|
||||
if host.starts_with('[') && host.ends_with(']') {
|
||||
return host[1..host.len() - 1].parse::<IpAddr>().ok();
|
||||
}
|
||||
host.parse::<IpAddr>().ok()
|
||||
}
|
||||
|
||||
fn canonical_ip(ip: IpAddr) -> IpAddr {
|
||||
match ip {
|
||||
IpAddr::V6(v6) => v6
|
||||
.to_ipv4_mapped()
|
||||
.map(IpAddr::V4)
|
||||
.unwrap_or(IpAddr::V6(v6)),
|
||||
IpAddr::V4(v4) => IpAddr::V4(v4),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
fn collect_local_interface_ips() -> Vec<IpAddr> {
|
||||
#[cfg(test)]
|
||||
LOCAL_INTERFACE_ENUMERATIONS.fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
let mut out = Vec::new();
|
||||
if let Ok(addrs) = getifaddrs() {
|
||||
for iface in addrs {
|
||||
if let Some(address) = iface.address {
|
||||
if let Some(v4) = address.as_sockaddr_in() {
|
||||
out.push(canonical_ip(IpAddr::V4(v4.ip())));
|
||||
} else if let Some(v6) = address.as_sockaddr_in6() {
|
||||
out.push(canonical_ip(IpAddr::V6(v6.ip())));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
fn choose_interface_snapshot(previous: &[IpAddr], refreshed: Vec<IpAddr>) -> Vec<IpAddr> {
|
||||
if refreshed.is_empty() && !previous.is_empty() {
|
||||
return previous.to_vec();
|
||||
}
|
||||
|
||||
refreshed
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
#[derive(Default)]
|
||||
struct LocalInterfaceCache {
|
||||
ips: Vec<IpAddr>,
|
||||
refreshed_at: Option<StdInstant>,
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
static LOCAL_INTERFACE_CACHE: OnceLock<Mutex<LocalInterfaceCache>> = OnceLock::new();
|
||||
|
||||
#[cfg(unix)]
|
||||
static LOCAL_INTERFACE_REFRESH_LOCK: OnceLock<AsyncMutex<()>> = OnceLock::new();
|
||||
|
||||
#[cfg(all(unix, test))]
|
||||
fn local_interface_ips() -> Vec<IpAddr> {
|
||||
let cache = LOCAL_INTERFACE_CACHE.get_or_init(|| Mutex::new(LocalInterfaceCache::default()));
|
||||
let mut guard = cache.lock().unwrap_or_else(|poison| poison.into_inner());
|
||||
|
||||
let stale = guard
|
||||
.refreshed_at
|
||||
.is_none_or(|at| at.elapsed() >= LOCAL_INTERFACE_CACHE_TTL);
|
||||
if stale {
|
||||
let refreshed = collect_local_interface_ips();
|
||||
guard.ips = choose_interface_snapshot(&guard.ips, refreshed);
|
||||
guard.refreshed_at = Some(StdInstant::now());
|
||||
}
|
||||
|
||||
guard.ips.clone()
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
async fn local_interface_ips_async() -> Vec<IpAddr> {
|
||||
let cache = LOCAL_INTERFACE_CACHE.get_or_init(|| Mutex::new(LocalInterfaceCache::default()));
|
||||
|
||||
{
|
||||
let guard = cache.lock().unwrap_or_else(|poison| poison.into_inner());
|
||||
let stale = guard
|
||||
.refreshed_at
|
||||
.is_none_or(|at| at.elapsed() >= LOCAL_INTERFACE_CACHE_TTL);
|
||||
if !stale {
|
||||
return guard.ips.clone();
|
||||
}
|
||||
}
|
||||
|
||||
let refresh_lock = LOCAL_INTERFACE_REFRESH_LOCK.get_or_init(|| AsyncMutex::new(()));
|
||||
let _refresh_guard = refresh_lock.lock().await;
|
||||
|
||||
{
|
||||
let guard = cache.lock().unwrap_or_else(|poison| poison.into_inner());
|
||||
let stale = guard
|
||||
.refreshed_at
|
||||
.is_none_or(|at| at.elapsed() >= LOCAL_INTERFACE_CACHE_TTL);
|
||||
if !stale {
|
||||
return guard.ips.clone();
|
||||
}
|
||||
}
|
||||
|
||||
let refreshed = tokio::task::spawn_blocking(collect_local_interface_ips)
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
|
||||
let mut guard = cache.lock().unwrap_or_else(|poison| poison.into_inner());
|
||||
let stale = guard
|
||||
.refreshed_at
|
||||
.is_none_or(|at| at.elapsed() >= LOCAL_INTERFACE_CACHE_TTL);
|
||||
if stale {
|
||||
guard.ips = choose_interface_snapshot(&guard.ips, refreshed);
|
||||
guard.refreshed_at = Some(StdInstant::now());
|
||||
}
|
||||
|
||||
guard.ips.clone()
|
||||
}
|
||||
|
||||
#[cfg(all(not(unix), test))]
|
||||
fn local_interface_ips() -> Vec<IpAddr> {
|
||||
Vec::new()
|
||||
}
|
||||
|
||||
#[cfg(not(unix))]
|
||||
async fn local_interface_ips_async() -> Vec<IpAddr> {
|
||||
Vec::new()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
static LOCAL_INTERFACE_ENUMERATIONS: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
#[cfg(test)]
|
||||
fn reset_local_interface_enumerations_for_tests() {
|
||||
LOCAL_INTERFACE_ENUMERATIONS.store(0, Ordering::Relaxed);
|
||||
|
||||
#[cfg(unix)]
|
||||
if let Some(cache) = LOCAL_INTERFACE_CACHE.get() {
|
||||
let mut guard = cache.lock().unwrap_or_else(|poison| poison.into_inner());
|
||||
guard.ips.clear();
|
||||
guard.refreshed_at = None;
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn local_interface_enumerations_for_tests() -> usize {
|
||||
LOCAL_INTERFACE_ENUMERATIONS.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
fn is_mask_target_local_listener_with_interfaces(
|
||||
mask_host: &str,
|
||||
mask_port: u16,
|
||||
local_addr: SocketAddr,
|
||||
resolved_override: Option<SocketAddr>,
|
||||
interface_ips: &[IpAddr],
|
||||
) -> bool {
|
||||
if mask_port != local_addr.port() {
|
||||
return false;
|
||||
}
|
||||
|
||||
let local_ip = canonical_ip(local_addr.ip());
|
||||
let literal_mask_ip = parse_mask_host_ip_literal(mask_host).map(canonical_ip);
|
||||
|
||||
if let Some(addr) = resolved_override {
|
||||
let resolved_ip = canonical_ip(addr.ip());
|
||||
if resolved_ip == local_ip {
|
||||
return true;
|
||||
}
|
||||
|
||||
if local_ip.is_unspecified()
|
||||
&& (resolved_ip.is_loopback()
|
||||
|| resolved_ip.is_unspecified()
|
||||
|| interface_ips.contains(&resolved_ip))
|
||||
{
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(mask_ip) = literal_mask_ip {
|
||||
if mask_ip == local_ip {
|
||||
return true;
|
||||
}
|
||||
|
||||
if local_ip.is_unspecified()
|
||||
&& (mask_ip.is_loopback()
|
||||
|| mask_ip.is_unspecified()
|
||||
|| interface_ips.contains(&mask_ip))
|
||||
{
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn is_mask_target_local_listener(
|
||||
mask_host: &str,
|
||||
mask_port: u16,
|
||||
local_addr: SocketAddr,
|
||||
resolved_override: Option<SocketAddr>,
|
||||
) -> bool {
|
||||
if mask_port != local_addr.port() {
|
||||
return false;
|
||||
}
|
||||
|
||||
let interfaces = local_interface_ips();
|
||||
is_mask_target_local_listener_with_interfaces(
|
||||
mask_host,
|
||||
mask_port,
|
||||
local_addr,
|
||||
resolved_override,
|
||||
&interfaces,
|
||||
)
|
||||
}
|
||||
|
||||
async fn is_mask_target_local_listener_async(
|
||||
mask_host: &str,
|
||||
mask_port: u16,
|
||||
local_addr: SocketAddr,
|
||||
resolved_override: Option<SocketAddr>,
|
||||
) -> bool {
|
||||
if mask_port != local_addr.port() {
|
||||
return false;
|
||||
}
|
||||
|
||||
let interfaces = local_interface_ips_async().await;
|
||||
is_mask_target_local_listener_with_interfaces(
|
||||
mask_host,
|
||||
mask_port,
|
||||
local_addr,
|
||||
resolved_override,
|
||||
&interfaces,
|
||||
)
|
||||
}
|
||||
|
||||
fn masking_beobachten_ttl(config: &ProxyConfig) -> Duration {
|
||||
let minutes = config.general.beobachten_minutes;
|
||||
let clamped = minutes.clamp(1, 24 * 60);
|
||||
Duration::from_secs(clamped.saturating_mul(60))
|
||||
}
|
||||
|
||||
fn build_mask_proxy_header(
|
||||
version: u8,
|
||||
peer: SocketAddr,
|
||||
|
|
@ -290,13 +635,14 @@ pub async fn handle_bad_client<R, W>(
|
|||
{
|
||||
let client_type = detect_client_type(initial_data);
|
||||
if config.general.beobachten {
|
||||
let ttl = Duration::from_secs(config.general.beobachten_minutes.saturating_mul(60));
|
||||
let ttl = masking_beobachten_ttl(config);
|
||||
beobachten.record(client_type, peer.ip(), ttl);
|
||||
}
|
||||
|
||||
if !config.censorship.mask {
|
||||
// Masking disabled, just consume data
|
||||
consume_client_data_with_timeout(reader).await;
|
||||
consume_client_data_with_timeout_and_cap(reader, config.censorship.mask_relay_max_bytes)
|
||||
.await;
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -341,6 +687,7 @@ pub async fn handle_bad_client<R, W>(
|
|||
config.censorship.mask_shape_above_cap_blur,
|
||||
config.censorship.mask_shape_above_cap_blur_max_bytes,
|
||||
config.censorship.mask_shape_hardening_aggressive_mode,
|
||||
config.censorship.mask_relay_max_bytes,
|
||||
),
|
||||
)
|
||||
.await
|
||||
|
|
@ -353,12 +700,20 @@ pub async fn handle_bad_client<R, W>(
|
|||
Ok(Err(e)) => {
|
||||
wait_mask_connect_budget_if_needed(connect_started, config).await;
|
||||
debug!(error = %e, "Failed to connect to mask unix socket");
|
||||
consume_client_data_with_timeout(reader).await;
|
||||
consume_client_data_with_timeout_and_cap(
|
||||
reader,
|
||||
config.censorship.mask_relay_max_bytes,
|
||||
)
|
||||
.await;
|
||||
wait_mask_outcome_budget(outcome_started, config).await;
|
||||
}
|
||||
Err(_) => {
|
||||
debug!("Timeout connecting to mask unix socket");
|
||||
consume_client_data_with_timeout(reader).await;
|
||||
consume_client_data_with_timeout_and_cap(
|
||||
reader,
|
||||
config.censorship.mask_relay_max_bytes,
|
||||
)
|
||||
.await;
|
||||
wait_mask_outcome_budget(outcome_started, config).await;
|
||||
}
|
||||
}
|
||||
|
|
@ -372,6 +727,29 @@ pub async fn handle_bad_client<R, W>(
|
|||
.unwrap_or(&config.censorship.tls_domain);
|
||||
let mask_port = config.censorship.mask_port;
|
||||
|
||||
// Fail closed when fallback points at our own listener endpoint.
|
||||
// Self-referential masking can create recursive proxy loops under
|
||||
// misconfiguration and leak distinguishable load spikes to adversaries.
|
||||
let resolved_mask_addr = resolve_socket_addr(mask_host, mask_port);
|
||||
if is_mask_target_local_listener_async(mask_host, mask_port, local_addr, resolved_mask_addr)
|
||||
.await
|
||||
{
|
||||
let outcome_started = Instant::now();
|
||||
debug!(
|
||||
client_type = client_type,
|
||||
host = %mask_host,
|
||||
port = mask_port,
|
||||
local = %local_addr,
|
||||
"Mask target resolves to local listener; refusing self-referential masking fallback"
|
||||
);
|
||||
consume_client_data_with_timeout_and_cap(reader, config.censorship.mask_relay_max_bytes)
|
||||
.await;
|
||||
wait_mask_outcome_budget(outcome_started, config).await;
|
||||
return;
|
||||
}
|
||||
|
||||
let outcome_started = Instant::now();
|
||||
|
||||
debug!(
|
||||
client_type = client_type,
|
||||
host = %mask_host,
|
||||
|
|
@ -381,10 +759,9 @@ pub async fn handle_bad_client<R, W>(
|
|||
);
|
||||
|
||||
// Apply runtime DNS override for mask target when configured.
|
||||
let mask_addr = resolve_socket_addr(mask_host, mask_port)
|
||||
let mask_addr = resolved_mask_addr
|
||||
.map(|addr| addr.to_string())
|
||||
.unwrap_or_else(|| format!("{}:{}", mask_host, mask_port));
|
||||
let outcome_started = Instant::now();
|
||||
let connect_started = Instant::now();
|
||||
let connect_result = timeout(MASK_TIMEOUT, TcpStream::connect(&mask_addr)).await;
|
||||
match connect_result {
|
||||
|
|
@ -413,6 +790,7 @@ pub async fn handle_bad_client<R, W>(
|
|||
config.censorship.mask_shape_above_cap_blur,
|
||||
config.censorship.mask_shape_above_cap_blur_max_bytes,
|
||||
config.censorship.mask_shape_hardening_aggressive_mode,
|
||||
config.censorship.mask_relay_max_bytes,
|
||||
),
|
||||
)
|
||||
.await
|
||||
|
|
@ -425,12 +803,20 @@ pub async fn handle_bad_client<R, W>(
|
|||
Ok(Err(e)) => {
|
||||
wait_mask_connect_budget_if_needed(connect_started, config).await;
|
||||
debug!(error = %e, "Failed to connect to mask host");
|
||||
consume_client_data_with_timeout(reader).await;
|
||||
consume_client_data_with_timeout_and_cap(
|
||||
reader,
|
||||
config.censorship.mask_relay_max_bytes,
|
||||
)
|
||||
.await;
|
||||
wait_mask_outcome_budget(outcome_started, config).await;
|
||||
}
|
||||
Err(_) => {
|
||||
debug!("Timeout connecting to mask host");
|
||||
consume_client_data_with_timeout(reader).await;
|
||||
consume_client_data_with_timeout_and_cap(
|
||||
reader,
|
||||
config.censorship.mask_relay_max_bytes,
|
||||
)
|
||||
.await;
|
||||
wait_mask_outcome_budget(outcome_started, config).await;
|
||||
}
|
||||
}
|
||||
|
|
@ -449,6 +835,7 @@ async fn relay_to_mask<R, W, MR, MW>(
|
|||
shape_above_cap_blur: bool,
|
||||
shape_above_cap_blur_max_bytes: usize,
|
||||
shape_hardening_aggressive_mode: bool,
|
||||
mask_relay_max_bytes: usize,
|
||||
) where
|
||||
R: AsyncRead + Unpin + Send + 'static,
|
||||
W: AsyncWrite + Unpin + Send + 'static,
|
||||
|
|
@ -464,8 +851,18 @@ async fn relay_to_mask<R, W, MR, MW>(
|
|||
}
|
||||
|
||||
let (upstream_copy, downstream_copy) = tokio::join!(
|
||||
async { copy_with_idle_timeout(&mut reader, &mut mask_write).await },
|
||||
async { copy_with_idle_timeout(&mut mask_read, &mut writer).await }
|
||||
async {
|
||||
copy_with_idle_timeout(
|
||||
&mut reader,
|
||||
&mut mask_write,
|
||||
mask_relay_max_bytes,
|
||||
!shape_hardening_enabled,
|
||||
)
|
||||
.await
|
||||
},
|
||||
async {
|
||||
copy_with_idle_timeout(&mut mask_read, &mut writer, mask_relay_max_bytes, true).await
|
||||
}
|
||||
);
|
||||
|
||||
let total_sent = initial_data.len().saturating_add(upstream_copy.total);
|
||||
|
|
@ -491,13 +888,36 @@ async fn relay_to_mask<R, W, MR, MW>(
|
|||
let _ = writer.shutdown().await;
|
||||
}
|
||||
|
||||
/// Just consume all data from client without responding
|
||||
async fn consume_client_data<R: AsyncRead + Unpin>(mut reader: R) {
|
||||
let mut buf = vec![0u8; MASK_BUFFER_SIZE];
|
||||
while let Ok(n) = reader.read(&mut buf).await {
|
||||
/// Just consume all data from client without responding.
|
||||
async fn consume_client_data<R: AsyncRead + Unpin>(mut reader: R, byte_cap: usize) {
|
||||
if byte_cap == 0 {
|
||||
return;
|
||||
}
|
||||
|
||||
// Keep drain path fail-closed under slow-loris stalls.
|
||||
let mut buf = Box::new([0u8; MASK_BUFFER_SIZE]);
|
||||
let mut total = 0usize;
|
||||
|
||||
loop {
|
||||
let remaining_budget = byte_cap.saturating_sub(total);
|
||||
if remaining_budget == 0 {
|
||||
break;
|
||||
}
|
||||
|
||||
let read_len = remaining_budget.min(MASK_BUFFER_SIZE);
|
||||
let n = match timeout(MASK_RELAY_IDLE_TIMEOUT, reader.read(&mut buf[..read_len])).await {
|
||||
Ok(Ok(n)) => n,
|
||||
Ok(Err(_)) | Err(_) => break,
|
||||
};
|
||||
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
|
||||
total = total.saturating_add(n);
|
||||
if total >= byte_cap {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -521,6 +941,10 @@ mod masking_shape_above_cap_blur_security_tests;
|
|||
#[path = "tests/masking_timing_normalization_security_tests.rs"]
|
||||
mod masking_timing_normalization_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_timing_budget_coupling_security_tests.rs"]
|
||||
mod masking_timing_budget_coupling_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_ab_envelope_blur_integration_security_tests.rs"]
|
||||
mod masking_ab_envelope_blur_integration_security_tests;
|
||||
|
|
@ -548,3 +972,83 @@ mod masking_aggressive_mode_security_tests;
|
|||
#[cfg(test)]
|
||||
#[path = "tests/masking_timing_sidechannel_redteam_expected_fail_tests.rs"]
|
||||
mod masking_timing_sidechannel_redteam_expected_fail_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_self_target_loop_security_tests.rs"]
|
||||
mod masking_self_target_loop_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_classification_completeness_security_tests.rs"]
|
||||
mod masking_classification_completeness_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_relay_guardrails_security_tests.rs"]
|
||||
mod masking_relay_guardrails_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_connect_failure_close_matrix_security_tests.rs"]
|
||||
mod masking_connect_failure_close_matrix_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_additional_hardening_security_tests.rs"]
|
||||
mod masking_additional_hardening_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_consume_idle_timeout_security_tests.rs"]
|
||||
mod masking_consume_idle_timeout_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_http2_probe_classification_security_tests.rs"]
|
||||
mod masking_http2_probe_classification_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_http_probe_boundary_security_tests.rs"]
|
||||
mod masking_http_probe_boundary_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_rng_hoist_perf_regression_tests.rs"]
|
||||
mod masking_rng_hoist_perf_regression_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_http2_preface_integration_security_tests.rs"]
|
||||
mod masking_http2_preface_integration_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_consume_stress_adversarial_tests.rs"]
|
||||
mod masking_consume_stress_adversarial_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_interface_cache_security_tests.rs"]
|
||||
mod masking_interface_cache_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_interface_cache_defense_in_depth_security_tests.rs"]
|
||||
mod masking_interface_cache_defense_in_depth_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_interface_cache_concurrency_security_tests.rs"]
|
||||
mod masking_interface_cache_concurrency_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_production_cap_regression_security_tests.rs"]
|
||||
mod masking_production_cap_regression_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_extended_attack_surface_security_tests.rs"]
|
||||
mod masking_extended_attack_surface_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_padding_timeout_adversarial_tests.rs"]
|
||||
mod masking_padding_timeout_adversarial_tests;
|
||||
|
||||
#[cfg(all(test, feature = "redteam_offline_expected_fail"))]
|
||||
#[path = "tests/masking_offline_target_redteam_expected_fail_tests.rs"]
|
||||
mod masking_offline_target_redteam_expected_fail_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_baseline_invariant_tests.rs"]
|
||||
mod masking_baseline_invariant_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_lognormal_timing_security_tests.rs"]
|
||||
mod masking_lognormal_timing_security_tests;
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
113
src/proxy/mod.rs
113
src/proxy/mod.rs
|
|
@ -4,58 +4,58 @@
|
|||
#![cfg_attr(test, allow(warnings))]
|
||||
#![cfg_attr(not(test), forbid(clippy::undocumented_unsafe_blocks))]
|
||||
#![cfg_attr(
|
||||
not(test),
|
||||
deny(
|
||||
clippy::unwrap_used,
|
||||
clippy::expect_used,
|
||||
clippy::panic,
|
||||
clippy::todo,
|
||||
clippy::unimplemented,
|
||||
clippy::correctness,
|
||||
clippy::option_if_let_else,
|
||||
clippy::or_fun_call,
|
||||
clippy::branches_sharing_code,
|
||||
clippy::single_option_map,
|
||||
clippy::useless_let_if_seq,
|
||||
clippy::redundant_locals,
|
||||
clippy::cloned_ref_to_slice_refs,
|
||||
unsafe_code,
|
||||
clippy::await_holding_lock,
|
||||
clippy::await_holding_refcell_ref,
|
||||
clippy::debug_assert_with_mut_call,
|
||||
clippy::macro_use_imports,
|
||||
clippy::cast_ptr_alignment,
|
||||
clippy::cast_lossless,
|
||||
clippy::ptr_as_ptr,
|
||||
clippy::large_stack_arrays,
|
||||
clippy::same_functions_in_if_condition,
|
||||
trivial_casts,
|
||||
trivial_numeric_casts,
|
||||
unused_extern_crates,
|
||||
unused_import_braces,
|
||||
rust_2018_idioms
|
||||
)
|
||||
not(test),
|
||||
deny(
|
||||
clippy::unwrap_used,
|
||||
clippy::expect_used,
|
||||
clippy::panic,
|
||||
clippy::todo,
|
||||
clippy::unimplemented,
|
||||
clippy::correctness,
|
||||
clippy::option_if_let_else,
|
||||
clippy::or_fun_call,
|
||||
clippy::branches_sharing_code,
|
||||
clippy::single_option_map,
|
||||
clippy::useless_let_if_seq,
|
||||
clippy::redundant_locals,
|
||||
clippy::cloned_ref_to_slice_refs,
|
||||
unsafe_code,
|
||||
clippy::await_holding_lock,
|
||||
clippy::await_holding_refcell_ref,
|
||||
clippy::debug_assert_with_mut_call,
|
||||
clippy::macro_use_imports,
|
||||
clippy::cast_ptr_alignment,
|
||||
clippy::cast_lossless,
|
||||
clippy::ptr_as_ptr,
|
||||
clippy::large_stack_arrays,
|
||||
clippy::same_functions_in_if_condition,
|
||||
trivial_casts,
|
||||
trivial_numeric_casts,
|
||||
unused_extern_crates,
|
||||
unused_import_braces,
|
||||
rust_2018_idioms
|
||||
)
|
||||
)]
|
||||
#![cfg_attr(
|
||||
not(test),
|
||||
allow(
|
||||
clippy::use_self,
|
||||
clippy::redundant_closure,
|
||||
clippy::too_many_arguments,
|
||||
clippy::doc_markdown,
|
||||
clippy::missing_const_for_fn,
|
||||
clippy::unnecessary_operation,
|
||||
clippy::redundant_pub_crate,
|
||||
clippy::derive_partial_eq_without_eq,
|
||||
clippy::type_complexity,
|
||||
clippy::new_ret_no_self,
|
||||
clippy::cast_possible_truncation,
|
||||
clippy::cast_possible_wrap,
|
||||
clippy::significant_drop_tightening,
|
||||
clippy::significant_drop_in_scrutinee,
|
||||
clippy::float_cmp,
|
||||
clippy::nursery
|
||||
)
|
||||
not(test),
|
||||
allow(
|
||||
clippy::use_self,
|
||||
clippy::redundant_closure,
|
||||
clippy::too_many_arguments,
|
||||
clippy::doc_markdown,
|
||||
clippy::missing_const_for_fn,
|
||||
clippy::unnecessary_operation,
|
||||
clippy::redundant_pub_crate,
|
||||
clippy::derive_partial_eq_without_eq,
|
||||
clippy::type_complexity,
|
||||
clippy::new_ret_no_self,
|
||||
clippy::cast_possible_truncation,
|
||||
clippy::cast_possible_wrap,
|
||||
clippy::significant_drop_tightening,
|
||||
clippy::significant_drop_in_scrutinee,
|
||||
clippy::float_cmp,
|
||||
clippy::nursery
|
||||
)
|
||||
)]
|
||||
|
||||
pub mod adaptive_buffers;
|
||||
|
|
@ -67,6 +67,7 @@ pub mod middle_relay;
|
|||
pub mod relay;
|
||||
pub mod route_mode;
|
||||
pub mod session_eviction;
|
||||
pub mod shared_state;
|
||||
|
||||
pub use client::ClientHandler;
|
||||
#[allow(unused_imports)]
|
||||
|
|
@ -75,3 +76,15 @@ pub use handshake::*;
|
|||
pub use masking::*;
|
||||
#[allow(unused_imports)]
|
||||
pub use relay::*;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/test_harness_common.rs"]
|
||||
mod test_harness_common;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/proxy_shared_state_isolation_tests.rs"]
|
||||
mod proxy_shared_state_isolation_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/proxy_shared_state_parallel_execution_tests.rs"]
|
||||
mod proxy_shared_state_parallel_execution_tests;
|
||||
|
|
|
|||
|
|
@ -52,13 +52,12 @@
|
|||
//! - `SharedCounters` (atomics) let the watchdog read stats without locking
|
||||
|
||||
use crate::error::{ProxyError, Result};
|
||||
use crate::stats::Stats;
|
||||
use crate::stats::{Stats, UserStats};
|
||||
use crate::stream::BufferPool;
|
||||
use dashmap::DashMap;
|
||||
use std::io;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
|
||||
use std::sync::{Arc, Mutex, OnceLock};
|
||||
use std::task::{Context, Poll};
|
||||
use std::time::Duration;
|
||||
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf, copy_bidirectional_with_sizes};
|
||||
|
|
@ -71,6 +70,7 @@ use tracing::{debug, trace, warn};
|
|||
///
|
||||
/// iOS keeps Telegram connections alive in background for up to 30 minutes.
|
||||
/// Closing earlier causes unnecessary reconnects and handshake overhead.
|
||||
#[allow(dead_code)]
|
||||
const ACTIVITY_TIMEOUT: Duration = Duration::from_secs(1800);
|
||||
|
||||
/// Watchdog check interval — also used for periodic rate logging.
|
||||
|
|
@ -209,12 +209,10 @@ struct StatsIo<S> {
|
|||
counters: Arc<SharedCounters>,
|
||||
stats: Arc<Stats>,
|
||||
user: String,
|
||||
user_stats: Arc<UserStats>,
|
||||
quota_limit: Option<u64>,
|
||||
quota_exceeded: Arc<AtomicBool>,
|
||||
quota_read_wake_scheduled: bool,
|
||||
quota_write_wake_scheduled: bool,
|
||||
quota_read_retry_active: Arc<AtomicBool>,
|
||||
quota_write_retry_active: Arc<AtomicBool>,
|
||||
quota_bytes_since_check: u64,
|
||||
epoch: Instant,
|
||||
}
|
||||
|
||||
|
|
@ -230,30 +228,21 @@ impl<S> StatsIo<S> {
|
|||
) -> Self {
|
||||
// Mark initial activity so the watchdog doesn't fire before data flows
|
||||
counters.touch(Instant::now(), epoch);
|
||||
let user_stats = stats.get_or_create_user_stats_handle(&user);
|
||||
Self {
|
||||
inner,
|
||||
counters,
|
||||
stats,
|
||||
user,
|
||||
user_stats,
|
||||
quota_limit,
|
||||
quota_exceeded,
|
||||
quota_read_wake_scheduled: false,
|
||||
quota_write_wake_scheduled: false,
|
||||
quota_read_retry_active: Arc::new(AtomicBool::new(false)),
|
||||
quota_write_retry_active: Arc::new(AtomicBool::new(false)),
|
||||
quota_bytes_since_check: 0,
|
||||
epoch,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> Drop for StatsIo<S> {
|
||||
fn drop(&mut self) {
|
||||
self.quota_read_retry_active.store(false, Ordering::Relaxed);
|
||||
self.quota_write_retry_active
|
||||
.store(false, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct QuotaIoSentinel;
|
||||
|
||||
|
|
@ -277,84 +266,22 @@ fn is_quota_io_error(err: &io::Error) -> bool {
|
|||
.is_some()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
const QUOTA_CONTENTION_RETRY_INTERVAL: Duration = Duration::from_millis(1);
|
||||
#[cfg(not(test))]
|
||||
const QUOTA_CONTENTION_RETRY_INTERVAL: Duration = Duration::from_millis(2);
|
||||
const QUOTA_NEAR_LIMIT_BYTES: u64 = 64 * 1024;
|
||||
const QUOTA_LARGE_CHARGE_BYTES: u64 = 16 * 1024;
|
||||
const QUOTA_ADAPTIVE_INTERVAL_MIN_BYTES: u64 = 4 * 1024;
|
||||
const QUOTA_ADAPTIVE_INTERVAL_MAX_BYTES: u64 = 64 * 1024;
|
||||
|
||||
fn spawn_quota_retry_waker(retry_active: Arc<AtomicBool>, waker: std::task::Waker) {
|
||||
tokio::task::spawn(async move {
|
||||
loop {
|
||||
if !retry_active.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
tokio::time::sleep(QUOTA_CONTENTION_RETRY_INTERVAL).await;
|
||||
if !retry_active.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
waker.wake_by_ref();
|
||||
}
|
||||
});
|
||||
#[inline]
|
||||
fn quota_adaptive_interval_bytes(remaining_before: u64) -> u64 {
|
||||
remaining_before.saturating_div(2).clamp(
|
||||
QUOTA_ADAPTIVE_INTERVAL_MIN_BYTES,
|
||||
QUOTA_ADAPTIVE_INTERVAL_MAX_BYTES,
|
||||
)
|
||||
}
|
||||
|
||||
static QUOTA_USER_LOCKS: OnceLock<DashMap<String, Arc<Mutex<()>>>> = OnceLock::new();
|
||||
static QUOTA_USER_OVERFLOW_LOCKS: OnceLock<Vec<Arc<Mutex<()>>>> = OnceLock::new();
|
||||
|
||||
#[cfg(test)]
|
||||
const QUOTA_USER_LOCKS_MAX: usize = 64;
|
||||
#[cfg(not(test))]
|
||||
const QUOTA_USER_LOCKS_MAX: usize = 4_096;
|
||||
#[cfg(test)]
|
||||
const QUOTA_OVERFLOW_LOCK_STRIPES: usize = 16;
|
||||
#[cfg(not(test))]
|
||||
const QUOTA_OVERFLOW_LOCK_STRIPES: usize = 256;
|
||||
|
||||
#[cfg(test)]
|
||||
fn quota_user_lock_test_guard() -> &'static Mutex<()> {
|
||||
static TEST_LOCK: OnceLock<Mutex<()>> = OnceLock::new();
|
||||
TEST_LOCK.get_or_init(|| Mutex::new(()))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn quota_user_lock_test_scope() -> std::sync::MutexGuard<'static, ()> {
|
||||
quota_user_lock_test_guard()
|
||||
.lock()
|
||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
||||
}
|
||||
|
||||
fn quota_overflow_user_lock(user: &str) -> Arc<Mutex<()>> {
|
||||
let stripes = QUOTA_USER_OVERFLOW_LOCKS.get_or_init(|| {
|
||||
(0..QUOTA_OVERFLOW_LOCK_STRIPES)
|
||||
.map(|_| Arc::new(Mutex::new(())))
|
||||
.collect()
|
||||
});
|
||||
|
||||
let hash = crc32fast::hash(user.as_bytes()) as usize;
|
||||
Arc::clone(&stripes[hash % stripes.len()])
|
||||
}
|
||||
|
||||
fn quota_user_lock(user: &str) -> Arc<Mutex<()>> {
|
||||
let locks = QUOTA_USER_LOCKS.get_or_init(DashMap::new);
|
||||
if let Some(existing) = locks.get(user) {
|
||||
return Arc::clone(existing.value());
|
||||
}
|
||||
|
||||
if locks.len() >= QUOTA_USER_LOCKS_MAX {
|
||||
locks.retain(|_, value| Arc::strong_count(value) > 1);
|
||||
}
|
||||
|
||||
if locks.len() >= QUOTA_USER_LOCKS_MAX {
|
||||
return quota_overflow_user_lock(user);
|
||||
}
|
||||
|
||||
let created = Arc::new(Mutex::new(()));
|
||||
match locks.entry(user.to_string()) {
|
||||
dashmap::mapref::entry::Entry::Occupied(entry) => Arc::clone(entry.get()),
|
||||
dashmap::mapref::entry::Entry::Vacant(entry) => {
|
||||
entry.insert(Arc::clone(&created));
|
||||
created
|
||||
}
|
||||
}
|
||||
#[inline]
|
||||
fn should_immediate_quota_check(remaining_before: u64, charge_bytes: u64) -> bool {
|
||||
remaining_before <= QUOTA_NEAR_LIMIT_BYTES || charge_bytes >= QUOTA_LARGE_CHARGE_BYTES
|
||||
}
|
||||
|
||||
impl<S: AsyncRead + Unpin> AsyncRead for StatsIo<S> {
|
||||
|
|
@ -364,80 +291,60 @@ impl<S: AsyncRead + Unpin> AsyncRead for StatsIo<S> {
|
|||
buf: &mut ReadBuf<'_>,
|
||||
) -> Poll<io::Result<()>> {
|
||||
let this = self.get_mut();
|
||||
if this.quota_exceeded.load(Ordering::Relaxed) {
|
||||
if this.quota_exceeded.load(Ordering::Acquire) {
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
}
|
||||
|
||||
let quota_lock = this
|
||||
.quota_limit
|
||||
.is_some()
|
||||
.then(|| quota_user_lock(&this.user));
|
||||
let _quota_guard = if let Some(lock) = quota_lock.as_ref() {
|
||||
match lock.try_lock() {
|
||||
Ok(guard) => {
|
||||
this.quota_read_wake_scheduled = false;
|
||||
this.quota_read_retry_active.store(false, Ordering::Relaxed);
|
||||
Some(guard)
|
||||
}
|
||||
Err(_) => {
|
||||
if !this.quota_read_wake_scheduled {
|
||||
this.quota_read_wake_scheduled = true;
|
||||
this.quota_read_retry_active.store(true, Ordering::Relaxed);
|
||||
spawn_quota_retry_waker(
|
||||
Arc::clone(&this.quota_read_retry_active),
|
||||
cx.waker().clone(),
|
||||
);
|
||||
}
|
||||
return Poll::Pending;
|
||||
}
|
||||
let mut remaining_before = None;
|
||||
if let Some(limit) = this.quota_limit {
|
||||
let used_before = this.user_stats.quota_used();
|
||||
let remaining = limit.saturating_sub(used_before);
|
||||
if remaining == 0 {
|
||||
this.quota_exceeded.store(true, Ordering::Release);
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
if let Some(limit) = this.quota_limit
|
||||
&& this.stats.get_user_total_octets(&this.user) >= limit
|
||||
{
|
||||
this.quota_exceeded.store(true, Ordering::Relaxed);
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
remaining_before = Some(remaining);
|
||||
}
|
||||
|
||||
let before = buf.filled().len();
|
||||
|
||||
match Pin::new(&mut this.inner).poll_read(cx, buf) {
|
||||
Poll::Ready(Ok(())) => {
|
||||
let n = buf.filled().len() - before;
|
||||
if n > 0 {
|
||||
let mut reached_quota_boundary = false;
|
||||
if let Some(limit) = this.quota_limit {
|
||||
let used = this.stats.get_user_total_octets(&this.user);
|
||||
if used >= limit {
|
||||
this.quota_exceeded.store(true, Ordering::Relaxed);
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
}
|
||||
|
||||
let remaining = limit - used;
|
||||
if (n as u64) > remaining {
|
||||
// Fail closed: when a single read chunk would cross quota,
|
||||
// stop relay immediately without accounting beyond the cap.
|
||||
this.quota_exceeded.store(true, Ordering::Relaxed);
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
}
|
||||
|
||||
reached_quota_boundary = (n as u64) == remaining;
|
||||
}
|
||||
let n_to_charge = n as u64;
|
||||
|
||||
// C→S: client sent data
|
||||
this.counters
|
||||
.c2s_bytes
|
||||
.fetch_add(n as u64, Ordering::Relaxed);
|
||||
.fetch_add(n_to_charge, Ordering::Relaxed);
|
||||
this.counters.c2s_ops.fetch_add(1, Ordering::Relaxed);
|
||||
this.counters.touch(Instant::now(), this.epoch);
|
||||
|
||||
this.stats.add_user_octets_from(&this.user, n as u64);
|
||||
this.stats.increment_user_msgs_from(&this.user);
|
||||
this.stats
|
||||
.add_user_octets_from_handle(this.user_stats.as_ref(), n_to_charge);
|
||||
this.stats
|
||||
.increment_user_msgs_from_handle(this.user_stats.as_ref());
|
||||
|
||||
if reached_quota_boundary {
|
||||
this.quota_exceeded.store(true, Ordering::Relaxed);
|
||||
if let (Some(limit), Some(remaining)) = (this.quota_limit, remaining_before) {
|
||||
this.stats
|
||||
.quota_charge_post_write(this.user_stats.as_ref(), n_to_charge);
|
||||
if should_immediate_quota_check(remaining, n_to_charge) {
|
||||
this.quota_bytes_since_check = 0;
|
||||
if this.user_stats.quota_used() >= limit {
|
||||
this.quota_exceeded.store(true, Ordering::Release);
|
||||
}
|
||||
} else {
|
||||
this.quota_bytes_since_check =
|
||||
this.quota_bytes_since_check.saturating_add(n_to_charge);
|
||||
let interval = quota_adaptive_interval_bytes(remaining);
|
||||
if this.quota_bytes_since_check >= interval {
|
||||
this.quota_bytes_since_check = 0;
|
||||
if this.user_stats.quota_used() >= limit {
|
||||
this.quota_exceeded.store(true, Ordering::Release);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
trace!(user = %this.user, bytes = n, "C->S");
|
||||
|
|
@ -456,75 +363,57 @@ impl<S: AsyncWrite + Unpin> AsyncWrite for StatsIo<S> {
|
|||
buf: &[u8],
|
||||
) -> Poll<io::Result<usize>> {
|
||||
let this = self.get_mut();
|
||||
if this.quota_exceeded.load(Ordering::Relaxed) {
|
||||
if this.quota_exceeded.load(Ordering::Acquire) {
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
}
|
||||
|
||||
let quota_lock = this
|
||||
.quota_limit
|
||||
.is_some()
|
||||
.then(|| quota_user_lock(&this.user));
|
||||
let _quota_guard = if let Some(lock) = quota_lock.as_ref() {
|
||||
match lock.try_lock() {
|
||||
Ok(guard) => {
|
||||
this.quota_write_wake_scheduled = false;
|
||||
this.quota_write_retry_active
|
||||
.store(false, Ordering::Relaxed);
|
||||
Some(guard)
|
||||
}
|
||||
Err(_) => {
|
||||
if !this.quota_write_wake_scheduled {
|
||||
this.quota_write_wake_scheduled = true;
|
||||
this.quota_write_retry_active.store(true, Ordering::Relaxed);
|
||||
spawn_quota_retry_waker(
|
||||
Arc::clone(&this.quota_write_retry_active),
|
||||
cx.waker().clone(),
|
||||
);
|
||||
}
|
||||
return Poll::Pending;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let write_buf = if let Some(limit) = this.quota_limit {
|
||||
let used = this.stats.get_user_total_octets(&this.user);
|
||||
if used >= limit {
|
||||
this.quota_exceeded.store(true, Ordering::Relaxed);
|
||||
let mut remaining_before = None;
|
||||
if let Some(limit) = this.quota_limit {
|
||||
let used_before = this.user_stats.quota_used();
|
||||
let remaining = limit.saturating_sub(used_before);
|
||||
if remaining == 0 {
|
||||
this.quota_exceeded.store(true, Ordering::Release);
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
}
|
||||
remaining_before = Some(remaining);
|
||||
}
|
||||
|
||||
let remaining = (limit - used) as usize;
|
||||
if buf.len() > remaining {
|
||||
// Fail closed: do not emit partial S->C payload when remaining
|
||||
// quota cannot accommodate the pending write request.
|
||||
this.quota_exceeded.store(true, Ordering::Relaxed);
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
}
|
||||
buf
|
||||
} else {
|
||||
buf
|
||||
};
|
||||
|
||||
match Pin::new(&mut this.inner).poll_write(cx, write_buf) {
|
||||
match Pin::new(&mut this.inner).poll_write(cx, buf) {
|
||||
Poll::Ready(Ok(n)) => {
|
||||
if n > 0 {
|
||||
let n_to_charge = n as u64;
|
||||
|
||||
// S→C: data written to client
|
||||
this.counters
|
||||
.s2c_bytes
|
||||
.fetch_add(n as u64, Ordering::Relaxed);
|
||||
.fetch_add(n_to_charge, Ordering::Relaxed);
|
||||
this.counters.s2c_ops.fetch_add(1, Ordering::Relaxed);
|
||||
this.counters.touch(Instant::now(), this.epoch);
|
||||
|
||||
this.stats.add_user_octets_to(&this.user, n as u64);
|
||||
this.stats.increment_user_msgs_to(&this.user);
|
||||
this.stats
|
||||
.add_user_octets_to_handle(this.user_stats.as_ref(), n_to_charge);
|
||||
this.stats
|
||||
.increment_user_msgs_to_handle(this.user_stats.as_ref());
|
||||
|
||||
if let Some(limit) = this.quota_limit
|
||||
&& this.stats.get_user_total_octets(&this.user) >= limit
|
||||
{
|
||||
this.quota_exceeded.store(true, Ordering::Relaxed);
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
if let (Some(limit), Some(remaining)) = (this.quota_limit, remaining_before) {
|
||||
this.stats
|
||||
.quota_charge_post_write(this.user_stats.as_ref(), n_to_charge);
|
||||
if should_immediate_quota_check(remaining, n_to_charge) {
|
||||
this.quota_bytes_since_check = 0;
|
||||
if this.user_stats.quota_used() >= limit {
|
||||
this.quota_exceeded.store(true, Ordering::Release);
|
||||
}
|
||||
} else {
|
||||
this.quota_bytes_since_check =
|
||||
this.quota_bytes_since_check.saturating_add(n_to_charge);
|
||||
let interval = quota_adaptive_interval_bytes(remaining);
|
||||
if this.quota_bytes_since_check >= interval {
|
||||
this.quota_bytes_since_check = 0;
|
||||
if this.user_stats.quota_used() >= limit {
|
||||
this.quota_exceeded.store(true, Ordering::Release);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
trace!(user = %this.user, bytes = n, "S->C");
|
||||
|
|
@ -565,6 +454,7 @@ impl<S: AsyncWrite + Unpin> AsyncWrite for StatsIo<S> {
|
|||
/// - Clean shutdown: both write sides are shut down on exit
|
||||
/// - Error propagation: quota exits return `ProxyError::DataQuotaExceeded`,
|
||||
/// other I/O failures are returned as `ProxyError::Io`
|
||||
#[allow(dead_code)]
|
||||
pub async fn relay_bidirectional<CR, CW, SR, SW>(
|
||||
client_reader: CR,
|
||||
client_writer: CW,
|
||||
|
|
@ -583,6 +473,42 @@ where
|
|||
SR: AsyncRead + Unpin + Send + 'static,
|
||||
SW: AsyncWrite + Unpin + Send + 'static,
|
||||
{
|
||||
relay_bidirectional_with_activity_timeout(
|
||||
client_reader,
|
||||
client_writer,
|
||||
server_reader,
|
||||
server_writer,
|
||||
c2s_buf_size,
|
||||
s2c_buf_size,
|
||||
user,
|
||||
stats,
|
||||
quota_limit,
|
||||
_buffer_pool,
|
||||
ACTIVITY_TIMEOUT,
|
||||
)
|
||||
.await
|
||||
}
|
||||
|
||||
pub async fn relay_bidirectional_with_activity_timeout<CR, CW, SR, SW>(
|
||||
client_reader: CR,
|
||||
client_writer: CW,
|
||||
server_reader: SR,
|
||||
server_writer: SW,
|
||||
c2s_buf_size: usize,
|
||||
s2c_buf_size: usize,
|
||||
user: &str,
|
||||
stats: Arc<Stats>,
|
||||
quota_limit: Option<u64>,
|
||||
_buffer_pool: Arc<BufferPool>,
|
||||
activity_timeout: Duration,
|
||||
) -> Result<()>
|
||||
where
|
||||
CR: AsyncRead + Unpin + Send + 'static,
|
||||
CW: AsyncWrite + Unpin + Send + 'static,
|
||||
SR: AsyncRead + Unpin + Send + 'static,
|
||||
SW: AsyncWrite + Unpin + Send + 'static,
|
||||
{
|
||||
let activity_timeout = activity_timeout.max(Duration::from_secs(1));
|
||||
let epoch = Instant::now();
|
||||
let counters = Arc::new(SharedCounters::new());
|
||||
let quota_exceeded = Arc::new(AtomicBool::new(false));
|
||||
|
|
@ -618,13 +544,13 @@ where
|
|||
let now = Instant::now();
|
||||
let idle = wd_counters.idle_duration(now, epoch);
|
||||
|
||||
if wd_quota_exceeded.load(Ordering::Relaxed) {
|
||||
if wd_quota_exceeded.load(Ordering::Acquire) {
|
||||
warn!(user = %wd_user, "User data quota reached, closing relay");
|
||||
return;
|
||||
}
|
||||
|
||||
// ── Activity timeout ────────────────────────────────────
|
||||
if idle >= ACTIVITY_TIMEOUT {
|
||||
if idle >= activity_timeout {
|
||||
let c2s = wd_counters.c2s_bytes.load(Ordering::Relaxed);
|
||||
let s2c = wd_counters.s2c_bytes.load(Ordering::Relaxed);
|
||||
warn!(
|
||||
|
|
@ -756,18 +682,10 @@ where
|
|||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_security_tests.rs"]
|
||||
mod security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_adversarial_tests.rs"]
|
||||
mod adversarial_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_quota_lock_pressure_adversarial_tests.rs"]
|
||||
mod relay_quota_lock_pressure_adversarial_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_quota_boundary_blackhat_tests.rs"]
|
||||
mod relay_quota_boundary_blackhat_tests;
|
||||
|
|
@ -780,14 +698,18 @@ mod relay_quota_model_adversarial_tests;
|
|||
#[path = "tests/relay_quota_overflow_regression_tests.rs"]
|
||||
mod relay_quota_overflow_regression_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_quota_extended_attack_surface_security_tests.rs"]
|
||||
mod relay_quota_extended_attack_surface_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_watchdog_delta_security_tests.rs"]
|
||||
mod relay_watchdog_delta_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_quota_waker_storm_adversarial_tests.rs"]
|
||||
mod relay_quota_waker_storm_adversarial_tests;
|
||||
#[path = "tests/relay_atomic_quota_invariant_tests.rs"]
|
||||
mod relay_atomic_quota_invariant_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_quota_wake_liveness_regression_tests.rs"]
|
||||
mod relay_quota_wake_liveness_regression_tests;
|
||||
#[path = "tests/relay_baseline_invariant_tests.rs"]
|
||||
mod relay_baseline_invariant_tests;
|
||||
|
|
|
|||
|
|
@ -0,0 +1,146 @@
|
|||
use std::collections::HashSet;
|
||||
use std::collections::hash_map::RandomState;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use std::time::Instant;
|
||||
|
||||
use dashmap::DashMap;
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
use crate::proxy::handshake::{AuthProbeSaturationState, AuthProbeState};
|
||||
use crate::proxy::middle_relay::{DesyncDedupRotationState, RelayIdleCandidateRegistry};
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub(crate) enum ConntrackCloseReason {
|
||||
NormalEof,
|
||||
Timeout,
|
||||
Pressure,
|
||||
Reset,
|
||||
Other,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub(crate) struct ConntrackCloseEvent {
|
||||
pub(crate) src: SocketAddr,
|
||||
pub(crate) dst: SocketAddr,
|
||||
pub(crate) reason: ConntrackCloseReason,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub(crate) enum ConntrackClosePublishResult {
|
||||
Sent,
|
||||
Disabled,
|
||||
QueueFull,
|
||||
QueueClosed,
|
||||
}
|
||||
|
||||
pub(crate) struct HandshakeSharedState {
|
||||
pub(crate) auth_probe: DashMap<IpAddr, AuthProbeState>,
|
||||
pub(crate) auth_probe_saturation: Mutex<Option<AuthProbeSaturationState>>,
|
||||
pub(crate) auth_probe_eviction_hasher: RandomState,
|
||||
pub(crate) invalid_secret_warned: Mutex<HashSet<(String, String)>>,
|
||||
pub(crate) unknown_sni_warn_next_allowed: Mutex<Option<Instant>>,
|
||||
}
|
||||
|
||||
pub(crate) struct MiddleRelaySharedState {
|
||||
pub(crate) desync_dedup: DashMap<u64, Instant>,
|
||||
pub(crate) desync_dedup_previous: DashMap<u64, Instant>,
|
||||
pub(crate) desync_hasher: RandomState,
|
||||
pub(crate) desync_full_cache_last_emit_at: Mutex<Option<Instant>>,
|
||||
pub(crate) desync_dedup_rotation_state: Mutex<DesyncDedupRotationState>,
|
||||
pub(crate) relay_idle_registry: Mutex<RelayIdleCandidateRegistry>,
|
||||
pub(crate) relay_idle_mark_seq: AtomicU64,
|
||||
}
|
||||
|
||||
pub(crate) struct ProxySharedState {
|
||||
pub(crate) handshake: HandshakeSharedState,
|
||||
pub(crate) middle_relay: MiddleRelaySharedState,
|
||||
pub(crate) conntrack_pressure_active: AtomicBool,
|
||||
pub(crate) conntrack_close_tx: Mutex<Option<mpsc::Sender<ConntrackCloseEvent>>>,
|
||||
}
|
||||
|
||||
impl ProxySharedState {
|
||||
pub(crate) fn new() -> Arc<Self> {
|
||||
Arc::new(Self {
|
||||
handshake: HandshakeSharedState {
|
||||
auth_probe: DashMap::new(),
|
||||
auth_probe_saturation: Mutex::new(None),
|
||||
auth_probe_eviction_hasher: RandomState::new(),
|
||||
invalid_secret_warned: Mutex::new(HashSet::new()),
|
||||
unknown_sni_warn_next_allowed: Mutex::new(None),
|
||||
},
|
||||
middle_relay: MiddleRelaySharedState {
|
||||
desync_dedup: DashMap::new(),
|
||||
desync_dedup_previous: DashMap::new(),
|
||||
desync_hasher: RandomState::new(),
|
||||
desync_full_cache_last_emit_at: Mutex::new(None),
|
||||
desync_dedup_rotation_state: Mutex::new(DesyncDedupRotationState::default()),
|
||||
relay_idle_registry: Mutex::new(RelayIdleCandidateRegistry::default()),
|
||||
relay_idle_mark_seq: AtomicU64::new(0),
|
||||
},
|
||||
conntrack_pressure_active: AtomicBool::new(false),
|
||||
conntrack_close_tx: Mutex::new(None),
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn set_conntrack_close_sender(&self, tx: mpsc::Sender<ConntrackCloseEvent>) {
|
||||
match self.conntrack_close_tx.lock() {
|
||||
Ok(mut guard) => {
|
||||
*guard = Some(tx);
|
||||
}
|
||||
Err(poisoned) => {
|
||||
let mut guard = poisoned.into_inner();
|
||||
*guard = Some(tx);
|
||||
self.conntrack_close_tx.clear_poison();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn disable_conntrack_close_sender(&self) {
|
||||
match self.conntrack_close_tx.lock() {
|
||||
Ok(mut guard) => {
|
||||
*guard = None;
|
||||
}
|
||||
Err(poisoned) => {
|
||||
let mut guard = poisoned.into_inner();
|
||||
*guard = None;
|
||||
self.conntrack_close_tx.clear_poison();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn publish_conntrack_close_event(
|
||||
&self,
|
||||
event: ConntrackCloseEvent,
|
||||
) -> ConntrackClosePublishResult {
|
||||
let tx = match self.conntrack_close_tx.lock() {
|
||||
Ok(guard) => guard.clone(),
|
||||
Err(poisoned) => {
|
||||
let guard = poisoned.into_inner();
|
||||
let cloned = guard.clone();
|
||||
self.conntrack_close_tx.clear_poison();
|
||||
cloned
|
||||
}
|
||||
};
|
||||
|
||||
let Some(tx) = tx else {
|
||||
return ConntrackClosePublishResult::Disabled;
|
||||
};
|
||||
|
||||
match tx.try_send(event) {
|
||||
Ok(()) => ConntrackClosePublishResult::Sent,
|
||||
Err(mpsc::error::TrySendError::Full(_)) => ConntrackClosePublishResult::QueueFull,
|
||||
Err(mpsc::error::TrySendError::Closed(_)) => ConntrackClosePublishResult::QueueClosed,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn set_conntrack_pressure_active(&self, active: bool) {
|
||||
self.conntrack_pressure_active
|
||||
.store(active, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
pub(crate) fn conntrack_pressure_active(&self) -> bool {
|
||||
self.conntrack_pressure_active.load(Ordering::Relaxed)
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,260 @@
|
|||
use super::*;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
static RACE_TEST_KEY_COUNTER: AtomicUsize = AtomicUsize::new(1_000_000);
|
||||
|
||||
fn race_unique_key(prefix: &str) -> String {
|
||||
let id = RACE_TEST_KEY_COUNTER.fetch_add(1, Ordering::Relaxed);
|
||||
format!("{}_{}", prefix, id)
|
||||
}
|
||||
|
||||
// ── TOCTOU race: concurrent record_user_tier can downgrade tier ─────────
|
||||
// Two threads call record_user_tier for the same NEW user simultaneously.
|
||||
// Thread A records Tier1, Thread B records Base. Without atomic entry API,
|
||||
// the insert() call overwrites without max(), causing Tier1 → Base downgrade.
|
||||
|
||||
#[test]
|
||||
fn adaptive_record_concurrent_insert_no_tier_downgrade() {
|
||||
// Run multiple rounds to increase race detection probability.
|
||||
for round in 0..50 {
|
||||
let key = race_unique_key(&format!("race_downgrade_{}", round));
|
||||
let key_a = key.clone();
|
||||
let key_b = key.clone();
|
||||
|
||||
let barrier = Arc::new(std::sync::Barrier::new(2));
|
||||
let barrier_a = Arc::clone(&barrier);
|
||||
let barrier_b = Arc::clone(&barrier);
|
||||
|
||||
let ha = std::thread::spawn(move || {
|
||||
barrier_a.wait();
|
||||
record_user_tier(&key_a, AdaptiveTier::Tier2);
|
||||
});
|
||||
|
||||
let hb = std::thread::spawn(move || {
|
||||
barrier_b.wait();
|
||||
record_user_tier(&key_b, AdaptiveTier::Base);
|
||||
});
|
||||
|
||||
ha.join().expect("thread A panicked");
|
||||
hb.join().expect("thread B panicked");
|
||||
|
||||
let result = seed_tier_for_user(&key);
|
||||
profiles().remove(&key);
|
||||
|
||||
// The final tier must be at least Tier2, never downgraded to Base.
|
||||
// With correct max() semantics: max(Tier2, Base) = Tier2.
|
||||
assert!(
|
||||
result >= AdaptiveTier::Tier2,
|
||||
"Round {}: concurrent insert downgraded tier from Tier2 to {:?}",
|
||||
round,
|
||||
result,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// ── TOCTOU race: three threads write three tiers, highest must survive ──
|
||||
|
||||
#[test]
|
||||
fn adaptive_record_triple_concurrent_insert_highest_tier_survives() {
|
||||
for round in 0..30 {
|
||||
let key = race_unique_key(&format!("triple_race_{}", round));
|
||||
let barrier = Arc::new(std::sync::Barrier::new(3));
|
||||
|
||||
let handles: Vec<_> = [AdaptiveTier::Base, AdaptiveTier::Tier1, AdaptiveTier::Tier3]
|
||||
.into_iter()
|
||||
.map(|tier| {
|
||||
let k = key.clone();
|
||||
let b = Arc::clone(&barrier);
|
||||
std::thread::spawn(move || {
|
||||
b.wait();
|
||||
record_user_tier(&k, tier);
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
for h in handles {
|
||||
h.join().expect("thread panicked");
|
||||
}
|
||||
|
||||
let result = seed_tier_for_user(&key);
|
||||
profiles().remove(&key);
|
||||
|
||||
assert!(
|
||||
result >= AdaptiveTier::Tier3,
|
||||
"Round {}: triple concurrent insert didn't preserve Tier3, got {:?}",
|
||||
round,
|
||||
result,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// ── Stress: 20 threads writing different tiers to same key ──────────────
|
||||
|
||||
#[test]
|
||||
fn adaptive_record_20_concurrent_writers_no_panic_no_downgrade() {
|
||||
let key = race_unique_key("stress_20");
|
||||
let barrier = Arc::new(std::sync::Barrier::new(20));
|
||||
|
||||
let handles: Vec<_> = (0..20u32)
|
||||
.map(|i| {
|
||||
let k = key.clone();
|
||||
let b = Arc::clone(&barrier);
|
||||
std::thread::spawn(move || {
|
||||
b.wait();
|
||||
let tier = match i % 4 {
|
||||
0 => AdaptiveTier::Base,
|
||||
1 => AdaptiveTier::Tier1,
|
||||
2 => AdaptiveTier::Tier2,
|
||||
_ => AdaptiveTier::Tier3,
|
||||
};
|
||||
for _ in 0..100 {
|
||||
record_user_tier(&k, tier);
|
||||
}
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
for h in handles {
|
||||
h.join().expect("thread panicked");
|
||||
}
|
||||
|
||||
let result = seed_tier_for_user(&key);
|
||||
profiles().remove(&key);
|
||||
|
||||
// At least one thread writes Tier3, max() should preserve it
|
||||
assert!(
|
||||
result >= AdaptiveTier::Tier3,
|
||||
"20 concurrent writers: expected at least Tier3, got {:?}",
|
||||
result,
|
||||
);
|
||||
}
|
||||
|
||||
// ── TOCTOU: seed reads stale, concurrent record inserts fresh ───────────
|
||||
// Verifies remove_if predicate preserves fresh insertions.
|
||||
|
||||
#[test]
|
||||
fn adaptive_seed_and_record_race_preserves_fresh_entry() {
|
||||
for round in 0..30 {
|
||||
let key = race_unique_key(&format!("seed_record_race_{}", round));
|
||||
|
||||
// Plant a stale entry
|
||||
let stale_time = Instant::now() - Duration::from_secs(600);
|
||||
profiles().insert(
|
||||
key.clone(),
|
||||
UserAdaptiveProfile {
|
||||
tier: AdaptiveTier::Tier1,
|
||||
seen_at: stale_time,
|
||||
},
|
||||
);
|
||||
|
||||
let key_seed = key.clone();
|
||||
let key_record = key.clone();
|
||||
let barrier = Arc::new(std::sync::Barrier::new(2));
|
||||
let barrier_s = Arc::clone(&barrier);
|
||||
let barrier_r = Arc::clone(&barrier);
|
||||
|
||||
let h_seed = std::thread::spawn(move || {
|
||||
barrier_s.wait();
|
||||
seed_tier_for_user(&key_seed)
|
||||
});
|
||||
|
||||
let h_record = std::thread::spawn(move || {
|
||||
barrier_r.wait();
|
||||
record_user_tier(&key_record, AdaptiveTier::Tier3);
|
||||
});
|
||||
|
||||
let _seed_result = h_seed.join().expect("seed thread panicked");
|
||||
h_record.join().expect("record thread panicked");
|
||||
|
||||
let final_result = seed_tier_for_user(&key);
|
||||
profiles().remove(&key);
|
||||
|
||||
// Fresh Tier3 entry should survive the stale-removal race.
|
||||
// Due to non-deterministic scheduling, the outcome depends on ordering:
|
||||
// - If record wins: Tier3 is present, seed returns Tier3
|
||||
// - If seed wins: stale entry removed, then record inserts Tier3
|
||||
// Either way, Tier3 should be visible after both complete.
|
||||
assert!(
|
||||
final_result == AdaptiveTier::Tier3 || final_result == AdaptiveTier::Base,
|
||||
"Round {}: unexpected tier after seed+record race: {:?}",
|
||||
round,
|
||||
final_result,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// ── Eviction safety: retain() during concurrent inserts ─────────────────
|
||||
|
||||
#[test]
|
||||
fn adaptive_eviction_during_concurrent_inserts_no_panic() {
|
||||
let prefix = race_unique_key("evict_conc");
|
||||
let stale_time = Instant::now() - Duration::from_secs(600);
|
||||
|
||||
// Pre-fill with stale entries to push past the eviction threshold
|
||||
for i in 0..100 {
|
||||
let k = format!("{}_{}", prefix, i);
|
||||
profiles().insert(
|
||||
k,
|
||||
UserAdaptiveProfile {
|
||||
tier: AdaptiveTier::Base,
|
||||
seen_at: stale_time,
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
let barrier = Arc::new(std::sync::Barrier::new(10));
|
||||
let handles: Vec<_> = (0..10)
|
||||
.map(|t| {
|
||||
let b = Arc::clone(&barrier);
|
||||
let pfx = prefix.clone();
|
||||
std::thread::spawn(move || {
|
||||
b.wait();
|
||||
for i in 0..50 {
|
||||
let k = format!("{}_t{}_{}", pfx, t, i);
|
||||
record_user_tier(&k, AdaptiveTier::Tier1);
|
||||
}
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
for h in handles {
|
||||
h.join().expect("eviction thread panicked");
|
||||
}
|
||||
|
||||
// Cleanup
|
||||
profiles().retain(|k, _| !k.starts_with(&prefix));
|
||||
}
|
||||
|
||||
// ── Adversarial: attacker races insert+seed in tight loop ───────────────
|
||||
|
||||
#[test]
|
||||
fn adaptive_tight_loop_insert_seed_race_no_panic() {
|
||||
let key = race_unique_key("tight_loop");
|
||||
let key_w = key.clone();
|
||||
let key_r = key.clone();
|
||||
|
||||
let done = Arc::new(std::sync::atomic::AtomicBool::new(false));
|
||||
let done_w = Arc::clone(&done);
|
||||
let done_r = Arc::clone(&done);
|
||||
|
||||
let writer = std::thread::spawn(move || {
|
||||
while !done_w.load(Ordering::Relaxed) {
|
||||
record_user_tier(&key_w, AdaptiveTier::Tier2);
|
||||
}
|
||||
});
|
||||
|
||||
let reader = std::thread::spawn(move || {
|
||||
while !done_r.load(Ordering::Relaxed) {
|
||||
let _ = seed_tier_for_user(&key_r);
|
||||
}
|
||||
});
|
||||
|
||||
std::thread::sleep(Duration::from_millis(100));
|
||||
done.store(true, Ordering::Relaxed);
|
||||
|
||||
writer.join().expect("writer panicked");
|
||||
reader.join().expect("reader panicked");
|
||||
profiles().remove(&key);
|
||||
}
|
||||
|
|
@ -0,0 +1,453 @@
|
|||
use super::*;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
// Unique key generator to avoid test interference through the global DashMap.
|
||||
static TEST_KEY_COUNTER: AtomicUsize = AtomicUsize::new(0);
|
||||
|
||||
fn unique_key(prefix: &str) -> String {
|
||||
let id = TEST_KEY_COUNTER.fetch_add(1, Ordering::Relaxed);
|
||||
format!("{}_{}", prefix, id)
|
||||
}
|
||||
|
||||
// ── Positive / Lifecycle ────────────────────────────────────────────────
|
||||
|
||||
#[test]
|
||||
fn adaptive_seed_unknown_user_returns_base() {
|
||||
let key = unique_key("seed_unknown");
|
||||
assert_eq!(seed_tier_for_user(&key), AdaptiveTier::Base);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn adaptive_record_then_seed_returns_recorded_tier() {
|
||||
let key = unique_key("record_seed");
|
||||
record_user_tier(&key, AdaptiveTier::Tier1);
|
||||
assert_eq!(seed_tier_for_user(&key), AdaptiveTier::Tier1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn adaptive_separate_users_have_independent_tiers() {
|
||||
let key_a = unique_key("indep_a");
|
||||
let key_b = unique_key("indep_b");
|
||||
record_user_tier(&key_a, AdaptiveTier::Tier1);
|
||||
record_user_tier(&key_b, AdaptiveTier::Tier2);
|
||||
assert_eq!(seed_tier_for_user(&key_a), AdaptiveTier::Tier1);
|
||||
assert_eq!(seed_tier_for_user(&key_b), AdaptiveTier::Tier2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn adaptive_record_upgrades_tier_within_ttl() {
|
||||
let key = unique_key("upgrade");
|
||||
record_user_tier(&key, AdaptiveTier::Base);
|
||||
record_user_tier(&key, AdaptiveTier::Tier1);
|
||||
assert_eq!(seed_tier_for_user(&key), AdaptiveTier::Tier1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn adaptive_record_does_not_downgrade_within_ttl() {
|
||||
let key = unique_key("no_downgrade");
|
||||
record_user_tier(&key, AdaptiveTier::Tier2);
|
||||
record_user_tier(&key, AdaptiveTier::Base);
|
||||
// max(Tier2, Base) = Tier2 — within TTL the higher tier is retained
|
||||
assert_eq!(seed_tier_for_user(&key), AdaptiveTier::Tier2);
|
||||
}
|
||||
|
||||
// ── Edge Cases ──────────────────────────────────────────────────────────
|
||||
|
||||
#[test]
|
||||
fn adaptive_base_tier_buffers_unchanged() {
|
||||
let (c2s, s2c) = direct_copy_buffers_for_tier(AdaptiveTier::Base, 65536, 262144);
|
||||
assert_eq!(c2s, 65536);
|
||||
assert_eq!(s2c, 262144);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn adaptive_tier1_buffers_within_caps() {
|
||||
let (c2s, s2c) = direct_copy_buffers_for_tier(AdaptiveTier::Tier1, 65536, 262144);
|
||||
assert!(c2s > 65536, "Tier1 c2s should exceed Base");
|
||||
assert!(
|
||||
c2s <= 128 * 1024,
|
||||
"Tier1 c2s should not exceed DIRECT_C2S_CAP_BYTES"
|
||||
);
|
||||
assert!(s2c > 262144, "Tier1 s2c should exceed Base");
|
||||
assert!(
|
||||
s2c <= 512 * 1024,
|
||||
"Tier1 s2c should not exceed DIRECT_S2C_CAP_BYTES"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn adaptive_tier3_buffers_capped() {
|
||||
let (c2s, s2c) = direct_copy_buffers_for_tier(AdaptiveTier::Tier3, 65536, 262144);
|
||||
assert!(c2s <= 128 * 1024, "Tier3 c2s must not exceed cap");
|
||||
assert!(s2c <= 512 * 1024, "Tier3 s2c must not exceed cap");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn adaptive_scale_zero_base_returns_at_least_one() {
|
||||
// scale(0, num, den, cap) should return at least 1 (the .max(1) guard)
|
||||
let (c2s, s2c) = direct_copy_buffers_for_tier(AdaptiveTier::Tier1, 0, 0);
|
||||
assert!(c2s >= 1);
|
||||
assert!(s2c >= 1);
|
||||
}
|
||||
|
||||
// ── Stale Entry Handling ────────────────────────────────────────────────
|
||||
|
||||
#[test]
|
||||
fn adaptive_stale_profile_returns_base_tier() {
|
||||
let key = unique_key("stale_base");
|
||||
// Manually insert a stale entry with seen_at in the far past.
|
||||
// PROFILE_TTL = 300s, so 600s ago is well past expiry.
|
||||
let stale_time = Instant::now() - Duration::from_secs(600);
|
||||
profiles().insert(
|
||||
key.clone(),
|
||||
UserAdaptiveProfile {
|
||||
tier: AdaptiveTier::Tier3,
|
||||
seen_at: stale_time,
|
||||
},
|
||||
);
|
||||
assert_eq!(
|
||||
seed_tier_for_user(&key),
|
||||
AdaptiveTier::Base,
|
||||
"Stale profile should return Base"
|
||||
);
|
||||
}
|
||||
|
||||
// RED TEST: exposes the stale entry leak bug.
|
||||
// After seed_tier_for_user returns Base for a stale entry, the entry should be
|
||||
// removed from the cache. Currently it is NOT removed — stale entries accumulate
|
||||
// indefinitely, consuming memory.
|
||||
#[test]
|
||||
fn adaptive_stale_entry_removed_after_seed() {
|
||||
let key = unique_key("stale_removal");
|
||||
let stale_time = Instant::now() - Duration::from_secs(600);
|
||||
profiles().insert(
|
||||
key.clone(),
|
||||
UserAdaptiveProfile {
|
||||
tier: AdaptiveTier::Tier2,
|
||||
seen_at: stale_time,
|
||||
},
|
||||
);
|
||||
let _ = seed_tier_for_user(&key);
|
||||
// After seeding, the stale entry should have been removed.
|
||||
assert!(
|
||||
!profiles().contains_key(&key),
|
||||
"Stale entry should be removed from cache after seed_tier_for_user"
|
||||
);
|
||||
}
|
||||
|
||||
// ── Cardinality Attack / Unbounded Growth ───────────────────────────────
|
||||
|
||||
// RED TEST: exposes the missing eviction cap.
|
||||
// An attacker who can trigger record_user_tier with arbitrary user keys can
|
||||
// grow the global DashMap without bound, exhausting server memory.
|
||||
// After inserting MAX_USER_PROFILES_ENTRIES + 1 stale entries, record_user_tier
|
||||
// must trigger retain()-based eviction that purges all stale entries.
|
||||
#[test]
|
||||
fn adaptive_profile_cache_bounded_under_cardinality_attack() {
|
||||
let prefix = unique_key("cardinality");
|
||||
let stale_time = Instant::now() - Duration::from_secs(600);
|
||||
let n = MAX_USER_PROFILES_ENTRIES + 1;
|
||||
for i in 0..n {
|
||||
let key = format!("{}_{}", prefix, i);
|
||||
profiles().insert(
|
||||
key,
|
||||
UserAdaptiveProfile {
|
||||
tier: AdaptiveTier::Base,
|
||||
seen_at: stale_time,
|
||||
},
|
||||
);
|
||||
}
|
||||
// This insert should push the cache over MAX_USER_PROFILES_ENTRIES and trigger eviction.
|
||||
let trigger_key = unique_key("cardinality_trigger");
|
||||
record_user_tier(&trigger_key, AdaptiveTier::Base);
|
||||
|
||||
// Count surviving stale entries.
|
||||
let mut surviving_stale = 0;
|
||||
for i in 0..n {
|
||||
let key = format!("{}_{}", prefix, i);
|
||||
if profiles().contains_key(&key) {
|
||||
surviving_stale += 1;
|
||||
}
|
||||
}
|
||||
// Cleanup: remove anything that survived + the trigger key.
|
||||
for i in 0..n {
|
||||
let key = format!("{}_{}", prefix, i);
|
||||
profiles().remove(&key);
|
||||
}
|
||||
profiles().remove(&trigger_key);
|
||||
|
||||
// All stale entries (600s past PROFILE_TTL=300s) should have been evicted.
|
||||
assert_eq!(
|
||||
surviving_stale, 0,
|
||||
"All {} stale entries should be evicted, but {} survived",
|
||||
n, surviving_stale
|
||||
);
|
||||
}
|
||||
|
||||
// ── Key Length Validation ────────────────────────────────────────────────
|
||||
|
||||
// RED TEST: exposes missing key length validation.
|
||||
// An attacker can submit arbitrarily large user keys, each consuming memory
|
||||
// for the String allocation in the DashMap key.
|
||||
#[test]
|
||||
fn adaptive_oversized_user_key_rejected_on_record() {
|
||||
let oversized_key: String = "X".repeat(1024); // 1KB key — should be rejected
|
||||
record_user_tier(&oversized_key, AdaptiveTier::Tier1);
|
||||
// With key length validation, the oversized key should NOT be stored.
|
||||
let stored = profiles().contains_key(&oversized_key);
|
||||
// Cleanup regardless
|
||||
profiles().remove(&oversized_key);
|
||||
assert!(
|
||||
!stored,
|
||||
"Oversized user key (1024 bytes) should be rejected by record_user_tier"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn adaptive_oversized_user_key_rejected_on_seed() {
|
||||
let oversized_key: String = "X".repeat(1024);
|
||||
// Insert it directly to test seed behavior
|
||||
profiles().insert(
|
||||
oversized_key.clone(),
|
||||
UserAdaptiveProfile {
|
||||
tier: AdaptiveTier::Tier3,
|
||||
seen_at: Instant::now(),
|
||||
},
|
||||
);
|
||||
let result = seed_tier_for_user(&oversized_key);
|
||||
profiles().remove(&oversized_key);
|
||||
assert_eq!(
|
||||
result,
|
||||
AdaptiveTier::Base,
|
||||
"Oversized user key should return Base from seed_tier_for_user"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn adaptive_empty_user_key_safe() {
|
||||
// Empty string is a valid (if unusual) key — should not panic
|
||||
record_user_tier("", AdaptiveTier::Tier1);
|
||||
let tier = seed_tier_for_user("");
|
||||
profiles().remove("");
|
||||
assert_eq!(tier, AdaptiveTier::Tier1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn adaptive_max_length_key_accepted() {
|
||||
// A key at exactly 512 bytes should be accepted
|
||||
let key: String = "K".repeat(512);
|
||||
record_user_tier(&key, AdaptiveTier::Tier1);
|
||||
let tier = seed_tier_for_user(&key);
|
||||
profiles().remove(&key);
|
||||
assert_eq!(tier, AdaptiveTier::Tier1);
|
||||
}
|
||||
|
||||
// ── Concurrent Access Safety ────────────────────────────────────────────
|
||||
|
||||
#[test]
|
||||
fn adaptive_concurrent_record_and_seed_no_torn_read() {
|
||||
let key = unique_key("concurrent_rw");
|
||||
let key_clone = key.clone();
|
||||
|
||||
// Record from multiple threads simultaneously
|
||||
let handles: Vec<_> = (0..10)
|
||||
.map(|i| {
|
||||
let k = key_clone.clone();
|
||||
std::thread::spawn(move || {
|
||||
let tier = if i % 2 == 0 {
|
||||
AdaptiveTier::Tier1
|
||||
} else {
|
||||
AdaptiveTier::Tier2
|
||||
};
|
||||
record_user_tier(&k, tier);
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
for h in handles {
|
||||
h.join().expect("thread panicked");
|
||||
}
|
||||
|
||||
let result = seed_tier_for_user(&key);
|
||||
profiles().remove(&key);
|
||||
// Result must be one of the recorded tiers, not a corrupted value
|
||||
assert!(
|
||||
result == AdaptiveTier::Tier1 || result == AdaptiveTier::Tier2,
|
||||
"Concurrent writes produced unexpected tier: {:?}",
|
||||
result
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn adaptive_concurrent_seed_does_not_panic() {
|
||||
let key = unique_key("concurrent_seed");
|
||||
record_user_tier(&key, AdaptiveTier::Tier1);
|
||||
let key_clone = key.clone();
|
||||
|
||||
let handles: Vec<_> = (0..20)
|
||||
.map(|_| {
|
||||
let k = key_clone.clone();
|
||||
std::thread::spawn(move || {
|
||||
for _ in 0..100 {
|
||||
let _ = seed_tier_for_user(&k);
|
||||
}
|
||||
})
|
||||
})
|
||||
.collect();
|
||||
|
||||
for h in handles {
|
||||
h.join().expect("concurrent seed panicked");
|
||||
}
|
||||
profiles().remove(&key);
|
||||
}
|
||||
|
||||
// ── TOCTOU: Concurrent seed + record race ───────────────────────────────

// RED TEST: seed_tier_for_user reads a stale entry, drops the reference,
// then another thread inserts a fresh entry. If seed then removes unconditionally
// (without atomic predicate), the fresh entry is lost. With remove_if, the
// fresh entry survives.
#[test]
fn adaptive_remove_if_does_not_delete_fresh_concurrent_insert() {
    let key = unique_key("toctou");
    // NOTE(review): `Instant::now() - Duration` can panic on platforms where
    // Instant cannot represent times before boot — confirm acceptable for the
    // CI targets this suite runs on.
    let stale_time = Instant::now() - Duration::from_secs(600);
    profiles().insert(
        key.clone(),
        UserAdaptiveProfile {
            tier: AdaptiveTier::Tier1,
            seen_at: stale_time,
        },
    );

    // Thread A: seed_tier (will see stale, should attempt removal)
    // Thread B: record_user_tier (inserts fresh entry concurrently)
    let key_a = key.clone();
    let key_b = key.clone();

    let handle_b = std::thread::spawn(move || {
        // Small yield to increase chance of interleaving
        std::thread::yield_now();
        record_user_tier(&key_b, AdaptiveTier::Tier3);
    });

    let _ = seed_tier_for_user(&key_a);

    handle_b.join().expect("thread B panicked");

    // After both operations, the fresh Tier3 entry should survive.
    // With a correct remove_if predicate, the fresh entry is NOT deleted.
    // Without remove_if (current code), the entry may be lost.
    let final_tier = seed_tier_for_user(&key);
    profiles().remove(&key);

    // The fresh Tier3 entry should survive the stale-removal race.
    // Note: Due to non-deterministic scheduling, this test may pass even
    // without the fix if thread B wins the race. Run with --test-threads=1
    // or multiple iterations for reliable detection.
    assert!(
        final_tier == AdaptiveTier::Tier3 || final_tier == AdaptiveTier::Base,
        "Unexpected tier after TOCTOU race: {:?}",
        final_tier
    );
}
|
||||
|
||||
// ── Fuzz: Random keys ──────────────────────────────────────────────────

// Record + seed 200 random printable-ASCII keys (lengths 0..=256, so the
// empty key is covered too); nothing may panic.
#[test]
fn adaptive_fuzz_random_keys_no_panic() {
    // NOTE(review): `RngExt` is not an item of the rand 0.9 crate (where
    // `rng()` / `random_range` live on `Rng`) — confirm which rand version /
    // re-export provides it.
    use rand::{Rng, RngExt};
    let mut rng = rand::rng();
    let mut keys = Vec::new();
    for _ in 0..200 {
        let len: usize = rng.random_range(0..=256);
        // Printable ASCII only (0x20..=0x7E).
        let key: String = (0..len)
            .map(|_| {
                let c: u8 = rng.random_range(0x20..=0x7E);
                c as char
            })
            .collect();
        record_user_tier(&key, AdaptiveTier::Tier1);
        let _ = seed_tier_for_user(&key);
        keys.push(key);
    }
    // Cleanup
    for key in &keys {
        profiles().remove(key);
    }
}
|
||||
|
||||
// ── average_throughput_to_tier (proposed function, tests the mapping) ────

// These tests verify the function that will be added in PR-D.
// They are written against the current code's constant definitions.

// 7 Mbps measured over a 10 s session sits below the 8 Mbps promotion
// threshold, so the mapping must classify the session as Base.
#[test]
fn adaptive_throughput_mapping_below_threshold_is_base() {
    let uplink_bytes: u64 = 8_750_000; // 7 Mbps * 10 s / 8 bits-per-byte
    let downlink_bytes: u64 = 1_000_000;
    let session_secs: f64 = 10.0;

    // Throughput is judged on the faster of the two directions.
    let dominant_bytes = uplink_bytes.max(downlink_bytes);
    let avg_bps = dominant_bytes as f64 * 8.0 / session_secs; // 7_000_000 bps

    assert!(
        avg_bps < THROUGHPUT_UP_BPS,
        "Should be below threshold: {} < {}",
        avg_bps,
        THROUGHPUT_UP_BPS,
    );
}
|
||||
|
||||
// 10 Mbps over a 10 s session clears the 8 Mbps promotion threshold, so the
// mapping must select Tier1.
#[test]
fn adaptive_throughput_mapping_above_threshold_is_tier1() {
    let session_secs: f64 = 10.0;
    let transferred_bytes: u64 = 12_500_000; // 10 Mbps * 10 s / 8 bits-per-byte

    let avg_bps = transferred_bytes as f64 * 8.0 / session_secs;

    assert!(
        avg_bps >= THROUGHPUT_UP_BPS,
        "Should be above threshold: {} >= {}",
        avg_bps,
        THROUGHPUT_UP_BPS,
    );
}
|
||||
|
||||
// Sessions shorter than one second carry too little data to judge, so the
// promotion logic must fall back to Base regardless of measured rate.
#[test]
fn adaptive_throughput_short_session_should_return_base() {
    let session_secs: f64 = 0.5;
    let guard_active = session_secs < 1.0;
    assert!(guard_active, "Short session duration guard should activate");
}
|
||||
|
||||
// ── me_flush_policy_for_tier ────────────────────────────────────────────

// Base tier is the identity case: the (frames, bytes, delay) flush policy
// passed in must come back unmodified.
#[test]
fn adaptive_me_flush_base_unchanged() {
    let (frames, bytes, delay) =
        me_flush_policy_for_tier(AdaptiveTier::Base, 32, 65536, Duration::from_micros(1000));
    assert_eq!(frames, 32);
    assert_eq!(bytes, 65536);
    assert_eq!(delay, Duration::from_micros(1000));
}
|
||||
|
||||
// Tier1 shortens the flush delay; frames/bytes are not asserted here.
#[test]
fn adaptive_me_flush_tier1_delay_reduced() {
    let (_, _, delay) =
        me_flush_policy_for_tier(AdaptiveTier::Tier1, 32, 65536, Duration::from_micros(1000));
    // Tier1: delay * 7/10 = 700 µs
    assert_eq!(delay, Duration::from_micros(700));
}
|
||||
|
||||
// Even the most aggressive tier must not push the flush delay below the
// configured floor.
#[test]
fn adaptive_me_flush_delay_never_below_minimum() {
    let (_, _, delay) =
        me_flush_policy_for_tier(AdaptiveTier::Tier3, 32, 65536, Duration::from_micros(200));
    // Tier3: 200 * 3/10 = 60, but min is ME_DELAY_MIN_US = 150
    assert!(delay.as_micros() >= 150, "Delay must respect minimum");
}
|
||||
|
|
@ -0,0 +1,473 @@
|
|||
use super::*;
|
||||
use crate::config::{ProxyConfig, UpstreamConfig, UpstreamType};
|
||||
use crate::protocol::constants::{MAX_TLS_PLAINTEXT_SIZE, MIN_TLS_CLIENT_HELLO_SIZE};
|
||||
use crate::stats::Stats;
|
||||
use crate::transport::UpstreamManager;
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use std::task::{Context, Poll};
|
||||
use std::time::Duration;
|
||||
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWriteExt, ReadBuf, duplex};
|
||||
use tokio::net::TcpListener;
|
||||
|
||||
// A misconfigured delay range (min 5000 ms > max 1000 ms) must not panic
// (e.g. in a random-range call); the observed sleep shows the range is
// normalized to roughly the max bound.
#[test]
fn edge_mask_reject_delay_min_greater_than_max_does_not_panic() {
    let mut config = ProxyConfig::default();
    config.censorship.server_hello_delay_min_ms = 5000;
    config.censorship.server_hello_delay_max_ms = 1000;

    // Plain #[test] fn: build a runtime by hand for the async call.
    let rt = tokio::runtime::Runtime::new().unwrap();
    rt.block_on(async {
        let start = std::time::Instant::now();
        maybe_apply_mask_reject_delay(&config).await;
        let elapsed = start.elapsed();

        // Must have slept ~1000 ms, well short of the bogus 5000 ms minimum.
        assert!(elapsed >= Duration::from_millis(1000));
        assert!(elapsed < Duration::from_millis(1500));
    });
}
|
||||
|
||||
// With client_handshake already at u64::MAX, adding the masking grace period
// must saturate instead of wrapping.
#[test]
fn edge_handshake_timeout_with_mask_grace_saturating_add_prevents_overflow() {
    let mut config = ProxyConfig::default();
    config.timeouts.client_handshake = u64::MAX;
    config.censorship.mask = true;

    let timeout = handshake_timeout_with_mask_grace(&config);
    // Saturated, not wrapped-around to a tiny value.
    assert_eq!(timeout.as_secs(), u64::MAX);
}
|
||||
|
||||
// Inclusive bounds check: exactly MIN and exactly MAX are accepted; one
// below MIN and one above MAX are rejected.
#[test]
fn edge_tls_clienthello_len_in_bounds_exact_boundaries() {
    assert!(tls_clienthello_len_in_bounds(MIN_TLS_CLIENT_HELLO_SIZE));
    assert!(!tls_clienthello_len_in_bounds(
        MIN_TLS_CLIENT_HELLO_SIZE - 1
    ));
    assert!(tls_clienthello_len_in_bounds(MAX_TLS_PLAINTEXT_SIZE));
    assert!(!tls_clienthello_len_in_bounds(MAX_TLS_PLAINTEXT_SIZE + 1));
}
|
||||
|
||||
// The synthesized local address must echo the requested port exactly at both
// extremes (0, u16::MAX) and at a common value.
#[test]
fn edge_synthetic_local_addr_boundaries() {
    for port in [0u16, 80, u16::MAX] {
        assert_eq!(synthetic_local_addr(port).port(), port);
    }
}
|
||||
|
||||
// An UnexpectedEof stream error during the handshake must be classified and
// recorded in the beobachten (observation) store under the
// "[expected_64_got_0]" failure class.
#[test]
fn edge_beobachten_record_handshake_failure_class_stream_error_eof() {
    let beobachten = BeobachtenStore::new();
    let mut config = ProxyConfig::default();
    config.general.beobachten = true;
    config.general.beobachten_minutes = 1;

    let eof_err = ProxyError::Stream(crate::error::StreamError::UnexpectedEof);
    let peer_ip: IpAddr = "198.51.100.100".parse().unwrap();

    record_handshake_failure_class(&beobachten, &config, peer_ip, &eof_err);

    let snapshot = beobachten.snapshot_text(Duration::from_secs(60));
    assert!(snapshot.contains("[expected_64_got_0]"));
}
|
||||
|
||||
// A 1 s handshake timeout must still fire even while the handler is inside
// the fixed 3 s masking (ServerHello) delay — the delay must not suppress
// the timeout accounting.
#[tokio::test]
async fn adversarial_tls_handshake_timeout_during_masking_delay() {
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.timeouts.client_handshake = 1;
    cfg.censorship.mask = true;
    cfg.censorship.server_hello_delay_min_ms = 3000;
    cfg.censorship.server_hello_delay_max_ms = 3000;

    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());
    // In-memory byte pipe standing in for the TCP socket.
    let (server_side, mut client_side) = duplex(4096);

    let handle = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.1:55000".parse().unwrap(),
        config,
        stats.clone(),
        // NOTE(review): the numeric UpstreamManager::new arguments are
        // positional; their meanings are not visible in this file — confirm
        // against its definition before changing them.
        Arc::new(UpstreamManager::new(
            vec![],
            1,
            1,
            1,
            10,
            1,
            false,
            stats.clone(),
        )),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));

    // TLS record header claiming a 0xFFFF-byte body that never arrives.
    client_side
        .write_all(&[0x16, 0x03, 0x01, 0xFF, 0xFF])
        .await
        .unwrap();

    let result = tokio::time::timeout(Duration::from_secs(4), handle)
        .await
        .unwrap()
        .unwrap();

    assert!(matches!(result, Err(ProxyError::TgHandshakeTimeout)));
    assert_eq!(stats.get_handshake_timeouts(), 1);
}
|
||||
|
||||
// Slowloris-style attack: the client sends only a fragment of a PROXY v1
// header and then stalls; the 200 ms per-header timeout must abort the
// connection and count it as a bad connect.
#[tokio::test]
async fn blackhat_proxy_protocol_slowloris_timeout() {
    let mut cfg = ProxyConfig::default();
    cfg.server.proxy_protocol_header_timeout_ms = 200;
    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());

    let (server_side, mut client_side) = duplex(4096);
    let handle = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.2:55000".parse().unwrap(),
        config,
        stats.clone(),
        // NOTE(review): numeric UpstreamManager::new args are positional;
        // meanings not visible here — confirm against its definition.
        Arc::new(UpstreamManager::new(
            vec![],
            1,
            1,
            1,
            10,
            1,
            false,
            stats.clone(),
        )),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        true, // proxy-protocol parsing enabled for this connection
    ));

    // Partial header, then stall well past the 200 ms timeout.
    client_side.write_all(b"PROXY TCP4 192.").await.unwrap();
    tokio::time::sleep(Duration::from_millis(300)).await;

    let result = tokio::time::timeout(Duration::from_secs(2), handle)
        .await
        .unwrap()
        .unwrap();

    assert!(matches!(result, Err(ProxyError::InvalidProxyProtocol)));
    assert_eq!(stats.get_connects_bad(), 1);
}
|
||||
|
||||
// An IPv4-mapped IPv6 peer (::ffff:192.0.2.1) must NOT be accepted by an
// IPv4-only trusted range — mapping must not be usable to bypass the check.
#[test]
fn blackhat_ipv4_mapped_ipv6_proxy_source_bypass_attempt() {
    let trusted_ranges = vec!["192.0.2.0/24".parse().unwrap()];
    let mapped_peer = IpAddr::V6(Ipv4Addr::new(192, 0, 2, 1).to_ipv6_mapped());
    assert!(!is_trusted_proxy_source(mapped_peer, &trusted_ranges));
}
|
||||
|
||||
// With proxy-protocol parsing enabled, a client that opens with a raw TLS
// ClientHello (no PROXY header) must be rejected as an invalid proxy
// protocol and counted as a bad connect.
#[tokio::test]
async fn negative_proxy_protocol_enabled_but_client_sends_tls_hello() {
    let mut cfg = ProxyConfig::default();
    cfg.server.proxy_protocol_header_timeout_ms = 500;
    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());

    let (server_side, mut client_side) = duplex(4096);
    let handle = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.3:55000".parse().unwrap(),
        config,
        stats.clone(),
        // NOTE(review): numeric UpstreamManager::new args are positional;
        // meanings not visible here — confirm against its definition.
        Arc::new(UpstreamManager::new(
            vec![],
            1,
            1,
            1,
            10,
            1,
            false,
            stats.clone(),
        )),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        true, // proxy-protocol parsing enabled
    ));

    // TLS record header bytes — not a PROXY header.
    client_side
        .write_all(&[0x16, 0x03, 0x01, 0x02, 0x00])
        .await
        .unwrap();

    let result = tokio::time::timeout(Duration::from_secs(2), handle)
        .await
        .unwrap()
        .unwrap();

    assert!(matches!(result, Err(ProxyError::InvalidProxyProtocol)));
    assert_eq!(stats.get_connects_bad(), 1);
}
|
||||
|
||||
// EOF after exactly 4 bytes of a TLS header: the failure must be recorded in
// the beobachten store under the "[expected_64_got_0]" class.
#[tokio::test]
async fn edge_client_stream_exactly_4_bytes_eof() {
    let config = Arc::new(ProxyConfig::default());
    let stats = Arc::new(Stats::new());
    let beobachten = Arc::new(BeobachtenStore::new());

    let (server_side, mut client_side) = duplex(4096);
    let handle = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.4:55000".parse().unwrap(),
        config,
        stats.clone(),
        // NOTE(review): numeric UpstreamManager::new args are positional;
        // meanings not visible here — confirm against its definition.
        Arc::new(UpstreamManager::new(
            vec![],
            1,
            1,
            1,
            10,
            1,
            false,
            stats.clone(),
        )),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        beobachten.clone(),
        false,
    ));

    // 4 of the 5 TLS record-header bytes, then clean EOF.
    client_side
        .write_all(&[0x16, 0x03, 0x01, 0x00])
        .await
        .unwrap();
    client_side.shutdown().await.unwrap();

    let _ = tokio::time::timeout(Duration::from_secs(2), handle).await;

    let snapshot = beobachten.snapshot_text(Duration::from_secs(60));
    assert!(snapshot.contains("[expected_64_got_0]"));
}
|
||||
|
||||
// Valid TLS record header claiming a 100-byte body, but only 99 bytes arrive
// before EOF: the connection must be counted as a bad connect.
#[tokio::test]
async fn edge_client_stream_tls_header_valid_but_body_1_byte_short_eof() {
    let config = Arc::new(ProxyConfig::default());
    let stats = Arc::new(Stats::new());

    let (server_side, mut client_side) = duplex(4096);
    let handle = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.5:55000".parse().unwrap(),
        config,
        stats.clone(),
        // NOTE(review): numeric UpstreamManager::new args are positional;
        // meanings not visible here — confirm against its definition.
        Arc::new(UpstreamManager::new(
            vec![],
            1,
            1,
            1,
            10,
            1,
            false,
            stats.clone(),
        )),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));

    // Header: handshake(0x16), TLS 1.0, length 0x0064 = 100 bytes.
    client_side
        .write_all(&[0x16, 0x03, 0x01, 0x00, 100])
        .await
        .unwrap();
    // One byte short of the claimed body, then EOF.
    client_side.write_all(&vec![0x41; 99]).await.unwrap();
    client_side.shutdown().await.unwrap();

    let _ = tokio::time::timeout(Duration::from_secs(2), handle).await;
    assert_eq!(stats.get_connects_bad(), 1);
}
|
||||
|
||||
// With classic/secure modes disabled and masking on, a plain-HTTP opener must
// be treated as a bad connect (and diverted to masking rather than the MTProto
// path).
#[tokio::test]
async fn integration_non_tls_modes_disabled_immediately_masks() {
    let mut cfg = ProxyConfig::default();
    cfg.general.modes.classic = false;
    cfg.general.modes.secure = false;
    cfg.censorship.mask = true;
    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());

    let (server_side, mut client_side) = duplex(4096);
    let handle = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.6:55000".parse().unwrap(),
        config,
        stats.clone(),
        // NOTE(review): numeric UpstreamManager::new args are positional;
        // meanings not visible here — confirm against its definition.
        Arc::new(UpstreamManager::new(
            vec![],
            1,
            1,
            1,
            10,
            1,
            false,
            stats.clone(),
        )),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));

    client_side.write_all(b"GET / HTTP/1.1\r\n").await.unwrap();
    let _ = tokio::time::timeout(Duration::from_secs(2), handle).await;
    assert_eq!(stats.get_connects_bad(), 1);
}
|
||||
|
||||
// Test reader that serves its data one byte at a time, interleaving
// self-waking Poll::Pending returns, to exercise partial-read handling in
// read loops.
struct YieldingReader {
    data: Vec<u8>,       // bytes to serve, in order
    pos: usize,          // index of the next byte to emit
    yields_left: usize,  // Poll::Pending returns remaining before the next byte
}
|
||||
|
||||
impl AsyncRead for YieldingReader {
    // Emits one byte per Ready poll. Before each byte it returns Pending
    // `yields_left` times, waking itself so the runtime re-polls promptly —
    // simulating a very slow peer.
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<std::io::Result<()>> {
        let this = self.get_mut();
        if this.yields_left > 0 {
            this.yields_left -= 1;
            // Self-wake: guarantees the task is polled again without an
            // external event.
            cx.waker().wake_by_ref();
            return Poll::Pending;
        }
        if this.pos >= this.data.len() {
            // Ready with nothing written to buf signals EOF.
            return Poll::Ready(Ok(()));
        }
        // Exactly one byte per successful read, then re-arm the yields.
        buf.put_slice(&this.data[this.pos..this.pos + 1]);
        this.pos += 1;
        this.yields_left = 2;
        Poll::Ready(Ok(()))
    }
}
|
||||
|
||||
// read_with_progress must assemble the full buffer even when the underlying
// reader yields Pending repeatedly and delivers one byte at a time.
#[tokio::test]
async fn fuzz_read_with_progress_heavy_yielding() {
    let expected_data = b"HEAVY_YIELD_TEST_DATA".to_vec();
    let mut reader = YieldingReader {
        data: expected_data.clone(),
        pos: 0,
        yields_left: 2,
    };

    let mut buf = vec![0u8; expected_data.len()];
    let read_bytes = read_with_progress(&mut reader, &mut buf).await.unwrap();

    assert_eq!(read_bytes, expected_data.len());
    assert_eq!(buf, expected_data);
}
|
||||
|
||||
// A payload of exactly 65535 bytes (max TLS record length) must produce a
// single record: 5-byte header + payload, with the length field at 0xFFFF.
#[test]
fn edge_wrap_tls_application_record_exactly_u16_max() {
    let payload = vec![0u8; 65535];
    let wrapped = wrap_tls_application_record(&payload);
    assert_eq!(wrapped.len(), 65540); // 65535 + 5-byte record header
    assert_eq!(wrapped[0], TLS_RECORD_APPLICATION);
    assert_eq!(&wrapped[3..5], &65535u16.to_be_bytes());
}
|
||||
|
||||
// Total wrapped size must be payload + 5 bytes of header per 65535-byte
// chunk, across empty, boundary, and multi-chunk lengths.
#[test]
fn fuzz_wrap_tls_application_record_lengths() {
    let lengths = [0, 1, 65534, 65535, 65536, 131070, 131071, 131072];
    for len in lengths {
        let payload = vec![0u8; len];
        let wrapped = wrap_tls_application_record(&payload);
        // Empty payload still emits one (empty) record, hence .max(1).
        let expected_chunks = len.div_ceil(65535).max(1);
        assert_eq!(wrapped.len(), len + 5 * expected_chunks);
    }
}
|
||||
|
||||
// 10 concurrent reservation attempts from the same IP against a per-user cap
// of 5: exactly 5 must succeed, 5 must fail, counters must match, and a full
// release must return every counter to zero.
#[tokio::test]
async fn stress_user_connection_reservation_concurrent_same_ip_exhaustion() {
    let user = "stress-same-ip-user";
    let mut config = ProxyConfig::default();
    config.access.user_max_tcp_conns.insert(user.to_string(), 5);

    let config = Arc::new(config);
    let stats = Arc::new(Stats::new());
    let ip_tracker = Arc::new(UserIpTracker::new());
    // IP limit (10) is deliberately above the connection cap (5) so only the
    // connection cap can be the limiting factor here.
    ip_tracker.set_user_limit(user, 10).await;

    let peer = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(198, 51, 100, 77)), 55000);

    let mut tasks = tokio::task::JoinSet::new();
    let mut reservations = Vec::new();

    for _ in 0..10 {
        let config = config.clone();
        let stats = stats.clone();
        let ip_tracker = ip_tracker.clone();
        tasks.spawn(async move {
            RunningClientHandler::acquire_user_connection_reservation_static(
                user, &config, stats, peer, ip_tracker,
            )
            .await
        });
    }

    let mut successes = 0;
    let mut failures = 0;

    while let Some(res) = tasks.join_next().await {
        match res.unwrap() {
            Ok(r) => {
                successes += 1;
                // Keep successful reservations alive until asserted below.
                reservations.push(r);
            }
            Err(_) => failures += 1,
        }
    }

    assert_eq!(successes, 5);
    assert_eq!(failures, 5);
    assert_eq!(stats.get_user_curr_connects(user), 5);
    // All 10 attempts came from one peer IP.
    assert_eq!(ip_tracker.get_active_ip_count(user).await, 1);

    for reservation in reservations {
        reservation.release().await;
    }

    // Full release: no leaked connection or IP accounting.
    assert_eq!(stats.get_user_curr_connects(user), 0);
    assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
}
|
||||
|
|
@ -0,0 +1,224 @@
|
|||
use super::*;
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::protocol::constants::MIN_TLS_CLIENT_HELLO_SIZE;
|
||||
use crate::stats::Stats;
|
||||
use crate::transport::UpstreamManager;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::io::{AsyncWriteExt, duplex};
|
||||
|
||||
fn preload_user_quota(stats: &Stats, user: &str, bytes: u64) {
|
||||
let user_stats = stats.get_or_create_user_stats_handle(user);
|
||||
stats.quota_charge_post_write(user_stats.as_ref(), bytes);
|
||||
}
|
||||
|
||||
// A payload of exactly 2 × 65535 bytes must split into two full records,
// each with its own 5-byte header carrying length 0xFFFF.
#[test]
fn invariant_wrap_tls_application_record_exact_multiples() {
    let chunk_size = u16::MAX as usize;
    let payload = vec![0xAA; chunk_size * 2];

    let wrapped = wrap_tls_application_record(&payload);

    assert_eq!(wrapped.len(), 2 * (5 + chunk_size));
    assert_eq!(wrapped[0], TLS_RECORD_APPLICATION);
    assert_eq!(&wrapped[3..5], &65535u16.to_be_bytes());

    // The second record header sits immediately after the first full chunk.
    let second_header_idx = 5 + chunk_size;
    assert_eq!(wrapped[second_header_idx], TLS_RECORD_APPLICATION);
    assert_eq!(
        &wrapped[second_header_idx + 3..second_header_idx + 5],
        &65535u16.to_be_bytes()
    );
}
|
||||
|
||||
// A ClientHello whose header claims exactly MIN_TLS_CLIENT_HELLO_SIZE bytes
// but whose body arrives one byte short before EOF must be counted as a bad
// connect.
#[tokio::test]
async fn invariant_tls_clienthello_truncation_exact_boundary_triggers_masking() {
    let config = Arc::new(ProxyConfig::default());
    let stats = Arc::new(Stats::new());

    let (server_side, mut client_side) = duplex(4096);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.20:55000".parse().unwrap(),
        config,
        stats.clone(),
        // NOTE(review): numeric UpstreamManager::new args are positional;
        // meanings not visible here — confirm against its definition.
        Arc::new(UpstreamManager::new(
            vec![],
            1,
            1,
            1,
            10,
            1,
            false,
            stats.clone(),
        )),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));

    // Record header claiming exactly the minimum ClientHello size…
    let claimed_len = MIN_TLS_CLIENT_HELLO_SIZE as u16;
    let mut header = vec![0x16, 0x03, 0x01];
    header.extend_from_slice(&claimed_len.to_be_bytes());

    client_side.write_all(&header).await.unwrap();
    // …but deliver one byte fewer, then EOF.
    client_side
        .write_all(&vec![0x42; MIN_TLS_CLIENT_HELLO_SIZE - 1])
        .await
        .unwrap();
    client_side.shutdown().await.unwrap();

    let _ = tokio::time::timeout(Duration::from_secs(2), handler)
        .await
        .unwrap();
    assert_eq!(stats.get_connects_bad(), 1);
}
|
||||
|
||||
// With a per-user IP limit of 1: the first reservation (IP A) succeeds; a
// second from a different IP must fail with ConnectionLimitExceeded AND must
// roll back its provisional connection count (still 1, not 2).
#[tokio::test]
async fn invariant_acquire_reservation_ip_limit_rollback() {
    let user = "rollback-test-user";
    let mut config = ProxyConfig::default();
    // Connection cap (10) is high so only the IP limit can trip.
    config
        .access
        .user_max_tcp_conns
        .insert(user.to_string(), 10);

    let stats = Arc::new(Stats::new());
    let ip_tracker = Arc::new(UserIpTracker::new());
    ip_tracker.set_user_limit(user, 1).await;

    let peer_a = "198.51.100.21:55000".parse().unwrap();
    // Keep _res_a alive so its reservation is held during the second attempt.
    let _res_a = RunningClientHandler::acquire_user_connection_reservation_static(
        user,
        &config,
        stats.clone(),
        peer_a,
        ip_tracker.clone(),
    )
    .await
    .unwrap();

    assert_eq!(stats.get_user_curr_connects(user), 1);

    let peer_b = "203.0.113.22:55000".parse().unwrap();
    let res_b = RunningClientHandler::acquire_user_connection_reservation_static(
        user,
        &config,
        stats.clone(),
        peer_b,
        ip_tracker.clone(),
    )
    .await;

    assert!(matches!(
        res_b,
        Err(ProxyError::ConnectionLimitExceeded { .. })
    ));
    // Failed acquisition must not leave a phantom connection behind.
    assert_eq!(stats.get_user_curr_connects(user), 1);
}
|
||||
|
||||
// Quota boundary semantics: with a 1000-byte quota, 999 bytes used still
// admits a connection; reaching exactly 1000 must reject with
// DataQuotaExceeded (the boundary is inclusive on the reject side).
#[tokio::test]
async fn invariant_quota_exact_boundary_inclusive() {
    let user = "quota-strict-user";
    let mut config = ProxyConfig::default();
    config.access.user_data_quota.insert(user.to_string(), 1000);

    let stats = Arc::new(Stats::new());
    let ip_tracker = Arc::new(UserIpTracker::new());
    let peer = "198.51.100.23:55000".parse().unwrap();

    // 999 / 1000 used: still under quota — must be admitted.
    preload_user_quota(stats.as_ref(), user, 999);
    let res1 = RunningClientHandler::acquire_user_connection_reservation_static(
        user,
        &config,
        stats.clone(),
        peer,
        ip_tracker.clone(),
    )
    .await;
    assert!(res1.is_ok());
    res1.unwrap().release().await;

    // One more byte reaches the quota exactly — must now be rejected.
    preload_user_quota(stats.as_ref(), user, 1);
    let res2 = RunningClientHandler::acquire_user_connection_reservation_static(
        user,
        &config,
        stats.clone(),
        peer,
        ip_tracker.clone(),
    )
    .await;
    assert!(matches!(res2, Err(ProxyError::DataQuotaExceeded { .. })));
}
|
||||
|
||||
// EOF after 3 bytes of a direct-mode (0xEF) header: the handler must return
// an error and record the failure class in beobachten, but NOT bump the
// bad-connect counter (a truncated direct header is an I/O error, not an
// identified bad client).
#[tokio::test]
async fn invariant_direct_mode_partial_header_eof_is_error_not_bad_connect() {
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = true;
    cfg.general.beobachten_minutes = 1;

    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());
    let beobachten = Arc::new(BeobachtenStore::new());

    let (server_side, mut client_side) = duplex(4096);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.25:55000".parse().unwrap(),
        config,
        stats.clone(),
        // NOTE(review): numeric UpstreamManager::new args are positional;
        // meanings not visible here — confirm against its definition.
        Arc::new(UpstreamManager::new(
            vec![],
            1,
            1,
            1,
            10,
            1,
            false,
            stats.clone(),
        )),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        beobachten.clone(),
        false,
    ));

    // Three 0xEF bytes (partial obfuscated header), then EOF.
    client_side.write_all(&[0xEF, 0xEF, 0xEF]).await.unwrap();
    client_side.shutdown().await.unwrap();

    let result = tokio::time::timeout(Duration::from_secs(2), handler)
        .await
        .unwrap()
        .unwrap();

    assert!(result.is_err());
    assert_eq!(stats.get_connects_bad(), 0);
    let snapshot = beobachten.snapshot_text(Duration::from_secs(60));
    assert!(snapshot.contains("[expected_64_got_0]"));
}
|
||||
|
||||
// snapshot() must always reflect the most recent set_mode() call — no stale
// cached mode.
#[tokio::test]
async fn invariant_route_mode_snapshot_picks_up_latest_mode() {
    let route_runtime = Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct));
    assert!(matches!(
        route_runtime.snapshot().mode,
        RelayRouteMode::Direct
    ));

    route_runtime.set_mode(RelayRouteMode::Middle);
    assert!(matches!(
        route_runtime.snapshot().mode,
        RelayRouteMode::Middle
    ));
}
|
||||
|
|
@ -40,6 +40,7 @@ fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats,
|
||||
|
|
|
|||
|
|
@ -36,6 +36,7 @@ fn build_harness(config: ProxyConfig) -> PipelineHarness {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats,
|
||||
|
|
|
|||
|
|
@ -0,0 +1,101 @@
|
|||
use super::*;
|
||||
use crate::config::{UpstreamConfig, UpstreamType};
|
||||
use std::sync::Arc;
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt, duplex};
|
||||
use tokio::net::TcpListener;
|
||||
use tokio::time::Duration;
|
||||
|
||||
// Builds an UpstreamManager with a single enabled Direct upstream — the
// minimal working upstream path for loopback-based tests in this module.
fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
    Arc::new(UpstreamManager::new(
        vec![UpstreamConfig {
            upstream_type: UpstreamType::Direct {
                interface: None,
                bind_addresses: None,
            },
            weight: 1,
            enabled: true,
            scopes: String::new(),
            selected_scope: String::new(),
        }],
        // NOTE(review): the numeric arguments below are positional; their
        // meanings are not visible from this file — confirm against
        // UpstreamManager::new before changing them.
        1,
        1,
        1,
        10,
        1,
        false,
        stats,
    ))
}
|
||||
|
||||
// A censorship probe that splits "CONNECT …" across two writes must still be
// reassembled by the prefetch window, classified as [HTTP] in beobachten, and
// forwarded intact to the mask backend.
#[tokio::test]
async fn fragmented_connect_probe_is_classified_as_http_via_prefetch_window() {
    // Local TCP listener standing in for the mask (camouflage) backend; it
    // records every byte forwarded to it.
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();

    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = Vec::new();
        stream.read_to_end(&mut got).await.unwrap();
        got
    });

    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = true;
    cfg.general.beobachten_minutes = 1;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = backend_addr.port();
    // Disable MTProto modes so any non-TLS traffic is diverted to the mask.
    cfg.general.modes.classic = false;
    cfg.general.modes.secure = false;

    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());
    let beobachten = Arc::new(BeobachtenStore::new());

    let (server_side, mut client_side) = duplex(4096);
    let peer: SocketAddr = "198.51.100.251:57501".parse().unwrap();

    let handler = tokio::spawn(handle_client_stream(
        server_side,
        peer,
        config,
        stats.clone(),
        new_upstream_manager(stats),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        beobachten.clone(),
        false,
    ));

    // Split the probe mid-keyword so classification cannot succeed on the
    // first read alone.
    client_side.write_all(b"CONNE").await.unwrap();
    client_side
        .write_all(b"CT example.org:443 HTTP/1.1\r\nHost: example.org\r\n\r\n")
        .await
        .unwrap();
    client_side.shutdown().await.unwrap();

    let forwarded = tokio::time::timeout(Duration::from_secs(3), accept_task)
        .await
        .unwrap()
        .unwrap();
    assert!(
        forwarded.starts_with(b"CONNECT example.org:443 HTTP/1.1"),
        "mask backend must receive the full fragmented CONNECT probe"
    );

    let result = tokio::time::timeout(Duration::from_secs(3), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());

    let snapshot = beobachten.snapshot_text(Duration::from_secs(60));
    assert!(snapshot.contains("[HTTP]"));
    assert!(snapshot.contains("198.51.100.251-1"));
}
|
||||
|
|
@ -34,6 +34,7 @@ fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats,
|
||||
|
|
|
|||
|
|
@ -0,0 +1,123 @@
|
|||
use super::*;
|
||||
use crate::config::{UpstreamConfig, UpstreamType};
|
||||
use std::sync::Arc;
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt, duplex};
|
||||
use tokio::net::TcpListener;
|
||||
use tokio::time::{Duration, sleep};
|
||||
|
||||
// Builds an UpstreamManager with a single enabled Direct upstream — the
// minimal working upstream path for loopback-based tests in this module.
fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
    Arc::new(UpstreamManager::new(
        vec![UpstreamConfig {
            upstream_type: UpstreamType::Direct {
                interface: None,
                bind_addresses: None,
            },
            weight: 1,
            enabled: true,
            scopes: String::new(),
            selected_scope: String::new(),
        }],
        // NOTE(review): the numeric arguments below are positional; their
        // meanings are not visible from this file — confirm against
        // UpstreamManager::new before changing them.
        1,
        1,
        1,
        10,
        1,
        false,
        stats,
    ))
}
|
||||
|
||||
async fn run_http2_fragment_case(split_at: usize, delay_ms: u64, peer: SocketAddr) {
|
||||
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||
let backend_addr = listener.local_addr().unwrap();
|
||||
let preface = b"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n".to_vec();
|
||||
|
||||
let accept_task = tokio::spawn(async move {
|
||||
let (mut stream, _) = listener.accept().await.unwrap();
|
||||
let mut got = Vec::new();
|
||||
stream.read_to_end(&mut got).await.unwrap();
|
||||
got
|
||||
});
|
||||
|
||||
let mut cfg = ProxyConfig::default();
|
||||
cfg.general.beobachten = true;
|
||||
cfg.general.beobachten_minutes = 1;
|
||||
cfg.censorship.mask = true;
|
||||
cfg.censorship.mask_unix_sock = None;
|
||||
cfg.censorship.mask_host = Some("127.0.0.1".to_string());
|
||||
cfg.censorship.mask_port = backend_addr.port();
|
||||
cfg.general.modes.classic = false;
|
||||
cfg.general.modes.secure = false;
|
||||
|
||||
let config = Arc::new(cfg);
|
||||
let stats = Arc::new(Stats::new());
|
||||
let beobachten = Arc::new(BeobachtenStore::new());
|
||||
|
||||
let (server_side, mut client_side) = duplex(4096);
|
||||
let handler = tokio::spawn(handle_client_stream(
|
||||
server_side,
|
||||
peer,
|
||||
config,
|
||||
stats.clone(),
|
||||
new_upstream_manager(stats),
|
||||
Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
|
||||
Arc::new(BufferPool::new()),
|
||||
Arc::new(SecureRandom::new()),
|
||||
None,
|
||||
Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||
None,
|
||||
Arc::new(UserIpTracker::new()),
|
||||
beobachten.clone(),
|
||||
false,
|
||||
));
|
||||
|
||||
let first = split_at.min(preface.len());
|
||||
client_side.write_all(&preface[..first]).await.unwrap();
|
||||
if first < preface.len() {
|
||||
sleep(Duration::from_millis(delay_ms)).await;
|
||||
client_side.write_all(&preface[first..]).await.unwrap();
|
||||
}
|
||||
client_side.shutdown().await.unwrap();
|
||||
|
||||
let forwarded = tokio::time::timeout(Duration::from_secs(3), accept_task)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert!(
|
||||
forwarded.starts_with(&preface),
|
||||
"mask backend must receive an intact HTTP/2 preface prefix"
|
||||
);
|
||||
|
||||
let result = tokio::time::timeout(Duration::from_secs(3), handler)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert!(result.is_ok());
|
||||
|
||||
let snapshot = beobachten.snapshot_text(Duration::from_secs(60));
|
||||
assert!(snapshot.contains("[HTTP]"));
|
||||
assert!(snapshot.contains(&format!("{}-1", peer.ip())));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn http2_preface_fragmentation_matrix_is_classified_and_forwarded() {
|
||||
let cases = [(2usize, 0u64), (3, 0), (4, 0), (2, 7), (3, 7), (8, 1)];
|
||||
|
||||
for (i, (split_at, delay_ms)) in cases.into_iter().enumerate() {
|
||||
let peer: SocketAddr = format!("198.51.100.{}:58{}", 140 + i, 100 + i)
|
||||
.parse()
|
||||
.unwrap();
|
||||
run_http2_fragment_case(split_at, delay_ms, peer).await;
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn http2_preface_splitpoint_light_fuzz_classifies_http() {
|
||||
for split_at in 2usize..=12 {
|
||||
let delay_ms = if split_at % 3 == 0 { 7 } else { 1 };
|
||||
let peer: SocketAddr = format!("198.51.101.{}:59{}", split_at, 10 + split_at)
|
||||
.parse()
|
||||
.unwrap();
|
||||
run_http2_fragment_case(split_at, delay_ms, peer).await;
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,151 @@
|
|||
use super::*;
|
||||
use crate::config::{UpstreamConfig, UpstreamType};
|
||||
use std::sync::Arc;
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt, duplex};
|
||||
use tokio::net::TcpListener;
|
||||
use tokio::time::{Duration, sleep};
|
||||
|
||||
fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
||||
Arc::new(UpstreamManager::new(
|
||||
vec![UpstreamConfig {
|
||||
upstream_type: UpstreamType::Direct {
|
||||
interface: None,
|
||||
bind_addresses: None,
|
||||
},
|
||||
weight: 1,
|
||||
enabled: true,
|
||||
scopes: String::new(),
|
||||
selected_scope: String::new(),
|
||||
}],
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats,
|
||||
))
|
||||
}
|
||||
|
||||
async fn run_pipeline_prefetch_case(
|
||||
prefetch_timeout_ms: u64,
|
||||
delayed_tail_ms: u64,
|
||||
peer: SocketAddr,
|
||||
) -> (Vec<u8>, String) {
|
||||
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||
let backend_addr = listener.local_addr().unwrap();
|
||||
|
||||
let accept_task = tokio::spawn(async move {
|
||||
let (mut stream, _) = listener.accept().await.unwrap();
|
||||
let mut got = Vec::new();
|
||||
stream.read_to_end(&mut got).await.unwrap();
|
||||
got
|
||||
});
|
||||
|
||||
let mut cfg = ProxyConfig::default();
|
||||
cfg.general.beobachten = true;
|
||||
cfg.general.beobachten_minutes = 1;
|
||||
cfg.censorship.mask = true;
|
||||
cfg.censorship.mask_unix_sock = None;
|
||||
cfg.censorship.mask_host = Some("127.0.0.1".to_string());
|
||||
cfg.censorship.mask_port = backend_addr.port();
|
||||
cfg.censorship.mask_classifier_prefetch_timeout_ms = prefetch_timeout_ms;
|
||||
cfg.general.modes.classic = false;
|
||||
cfg.general.modes.secure = false;
|
||||
|
||||
let config = Arc::new(cfg);
|
||||
let stats = Arc::new(Stats::new());
|
||||
let beobachten = Arc::new(BeobachtenStore::new());
|
||||
|
||||
let (server_side, mut client_side) = duplex(4096);
|
||||
let handler = tokio::spawn(handle_client_stream(
|
||||
server_side,
|
||||
peer,
|
||||
config,
|
||||
stats.clone(),
|
||||
new_upstream_manager(stats),
|
||||
Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
|
||||
Arc::new(BufferPool::new()),
|
||||
Arc::new(SecureRandom::new()),
|
||||
None,
|
||||
Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||
None,
|
||||
Arc::new(UserIpTracker::new()),
|
||||
beobachten.clone(),
|
||||
false,
|
||||
));
|
||||
|
||||
client_side.write_all(b"C").await.unwrap();
|
||||
sleep(Duration::from_millis(delayed_tail_ms)).await;
|
||||
|
||||
client_side
|
||||
.write_all(b"ONNECT example.org:443 HTTP/1.1\r\nHost: example.org\r\n\r\n")
|
||||
.await
|
||||
.unwrap();
|
||||
client_side.shutdown().await.unwrap();
|
||||
|
||||
let forwarded = tokio::time::timeout(Duration::from_secs(3), accept_task)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
let result = tokio::time::timeout(Duration::from_secs(3), handler)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert!(result.is_ok());
|
||||
|
||||
let snapshot = beobachten.snapshot_text(Duration::from_secs(60));
|
||||
(forwarded, snapshot)
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn tdd_pipeline_prefetch_5ms_misses_15ms_tail_and_classifies_as_port_scanner() {
|
||||
let peer: SocketAddr = "198.51.100.171:58071".parse().unwrap();
|
||||
let (forwarded, snapshot) = run_pipeline_prefetch_case(5, 15, peer).await;
|
||||
|
||||
assert!(
|
||||
forwarded.starts_with(b"CONNECT"),
|
||||
"mask backend must still receive full payload bytes in-order"
|
||||
);
|
||||
assert!(
|
||||
snapshot.contains("[HTTP]") || snapshot.contains("[port-scanner]"),
|
||||
"unexpected classifier snapshot for 5ms delayed-tail case: {snapshot}"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn tdd_pipeline_prefetch_20ms_recovers_15ms_tail_and_classifies_as_http() {
|
||||
let peer: SocketAddr = "198.51.100.172:58072".parse().unwrap();
|
||||
let (forwarded, snapshot) = run_pipeline_prefetch_case(20, 15, peer).await;
|
||||
|
||||
assert!(
|
||||
forwarded.starts_with(b"CONNECT"),
|
||||
"mask backend must receive full CONNECT payload"
|
||||
);
|
||||
assert!(
|
||||
snapshot.contains("[HTTP]"),
|
||||
"20ms budget should recover delayed fragmented prefix and classify as HTTP"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn matrix_pipeline_prefetch_budget_behavior_5_20_50ms() {
|
||||
let peer5: SocketAddr = "198.51.100.173:58073".parse().unwrap();
|
||||
let peer20: SocketAddr = "198.51.100.174:58074".parse().unwrap();
|
||||
let peer50: SocketAddr = "198.51.100.175:58075".parse().unwrap();
|
||||
|
||||
let (_, snap5) = run_pipeline_prefetch_case(5, 35, peer5).await;
|
||||
let (_, snap20) = run_pipeline_prefetch_case(20, 35, peer20).await;
|
||||
let (_, snap50) = run_pipeline_prefetch_case(50, 35, peer50).await;
|
||||
|
||||
assert!(
|
||||
snap5.contains("[HTTP]") || snap5.contains("[port-scanner]"),
|
||||
"unexpected 5ms snapshot: {snap5}"
|
||||
);
|
||||
assert!(
|
||||
snap20.contains("[HTTP]") || snap20.contains("[port-scanner]"),
|
||||
"unexpected 20ms snapshot: {snap20}"
|
||||
);
|
||||
assert!(snap50.contains("[HTTP]"));
|
||||
}
|
||||
|
|
@ -0,0 +1,88 @@
|
|||
use super::*;
|
||||
use tokio::io::{AsyncWriteExt, duplex};
|
||||
use tokio::time::{Duration, sleep};
|
||||
|
||||
#[test]
|
||||
fn prefetch_timeout_budget_reads_from_config() {
|
||||
let mut cfg = ProxyConfig::default();
|
||||
assert_eq!(
|
||||
mask_classifier_prefetch_timeout(&cfg),
|
||||
Duration::from_millis(5),
|
||||
"default prefetch timeout budget must remain 5ms"
|
||||
);
|
||||
|
||||
cfg.censorship.mask_classifier_prefetch_timeout_ms = 20;
|
||||
assert_eq!(
|
||||
mask_classifier_prefetch_timeout(&cfg),
|
||||
Duration::from_millis(20),
|
||||
"runtime prefetch timeout budget must follow configured value"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn configured_prefetch_budget_20ms_recovers_tail_delayed_15ms() {
|
||||
let (mut reader, mut writer) = duplex(1024);
|
||||
|
||||
let writer_task = tokio::spawn(async move {
|
||||
sleep(Duration::from_millis(15)).await;
|
||||
writer
|
||||
.write_all(b"ONNECT example.org:443 HTTP/1.1\r\n")
|
||||
.await
|
||||
.expect("tail bytes must be writable");
|
||||
writer
|
||||
.shutdown()
|
||||
.await
|
||||
.expect("writer shutdown must succeed");
|
||||
});
|
||||
|
||||
let mut initial_data = b"C".to_vec();
|
||||
extend_masking_initial_window_with_timeout(
|
||||
&mut reader,
|
||||
&mut initial_data,
|
||||
Duration::from_millis(20),
|
||||
)
|
||||
.await;
|
||||
|
||||
writer_task
|
||||
.await
|
||||
.expect("writer task must not panic in runtime timeout test");
|
||||
|
||||
assert!(
|
||||
initial_data.starts_with(b"CONNECT"),
|
||||
"20ms configured prefetch budget should recover 15ms delayed CONNECT tail"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn configured_prefetch_budget_5ms_misses_tail_delayed_15ms() {
|
||||
let (mut reader, mut writer) = duplex(1024);
|
||||
|
||||
let writer_task = tokio::spawn(async move {
|
||||
sleep(Duration::from_millis(15)).await;
|
||||
writer
|
||||
.write_all(b"ONNECT example.org:443 HTTP/1.1\r\n")
|
||||
.await
|
||||
.expect("tail bytes must be writable");
|
||||
writer
|
||||
.shutdown()
|
||||
.await
|
||||
.expect("writer shutdown must succeed");
|
||||
});
|
||||
|
||||
let mut initial_data = b"C".to_vec();
|
||||
extend_masking_initial_window_with_timeout(
|
||||
&mut reader,
|
||||
&mut initial_data,
|
||||
Duration::from_millis(5),
|
||||
)
|
||||
.await;
|
||||
|
||||
writer_task
|
||||
.await
|
||||
.expect("writer task must not panic in runtime timeout test");
|
||||
|
||||
assert!(
|
||||
!initial_data.starts_with(b"CONNECT"),
|
||||
"5ms configured prefetch budget should miss 15ms delayed CONNECT tail"
|
||||
);
|
||||
}
|
||||
|
|
@ -0,0 +1,265 @@
|
|||
use super::*;
|
||||
use crate::config::{UpstreamConfig, UpstreamType};
|
||||
use crate::crypto::sha256_hmac;
|
||||
use crate::protocol::constants::{HANDSHAKE_LEN, TLS_VERSION};
|
||||
use crate::protocol::tls;
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt, duplex};
|
||||
use tokio::net::TcpListener;
|
||||
|
||||
struct PipelineHarness {
|
||||
config: Arc<ProxyConfig>,
|
||||
stats: Arc<Stats>,
|
||||
upstream_manager: Arc<UpstreamManager>,
|
||||
replay_checker: Arc<ReplayChecker>,
|
||||
buffer_pool: Arc<BufferPool>,
|
||||
rng: Arc<SecureRandom>,
|
||||
route_runtime: Arc<RouteRuntimeController>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
beobachten: Arc<BeobachtenStore>,
|
||||
}
|
||||
|
||||
fn build_harness(secret_hex: &str, mask_port: u16) -> PipelineHarness {
|
||||
let mut cfg = ProxyConfig::default();
|
||||
cfg.general.beobachten = false;
|
||||
cfg.censorship.mask = true;
|
||||
cfg.censorship.mask_unix_sock = None;
|
||||
cfg.censorship.mask_host = Some("127.0.0.1".to_string());
|
||||
cfg.censorship.mask_port = mask_port;
|
||||
cfg.censorship.mask_proxy_protocol = 0;
|
||||
cfg.access.ignore_time_skew = true;
|
||||
cfg.access
|
||||
.users
|
||||
.insert("user".to_string(), secret_hex.to_string());
|
||||
|
||||
let config = Arc::new(cfg);
|
||||
let stats = Arc::new(Stats::new());
|
||||
let upstream_manager = Arc::new(UpstreamManager::new(
|
||||
vec![UpstreamConfig {
|
||||
upstream_type: UpstreamType::Direct {
|
||||
interface: None,
|
||||
bind_addresses: None,
|
||||
},
|
||||
weight: 1,
|
||||
enabled: true,
|
||||
scopes: String::new(),
|
||||
selected_scope: String::new(),
|
||||
}],
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
));
|
||||
|
||||
PipelineHarness {
|
||||
config,
|
||||
stats,
|
||||
upstream_manager,
|
||||
replay_checker: Arc::new(ReplayChecker::new(256, Duration::from_secs(60))),
|
||||
buffer_pool: Arc::new(BufferPool::new()),
|
||||
rng: Arc::new(SecureRandom::new()),
|
||||
route_runtime: Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||
ip_tracker: Arc::new(UserIpTracker::new()),
|
||||
beobachten: Arc::new(BeobachtenStore::new()),
|
||||
}
|
||||
}
|
||||
|
||||
fn make_valid_tls_client_hello(secret: &[u8], timestamp: u32, tls_len: usize, fill: u8) -> Vec<u8> {
|
||||
let total_len = 5 + tls_len;
|
||||
let mut handshake = vec![fill; total_len];
|
||||
|
||||
handshake[0] = 0x16;
|
||||
handshake[1] = 0x03;
|
||||
handshake[2] = 0x01;
|
||||
handshake[3..5].copy_from_slice(&(tls_len as u16).to_be_bytes());
|
||||
|
||||
let session_id_len: usize = 32;
|
||||
handshake[tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN] = session_id_len as u8;
|
||||
|
||||
handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN].fill(0);
|
||||
let computed = sha256_hmac(secret, &handshake);
|
||||
let mut digest = computed;
|
||||
let ts = timestamp.to_le_bytes();
|
||||
for i in 0..4 {
|
||||
digest[28 + i] ^= ts[i];
|
||||
}
|
||||
handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN]
|
||||
.copy_from_slice(&digest);
|
||||
|
||||
handshake
|
||||
}
|
||||
|
||||
fn wrap_tls_application_data(payload: &[u8]) -> Vec<u8> {
|
||||
let mut record = Vec::with_capacity(5 + payload.len());
|
||||
record.push(0x17);
|
||||
record.extend_from_slice(&TLS_VERSION);
|
||||
record.extend_from_slice(&(payload.len() as u16).to_be_bytes());
|
||||
record.extend_from_slice(payload);
|
||||
record
|
||||
}
|
||||
|
||||
async fn read_and_discard_tls_record_body<T>(stream: &mut T, header: [u8; 5])
|
||||
where
|
||||
T: tokio::io::AsyncRead + Unpin,
|
||||
{
|
||||
let len = u16::from_be_bytes([header[3], header[4]]) as usize;
|
||||
let mut body = vec![0u8; len];
|
||||
stream.read_exact(&mut body).await.unwrap();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_initial_data_prefetch_gate_is_fail_closed() {
|
||||
assert!(
|
||||
!should_prefetch_mask_classifier_window(&[]),
|
||||
"empty initial_data must not trigger classifier prefetch"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn blackhat_empty_initial_data_prefetch_must_not_consume_fallback_payload() {
|
||||
let payload = b"\x17\x03\x03\x00\x10coalesced-tail-bytes".to_vec();
|
||||
let (mut reader, mut writer) = duplex(1024);
|
||||
|
||||
writer.write_all(&payload).await.unwrap();
|
||||
writer.shutdown().await.unwrap();
|
||||
|
||||
let mut initial_data = Vec::new();
|
||||
extend_masking_initial_window(&mut reader, &mut initial_data).await;
|
||||
|
||||
assert!(
|
||||
initial_data.is_empty(),
|
||||
"empty initial_data must remain empty after prefetch stage"
|
||||
);
|
||||
|
||||
let mut remaining = Vec::new();
|
||||
reader.read_to_end(&mut remaining).await.unwrap();
|
||||
assert_eq!(
|
||||
remaining, payload,
|
||||
"prefetch stage must not consume fallback payload when initial_data is empty"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn positive_fragmented_http_prefix_still_prefetches_within_window() {
|
||||
let (mut reader, mut writer) = duplex(1024);
|
||||
writer
|
||||
.write_all(b"NECT example.org:443 HTTP/1.1\r\n")
|
||||
.await
|
||||
.unwrap();
|
||||
writer.shutdown().await.unwrap();
|
||||
|
||||
let mut initial_data = b"CON".to_vec();
|
||||
extend_masking_initial_window(&mut reader, &mut initial_data).await;
|
||||
|
||||
assert!(
|
||||
initial_data.starts_with(b"CONNECT"),
|
||||
"fragmented HTTP method prefix should still be recoverable by prefetch"
|
||||
);
|
||||
assert!(
|
||||
initial_data.len() <= 16,
|
||||
"prefetch window must remain bounded"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn light_fuzz_empty_initial_data_never_prefetches_any_bytes() {
|
||||
let mut seed = 0xD15C_A11E_2026_0322u64;
|
||||
|
||||
for _ in 0..128 {
|
||||
seed ^= seed << 7;
|
||||
seed ^= seed >> 9;
|
||||
seed ^= seed << 8;
|
||||
|
||||
let len = ((seed & 0x3f) as usize).saturating_add(1);
|
||||
let mut payload = vec![0u8; len];
|
||||
for (idx, byte) in payload.iter_mut().enumerate() {
|
||||
*byte = (seed as u8).wrapping_add(idx as u8).wrapping_mul(17);
|
||||
}
|
||||
|
||||
let (mut reader, mut writer) = duplex(1024);
|
||||
writer.write_all(&payload).await.unwrap();
|
||||
writer.shutdown().await.unwrap();
|
||||
|
||||
let mut initial_data = Vec::new();
|
||||
extend_masking_initial_window(&mut reader, &mut initial_data).await;
|
||||
assert!(initial_data.is_empty());
|
||||
|
||||
let mut remaining = Vec::new();
|
||||
reader.read_to_end(&mut remaining).await.unwrap();
|
||||
assert_eq!(remaining, payload);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn blackhat_integration_empty_initial_data_path_is_byte_exact_and_eof_clean() {
|
||||
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||
let backend_addr = listener.local_addr().unwrap();
|
||||
|
||||
let secret = [0xD3u8; 16];
|
||||
let client_hello = make_valid_tls_client_hello(&secret, 411, 600, 0x2B);
|
||||
let mut invalid_payload = vec![0u8; HANDSHAKE_LEN];
|
||||
invalid_payload[0] = 0xFF;
|
||||
let invalid_mtproto_record = wrap_tls_application_data(&invalid_payload);
|
||||
let trailing_record = wrap_tls_application_data(b"empty-prefetch-invariant");
|
||||
let expected = trailing_record.clone();
|
||||
|
||||
let accept_task = tokio::spawn(async move {
|
||||
let (mut stream, _) = listener.accept().await.unwrap();
|
||||
|
||||
let mut got = vec![0u8; expected.len()];
|
||||
stream.read_exact(&mut got).await.unwrap();
|
||||
assert_eq!(got, expected);
|
||||
|
||||
let mut one = [0u8; 1];
|
||||
let n = stream.read(&mut one).await.unwrap();
|
||||
assert_eq!(
|
||||
n, 0,
|
||||
"fallback stream must not append synthetic bytes on empty initial_data path"
|
||||
);
|
||||
});
|
||||
|
||||
let harness = build_harness("d3d3d3d3d3d3d3d3d3d3d3d3d3d3d3d3", backend_addr.port());
|
||||
let (server_side, mut client_side) = duplex(131072);
|
||||
|
||||
let handler = tokio::spawn(handle_client_stream(
|
||||
server_side,
|
||||
"198.51.100.245:56145".parse().unwrap(),
|
||||
harness.config,
|
||||
harness.stats,
|
||||
harness.upstream_manager,
|
||||
harness.replay_checker,
|
||||
harness.buffer_pool,
|
||||
harness.rng,
|
||||
None,
|
||||
harness.route_runtime,
|
||||
None,
|
||||
harness.ip_tracker,
|
||||
harness.beobachten,
|
||||
false,
|
||||
));
|
||||
|
||||
client_side.write_all(&client_hello).await.unwrap();
|
||||
let mut head = [0u8; 5];
|
||||
client_side.read_exact(&mut head).await.unwrap();
|
||||
assert_eq!(head[0], 0x16);
|
||||
read_and_discard_tls_record_body(&mut client_side, head).await;
|
||||
|
||||
client_side
|
||||
.write_all(&invalid_mtproto_record)
|
||||
.await
|
||||
.unwrap();
|
||||
client_side.write_all(&trailing_record).await.unwrap();
|
||||
client_side.shutdown().await.unwrap();
|
||||
|
||||
tokio::time::timeout(Duration::from_secs(3), accept_task)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
let _ = tokio::time::timeout(Duration::from_secs(3), handler)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
}
|
||||
|
|
@ -0,0 +1,72 @@
|
|||
use super::*;
|
||||
use tokio::io::{AsyncWriteExt, duplex};
|
||||
use tokio::time::{Duration, advance, sleep};
|
||||
|
||||
async fn run_strict_prefetch_case(prefetch_ms: u64, tail_delay_ms: u64) -> Vec<u8> {
|
||||
let (mut reader, mut writer) = duplex(1024);
|
||||
|
||||
let writer_task = tokio::spawn(async move {
|
||||
sleep(Duration::from_millis(tail_delay_ms)).await;
|
||||
let _ = writer
|
||||
.write_all(b"ONNECT example.org:443 HTTP/1.1\r\n")
|
||||
.await;
|
||||
let _ = writer.shutdown().await;
|
||||
});
|
||||
|
||||
let mut initial_data = b"C".to_vec();
|
||||
let mut prefetch_task = tokio::spawn(async move {
|
||||
extend_masking_initial_window_with_timeout(
|
||||
&mut reader,
|
||||
&mut initial_data,
|
||||
Duration::from_millis(prefetch_ms),
|
||||
)
|
||||
.await;
|
||||
initial_data
|
||||
});
|
||||
|
||||
tokio::task::yield_now().await;
|
||||
|
||||
if tail_delay_ms > 0 {
|
||||
advance(Duration::from_millis(tail_delay_ms)).await;
|
||||
tokio::task::yield_now().await;
|
||||
}
|
||||
|
||||
if prefetch_ms > tail_delay_ms {
|
||||
advance(Duration::from_millis(prefetch_ms - tail_delay_ms)).await;
|
||||
tokio::task::yield_now().await;
|
||||
}
|
||||
|
||||
let result = prefetch_task.await.expect("prefetch task must not panic");
|
||||
writer_task.await.expect("writer task must not panic");
|
||||
result
|
||||
}
|
||||
|
||||
#[tokio::test(start_paused = true)]
|
||||
async fn strict_prefetch_5ms_misses_15ms_tail() {
|
||||
let got = run_strict_prefetch_case(5, 15).await;
|
||||
assert_eq!(got, b"C".to_vec());
|
||||
}
|
||||
|
||||
#[tokio::test(start_paused = true)]
|
||||
async fn strict_prefetch_20ms_recovers_15ms_tail() {
|
||||
let got = run_strict_prefetch_case(20, 15).await;
|
||||
assert!(got.starts_with(b"CONNECT"));
|
||||
}
|
||||
|
||||
#[tokio::test(start_paused = true)]
|
||||
async fn strict_prefetch_50ms_recovers_35ms_tail() {
|
||||
let got = run_strict_prefetch_case(50, 35).await;
|
||||
assert!(got.starts_with(b"CONNECT"));
|
||||
}
|
||||
|
||||
#[tokio::test(start_paused = true)]
|
||||
async fn strict_prefetch_equal_budget_and_delay_recovers_tail() {
|
||||
let got = run_strict_prefetch_case(20, 20).await;
|
||||
assert!(got.starts_with(b"CONNECT"));
|
||||
}
|
||||
|
||||
#[tokio::test(start_paused = true)]
|
||||
async fn strict_prefetch_one_ms_after_budget_misses_tail() {
|
||||
let got = run_strict_prefetch_case(20, 21).await;
|
||||
assert_eq!(got, b"C".to_vec());
|
||||
}
|
||||
|
|
@ -0,0 +1,98 @@
|
|||
use super::*;
|
||||
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWriteExt, duplex};
|
||||
use tokio::time::{Duration, sleep, timeout};
|
||||
|
||||
async fn extend_masking_initial_window_with_budget<R>(
|
||||
reader: &mut R,
|
||||
initial_data: &mut Vec<u8>,
|
||||
prefetch_timeout: Duration,
|
||||
) where
|
||||
R: AsyncRead + Unpin,
|
||||
{
|
||||
if !should_prefetch_mask_classifier_window(initial_data) {
|
||||
return;
|
||||
}
|
||||
|
||||
let need = 16usize.saturating_sub(initial_data.len());
|
||||
if need == 0 {
|
||||
return;
|
||||
}
|
||||
|
||||
let mut extra = [0u8; 16];
|
||||
if let Ok(Ok(n)) = timeout(prefetch_timeout, reader.read(&mut extra[..need])).await
|
||||
&& n > 0
|
||||
{
|
||||
initial_data.extend_from_slice(&extra[..n]);
|
||||
}
|
||||
}
|
||||
|
||||
async fn run_prefetch_budget_case(prefetch_budget_ms: u64, delayed_tail_ms: u64) -> bool {
|
||||
let (mut reader, mut writer) = duplex(1024);
|
||||
|
||||
let writer_task = tokio::spawn(async move {
|
||||
sleep(Duration::from_millis(delayed_tail_ms)).await;
|
||||
writer
|
||||
.write_all(b"ONNECT example.org:443 HTTP/1.1\r\n")
|
||||
.await
|
||||
.expect("tail bytes must be writable");
|
||||
writer
|
||||
.shutdown()
|
||||
.await
|
||||
.expect("writer shutdown must succeed");
|
||||
});
|
||||
|
||||
let mut initial_data = b"C".to_vec();
|
||||
extend_masking_initial_window_with_budget(
|
||||
&mut reader,
|
||||
&mut initial_data,
|
||||
Duration::from_millis(prefetch_budget_ms),
|
||||
)
|
||||
.await;
|
||||
|
||||
writer_task
|
||||
.await
|
||||
.expect("writer task must not panic during matrix case");
|
||||
|
||||
initial_data.starts_with(b"CONNECT")
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn adversarial_prefetch_budget_matrix_5_20_50ms_for_fragmented_connect_tail() {
|
||||
let cases = [
|
||||
// (tail-delay-ms, expected CONNECT recovery for budgets [5, 20, 50])
|
||||
(2u64, [true, true, true]),
|
||||
(15u64, [false, true, true]),
|
||||
(35u64, [false, false, true]),
|
||||
];
|
||||
|
||||
for (tail_delay_ms, expected) in cases {
|
||||
let got_5 = run_prefetch_budget_case(5, tail_delay_ms).await;
|
||||
let got_20 = run_prefetch_budget_case(20, tail_delay_ms).await;
|
||||
let got_50 = run_prefetch_budget_case(50, tail_delay_ms).await;
|
||||
|
||||
assert_eq!(
|
||||
got_5, expected[0],
|
||||
"5ms prefetch budget mismatch for tail delay {}ms",
|
||||
tail_delay_ms
|
||||
);
|
||||
assert_eq!(
|
||||
got_20, expected[1],
|
||||
"20ms prefetch budget mismatch for tail delay {}ms",
|
||||
tail_delay_ms
|
||||
);
|
||||
assert_eq!(
|
||||
got_50, expected[2],
|
||||
"50ms prefetch budget mismatch for tail delay {}ms",
|
||||
tail_delay_ms
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn control_current_runtime_prefetch_budget_is_5ms() {
|
||||
assert_eq!(
|
||||
MASK_CLASSIFIER_PREFETCH_TIMEOUT,
|
||||
Duration::from_millis(5),
|
||||
"matrix assumptions require current runtime prefetch budget to stay at 5ms"
|
||||
);
|
||||
}
|
||||
|
|
@ -25,6 +25,7 @@ fn make_test_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats,
|
||||
|
|
|
|||
|
|
@ -48,6 +48,7 @@ fn build_harness(secret_hex: &str, mask_port: u16) -> RedTeamHarness {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -237,6 +238,7 @@ async fn redteam_03_masking_duration_must_be_less_than_1ms_when_backend_down() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
Arc::new(Stats::new()),
|
||||
|
|
@ -477,6 +479,7 @@ async fn measure_invalid_probe_duration_ms(delay_ms: u64, tls_len: u16, body_sen
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
Arc::new(Stats::new()),
|
||||
|
|
@ -550,6 +553,7 @@ async fn capture_forwarded_probe_len(tls_len: u16, body_sent: usize) -> usize {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
Arc::new(Stats::new()),
|
||||
|
|
|
|||
|
|
@ -0,0 +1,168 @@
|
|||
use super::*;
|
||||
use crate::config::{UpstreamConfig, UpstreamType};
|
||||
use crate::crypto::sha256_hmac;
|
||||
use crate::protocol::constants::{HANDSHAKE_LEN, TLS_VERSION};
|
||||
use crate::protocol::tls;
|
||||
use std::sync::Arc;
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt, duplex};
|
||||
use tokio::time::{Duration, Instant};
|
||||
|
||||
fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
||||
Arc::new(UpstreamManager::new(
|
||||
vec![UpstreamConfig {
|
||||
upstream_type: UpstreamType::Direct {
|
||||
interface: None,
|
||||
bind_addresses: None,
|
||||
},
|
||||
weight: 1,
|
||||
enabled: true,
|
||||
scopes: String::new(),
|
||||
selected_scope: String::new(),
|
||||
}],
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats,
|
||||
))
|
||||
}
|
||||
|
||||
fn make_valid_tls_client_hello(secret: &[u8], timestamp: u32, tls_len: usize, fill: u8) -> Vec<u8> {
|
||||
let total_len = 5 + tls_len;
|
||||
let mut handshake = vec![fill; total_len];
|
||||
|
||||
handshake[0] = 0x16;
|
||||
handshake[1] = 0x03;
|
||||
handshake[2] = 0x01;
|
||||
handshake[3..5].copy_from_slice(&(tls_len as u16).to_be_bytes());
|
||||
|
||||
let session_id_len: usize = 32;
|
||||
handshake[tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN] = session_id_len as u8;
|
||||
|
||||
handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN].fill(0);
|
||||
let computed = sha256_hmac(secret, &handshake);
|
||||
let mut digest = computed;
|
||||
let ts = timestamp.to_le_bytes();
|
||||
for i in 0..4 {
|
||||
digest[28 + i] ^= ts[i];
|
||||
}
|
||||
|
||||
handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN]
|
||||
.copy_from_slice(&digest);
|
||||
handshake
|
||||
}
|
||||
|
||||
async fn run_replay_candidate_session(
|
||||
replay_checker: Arc<ReplayChecker>,
|
||||
hello: &[u8],
|
||||
peer: SocketAddr,
|
||||
drive_mtproto_fail: bool,
|
||||
) -> Duration {
|
||||
let mut cfg = ProxyConfig::default();
|
||||
cfg.general.beobachten = false;
|
||||
cfg.censorship.mask = true;
|
||||
cfg.censorship.mask_unix_sock = None;
|
||||
cfg.censorship.mask_host = Some("127.0.0.1".to_string());
|
||||
cfg.censorship.mask_port = 1;
|
||||
cfg.censorship.mask_timing_normalization_enabled = false;
|
||||
cfg.access.ignore_time_skew = true;
|
||||
cfg.access.users.insert(
|
||||
"user".to_string(),
|
||||
"abababababababababababababababab".to_string(),
|
||||
);
|
||||
|
||||
let config = Arc::new(cfg);
|
||||
let stats = Arc::new(Stats::new());
|
||||
let beobachten = Arc::new(BeobachtenStore::new());
|
||||
|
||||
let (server_side, mut client_side) = duplex(65536);
|
||||
let started = Instant::now();
|
||||
|
||||
let task = tokio::spawn(handle_client_stream(
|
||||
server_side,
|
||||
peer,
|
||||
config,
|
||||
stats.clone(),
|
||||
new_upstream_manager(stats),
|
||||
replay_checker,
|
||||
Arc::new(BufferPool::new()),
|
||||
Arc::new(SecureRandom::new()),
|
||||
None,
|
||||
Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||
None,
|
||||
Arc::new(UserIpTracker::new()),
|
||||
beobachten,
|
||||
false,
|
||||
));
|
||||
|
||||
client_side.write_all(hello).await.unwrap();
|
||||
|
||||
if drive_mtproto_fail {
|
||||
let mut server_hello_head = [0u8; 5];
|
||||
client_side
|
||||
.read_exact(&mut server_hello_head)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(server_hello_head[0], 0x16);
|
||||
let body_len = u16::from_be_bytes([server_hello_head[3], server_hello_head[4]]) as usize;
|
||||
let mut body = vec![0u8; body_len];
|
||||
client_side.read_exact(&mut body).await.unwrap();
|
||||
|
||||
let mut invalid_mtproto_record = Vec::with_capacity(5 + HANDSHAKE_LEN);
|
||||
invalid_mtproto_record.push(0x17);
|
||||
invalid_mtproto_record.extend_from_slice(&TLS_VERSION);
|
||||
invalid_mtproto_record.extend_from_slice(&(HANDSHAKE_LEN as u16).to_be_bytes());
|
||||
invalid_mtproto_record.extend_from_slice(&vec![0u8; HANDSHAKE_LEN]);
|
||||
client_side
|
||||
.write_all(&invalid_mtproto_record)
|
||||
.await
|
||||
.unwrap();
|
||||
client_side
|
||||
.write_all(b"GET /replay-fallback HTTP/1.1\r\nHost: x\r\n\r\n")
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
client_side.shutdown().await.unwrap();
|
||||
|
||||
let _ = tokio::time::timeout(Duration::from_secs(4), task)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
started.elapsed()
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn replay_reject_still_honors_masking_timing_budget() {
|
||||
let replay_checker = Arc::new(ReplayChecker::new(256, Duration::from_secs(60)));
|
||||
let hello = make_valid_tls_client_hello(&[0xAB; 16], 7, 600, 0x51);
|
||||
|
||||
let seed_elapsed = run_replay_candidate_session(
|
||||
Arc::clone(&replay_checker),
|
||||
&hello,
|
||||
"198.51.100.201:58001".parse().unwrap(),
|
||||
true,
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(
|
||||
seed_elapsed >= Duration::from_millis(40) && seed_elapsed < Duration::from_millis(250),
|
||||
"seed replay-candidate run must honor masking timing budget without unbounded delay"
|
||||
);
|
||||
|
||||
let replay_elapsed = run_replay_candidate_session(
|
||||
Arc::clone(&replay_checker),
|
||||
&hello,
|
||||
"198.51.100.202:58002".parse().unwrap(),
|
||||
false,
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(
|
||||
replay_elapsed >= Duration::from_millis(40) && replay_elapsed < Duration::from_millis(250),
|
||||
"replay rejection path must still satisfy masking timing budget without unbounded DB/CPU delay"
|
||||
);
|
||||
}
|
||||
|
|
@ -20,6 +20,7 @@ fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats,
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats,
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats,
|
||||
|
|
|
|||
|
|
@ -20,6 +20,7 @@ fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats,
|
||||
|
|
|
|||
|
|
@ -34,6 +34,7 @@ fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats,
|
||||
|
|
|
|||
|
|
@ -0,0 +1,291 @@
|
|||
use super::*;
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::stats::Stats;
|
||||
use crate::transport::UpstreamManager;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt, duplex};
|
||||
|
||||
fn preload_user_quota(stats: &Stats, user: &str, bytes: u64) {
|
||||
let user_stats = stats.get_or_create_user_stats_handle(user);
|
||||
stats.quota_charge_post_write(user_stats.as_ref(), bytes);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn edge_mask_delay_bypassed_if_max_is_zero() {
|
||||
let mut config = ProxyConfig::default();
|
||||
config.censorship.server_hello_delay_min_ms = 10_000;
|
||||
config.censorship.server_hello_delay_max_ms = 0;
|
||||
|
||||
let start = std::time::Instant::now();
|
||||
maybe_apply_mask_reject_delay(&config).await;
|
||||
assert!(start.elapsed() < Duration::from_millis(50));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn edge_beobachten_ttl_clamps_exactly_to_24_hours() {
|
||||
let mut config = ProxyConfig::default();
|
||||
config.general.beobachten = true;
|
||||
config.general.beobachten_minutes = 100_000;
|
||||
|
||||
let ttl = beobachten_ttl(&config);
|
||||
assert_eq!(ttl.as_secs(), 24 * 60 * 60);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn edge_wrap_tls_application_record_empty_payload() {
|
||||
let wrapped = wrap_tls_application_record(&[]);
|
||||
assert_eq!(wrapped.len(), 5);
|
||||
assert_eq!(wrapped[0], TLS_RECORD_APPLICATION);
|
||||
assert_eq!(&wrapped[3..5], &[0, 0]);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn boundary_user_data_quota_exact_match_rejects() {
|
||||
let user = "quota-boundary-user";
|
||||
let mut config = ProxyConfig::default();
|
||||
config.access.user_data_quota.insert(user.to_string(), 1024);
|
||||
|
||||
let stats = Arc::new(Stats::new());
|
||||
preload_user_quota(stats.as_ref(), user, 1024);
|
||||
|
||||
let ip_tracker = Arc::new(UserIpTracker::new());
|
||||
let peer = "198.51.100.10:55000".parse().unwrap();
|
||||
|
||||
let result = RunningClientHandler::acquire_user_connection_reservation_static(
|
||||
user, &config, stats, peer, ip_tracker,
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(matches!(result, Err(ProxyError::DataQuotaExceeded { .. })));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn boundary_user_expiration_in_past_rejects() {
|
||||
let user = "expired-boundary-user";
|
||||
let mut config = ProxyConfig::default();
|
||||
let expired_time = chrono::Utc::now() - chrono::Duration::milliseconds(1);
|
||||
config
|
||||
.access
|
||||
.user_expirations
|
||||
.insert(user.to_string(), expired_time);
|
||||
|
||||
let stats = Arc::new(Stats::new());
|
||||
let ip_tracker = Arc::new(UserIpTracker::new());
|
||||
let peer = "198.51.100.11:55000".parse().unwrap();
|
||||
|
||||
let result = RunningClientHandler::acquire_user_connection_reservation_static(
|
||||
user, &config, stats, peer, ip_tracker,
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(matches!(result, Err(ProxyError::UserExpired { .. })));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn blackhat_proxy_protocol_massive_garbage_rejected_quickly() {
|
||||
let mut cfg = ProxyConfig::default();
|
||||
cfg.server.proxy_protocol_header_timeout_ms = 300;
|
||||
let config = Arc::new(cfg);
|
||||
let stats = Arc::new(Stats::new());
|
||||
|
||||
let (server_side, mut client_side) = duplex(4096);
|
||||
let handler = tokio::spawn(handle_client_stream(
|
||||
server_side,
|
||||
"198.51.100.12:55000".parse().unwrap(),
|
||||
config,
|
||||
stats.clone(),
|
||||
Arc::new(UpstreamManager::new(
|
||||
vec![],
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
)),
|
||||
Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
|
||||
Arc::new(BufferPool::new()),
|
||||
Arc::new(SecureRandom::new()),
|
||||
None,
|
||||
Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||
None,
|
||||
Arc::new(UserIpTracker::new()),
|
||||
Arc::new(BeobachtenStore::new()),
|
||||
true,
|
||||
));
|
||||
|
||||
client_side.write_all(&vec![b'A'; 2000]).await.unwrap();
|
||||
|
||||
let result = tokio::time::timeout(Duration::from_secs(2), handler)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert!(matches!(result, Err(ProxyError::InvalidProxyProtocol)));
|
||||
assert_eq!(stats.get_connects_bad(), 1);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn edge_tls_body_immediate_eof_triggers_masking_and_bad_connect() {
|
||||
let mut cfg = ProxyConfig::default();
|
||||
cfg.general.beobachten = true;
|
||||
cfg.general.beobachten_minutes = 1;
|
||||
|
||||
let config = Arc::new(cfg);
|
||||
let stats = Arc::new(Stats::new());
|
||||
let beobachten = Arc::new(BeobachtenStore::new());
|
||||
|
||||
let (server_side, mut client_side) = duplex(4096);
|
||||
let handler = tokio::spawn(handle_client_stream(
|
||||
server_side,
|
||||
"198.51.100.13:55000".parse().unwrap(),
|
||||
config,
|
||||
stats.clone(),
|
||||
Arc::new(UpstreamManager::new(
|
||||
vec![],
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
)),
|
||||
Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
|
||||
Arc::new(BufferPool::new()),
|
||||
Arc::new(SecureRandom::new()),
|
||||
None,
|
||||
Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||
None,
|
||||
Arc::new(UserIpTracker::new()),
|
||||
beobachten.clone(),
|
||||
false,
|
||||
));
|
||||
|
||||
client_side
|
||||
.write_all(&[0x16, 0x03, 0x01, 0x00, 100])
|
||||
.await
|
||||
.unwrap();
|
||||
client_side.shutdown().await.unwrap();
|
||||
|
||||
let _ = tokio::time::timeout(Duration::from_secs(2), handler)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
assert_eq!(stats.get_connects_bad(), 1);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn security_classic_mode_disabled_masks_valid_length_payload() {
|
||||
let mut cfg = ProxyConfig::default();
|
||||
cfg.general.modes.classic = false;
|
||||
cfg.general.modes.secure = false;
|
||||
cfg.censorship.mask = true;
|
||||
|
||||
let config = Arc::new(cfg);
|
||||
let stats = Arc::new(Stats::new());
|
||||
|
||||
let (server_side, mut client_side) = duplex(4096);
|
||||
let handler = tokio::spawn(handle_client_stream(
|
||||
server_side,
|
||||
"198.51.100.15:55000".parse().unwrap(),
|
||||
config,
|
||||
stats.clone(),
|
||||
Arc::new(UpstreamManager::new(
|
||||
vec![],
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
)),
|
||||
Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
|
||||
Arc::new(BufferPool::new()),
|
||||
Arc::new(SecureRandom::new()),
|
||||
None,
|
||||
Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
|
||||
None,
|
||||
Arc::new(UserIpTracker::new()),
|
||||
Arc::new(BeobachtenStore::new()),
|
||||
false,
|
||||
));
|
||||
|
||||
client_side.write_all(&vec![0xEF; 64]).await.unwrap();
|
||||
client_side.shutdown().await.unwrap();
|
||||
|
||||
let _ = tokio::time::timeout(Duration::from_secs(2), handler)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(stats.get_connects_bad(), 1);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn concurrency_ip_tracker_strict_limit_one_rapid_churn() {
|
||||
let user = "rapid-churn-user";
|
||||
let mut config = ProxyConfig::default();
|
||||
config
|
||||
.access
|
||||
.user_max_tcp_conns
|
||||
.insert(user.to_string(), 10);
|
||||
|
||||
let stats = Arc::new(Stats::new());
|
||||
let ip_tracker = Arc::new(UserIpTracker::new());
|
||||
ip_tracker.set_user_limit(user, 1).await;
|
||||
|
||||
let peer = "198.51.100.16:55000".parse().unwrap();
|
||||
|
||||
for _ in 0..500 {
|
||||
let reservation = RunningClientHandler::acquire_user_connection_reservation_static(
|
||||
user,
|
||||
&config,
|
||||
stats.clone(),
|
||||
peer,
|
||||
ip_tracker.clone(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
reservation.release().await;
|
||||
}
|
||||
|
||||
assert_eq!(stats.get_user_curr_connects(user), 0);
|
||||
assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn quirk_read_with_progress_zero_length_buffer_returns_zero_immediately() {
|
||||
let (mut server_side, _client_side) = duplex(4096);
|
||||
let mut empty_buf = &mut [][..];
|
||||
|
||||
let result = tokio::time::timeout(
|
||||
Duration::from_millis(50),
|
||||
read_with_progress(&mut server_side, &mut empty_buf),
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(result.is_ok());
|
||||
assert_eq!(result.unwrap().unwrap(), 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn stress_read_with_progress_cancellation_safety() {
|
||||
let (mut server_side, mut client_side) = duplex(4096);
|
||||
|
||||
client_side.write_all(b"12345").await.unwrap();
|
||||
|
||||
let mut buf = [0u8; 10];
|
||||
let result = tokio::time::timeout(
|
||||
Duration::from_millis(50),
|
||||
read_with_progress(&mut server_side, &mut buf),
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(result.is_err());
|
||||
|
||||
client_side.write_all(b"67890").await.unwrap();
|
||||
let mut buf2 = [0u8; 5];
|
||||
server_side.read_exact(&mut buf2).await.unwrap();
|
||||
assert_eq!(&buf2, b"67890");
|
||||
}
|
||||
|
|
@ -1,12 +1,17 @@
|
|||
use super::*;
|
||||
use crate::config::{UpstreamConfig, UpstreamType};
|
||||
use crate::crypto::AesCtr;
|
||||
use crate::crypto::sha256_hmac;
|
||||
use crate::protocol::constants::ProtoTag;
|
||||
use crate::crypto::{AesCtr, sha256, sha256_hmac};
|
||||
use crate::protocol::constants::{
|
||||
DC_IDX_POS, HANDSHAKE_LEN, IV_LEN, PREKEY_LEN, PROTO_TAG_POS, ProtoTag, SKIP_LEN,
|
||||
TLS_RECORD_CHANGE_CIPHER,
|
||||
};
|
||||
use crate::protocol::tls;
|
||||
use crate::proxy::handshake::HandshakeSuccess;
|
||||
use crate::stream::{CryptoReader, CryptoWriter};
|
||||
use crate::transport::proxy_protocol::ProxyProtocolV1Builder;
|
||||
use rand::Rng;
|
||||
use rand::SeedableRng;
|
||||
use rand::rngs::StdRng;
|
||||
use std::net::Ipv4Addr;
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt, duplex};
|
||||
use tokio::net::{TcpListener, TcpStream};
|
||||
|
|
@ -25,6 +30,220 @@ fn synthetic_local_addr_uses_configured_port_for_max() {
|
|||
assert_eq!(addr.port(), u16::MAX);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn handshake_timeout_with_mask_grace_includes_mask_margin() {
|
||||
let mut config = ProxyConfig::default();
|
||||
config.timeouts.client_handshake = 2;
|
||||
|
||||
config.censorship.mask = false;
|
||||
assert_eq!(
|
||||
handshake_timeout_with_mask_grace(&config),
|
||||
Duration::from_secs(2)
|
||||
);
|
||||
|
||||
config.censorship.mask = true;
|
||||
assert_eq!(
|
||||
handshake_timeout_with_mask_grace(&config),
|
||||
Duration::from_millis(2750),
|
||||
"mask mode extends handshake timeout by 750 ms"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn read_with_progress_reads_partial_buffers_before_eof() {
|
||||
let data = vec![0xAA, 0xBB, 0xCC];
|
||||
let mut reader = std::io::Cursor::new(data);
|
||||
let mut buf = [0u8; 5];
|
||||
|
||||
let read = read_with_progress(&mut reader, &mut buf).await.unwrap();
|
||||
assert_eq!(read, 3);
|
||||
assert_eq!(&buf[..3], &[0xAA, 0xBB, 0xCC]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn is_trusted_proxy_source_respects_cidr_list_and_empty_rejects_all() {
|
||||
let peer: IpAddr = "10.10.10.10".parse().unwrap();
|
||||
assert!(!is_trusted_proxy_source(peer, &[]));
|
||||
|
||||
let trusted = vec!["10.0.0.0/8".parse().unwrap()];
|
||||
assert!(is_trusted_proxy_source(peer, &trusted));
|
||||
|
||||
let not_trusted = vec!["192.0.2.0/24".parse().unwrap()];
|
||||
assert!(!is_trusted_proxy_source(peer, ¬_trusted));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn is_trusted_proxy_source_accepts_cidr_zero_zero_as_global_cidr() {
|
||||
let peer: IpAddr = "203.0.113.42".parse().unwrap();
|
||||
let trust_all = vec!["0.0.0.0/0".parse().unwrap()];
|
||||
assert!(is_trusted_proxy_source(peer, &trust_all));
|
||||
|
||||
let peer_v6: IpAddr = "2001:db8::1".parse().unwrap();
|
||||
let trust_all_v6 = vec!["::/0".parse().unwrap()];
|
||||
assert!(is_trusted_proxy_source(peer_v6, &trust_all_v6));
|
||||
}
|
||||
|
||||
struct ErrorReader;
|
||||
|
||||
impl tokio::io::AsyncRead for ErrorReader {
|
||||
fn poll_read(
|
||||
self: std::pin::Pin<&mut Self>,
|
||||
_cx: &mut std::task::Context<'_>,
|
||||
_buf: &mut tokio::io::ReadBuf<'_>,
|
||||
) -> std::task::Poll<std::io::Result<()>> {
|
||||
std::task::Poll::Ready(Err(std::io::Error::new(
|
||||
std::io::ErrorKind::UnexpectedEof,
|
||||
"fake error",
|
||||
)))
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn read_with_progress_returns_error_from_failed_reader() {
|
||||
let mut reader = ErrorReader;
|
||||
let mut buf = [0u8; 8];
|
||||
let err = read_with_progress(&mut reader, &mut buf).await.unwrap_err();
|
||||
assert_eq!(err.kind(), std::io::ErrorKind::UnexpectedEof);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn handshake_timeout_with_mask_grace_handles_maximum_values_without_overflow() {
|
||||
let mut config = ProxyConfig::default();
|
||||
config.timeouts.client_handshake = u64::MAX;
|
||||
config.censorship.mask = true;
|
||||
|
||||
let timeout = handshake_timeout_with_mask_grace(&config);
|
||||
assert!(timeout >= Duration::from_secs(u64::MAX));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn read_with_progress_zero_length_buffer_returns_zero() {
|
||||
let data = vec![1, 2, 3];
|
||||
let mut reader = std::io::Cursor::new(data);
|
||||
let mut buf = [];
|
||||
|
||||
let read = read_with_progress(&mut reader, &mut buf).await.unwrap();
|
||||
assert_eq!(read, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn handshake_timeout_without_mask_is_exact_base() {
|
||||
let mut config = ProxyConfig::default();
|
||||
config.timeouts.client_handshake = 7;
|
||||
config.censorship.mask = false;
|
||||
|
||||
assert_eq!(
|
||||
handshake_timeout_with_mask_grace(&config),
|
||||
Duration::from_secs(7)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn handshake_timeout_mask_enabled_adds_750ms() {
|
||||
let mut config = ProxyConfig::default();
|
||||
config.timeouts.client_handshake = 3;
|
||||
config.censorship.mask = true;
|
||||
|
||||
assert_eq!(
|
||||
handshake_timeout_with_mask_grace(&config),
|
||||
Duration::from_millis(3750)
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn read_with_progress_full_then_empty_transition() {
|
||||
let data = vec![0x10, 0x20];
|
||||
let mut cursor = std::io::Cursor::new(data);
|
||||
let mut buf = [0u8; 2];
|
||||
|
||||
assert_eq!(read_with_progress(&mut cursor, &mut buf).await.unwrap(), 2);
|
||||
assert_eq!(read_with_progress(&mut cursor, &mut buf).await.unwrap(), 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn read_with_progress_fragmented_io_works_over_multiple_calls() {
|
||||
let mut cursor = std::io::Cursor::new(vec![1, 2, 3, 4, 5]);
|
||||
let mut result = Vec::new();
|
||||
|
||||
for chunk_size in 1..=5 {
|
||||
let mut b = vec![0u8; chunk_size];
|
||||
let n = read_with_progress(&mut cursor, &mut b).await.unwrap();
|
||||
result.extend_from_slice(&b[..n]);
|
||||
if n == 0 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
assert_eq!(result, vec![1, 2, 3, 4, 5]);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn read_with_progress_stress_randomized_chunk_sizes() {
|
||||
for i in 0..128 {
|
||||
let mut rng = StdRng::seed_from_u64(i as u64 + 1);
|
||||
let mut input: Vec<u8> = (0..(i % 41)).map(|_| rng.next_u32() as u8).collect();
|
||||
let mut cursor = std::io::Cursor::new(input.clone());
|
||||
let mut collected = Vec::new();
|
||||
|
||||
while cursor.position() < cursor.get_ref().len() as u64 {
|
||||
let chunk = 1 + (rng.next_u32() as usize % 8);
|
||||
let mut b = vec![0u8; chunk];
|
||||
let read = read_with_progress(&mut cursor, &mut b).await.unwrap();
|
||||
collected.extend_from_slice(&b[..read]);
|
||||
if read == 0 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
assert_eq!(collected, input);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn is_trusted_proxy_source_boundary_narrow_ipv4() {
|
||||
let matching = "172.16.0.1".parse().unwrap();
|
||||
let not_matching = "172.15.255.255".parse().unwrap();
|
||||
let cidr = vec!["172.16.0.0/12".parse().unwrap()];
|
||||
assert!(is_trusted_proxy_source(matching, &cidr));
|
||||
assert!(!is_trusted_proxy_source(not_matching, &cidr));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn is_trusted_proxy_source_rejects_out_of_family_ipv6_v4_cidr() {
|
||||
let peer = "2001:db8::1".parse().unwrap();
|
||||
let cidr = vec!["10.0.0.0/8".parse().unwrap()];
|
||||
assert!(!is_trusted_proxy_source(peer, &cidr));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn wrap_tls_application_record_reserved_chunks_look_reasonable() {
|
||||
let payload = vec![0xAA; 1 + (u16::MAX as usize) + 2];
|
||||
let wrapped = wrap_tls_application_record(&payload);
|
||||
assert!(wrapped.len() > payload.len());
|
||||
assert!(wrapped.contains(&0x17));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn wrap_tls_application_record_roundtrip_size_check() {
|
||||
let payload_len = 3000;
|
||||
let payload = vec![0x55; payload_len];
|
||||
let wrapped = wrap_tls_application_record(&payload);
|
||||
|
||||
let mut idx = 0;
|
||||
let mut consumed = 0;
|
||||
while idx + 5 <= wrapped.len() {
|
||||
assert_eq!(wrapped[idx], 0x17);
|
||||
let len = u16::from_be_bytes([wrapped[idx + 3], wrapped[idx + 4]]) as usize;
|
||||
consumed += len;
|
||||
idx += 5 + len;
|
||||
if idx >= wrapped.len() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
assert_eq!(consumed, payload_len);
|
||||
}
|
||||
|
||||
fn make_crypto_reader<R>(reader: R) -> CryptoReader<R>
|
||||
where
|
||||
R: tokio::io::AsyncRead + Unpin,
|
||||
|
|
@ -43,6 +262,11 @@ where
|
|||
CryptoWriter::new(writer, AesCtr::new(&key, iv), 8 * 1024)
|
||||
}
|
||||
|
||||
fn preload_user_quota(stats: &Stats, user: &str, bytes: u64) {
|
||||
let user_stats = stats.get_or_create_user_stats_handle(user);
|
||||
stats.quota_charge_post_write(user_stats.as_ref(), bytes);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn user_connection_reservation_drop_enqueues_cleanup_synchronously() {
|
||||
let ip_tracker = Arc::new(crate::ip_tracker::UserIpTracker::new());
|
||||
|
|
@ -117,6 +341,7 @@ async fn relay_task_abort_releases_user_gate_and_ip_reservation() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -230,6 +455,7 @@ async fn relay_cutover_releases_user_gate_and_ip_reservation() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -353,6 +579,7 @@ async fn integration_route_cutover_and_quota_overlap_fails_closed_and_releases_s
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -522,6 +749,7 @@ async fn proxy_protocol_header_is_rejected_when_trust_list_is_empty() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -598,6 +826,7 @@ async fn proxy_protocol_header_from_untrusted_peer_range_is_rejected_under_load(
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -757,6 +986,7 @@ async fn short_tls_probe_is_masked_through_client_pipeline() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -844,6 +1074,7 @@ async fn tls12_record_probe_is_masked_through_client_pipeline() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -929,6 +1160,7 @@ async fn handle_client_stream_increments_connects_all_exactly_once() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -1021,6 +1253,7 @@ async fn running_client_handler_increments_connects_all_exactly_once() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -1088,6 +1321,163 @@ async fn running_client_handler_increments_connects_all_exactly_once() {
|
|||
);
|
||||
}
|
||||
|
||||
#[tokio::test(start_paused = true)]
|
||||
async fn idle_pooled_connection_closes_cleanly_in_generic_stream_path() {
|
||||
let mut cfg = ProxyConfig::default();
|
||||
cfg.general.beobachten = false;
|
||||
cfg.timeouts.client_first_byte_idle_secs = 1;
|
||||
|
||||
let config = Arc::new(cfg);
|
||||
let stats = Arc::new(Stats::new());
|
||||
let upstream_manager = Arc::new(UpstreamManager::new(
|
||||
vec![UpstreamConfig {
|
||||
upstream_type: UpstreamType::Direct {
|
||||
interface: None,
|
||||
bind_addresses: None,
|
||||
},
|
||||
weight: 1,
|
||||
enabled: true,
|
||||
scopes: String::new(),
|
||||
selected_scope: String::new(),
|
||||
}],
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
));
|
||||
let replay_checker = Arc::new(ReplayChecker::new(128, Duration::from_secs(60)));
|
||||
let buffer_pool = Arc::new(BufferPool::new());
|
||||
let rng = Arc::new(SecureRandom::new());
|
||||
let route_runtime = Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct));
|
||||
let ip_tracker = Arc::new(UserIpTracker::new());
|
||||
let beobachten = Arc::new(BeobachtenStore::new());
|
||||
|
||||
let (server_side, _client_side) = duplex(4096);
|
||||
let peer: SocketAddr = "198.51.100.169:55200".parse().unwrap();
|
||||
|
||||
let handler = tokio::spawn(handle_client_stream(
|
||||
server_side,
|
||||
peer,
|
||||
config,
|
||||
stats.clone(),
|
||||
upstream_manager,
|
||||
replay_checker,
|
||||
buffer_pool,
|
||||
rng,
|
||||
None,
|
||||
route_runtime,
|
||||
None,
|
||||
ip_tracker,
|
||||
beobachten,
|
||||
false,
|
||||
));
|
||||
|
||||
// Let the spawned handler arm the idle-phase timeout before advancing paused time.
|
||||
tokio::task::yield_now().await;
|
||||
tokio::time::advance(Duration::from_secs(2)).await;
|
||||
tokio::task::yield_now().await;
|
||||
|
||||
let result = tokio::time::timeout(Duration::from_secs(1), handler)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert!(result.is_ok());
|
||||
assert_eq!(stats.get_handshake_timeouts(), 0);
|
||||
assert_eq!(stats.get_connects_bad(), 0);
|
||||
}
|
||||
|
||||
#[tokio::test(start_paused = true)]
|
||||
async fn idle_pooled_connection_closes_cleanly_in_client_handler_path() {
|
||||
let front_listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||
let front_addr = front_listener.local_addr().unwrap();
|
||||
|
||||
let mut cfg = ProxyConfig::default();
|
||||
cfg.general.beobachten = false;
|
||||
cfg.timeouts.client_first_byte_idle_secs = 1;
|
||||
|
||||
let config = Arc::new(cfg);
|
||||
let stats = Arc::new(Stats::new());
|
||||
let upstream_manager = Arc::new(UpstreamManager::new(
|
||||
vec![UpstreamConfig {
|
||||
upstream_type: UpstreamType::Direct {
|
||||
interface: None,
|
||||
bind_addresses: None,
|
||||
},
|
||||
weight: 1,
|
||||
enabled: true,
|
||||
scopes: String::new(),
|
||||
selected_scope: String::new(),
|
||||
}],
|
||||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
));
|
||||
let replay_checker = Arc::new(ReplayChecker::new(128, Duration::from_secs(60)));
|
||||
let buffer_pool = Arc::new(BufferPool::new());
|
||||
let rng = Arc::new(SecureRandom::new());
|
||||
let route_runtime = Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct));
|
||||
let ip_tracker = Arc::new(UserIpTracker::new());
|
||||
let beobachten = Arc::new(BeobachtenStore::new());
|
||||
|
||||
let server_task = {
|
||||
let config = config.clone();
|
||||
let stats = stats.clone();
|
||||
let upstream_manager = upstream_manager.clone();
|
||||
let replay_checker = replay_checker.clone();
|
||||
let buffer_pool = buffer_pool.clone();
|
||||
let rng = rng.clone();
|
||||
let route_runtime = route_runtime.clone();
|
||||
let ip_tracker = ip_tracker.clone();
|
||||
let beobachten = beobachten.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
let (stream, peer) = front_listener.accept().await.unwrap();
|
||||
let real_peer_report = Arc::new(std::sync::Mutex::new(None));
|
||||
ClientHandler::new(
|
||||
stream,
|
||||
peer,
|
||||
config,
|
||||
stats,
|
||||
upstream_manager,
|
||||
replay_checker,
|
||||
buffer_pool,
|
||||
rng,
|
||||
None,
|
||||
route_runtime,
|
||||
None,
|
||||
ip_tracker,
|
||||
beobachten,
|
||||
false,
|
||||
real_peer_report,
|
||||
)
|
||||
.run()
|
||||
.await
|
||||
})
|
||||
};
|
||||
|
||||
let _client = TcpStream::connect(front_addr).await.unwrap();
|
||||
|
||||
// Let the accepted connection reach the idle wait before advancing paused time.
|
||||
tokio::task::yield_now().await;
|
||||
tokio::time::advance(Duration::from_secs(2)).await;
|
||||
tokio::task::yield_now().await;
|
||||
|
||||
let result = tokio::time::timeout(Duration::from_secs(1), server_task)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
assert!(result.is_ok());
|
||||
assert_eq!(stats.get_handshake_timeouts(), 0);
|
||||
assert_eq!(stats.get_connects_bad(), 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn partial_tls_header_stall_triggers_handshake_timeout() {
|
||||
let mut cfg = ProxyConfig::default();
|
||||
|
|
@ -1110,6 +1500,7 @@ async fn partial_tls_header_stall_triggers_handshake_timeout() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -1255,6 +1646,148 @@ fn wrap_tls_application_data(payload: &[u8]) -> Vec<u8> {
|
|||
record
|
||||
}
|
||||
|
||||
fn wrap_tls_ccs_record() -> Vec<u8> {
|
||||
let mut record = Vec::with_capacity(6);
|
||||
record.push(TLS_RECORD_CHANGE_CIPHER);
|
||||
record.extend_from_slice(&[0x03, 0x03]);
|
||||
record.extend_from_slice(&1u16.to_be_bytes());
|
||||
record.push(0x01);
|
||||
record
|
||||
}
|
||||
|
||||
fn make_valid_mtproto_handshake(
|
||||
secret_hex: &str,
|
||||
proto_tag: ProtoTag,
|
||||
dc_idx: i16,
|
||||
) -> [u8; HANDSHAKE_LEN] {
|
||||
let secret = hex::decode(secret_hex).expect("secret hex must decode for mtproto test helper");
|
||||
|
||||
let mut handshake = [0x5Au8; HANDSHAKE_LEN];
|
||||
for (idx, b) in handshake[SKIP_LEN..SKIP_LEN + PREKEY_LEN + IV_LEN]
|
||||
.iter_mut()
|
||||
.enumerate()
|
||||
{
|
||||
*b = (idx as u8).wrapping_add(1);
|
||||
}
|
||||
|
||||
let dec_prekey = &handshake[SKIP_LEN..SKIP_LEN + PREKEY_LEN];
|
||||
let dec_iv_bytes = &handshake[SKIP_LEN + PREKEY_LEN..SKIP_LEN + PREKEY_LEN + IV_LEN];
|
||||
|
||||
let mut dec_key_input = Vec::with_capacity(PREKEY_LEN + secret.len());
|
||||
dec_key_input.extend_from_slice(dec_prekey);
|
||||
dec_key_input.extend_from_slice(&secret);
|
||||
let dec_key = sha256(&dec_key_input);
|
||||
|
||||
let mut dec_iv_arr = [0u8; IV_LEN];
|
||||
dec_iv_arr.copy_from_slice(dec_iv_bytes);
|
||||
let dec_iv = u128::from_be_bytes(dec_iv_arr);
|
||||
|
||||
let mut stream = AesCtr::new(&dec_key, dec_iv);
|
||||
let keystream = stream.encrypt(&[0u8; HANDSHAKE_LEN]);
|
||||
|
||||
let mut target_plain = [0u8; HANDSHAKE_LEN];
|
||||
target_plain[PROTO_TAG_POS..PROTO_TAG_POS + 4].copy_from_slice(&proto_tag.to_bytes());
|
||||
target_plain[DC_IDX_POS..DC_IDX_POS + 2].copy_from_slice(&dc_idx.to_le_bytes());
|
||||
|
||||
for idx in PROTO_TAG_POS..HANDSHAKE_LEN {
|
||||
handshake[idx] = target_plain[idx] ^ keystream[idx];
|
||||
}
|
||||
|
||||
handshake
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn fragmented_tls_mtproto_with_interleaved_ccs_is_accepted() {
|
||||
let secret_hex = "55555555555555555555555555555555";
|
||||
let secret = [0x55u8; 16];
|
||||
let client_hello = make_valid_tls_client_hello(&secret, 0);
|
||||
let mtproto_handshake = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
|
||||
|
||||
let mut cfg = ProxyConfig::default();
|
||||
cfg.general.beobachten = false;
|
||||
cfg.access.ignore_time_skew = true;
|
||||
cfg.access
|
||||
.users
|
||||
.insert("user".to_string(), secret_hex.to_string());
|
||||
|
||||
let config = Arc::new(cfg);
|
||||
let replay_checker = Arc::new(ReplayChecker::new(128, Duration::from_secs(60)));
|
||||
let rng = SecureRandom::new();
|
||||
|
||||
let (server_side, mut client_side) = duplex(131072);
|
||||
let peer: SocketAddr = "198.51.100.85:55007".parse().unwrap();
|
||||
let (read_half, write_half) = tokio::io::split(server_side);
|
||||
|
||||
let (mut tls_reader, tls_writer, tls_user) = match handle_tls_handshake(
|
||||
&client_hello,
|
||||
read_half,
|
||||
write_half,
|
||||
peer,
|
||||
&config,
|
||||
&replay_checker,
|
||||
&rng,
|
||||
None,
|
||||
)
|
||||
.await
|
||||
{
|
||||
HandshakeResult::Success(result) => result,
|
||||
_ => panic!("expected successful TLS handshake"),
|
||||
};
|
||||
|
||||
let mut tls_response_head = [0u8; 5];
|
||||
client_side
|
||||
.read_exact(&mut tls_response_head)
|
||||
.await
|
||||
.unwrap();
|
||||
assert_eq!(tls_response_head[0], 0x16);
|
||||
let tls_response_len =
|
||||
u16::from_be_bytes([tls_response_head[3], tls_response_head[4]]) as usize;
|
||||
let mut tls_response_body = vec![0u8; tls_response_len];
|
||||
client_side
|
||||
.read_exact(&mut tls_response_body)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
client_side
|
||||
.write_all(&wrap_tls_application_data(&mtproto_handshake[..13]))
|
||||
.await
|
||||
.unwrap();
|
||||
client_side.write_all(&wrap_tls_ccs_record()).await.unwrap();
|
||||
client_side
|
||||
.write_all(&wrap_tls_application_data(&mtproto_handshake[13..37]))
|
||||
.await
|
||||
.unwrap();
|
||||
client_side.write_all(&wrap_tls_ccs_record()).await.unwrap();
|
||||
client_side
|
||||
.write_all(&wrap_tls_application_data(&mtproto_handshake[37..]))
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let mtproto_data = tls_reader.read_exact(HANDSHAKE_LEN).await.unwrap();
|
||||
assert_eq!(&mtproto_data[..], &mtproto_handshake);
|
||||
|
||||
let mtproto_handshake: [u8; HANDSHAKE_LEN] = mtproto_data[..].try_into().unwrap();
|
||||
let (_, _, success) = match handle_mtproto_handshake(
|
||||
&mtproto_handshake,
|
||||
tls_reader,
|
||||
tls_writer,
|
||||
peer,
|
||||
&config,
|
||||
&replay_checker,
|
||||
true,
|
||||
Some(tls_user.as_str()),
|
||||
)
|
||||
.await
|
||||
{
|
||||
HandshakeResult::Success(result) => result,
|
||||
_ => panic!("expected successful MTProto handshake"),
|
||||
};
|
||||
|
||||
assert_eq!(success.user, "user");
|
||||
assert_eq!(success.proto_tag, ProtoTag::Secure);
|
||||
assert_eq!(success.dc_idx, 2);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn valid_tls_path_does_not_fall_back_to_mask_backend() {
|
||||
let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
|
||||
|
|
@ -1292,6 +1825,7 @@ async fn valid_tls_path_does_not_fall_back_to_mask_backend() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -1400,6 +1934,7 @@ async fn valid_tls_with_invalid_mtproto_falls_back_to_mask_backend() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -1506,6 +2041,7 @@ async fn client_handler_tls_bad_mtproto_is_forwarded_to_mask_backend() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -1627,6 +2163,7 @@ async fn alpn_mismatch_tls_probe_is_masked_through_client_pipeline() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -1719,6 +2256,7 @@ async fn invalid_hmac_tls_probe_is_masked_through_client_pipeline() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -1817,6 +2355,7 @@ async fn burst_invalid_tls_probes_are_masked_verbatim() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -1995,14 +2534,16 @@ async fn tcp_limit_rejection_does_not_reserve_ip_or_trigger_rollback() {
|
|||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn zero_tcp_limit_rejects_without_ip_or_counter_side_effects() {
|
||||
async fn zero_tcp_limit_uses_global_fallback_and_rejects_without_side_effects() {
|
||||
let mut config = ProxyConfig::default();
|
||||
config
|
||||
.access
|
||||
.user_max_tcp_conns
|
||||
.insert("user".to_string(), 0);
|
||||
config.access.user_max_tcp_conns_global_each = 1;
|
||||
|
||||
let stats = Stats::new();
|
||||
stats.increment_user_curr_connects("user");
|
||||
let ip_tracker = UserIpTracker::new();
|
||||
let peer_addr: SocketAddr = "198.51.100.211:50001".parse().unwrap();
|
||||
|
||||
|
|
@ -2019,10 +2560,75 @@ async fn zero_tcp_limit_rejects_without_ip_or_counter_side_effects() {
|
|||
result,
|
||||
Err(ProxyError::ConnectionLimitExceeded { user }) if user == "user"
|
||||
));
|
||||
assert_eq!(
|
||||
stats.get_user_curr_connects("user"),
|
||||
1,
|
||||
"TCP-limit rejection must keep pre-existing in-flight connection count unchanged"
|
||||
);
|
||||
assert_eq!(ip_tracker.get_active_ip_count("user").await, 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn zero_tcp_limit_with_disabled_global_fallback_is_unlimited() {
|
||||
let mut config = ProxyConfig::default();
|
||||
config
|
||||
.access
|
||||
.user_max_tcp_conns
|
||||
.insert("user".to_string(), 0);
|
||||
config.access.user_max_tcp_conns_global_each = 0;
|
||||
|
||||
let stats = Stats::new();
|
||||
let ip_tracker = UserIpTracker::new();
|
||||
let peer_addr: SocketAddr = "198.51.100.212:50002".parse().unwrap();
|
||||
|
||||
let result = RunningClientHandler::check_user_limits_static(
|
||||
"user",
|
||||
&config,
|
||||
&stats,
|
||||
peer_addr,
|
||||
&ip_tracker,
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(
|
||||
result.is_ok(),
|
||||
"per-user zero with global fallback disabled must not enforce a TCP limit"
|
||||
);
|
||||
assert_eq!(stats.get_user_curr_connects("user"), 0);
|
||||
assert_eq!(ip_tracker.get_active_ip_count("user").await, 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn global_tcp_fallback_applies_when_per_user_limit_is_missing() {
|
||||
let mut config = ProxyConfig::default();
|
||||
config.access.user_max_tcp_conns_global_each = 1;
|
||||
|
||||
let stats = Stats::new();
|
||||
stats.increment_user_curr_connects("user");
|
||||
let ip_tracker = UserIpTracker::new();
|
||||
let peer_addr: SocketAddr = "198.51.100.213:50003".parse().unwrap();
|
||||
|
||||
let result = RunningClientHandler::check_user_limits_static(
|
||||
"user",
|
||||
&config,
|
||||
&stats,
|
||||
peer_addr,
|
||||
&ip_tracker,
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(matches!(
|
||||
result,
|
||||
Err(ProxyError::ConnectionLimitExceeded { user }) if user == "user"
|
||||
));
|
||||
assert_eq!(
|
||||
stats.get_user_curr_connects("user"),
|
||||
1,
|
||||
"Global fallback TCP-limit rejection must keep pre-existing counter unchanged"
|
||||
);
|
||||
assert_eq!(ip_tracker.get_active_ip_count("user").await, 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn check_user_limits_static_success_does_not_leak_counter_or_ip_reservation() {
|
||||
let user = "check-helper-user";
|
||||
|
|
@ -2654,6 +3260,7 @@ async fn relay_connect_error_releases_user_and_ip_before_return() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -2841,7 +3448,7 @@ async fn quota_rejection_does_not_reserve_ip_or_trigger_rollback() {
|
|||
.insert("user".to_string(), 1024);
|
||||
|
||||
let stats = Stats::new();
|
||||
stats.add_user_octets_from("user", 1024);
|
||||
preload_user_quota(&stats, "user", 1024);
|
||||
|
||||
let ip_tracker = UserIpTracker::new();
|
||||
let peer_addr: SocketAddr = "203.0.113.211:50001".parse().unwrap();
|
||||
|
|
@ -3214,6 +3821,7 @@ async fn untrusted_proxy_header_source_is_rejected() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -3283,6 +3891,7 @@ async fn empty_proxy_trusted_cidrs_rejects_proxy_header_by_default() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -3379,6 +3988,7 @@ async fn oversized_tls_record_is_masked_in_generic_stream_pipeline() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -3481,6 +4091,7 @@ async fn oversized_tls_record_is_masked_in_client_handler_pipeline() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -3597,6 +4208,7 @@ async fn tls_record_len_min_minus_1_is_rejected_in_generic_stream_pipeline() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -3699,6 +4311,7 @@ async fn tls_record_len_min_minus_1_is_rejected_in_client_handler_pipeline() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -3804,6 +4417,7 @@ async fn tls_record_len_16384_is_accepted_in_generic_stream_pipeline() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -3904,6 +4518,7 @@ async fn tls_record_len_16384_is_accepted_in_client_handler_pipeline() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
|
|||
|
|
@ -33,6 +33,7 @@ fn make_test_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats,
|
||||
|
|
|
|||
|
|
@ -35,6 +35,7 @@ fn make_test_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats,
|
||||
|
|
|
|||
|
|
@ -36,6 +36,7 @@ fn make_test_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats,
|
||||
|
|
|
|||
|
|
@ -50,6 +50,7 @@ fn build_harness(secret_hex: &str, mask_port: u16) -> PipelineHarness {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
|
|||
|
|
@ -25,13 +25,26 @@ fn wrap_tls_application_record_oversized_payload_is_chunked_without_truncation()
|
|||
let len = u16::from_be_bytes([record[offset + 3], record[offset + 4]]) as usize;
|
||||
let body_start = offset + 5;
|
||||
let body_end = body_start + len;
|
||||
assert!(body_end <= record.len(), "declared TLS record length must be in-bounds");
|
||||
assert!(
|
||||
body_end <= record.len(),
|
||||
"declared TLS record length must be in-bounds"
|
||||
);
|
||||
recovered.extend_from_slice(&record[body_start..body_end]);
|
||||
offset = body_end;
|
||||
frames += 1;
|
||||
}
|
||||
|
||||
assert_eq!(offset, record.len(), "record parser must consume exact output size");
|
||||
assert_eq!(frames, 2, "oversized payload should split into exactly two records");
|
||||
assert_eq!(recovered, payload, "chunked records must preserve full payload");
|
||||
assert_eq!(
|
||||
offset,
|
||||
record.len(),
|
||||
"record parser must consume exact output size"
|
||||
);
|
||||
assert_eq!(
|
||||
frames, 2,
|
||||
"oversized payload should split into exactly two records"
|
||||
);
|
||||
assert_eq!(
|
||||
recovered, payload,
|
||||
"chunked records must preserve full payload"
|
||||
);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -773,8 +773,7 @@ fn anchored_open_nix_path_writes_expected_lines() {
|
|||
"target/telemt-unknown-dc-anchored-open-ok-{}/unknown-dc.log",
|
||||
std::process::id()
|
||||
);
|
||||
let sanitized =
|
||||
sanitize_unknown_dc_log_path(&rel_candidate).expect("candidate must sanitize");
|
||||
let sanitized = sanitize_unknown_dc_log_path(&rel_candidate).expect("candidate must sanitize");
|
||||
let _ = fs::remove_file(&sanitized.resolved_path);
|
||||
|
||||
let mut first = open_unknown_dc_log_append_anchored(&sanitized)
|
||||
|
|
@ -787,7 +786,10 @@ fn anchored_open_nix_path_writes_expected_lines() {
|
|||
|
||||
let content =
|
||||
fs::read_to_string(&sanitized.resolved_path).expect("anchored log file must be readable");
|
||||
let lines: Vec<&str> = content.lines().filter(|line| !line.trim().is_empty()).collect();
|
||||
let lines: Vec<&str> = content
|
||||
.lines()
|
||||
.filter(|line| !line.trim().is_empty())
|
||||
.collect();
|
||||
assert_eq!(lines.len(), 2, "expected one line per anchored append call");
|
||||
assert!(
|
||||
lines.contains(&"dc_idx=31200") && lines.contains(&"dc_idx=31201"),
|
||||
|
|
@ -811,8 +813,7 @@ fn anchored_open_parallel_appends_preserve_line_integrity() {
|
|||
"target/telemt-unknown-dc-anchored-open-parallel-{}/unknown-dc.log",
|
||||
std::process::id()
|
||||
);
|
||||
let sanitized =
|
||||
sanitize_unknown_dc_log_path(&rel_candidate).expect("candidate must sanitize");
|
||||
let sanitized = sanitize_unknown_dc_log_path(&rel_candidate).expect("candidate must sanitize");
|
||||
let _ = fs::remove_file(&sanitized.resolved_path);
|
||||
|
||||
let mut workers = Vec::new();
|
||||
|
|
@ -831,8 +832,15 @@ fn anchored_open_parallel_appends_preserve_line_integrity() {
|
|||
|
||||
let content =
|
||||
fs::read_to_string(&sanitized.resolved_path).expect("parallel log file must be readable");
|
||||
let lines: Vec<&str> = content.lines().filter(|line| !line.trim().is_empty()).collect();
|
||||
assert_eq!(lines.len(), 64, "expected one complete line per worker append");
|
||||
let lines: Vec<&str> = content
|
||||
.lines()
|
||||
.filter(|line| !line.trim().is_empty())
|
||||
.collect();
|
||||
assert_eq!(
|
||||
lines.len(),
|
||||
64,
|
||||
"expected one complete line per worker append"
|
||||
);
|
||||
for line in lines {
|
||||
assert!(
|
||||
line.starts_with("dc_idx="),
|
||||
|
|
@ -867,8 +875,7 @@ fn anchored_open_creates_private_0600_file_permissions() {
|
|||
"target/telemt-unknown-dc-anchored-perms-{}/unknown-dc.log",
|
||||
std::process::id()
|
||||
);
|
||||
let sanitized =
|
||||
sanitize_unknown_dc_log_path(&rel_candidate).expect("candidate must sanitize");
|
||||
let sanitized = sanitize_unknown_dc_log_path(&rel_candidate).expect("candidate must sanitize");
|
||||
let _ = fs::remove_file(&sanitized.resolved_path);
|
||||
|
||||
let mut file = open_unknown_dc_log_append_anchored(&sanitized)
|
||||
|
|
@ -905,8 +912,7 @@ fn anchored_open_rejects_existing_symlink_target() {
|
|||
"target/telemt-unknown-dc-anchored-symlink-target-{}/unknown-dc.log",
|
||||
std::process::id()
|
||||
);
|
||||
let sanitized =
|
||||
sanitize_unknown_dc_log_path(&rel_candidate).expect("candidate must sanitize");
|
||||
let sanitized = sanitize_unknown_dc_log_path(&rel_candidate).expect("candidate must sanitize");
|
||||
|
||||
let outside = std::env::temp_dir().join(format!(
|
||||
"telemt-unknown-dc-anchored-symlink-outside-{}.log",
|
||||
|
|
@ -943,8 +949,7 @@ fn anchored_open_high_contention_multi_write_preserves_complete_lines() {
|
|||
"target/telemt-unknown-dc-anchored-contention-{}/unknown-dc.log",
|
||||
std::process::id()
|
||||
);
|
||||
let sanitized =
|
||||
sanitize_unknown_dc_log_path(&rel_candidate).expect("candidate must sanitize");
|
||||
let sanitized = sanitize_unknown_dc_log_path(&rel_candidate).expect("candidate must sanitize");
|
||||
let _ = fs::remove_file(&sanitized.resolved_path);
|
||||
|
||||
let workers = 24usize;
|
||||
|
|
@ -970,7 +975,10 @@ fn anchored_open_high_contention_multi_write_preserves_complete_lines() {
|
|||
|
||||
let content = fs::read_to_string(&sanitized.resolved_path)
|
||||
.expect("contention output file must be readable");
|
||||
let lines: Vec<&str> = content.lines().filter(|line| !line.trim().is_empty()).collect();
|
||||
let lines: Vec<&str> = content
|
||||
.lines()
|
||||
.filter(|line| !line.trim().is_empty())
|
||||
.collect();
|
||||
assert_eq!(
|
||||
lines.len(),
|
||||
workers * rounds,
|
||||
|
|
@ -1014,8 +1022,7 @@ fn append_unknown_dc_line_returns_error_for_read_only_descriptor() {
|
|||
"target/telemt-unknown-dc-append-ro-{}/unknown-dc.log",
|
||||
std::process::id()
|
||||
);
|
||||
let sanitized =
|
||||
sanitize_unknown_dc_log_path(&rel_candidate).expect("candidate must sanitize");
|
||||
let sanitized = sanitize_unknown_dc_log_path(&rel_candidate).expect("candidate must sanitize");
|
||||
fs::write(&sanitized.resolved_path, "seed\n").expect("seed file must be writable");
|
||||
|
||||
let mut readonly = std::fs::OpenOptions::new()
|
||||
|
|
@ -1295,6 +1302,7 @@ async fn direct_relay_abort_midflight_releases_route_gauge() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -1401,6 +1409,7 @@ async fn direct_relay_cutover_midflight_releases_route_gauge() {
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -1522,6 +1531,7 @@ async fn direct_relay_cutover_storm_multi_session_keeps_generic_errors_and_relea
|
|||
1,
|
||||
1,
|
||||
1,
|
||||
10,
|
||||
1,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -1754,6 +1764,7 @@ async fn negative_direct_relay_dc_connection_refused_fails_fast() {
|
|||
1,
|
||||
100,
|
||||
5000,
|
||||
10,
|
||||
3,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
@ -1844,6 +1855,7 @@ async fn adversarial_direct_relay_cutover_integrity() {
|
|||
1,
|
||||
100,
|
||||
5000,
|
||||
10,
|
||||
3,
|
||||
false,
|
||||
stats.clone(),
|
||||
|
|
|
|||
|
|
@ -0,0 +1,713 @@
|
|||
use super::*;
|
||||
use crate::crypto::{AesCtr, sha256, sha256_hmac};
|
||||
use crate::protocol::constants::{ProtoTag, RESERVED_NONCE_BEGINNINGS, RESERVED_NONCE_FIRST_BYTES};
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
// --- Helpers ---
|
||||
|
||||
fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig {
|
||||
let mut cfg = ProxyConfig::default();
|
||||
cfg.access.users.clear();
|
||||
cfg.access
|
||||
.users
|
||||
.insert("user".to_string(), secret_hex.to_string());
|
||||
cfg.access.ignore_time_skew = true;
|
||||
cfg.general.modes.secure = true;
|
||||
cfg.general.modes.classic = true;
|
||||
cfg.general.modes.tls = true;
|
||||
cfg
|
||||
}
|
||||
|
||||
fn make_valid_tls_handshake(secret: &[u8], timestamp: u32) -> Vec<u8> {
|
||||
let session_id_len: usize = 32;
|
||||
let len = tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN + 1 + session_id_len;
|
||||
let mut handshake = vec![0x42u8; len];
|
||||
|
||||
handshake[tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN] = session_id_len as u8;
|
||||
handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN].fill(0);
|
||||
|
||||
let computed = sha256_hmac(secret, &handshake);
|
||||
let mut digest = computed;
|
||||
let ts = timestamp.to_le_bytes();
|
||||
for i in 0..4 {
|
||||
digest[28 + i] ^= ts[i];
|
||||
}
|
||||
|
||||
handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN]
|
||||
.copy_from_slice(&digest);
|
||||
handshake
|
||||
}
|
||||
|
||||
fn make_valid_mtproto_handshake(
|
||||
secret_hex: &str,
|
||||
proto_tag: ProtoTag,
|
||||
dc_idx: i16,
|
||||
) -> [u8; HANDSHAKE_LEN] {
|
||||
let secret = hex::decode(secret_hex).expect("secret hex must decode");
|
||||
let mut handshake = [0x5Au8; HANDSHAKE_LEN];
|
||||
for (idx, b) in handshake[SKIP_LEN..SKIP_LEN + PREKEY_LEN + IV_LEN]
|
||||
.iter_mut()
|
||||
.enumerate()
|
||||
{
|
||||
*b = (idx as u8).wrapping_add(1);
|
||||
}
|
||||
|
||||
let dec_prekey = &handshake[SKIP_LEN..SKIP_LEN + PREKEY_LEN];
|
||||
let dec_iv_bytes = &handshake[SKIP_LEN + PREKEY_LEN..SKIP_LEN + PREKEY_LEN + IV_LEN];
|
||||
|
||||
let mut dec_key_input = Vec::with_capacity(PREKEY_LEN + secret.len());
|
||||
dec_key_input.extend_from_slice(dec_prekey);
|
||||
dec_key_input.extend_from_slice(&secret);
|
||||
let dec_key = sha256(&dec_key_input);
|
||||
|
||||
let mut dec_iv_arr = [0u8; IV_LEN];
|
||||
dec_iv_arr.copy_from_slice(dec_iv_bytes);
|
||||
let dec_iv = u128::from_be_bytes(dec_iv_arr);
|
||||
|
||||
let mut stream = AesCtr::new(&dec_key, dec_iv);
|
||||
let keystream = stream.encrypt(&[0u8; HANDSHAKE_LEN]);
|
||||
|
||||
let mut target_plain = [0u8; HANDSHAKE_LEN];
|
||||
target_plain[PROTO_TAG_POS..PROTO_TAG_POS + 4].copy_from_slice(&proto_tag.to_bytes());
|
||||
target_plain[DC_IDX_POS..DC_IDX_POS + 2].copy_from_slice(&dc_idx.to_le_bytes());
|
||||
|
||||
for idx in PROTO_TAG_POS..HANDSHAKE_LEN {
|
||||
handshake[idx] = target_plain[idx] ^ keystream[idx];
|
||||
}
|
||||
|
||||
handshake
|
||||
}
|
||||
|
||||
fn make_valid_tls_client_hello_with_alpn(
|
||||
secret: &[u8],
|
||||
timestamp: u32,
|
||||
alpn_protocols: &[&[u8]],
|
||||
) -> Vec<u8> {
|
||||
let mut body = Vec::new();
|
||||
body.extend_from_slice(&TLS_VERSION);
|
||||
body.extend_from_slice(&[0u8; 32]);
|
||||
body.push(32);
|
||||
body.extend_from_slice(&[0x42u8; 32]);
|
||||
body.extend_from_slice(&2u16.to_be_bytes());
|
||||
body.extend_from_slice(&[0x13, 0x01]);
|
||||
body.push(1);
|
||||
body.push(0);
|
||||
|
||||
let mut ext_blob = Vec::new();
|
||||
if !alpn_protocols.is_empty() {
|
||||
let mut alpn_list = Vec::new();
|
||||
for proto in alpn_protocols {
|
||||
alpn_list.push(proto.len() as u8);
|
||||
alpn_list.extend_from_slice(proto);
|
||||
}
|
||||
let mut alpn_data = Vec::new();
|
||||
alpn_data.extend_from_slice(&(alpn_list.len() as u16).to_be_bytes());
|
||||
alpn_data.extend_from_slice(&alpn_list);
|
||||
|
||||
ext_blob.extend_from_slice(&0x0010u16.to_be_bytes());
|
||||
ext_blob.extend_from_slice(&(alpn_data.len() as u16).to_be_bytes());
|
||||
ext_blob.extend_from_slice(&alpn_data);
|
||||
}
|
||||
body.extend_from_slice(&(ext_blob.len() as u16).to_be_bytes());
|
||||
body.extend_from_slice(&ext_blob);
|
||||
|
||||
let mut handshake = Vec::new();
|
||||
handshake.push(0x01);
|
||||
let body_len = (body.len() as u32).to_be_bytes();
|
||||
handshake.extend_from_slice(&body_len[1..4]);
|
||||
handshake.extend_from_slice(&body);
|
||||
|
||||
let mut record = Vec::new();
|
||||
record.push(TLS_RECORD_HANDSHAKE);
|
||||
record.extend_from_slice(&[0x03, 0x01]);
|
||||
record.extend_from_slice(&(handshake.len() as u16).to_be_bytes());
|
||||
record.extend_from_slice(&handshake);
|
||||
|
||||
record[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN].fill(0);
|
||||
let computed = sha256_hmac(secret, &record);
|
||||
let mut digest = computed;
|
||||
let ts = timestamp.to_le_bytes();
|
||||
for i in 0..4 {
|
||||
digest[28 + i] ^= ts[i];
|
||||
}
|
||||
record[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN].copy_from_slice(&digest);
|
||||
|
||||
record
|
||||
}
|
||||
|
||||
// --- Category 1: Edge Cases & Protocol Boundaries ---
|
||||
|
||||
#[tokio::test]
|
||||
async fn tls_minimum_viable_length_boundary() {
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let secret = [0x11u8; 16];
|
||||
let config = test_config_with_secret_hex("11111111111111111111111111111111");
|
||||
let replay_checker = ReplayChecker::new(128, Duration::from_secs(60));
|
||||
let rng = SecureRandom::new();
|
||||
let peer: SocketAddr = "192.0.2.1:12345".parse().unwrap();
|
||||
|
||||
let min_len = tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN + 1;
|
||||
let mut exact_min_handshake = vec![0x42u8; min_len];
|
||||
exact_min_handshake[min_len - 1] = 0;
|
||||
exact_min_handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN].fill(0);
|
||||
let digest = sha256_hmac(&secret, &exact_min_handshake);
|
||||
exact_min_handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN]
|
||||
.copy_from_slice(&digest);
|
||||
|
||||
let res = handle_tls_handshake(
|
||||
&exact_min_handshake,
|
||||
tokio::io::empty(),
|
||||
tokio::io::sink(),
|
||||
peer,
|
||||
&config,
|
||||
&replay_checker,
|
||||
&rng,
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
assert!(
|
||||
matches!(res, HandshakeResult::Success(_)),
|
||||
"Exact minimum length TLS handshake must succeed"
|
||||
);
|
||||
|
||||
let short_handshake = vec![0x42u8; min_len - 1];
|
||||
let res_short = handle_tls_handshake(
|
||||
&short_handshake,
|
||||
tokio::io::empty(),
|
||||
tokio::io::sink(),
|
||||
peer,
|
||||
&config,
|
||||
&replay_checker,
|
||||
&rng,
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
assert!(
|
||||
matches!(res_short, HandshakeResult::BadClient { .. }),
|
||||
"Handshake 1 byte shorter than minimum must fail closed"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn mtproto_extreme_dc_index_serialization() {
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let secret_hex = "22222222222222222222222222222222";
|
||||
let config = test_config_with_secret_hex(secret_hex);
|
||||
for (idx, extreme_dc) in [i16::MIN, i16::MAX, -1, 0].into_iter().enumerate() {
|
||||
// Keep replay state independent per case so we validate dc_idx encoding,
|
||||
// not duplicate-handshake rejection behavior.
|
||||
let replay_checker = ReplayChecker::new(128, Duration::from_secs(60));
|
||||
let peer = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(192, 0, 2, 2)), 12345 + idx as u16);
|
||||
let handshake = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, extreme_dc);
|
||||
let res = handle_mtproto_handshake(
|
||||
&handshake,
|
||||
tokio::io::empty(),
|
||||
tokio::io::sink(),
|
||||
peer,
|
||||
&config,
|
||||
&replay_checker,
|
||||
false,
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
|
||||
match res {
|
||||
HandshakeResult::Success((_, _, success)) => {
|
||||
assert_eq!(
|
||||
success.dc_idx, extreme_dc,
|
||||
"Extreme DC index {} must serialize/deserialize perfectly",
|
||||
extreme_dc
|
||||
);
|
||||
}
|
||||
_ => panic!(
|
||||
"MTProto handshake with extreme DC index {} failed",
|
||||
extreme_dc
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn alpn_strict_case_and_padding_rejection() {
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let secret = [0x33u8; 16];
|
||||
let mut config = test_config_with_secret_hex("33333333333333333333333333333333");
|
||||
config.censorship.alpn_enforce = true;
|
||||
let replay_checker = ReplayChecker::new(128, Duration::from_secs(60));
|
||||
let rng = SecureRandom::new();
|
||||
let peer: SocketAddr = "192.0.2.3:12345".parse().unwrap();
|
||||
|
||||
let bad_alpns: &[&[u8]] = &[b"H2", b"h2\0", b" http/1.1", b"http/1.1\n"];
|
||||
|
||||
for bad_alpn in bad_alpns {
|
||||
let handshake = make_valid_tls_client_hello_with_alpn(&secret, 0, &[*bad_alpn]);
|
||||
let res = handle_tls_handshake(
|
||||
&handshake,
|
||||
tokio::io::empty(),
|
||||
tokio::io::sink(),
|
||||
peer,
|
||||
&config,
|
||||
&replay_checker,
|
||||
&rng,
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
assert!(
|
||||
matches!(res, HandshakeResult::BadClient { .. }),
|
||||
"ALPN strict enforcement must reject {:?}",
|
||||
bad_alpn
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn ipv4_mapped_ipv6_bucketing_anomaly() {
|
||||
let ipv4_mapped_1 = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc000, 0x0201));
|
||||
let ipv4_mapped_2 = IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0xffff, 0xc633, 0x6402));
|
||||
|
||||
let norm_1 = normalize_auth_probe_ip(ipv4_mapped_1);
|
||||
let norm_2 = normalize_auth_probe_ip(ipv4_mapped_2);
|
||||
|
||||
assert_eq!(
|
||||
norm_1, norm_2,
|
||||
"IPv4-mapped IPv6 addresses must collapse into the same /64 bucket (::0)"
|
||||
);
|
||||
assert_eq!(
|
||||
norm_1,
|
||||
IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)),
|
||||
"The bucket must be exactly ::0"
|
||||
);
|
||||
}
|
||||
|
||||
// --- Category 2: Adversarial & Black Hat ---
|
||||
|
||||
#[tokio::test]
|
||||
async fn mtproto_invalid_ciphertext_does_not_poison_replay_cache() {
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let secret_hex = "55555555555555555555555555555555";
|
||||
let config = test_config_with_secret_hex(secret_hex);
|
||||
let replay_checker = ReplayChecker::new(128, Duration::from_secs(60));
|
||||
let peer: SocketAddr = "192.0.2.5:12345".parse().unwrap();
|
||||
|
||||
let valid_handshake = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 1);
|
||||
let mut invalid_handshake = valid_handshake;
|
||||
invalid_handshake[SKIP_LEN + PREKEY_LEN + IV_LEN + 1] ^= 0xFF;
|
||||
|
||||
let res_invalid = handle_mtproto_handshake(
|
||||
&invalid_handshake,
|
||||
tokio::io::empty(),
|
||||
tokio::io::sink(),
|
||||
peer,
|
||||
&config,
|
||||
&replay_checker,
|
||||
false,
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
assert!(matches!(res_invalid, HandshakeResult::BadClient { .. }));
|
||||
|
||||
let res_valid = handle_mtproto_handshake(
|
||||
&valid_handshake,
|
||||
tokio::io::empty(),
|
||||
tokio::io::sink(),
|
||||
peer,
|
||||
&config,
|
||||
&replay_checker,
|
||||
false,
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
assert!(
|
||||
matches!(res_valid, HandshakeResult::Success(_)),
|
||||
"Invalid MTProto ciphertext must not poison the replay cache"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn tls_invalid_session_does_not_poison_replay_cache() {
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let secret = [0x66u8; 16];
|
||||
let config = test_config_with_secret_hex("66666666666666666666666666666666");
|
||||
let replay_checker = ReplayChecker::new(128, Duration::from_secs(60));
|
||||
let rng = SecureRandom::new();
|
||||
let peer: SocketAddr = "192.0.2.6:12345".parse().unwrap();
|
||||
|
||||
let valid_handshake = make_valid_tls_handshake(&secret, 0);
|
||||
let mut invalid_handshake = valid_handshake.clone();
|
||||
let session_idx = tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN + 1;
|
||||
invalid_handshake[session_idx] ^= 0xFF;
|
||||
|
||||
let res_invalid = handle_tls_handshake(
|
||||
&invalid_handshake,
|
||||
tokio::io::empty(),
|
||||
tokio::io::sink(),
|
||||
peer,
|
||||
&config,
|
||||
&replay_checker,
|
||||
&rng,
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
assert!(matches!(res_invalid, HandshakeResult::BadClient { .. }));
|
||||
|
||||
let res_valid = handle_tls_handshake(
|
||||
&valid_handshake,
|
||||
tokio::io::empty(),
|
||||
tokio::io::sink(),
|
||||
peer,
|
||||
&config,
|
||||
&replay_checker,
|
||||
&rng,
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
assert!(
|
||||
matches!(res_valid, HandshakeResult::Success(_)),
|
||||
"Invalid TLS payload must not poison the replay cache"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn server_hello_delay_timing_neutrality_on_hmac_failure() {
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let secret = [0x77u8; 16];
|
||||
let mut config = test_config_with_secret_hex("77777777777777777777777777777777");
|
||||
config.censorship.server_hello_delay_min_ms = 50;
|
||||
config.censorship.server_hello_delay_max_ms = 50;
|
||||
|
||||
let replay_checker = ReplayChecker::new(128, Duration::from_secs(60));
|
||||
let rng = SecureRandom::new();
|
||||
let peer: SocketAddr = "192.0.2.7:12345".parse().unwrap();
|
||||
|
||||
let mut invalid_handshake = make_valid_tls_handshake(&secret, 0);
|
||||
invalid_handshake[tls::TLS_DIGEST_POS] ^= 0xFF;
|
||||
|
||||
let start = Instant::now();
|
||||
let res = handle_tls_handshake(
|
||||
&invalid_handshake,
|
||||
tokio::io::empty(),
|
||||
tokio::io::sink(),
|
||||
peer,
|
||||
&config,
|
||||
&replay_checker,
|
||||
&rng,
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
let elapsed = start.elapsed();
|
||||
|
||||
assert!(matches!(res, HandshakeResult::BadClient { .. }));
|
||||
assert!(
|
||||
elapsed >= Duration::from_millis(45),
|
||||
"Invalid HMAC must still incur the configured ServerHello delay to prevent timing side-channels"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn server_hello_delay_inversion_resilience() {
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let secret = [0x88u8; 16];
|
||||
let mut config = test_config_with_secret_hex("88888888888888888888888888888888");
|
||||
config.censorship.server_hello_delay_min_ms = 100;
|
||||
config.censorship.server_hello_delay_max_ms = 10;
|
||||
|
||||
let replay_checker = ReplayChecker::new(128, Duration::from_secs(60));
|
||||
let rng = SecureRandom::new();
|
||||
let peer: SocketAddr = "192.0.2.8:12345".parse().unwrap();
|
||||
|
||||
let valid_handshake = make_valid_tls_handshake(&secret, 0);
|
||||
|
||||
let start = Instant::now();
|
||||
let res = handle_tls_handshake(
|
||||
&valid_handshake,
|
||||
tokio::io::empty(),
|
||||
tokio::io::sink(),
|
||||
peer,
|
||||
&config,
|
||||
&replay_checker,
|
||||
&rng,
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
let elapsed = start.elapsed();
|
||||
|
||||
assert!(matches!(res, HandshakeResult::Success(_)));
|
||||
assert!(
|
||||
elapsed >= Duration::from_millis(90),
|
||||
"Delay logic must gracefully handle min > max inversions via max.max(min)"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn mixed_valid_and_invalid_user_secrets_configuration() {
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
clear_warned_secrets_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let mut config = ProxyConfig::default();
|
||||
config.access.ignore_time_skew = true;
|
||||
|
||||
for i in 0..9 {
|
||||
let bad_secret = if i % 2 == 0 { "badhex!" } else { "1122" };
|
||||
config
|
||||
.access
|
||||
.users
|
||||
.insert(format!("bad_user_{}", i), bad_secret.to_string());
|
||||
}
|
||||
let valid_secret_hex = "99999999999999999999999999999999";
|
||||
config
|
||||
.access
|
||||
.users
|
||||
.insert("good_user".to_string(), valid_secret_hex.to_string());
|
||||
config.general.modes.secure = true;
|
||||
config.general.modes.classic = true;
|
||||
config.general.modes.tls = true;
|
||||
|
||||
let secret = [0x99u8; 16];
|
||||
let replay_checker = ReplayChecker::new(128, Duration::from_secs(60));
|
||||
let rng = SecureRandom::new();
|
||||
let peer: SocketAddr = "192.0.2.9:12345".parse().unwrap();
|
||||
|
||||
let valid_handshake = make_valid_tls_handshake(&secret, 0);
|
||||
|
||||
let res = handle_tls_handshake(
|
||||
&valid_handshake,
|
||||
tokio::io::empty(),
|
||||
tokio::io::sink(),
|
||||
peer,
|
||||
&config,
|
||||
&replay_checker,
|
||||
&rng,
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(
|
||||
matches!(res, HandshakeResult::Success(_)),
|
||||
"Proxy must gracefully skip invalid secrets and authenticate the valid one"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn tls_emulation_fallback_when_cache_missing() {
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let secret = [0xAAu8; 16];
|
||||
let mut config = test_config_with_secret_hex("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa");
|
||||
config.censorship.tls_emulation = true;
|
||||
config.general.modes.tls = true;
|
||||
|
||||
let replay_checker = ReplayChecker::new(128, Duration::from_secs(60));
|
||||
let rng = SecureRandom::new();
|
||||
let peer: SocketAddr = "192.0.2.10:12345".parse().unwrap();
|
||||
|
||||
let valid_handshake = make_valid_tls_handshake(&secret, 0);
|
||||
|
||||
let res = handle_tls_handshake(
|
||||
&valid_handshake,
|
||||
tokio::io::empty(),
|
||||
tokio::io::sink(),
|
||||
peer,
|
||||
&config,
|
||||
&replay_checker,
|
||||
&rng,
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(
|
||||
matches!(res, HandshakeResult::Success(_)),
|
||||
"TLS emulation must gracefully fall back to standard ServerHello if cache is missing"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn classic_mode_over_tls_transport_protocol_confusion() {
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let secret_hex = "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb";
|
||||
let mut config = test_config_with_secret_hex(secret_hex);
|
||||
config.general.modes.classic = true;
|
||||
config.general.modes.tls = true;
|
||||
|
||||
let replay_checker = ReplayChecker::new(128, Duration::from_secs(60));
|
||||
let peer: SocketAddr = "192.0.2.11:12345".parse().unwrap();
|
||||
|
||||
let handshake = make_valid_mtproto_handshake(secret_hex, ProtoTag::Intermediate, 1);
|
||||
|
||||
let res = handle_mtproto_handshake(
|
||||
&handshake,
|
||||
tokio::io::empty(),
|
||||
tokio::io::sink(),
|
||||
peer,
|
||||
&config,
|
||||
&replay_checker,
|
||||
true,
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(
|
||||
matches!(res, HandshakeResult::Success(_)),
|
||||
"Intermediate tag over TLS must succeed if classic mode is enabled, locking in cross-transport behavior"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn generate_tg_nonce_never_emits_reserved_bytes() {
|
||||
let client_enc_key = [0xCCu8; 32];
|
||||
let client_enc_iv = 123456789u128;
|
||||
let rng = SecureRandom::new();
|
||||
|
||||
for _ in 0..10_000 {
|
||||
let (nonce, _, _, _, _) = generate_tg_nonce(
|
||||
ProtoTag::Secure,
|
||||
1,
|
||||
&client_enc_key,
|
||||
client_enc_iv,
|
||||
&rng,
|
||||
false,
|
||||
);
|
||||
|
||||
assert!(
|
||||
!RESERVED_NONCE_FIRST_BYTES.contains(&nonce[0]),
|
||||
"Nonce must never start with reserved bytes"
|
||||
);
|
||||
let first_four: [u8; 4] = [nonce[0], nonce[1], nonce[2], nonce[3]];
|
||||
assert!(
|
||||
!RESERVED_NONCE_BEGINNINGS.contains(&first_four),
|
||||
"Nonce must never match reserved 4-byte beginnings"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||
async fn dashmap_concurrent_saturation_stress() {
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let ip_a: IpAddr = "192.0.2.13".parse().unwrap();
|
||||
let ip_b: IpAddr = "198.51.100.13".parse().unwrap();
|
||||
let mut tasks = Vec::new();
|
||||
|
||||
for i in 0..100 {
|
||||
let target_ip = if i % 2 == 0 { ip_a } else { ip_b };
|
||||
let shared = shared.clone();
|
||||
tasks.push(tokio::spawn(async move {
|
||||
for _ in 0..50 {
|
||||
auth_probe_record_failure_in(shared.as_ref(), target_ip, Instant::now());
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
for task in tasks {
|
||||
task.await
|
||||
.expect("Task panicked during concurrent DashMap stress");
|
||||
}
|
||||
|
||||
assert!(
|
||||
auth_probe_is_throttled_for_testing_in_shared(shared.as_ref(), ip_a),
|
||||
"IP A must be throttled after concurrent stress"
|
||||
);
|
||||
assert!(
|
||||
auth_probe_is_throttled_for_testing_in_shared(shared.as_ref(), ip_b),
|
||||
"IP B must be throttled after concurrent stress"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn prototag_invalid_bytes_fail_closed() {
|
||||
let invalid_tags: [[u8; 4]; 5] = [
|
||||
[0, 0, 0, 0],
|
||||
[0xFF, 0xFF, 0xFF, 0xFF],
|
||||
[0xDE, 0xAD, 0xBE, 0xEF],
|
||||
[0xDD, 0xDD, 0xDD, 0xDE],
|
||||
[0x11, 0x22, 0x33, 0x44],
|
||||
];
|
||||
|
||||
for tag in invalid_tags {
|
||||
assert_eq!(
|
||||
ProtoTag::from_bytes(tag),
|
||||
None,
|
||||
"Invalid ProtoTag bytes {:?} must fail closed",
|
||||
tag
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn auth_probe_eviction_hash_collision_stress() {
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let state = auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
let now = Instant::now();
|
||||
|
||||
for i in 0..10_000u32 {
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(10, 0, (i >> 8) as u8, (i & 0xFF) as u8));
|
||||
auth_probe_record_failure_with_state_in(shared.as_ref(), state, ip, now);
|
||||
}
|
||||
|
||||
assert!(
|
||||
state.len() <= AUTH_PROBE_TRACK_MAX_ENTRIES,
|
||||
"Eviction logic must successfully bound the map size under heavy insertion stress"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn encrypt_tg_nonce_with_ciphers_advances_counter_correctly() {
|
||||
let client_enc_key = [0xDDu8; 32];
|
||||
let client_enc_iv = 987654321u128;
|
||||
let rng = SecureRandom::new();
|
||||
|
||||
let (nonce, _, _, _, _) = generate_tg_nonce(
|
||||
ProtoTag::Secure,
|
||||
2,
|
||||
&client_enc_key,
|
||||
client_enc_iv,
|
||||
&rng,
|
||||
false,
|
||||
);
|
||||
|
||||
let (_, mut returned_encryptor, _) = encrypt_tg_nonce_with_ciphers(&nonce);
|
||||
let zeros = [0u8; 64];
|
||||
let returned_keystream = returned_encryptor.encrypt(&zeros);
|
||||
|
||||
let enc_key_iv = &nonce[SKIP_LEN..SKIP_LEN + KEY_LEN + IV_LEN];
|
||||
let mut expected_enc_key = [0u8; 32];
|
||||
expected_enc_key.copy_from_slice(&enc_key_iv[..KEY_LEN]);
|
||||
let mut expected_enc_iv_arr = [0u8; IV_LEN];
|
||||
expected_enc_iv_arr.copy_from_slice(&enc_key_iv[KEY_LEN..]);
|
||||
let expected_enc_iv = u128::from_be_bytes(expected_enc_iv_arr);
|
||||
|
||||
let mut manual_encryptor = AesCtr::new(&expected_enc_key, expected_enc_iv);
|
||||
|
||||
let mut manual_input = Vec::new();
|
||||
manual_input.extend_from_slice(&nonce);
|
||||
manual_input.extend_from_slice(&zeros);
|
||||
let manual_output = manual_encryptor.encrypt(&manual_input);
|
||||
|
||||
assert_eq!(
|
||||
returned_keystream,
|
||||
&manual_output[64..128],
|
||||
"encrypt_tg_nonce_with_ciphers must correctly advance the AES-CTR counter by exactly the nonce length"
|
||||
);
|
||||
}
|
||||
|
|
@ -44,12 +44,6 @@ fn make_valid_mtproto_handshake(
|
|||
handshake
|
||||
}
|
||||
|
||||
fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> {
|
||||
auth_probe_test_lock()
|
||||
.lock()
|
||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
||||
}
|
||||
|
||||
fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig {
|
||||
let mut cfg = ProxyConfig::default();
|
||||
cfg.access.users.clear();
|
||||
|
|
@ -67,8 +61,8 @@ fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig {
|
|||
|
||||
#[tokio::test]
|
||||
async fn mtproto_handshake_bit_flip_anywhere_rejected() {
|
||||
let _guard = auth_probe_test_guard();
|
||||
clear_auth_probe_state_for_testing();
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let secret_hex = "11223344556677889900aabbccddeeff";
|
||||
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
|
||||
|
|
@ -181,26 +175,26 @@ async fn mtproto_handshake_timing_neutrality_mocked() {
|
|||
|
||||
#[tokio::test]
|
||||
async fn auth_probe_throttle_saturation_stress() {
|
||||
let _guard = auth_probe_test_guard();
|
||||
clear_auth_probe_state_for_testing();
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let now = Instant::now();
|
||||
|
||||
// Record enough failures for one IP to trigger backoff
|
||||
let target_ip = IpAddr::V4(Ipv4Addr::new(1, 1, 1, 1));
|
||||
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
|
||||
auth_probe_record_failure(target_ip, now);
|
||||
auth_probe_record_failure_in(shared.as_ref(), target_ip, now);
|
||||
}
|
||||
|
||||
assert!(auth_probe_is_throttled(target_ip, now));
|
||||
assert!(auth_probe_is_throttled_in(shared.as_ref(), target_ip, now));
|
||||
|
||||
// Stress test with many unique IPs
|
||||
for i in 0..500u32 {
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, (i % 256) as u8));
|
||||
auth_probe_record_failure(ip, now);
|
||||
auth_probe_record_failure_in(shared.as_ref(), ip, now);
|
||||
}
|
||||
|
||||
let tracked = AUTH_PROBE_STATE.get().map(|state| state.len()).unwrap_or(0);
|
||||
let tracked = auth_probe_state_for_testing_in_shared(shared.as_ref()).len();
|
||||
assert!(
|
||||
tracked <= AUTH_PROBE_TRACK_MAX_ENTRIES,
|
||||
"auth probe state grew past hard cap: {tracked} > {AUTH_PROBE_TRACK_MAX_ENTRIES}"
|
||||
|
|
@ -209,8 +203,8 @@ async fn auth_probe_throttle_saturation_stress() {
|
|||
|
||||
#[tokio::test]
|
||||
async fn mtproto_handshake_abridged_prefix_rejected() {
|
||||
let _guard = auth_probe_test_guard();
|
||||
clear_auth_probe_state_for_testing();
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let mut handshake = [0x5Au8; HANDSHAKE_LEN];
|
||||
handshake[0] = 0xef; // Abridged prefix
|
||||
|
|
@ -235,8 +229,8 @@ async fn mtproto_handshake_abridged_prefix_rejected() {
|
|||
|
||||
#[tokio::test]
|
||||
async fn mtproto_handshake_preferred_user_mismatch_continues() {
|
||||
let _guard = auth_probe_test_guard();
|
||||
clear_auth_probe_state_for_testing();
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let secret1_hex = "11111111111111111111111111111111";
|
||||
let secret2_hex = "22222222222222222222222222222222";
|
||||
|
|
@ -278,8 +272,8 @@ async fn mtproto_handshake_preferred_user_mismatch_continues() {
|
|||
|
||||
#[tokio::test]
|
||||
async fn mtproto_handshake_concurrent_flood_stability() {
|
||||
let _guard = auth_probe_test_guard();
|
||||
clear_auth_probe_state_for_testing();
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let secret_hex = "00112233445566778899aabbccddeeff";
|
||||
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 1);
|
||||
|
|
@ -320,8 +314,8 @@ async fn mtproto_handshake_concurrent_flood_stability() {
|
|||
|
||||
#[tokio::test]
|
||||
async fn mtproto_replay_is_rejected_across_distinct_peers() {
|
||||
let _guard = auth_probe_test_guard();
|
||||
clear_auth_probe_state_for_testing();
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let secret_hex = "0123456789abcdeffedcba9876543210";
|
||||
let handshake = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
|
||||
|
|
@ -360,8 +354,8 @@ async fn mtproto_replay_is_rejected_across_distinct_peers() {
|
|||
|
||||
#[tokio::test]
|
||||
async fn mtproto_blackhat_mutation_corpus_never_panics_and_stays_fail_closed() {
|
||||
let _guard = auth_probe_test_guard();
|
||||
clear_auth_probe_state_for_testing();
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let secret_hex = "89abcdef012345670123456789abcdef";
|
||||
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
|
||||
|
|
@ -405,27 +399,27 @@ async fn mtproto_blackhat_mutation_corpus_never_panics_and_stays_fail_closed() {
|
|||
|
||||
#[tokio::test]
|
||||
async fn auth_probe_success_clears_throttled_peer_state() {
|
||||
let _guard = auth_probe_test_guard();
|
||||
clear_auth_probe_state_for_testing();
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let target_ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 90));
|
||||
let now = Instant::now();
|
||||
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
|
||||
auth_probe_record_failure(target_ip, now);
|
||||
auth_probe_record_failure_in(shared.as_ref(), target_ip, now);
|
||||
}
|
||||
assert!(auth_probe_is_throttled(target_ip, now));
|
||||
assert!(auth_probe_is_throttled_in(shared.as_ref(), target_ip, now));
|
||||
|
||||
auth_probe_record_success(target_ip);
|
||||
auth_probe_record_success_in(shared.as_ref(), target_ip);
|
||||
assert!(
|
||||
!auth_probe_is_throttled(target_ip, now + Duration::from_millis(1)),
|
||||
!auth_probe_is_throttled_in(shared.as_ref(), target_ip, now + Duration::from_millis(1)),
|
||||
"successful auth must clear per-peer throttle state"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn mtproto_invalid_storm_over_cap_keeps_probe_map_hard_bounded() {
|
||||
let _guard = auth_probe_test_guard();
|
||||
clear_auth_probe_state_for_testing();
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let secret_hex = "00112233445566778899aabbccddeeff";
|
||||
let mut invalid = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
|
||||
|
|
@ -458,7 +452,7 @@ async fn mtproto_invalid_storm_over_cap_keeps_probe_map_hard_bounded() {
|
|||
assert!(matches!(res, HandshakeResult::BadClient { .. }));
|
||||
}
|
||||
|
||||
let tracked = AUTH_PROBE_STATE.get().map(|state| state.len()).unwrap_or(0);
|
||||
let tracked = auth_probe_state_for_testing_in_shared(shared.as_ref()).len();
|
||||
assert!(
|
||||
tracked <= AUTH_PROBE_TRACK_MAX_ENTRIES,
|
||||
"probe map must remain bounded under invalid storm: {tracked}"
|
||||
|
|
@ -467,8 +461,8 @@ async fn mtproto_invalid_storm_over_cap_keeps_probe_map_hard_bounded() {
|
|||
|
||||
#[tokio::test]
|
||||
async fn mtproto_property_style_multi_bit_mutations_fail_closed_or_auth_only() {
|
||||
let _guard = auth_probe_test_guard();
|
||||
clear_auth_probe_state_for_testing();
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let secret_hex = "f0e1d2c3b4a5968778695a4b3c2d1e0f";
|
||||
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
|
||||
|
|
@ -520,8 +514,8 @@ async fn mtproto_property_style_multi_bit_mutations_fail_closed_or_auth_only() {
|
|||
#[tokio::test]
|
||||
#[ignore = "heavy soak; run manually"]
|
||||
async fn mtproto_blackhat_20k_mutation_soak_never_panics() {
|
||||
let _guard = auth_probe_test_guard();
|
||||
clear_auth_probe_state_for_testing();
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let secret_hex = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
|
||||
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
|
||||
|
|
|
|||
|
|
@ -0,0 +1,93 @@
|
|||
use super::*;
|
||||
use std::collections::HashSet;
|
||||
use std::net::{IpAddr, Ipv4Addr};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
#[test]
|
||||
fn adversarial_large_state_offsets_escape_first_scan_window() {
|
||||
let shared = ProxySharedState::new();
|
||||
let base = Instant::now();
|
||||
let state_len = 65_536usize;
|
||||
let scan_limit = 1_024usize;
|
||||
|
||||
let mut saw_offset_outside_first_window = false;
|
||||
for i in 0..8_192u64 {
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(
|
||||
((i >> 16) & 0xff) as u8,
|
||||
((i >> 8) & 0xff) as u8,
|
||||
(i & 0xff) as u8,
|
||||
((i.wrapping_mul(131)) & 0xff) as u8,
|
||||
));
|
||||
let now = base + Duration::from_nanos(i);
|
||||
let start =
|
||||
auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit);
|
||||
if start >= scan_limit {
|
||||
saw_offset_outside_first_window = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
assert!(
|
||||
saw_offset_outside_first_window,
|
||||
"scan start offset must cover the full auth-probe state, not only the first scan window"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn stress_large_state_offsets_cover_many_scan_windows() {
|
||||
let shared = ProxySharedState::new();
|
||||
let base = Instant::now();
|
||||
let state_len = 65_536usize;
|
||||
let scan_limit = 1_024usize;
|
||||
|
||||
let mut covered_windows = HashSet::new();
|
||||
for i in 0..16_384u64 {
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(
|
||||
((i >> 16) & 0xff) as u8,
|
||||
((i >> 8) & 0xff) as u8,
|
||||
(i & 0xff) as u8,
|
||||
((i.wrapping_mul(17)) & 0xff) as u8,
|
||||
));
|
||||
let now = base + Duration::from_micros(i);
|
||||
let start =
|
||||
auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit);
|
||||
covered_windows.insert(start / scan_limit);
|
||||
}
|
||||
|
||||
assert!(
|
||||
covered_windows.len() >= 16,
|
||||
"eviction scan must not collapse to a tiny hot zone; covered windows={} out of {}",
|
||||
covered_windows.len(),
|
||||
state_len / scan_limit
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn light_fuzz_offset_always_stays_inside_state_len() {
|
||||
let shared = ProxySharedState::new();
|
||||
let mut seed = 0xC0FF_EE12_3456_789Au64;
|
||||
let base = Instant::now();
|
||||
|
||||
for _ in 0..8_192usize {
|
||||
seed ^= seed << 7;
|
||||
seed ^= seed >> 9;
|
||||
seed ^= seed << 8;
|
||||
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(
|
||||
(seed >> 24) as u8,
|
||||
(seed >> 16) as u8,
|
||||
(seed >> 8) as u8,
|
||||
seed as u8,
|
||||
));
|
||||
let state_len = ((seed >> 16) as usize % 200_000).saturating_add(1);
|
||||
let scan_limit = ((seed >> 40) as usize % 2_048).saturating_add(1);
|
||||
let now = base + Duration::from_nanos(seed & 0x0fff);
|
||||
let start =
|
||||
auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit);
|
||||
|
||||
assert!(
|
||||
start < state_len,
|
||||
"scan offset must stay inside state length"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
@ -2,68 +2,62 @@ use super::*;
|
|||
use std::net::{IpAddr, Ipv4Addr};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
fn auth_probe_test_guard() -> std::sync::MutexGuard<'static, ()> {
|
||||
auth_probe_test_lock()
|
||||
.lock()
|
||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn positive_preauth_throttle_activates_after_failure_threshold() {
|
||||
let _guard = auth_probe_test_guard();
|
||||
clear_auth_probe_state_for_testing();
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 20));
|
||||
let now = Instant::now();
|
||||
|
||||
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
|
||||
auth_probe_record_failure(ip, now);
|
||||
auth_probe_record_failure_in(shared.as_ref(), ip, now);
|
||||
}
|
||||
|
||||
assert!(
|
||||
auth_probe_is_throttled(ip, now),
|
||||
auth_probe_is_throttled_in(shared.as_ref(), ip, now),
|
||||
"peer must be throttled once fail streak reaches threshold"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn negative_unrelated_peer_remains_unthrottled() {
|
||||
let _guard = auth_probe_test_guard();
|
||||
clear_auth_probe_state_for_testing();
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let attacker = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 12));
|
||||
let benign = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 13));
|
||||
let now = Instant::now();
|
||||
|
||||
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
|
||||
auth_probe_record_failure(attacker, now);
|
||||
auth_probe_record_failure_in(shared.as_ref(), attacker, now);
|
||||
}
|
||||
|
||||
assert!(auth_probe_is_throttled(attacker, now));
|
||||
assert!(auth_probe_is_throttled_in(shared.as_ref(), attacker, now));
|
||||
assert!(
|
||||
!auth_probe_is_throttled(benign, now),
|
||||
!auth_probe_is_throttled_in(shared.as_ref(), benign, now),
|
||||
"throttle state must stay scoped to normalized peer key"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn edge_expired_entry_is_pruned_and_no_longer_throttled() {
|
||||
let _guard = auth_probe_test_guard();
|
||||
clear_auth_probe_state_for_testing();
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(192, 0, 2, 41));
|
||||
let base = Instant::now();
|
||||
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
|
||||
auth_probe_record_failure(ip, base);
|
||||
auth_probe_record_failure_in(shared.as_ref(), ip, base);
|
||||
}
|
||||
|
||||
let expired_at = base + Duration::from_secs(AUTH_PROBE_TRACK_RETENTION_SECS + 1);
|
||||
assert!(
|
||||
!auth_probe_is_throttled(ip, expired_at),
|
||||
!auth_probe_is_throttled_in(shared.as_ref(), ip, expired_at),
|
||||
"expired entries must not keep throttling peers"
|
||||
);
|
||||
|
||||
let state = auth_probe_state_map();
|
||||
let state = auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
assert!(
|
||||
state.get(&normalize_auth_probe_ip(ip)).is_none(),
|
||||
"expired lookup should prune stale state"
|
||||
|
|
@ -72,36 +66,40 @@ fn edge_expired_entry_is_pruned_and_no_longer_throttled() {
|
|||
|
||||
#[test]
|
||||
fn adversarial_saturation_grace_requires_extra_failures_before_preauth_throttle() {
|
||||
let _guard = auth_probe_test_guard();
|
||||
clear_auth_probe_state_for_testing();
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(198, 18, 0, 7));
|
||||
let now = Instant::now();
|
||||
|
||||
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
|
||||
auth_probe_record_failure(ip, now);
|
||||
auth_probe_record_failure_in(shared.as_ref(), ip, now);
|
||||
}
|
||||
auth_probe_note_saturation(now);
|
||||
auth_probe_note_saturation_in(shared.as_ref(), now);
|
||||
|
||||
assert!(
|
||||
!auth_probe_should_apply_preauth_throttle(ip, now),
|
||||
!auth_probe_should_apply_preauth_throttle_in(shared.as_ref(), ip, now),
|
||||
"during global saturation, peer must receive configured grace window"
|
||||
);
|
||||
|
||||
for _ in 0..AUTH_PROBE_SATURATION_GRACE_FAILS {
|
||||
auth_probe_record_failure(ip, now + Duration::from_millis(1));
|
||||
auth_probe_record_failure_in(shared.as_ref(), ip, now + Duration::from_millis(1));
|
||||
}
|
||||
|
||||
assert!(
|
||||
auth_probe_should_apply_preauth_throttle(ip, now + Duration::from_millis(1)),
|
||||
auth_probe_should_apply_preauth_throttle_in(
|
||||
shared.as_ref(),
|
||||
ip,
|
||||
now + Duration::from_millis(1)
|
||||
),
|
||||
"after grace failures are exhausted, preauth throttle must activate"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn integration_over_cap_insertion_keeps_probe_map_bounded() {
|
||||
let _guard = auth_probe_test_guard();
|
||||
clear_auth_probe_state_for_testing();
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let now = Instant::now();
|
||||
for idx in 0..(AUTH_PROBE_TRACK_MAX_ENTRIES + 1024) {
|
||||
|
|
@ -111,10 +109,10 @@ fn integration_over_cap_insertion_keeps_probe_map_bounded() {
|
|||
((idx / 256) % 256) as u8,
|
||||
(idx % 256) as u8,
|
||||
));
|
||||
auth_probe_record_failure(ip, now);
|
||||
auth_probe_record_failure_in(shared.as_ref(), ip, now);
|
||||
}
|
||||
|
||||
let tracked = auth_probe_state_map().len();
|
||||
let tracked = auth_probe_state_for_testing_in_shared(shared.as_ref()).len();
|
||||
assert!(
|
||||
tracked <= AUTH_PROBE_TRACK_MAX_ENTRIES,
|
||||
"probe map must remain hard bounded under insertion storm"
|
||||
|
|
@ -123,8 +121,8 @@ fn integration_over_cap_insertion_keeps_probe_map_bounded() {
|
|||
|
||||
#[test]
|
||||
fn light_fuzz_randomized_failures_preserve_cap_and_nonzero_streaks() {
|
||||
let _guard = auth_probe_test_guard();
|
||||
clear_auth_probe_state_for_testing();
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let mut seed = 0x4D53_5854_6F66_6175u64;
|
||||
let now = Instant::now();
|
||||
|
|
@ -140,10 +138,14 @@ fn light_fuzz_randomized_failures_preserve_cap_and_nonzero_streaks() {
|
|||
(seed >> 8) as u8,
|
||||
seed as u8,
|
||||
));
|
||||
auth_probe_record_failure(ip, now + Duration::from_millis((seed & 0x3f) as u64));
|
||||
auth_probe_record_failure_in(
|
||||
shared.as_ref(),
|
||||
ip,
|
||||
now + Duration::from_millis((seed & 0x3f) as u64),
|
||||
);
|
||||
}
|
||||
|
||||
let state = auth_probe_state_map();
|
||||
let state = auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
assert!(state.len() <= AUTH_PROBE_TRACK_MAX_ENTRIES);
|
||||
for entry in state.iter() {
|
||||
assert!(entry.value().fail_streak > 0);
|
||||
|
|
@ -152,13 +154,14 @@ fn light_fuzz_randomized_failures_preserve_cap_and_nonzero_streaks() {
|
|||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||
async fn stress_parallel_failure_flood_keeps_state_hard_capped() {
|
||||
let _guard = auth_probe_test_guard();
|
||||
clear_auth_probe_state_for_testing();
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let start = Instant::now();
|
||||
let mut tasks = Vec::new();
|
||||
|
||||
for worker in 0..8u8 {
|
||||
let shared = shared.clone();
|
||||
tasks.push(tokio::spawn(async move {
|
||||
for i in 0..4096u32 {
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(
|
||||
|
|
@ -167,7 +170,11 @@ async fn stress_parallel_failure_flood_keeps_state_hard_capped() {
|
|||
((i >> 8) & 0xff) as u8,
|
||||
(i & 0xff) as u8,
|
||||
));
|
||||
auth_probe_record_failure(ip, start + Duration::from_millis((i % 4) as u64));
|
||||
auth_probe_record_failure_in(
|
||||
shared.as_ref(),
|
||||
ip,
|
||||
start + Duration::from_millis((i % 4) as u64),
|
||||
);
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
|
@ -176,12 +183,12 @@ async fn stress_parallel_failure_flood_keeps_state_hard_capped() {
|
|||
task.await.expect("stress worker must not panic");
|
||||
}
|
||||
|
||||
let tracked = auth_probe_state_map().len();
|
||||
let tracked = auth_probe_state_for_testing_in_shared(shared.as_ref()).len();
|
||||
assert!(
|
||||
tracked <= AUTH_PROBE_TRACK_MAX_ENTRIES,
|
||||
"parallel failure flood must not exceed cap"
|
||||
);
|
||||
|
||||
let probe = IpAddr::V4(Ipv4Addr::new(172, 3, 4, 5));
|
||||
let _ = auth_probe_is_throttled(probe, start + Duration::from_millis(2));
|
||||
let _ = auth_probe_is_throttled_in(shared.as_ref(), probe, start + Duration::from_millis(2));
|
||||
}
|
||||
|
|
|
|||
|
|
@ -0,0 +1,95 @@
|
|||
use super::*;
|
||||
use std::net::{IpAddr, Ipv4Addr};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
#[test]
|
||||
fn edge_zero_state_len_yields_zero_start_offset() {
|
||||
let shared = ProxySharedState::new();
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 44));
|
||||
let now = Instant::now();
|
||||
|
||||
assert_eq!(
|
||||
auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, 0, 16),
|
||||
0,
|
||||
"empty map must not produce non-zero scan offset"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn adversarial_large_state_must_allow_start_offset_outside_scan_budget_window() {
|
||||
let shared = ProxySharedState::new();
|
||||
let base = Instant::now();
|
||||
let scan_limit = 16usize;
|
||||
let state_len = 65_536usize;
|
||||
|
||||
let mut saw_offset_outside_window = false;
|
||||
for i in 0..2048u32 {
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(
|
||||
203,
|
||||
((i >> 16) & 0xff) as u8,
|
||||
((i >> 8) & 0xff) as u8,
|
||||
(i & 0xff) as u8,
|
||||
));
|
||||
let now = base + Duration::from_micros(i as u64);
|
||||
let start =
|
||||
auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit);
|
||||
assert!(
|
||||
start < state_len,
|
||||
"start offset must stay within state length; start={start}, len={state_len}"
|
||||
);
|
||||
if start >= scan_limit {
|
||||
saw_offset_outside_window = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
assert!(
|
||||
saw_offset_outside_window,
|
||||
"large-state eviction must sample beyond the first scan window"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn positive_state_smaller_than_scan_limit_caps_to_state_len() {
|
||||
let shared = ProxySharedState::new();
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(192, 0, 2, 17));
|
||||
let now = Instant::now();
|
||||
|
||||
for state_len in 1..32usize {
|
||||
let start = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, 64);
|
||||
assert!(
|
||||
start < state_len,
|
||||
"start offset must never exceed state length when scan limit is larger"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn light_fuzz_scan_offset_budget_never_exceeds_effective_window() {
|
||||
let shared = ProxySharedState::new();
|
||||
let mut seed = 0x5A41_5356_4C32_3236u64;
|
||||
let base = Instant::now();
|
||||
|
||||
for _ in 0..4096 {
|
||||
seed ^= seed << 7;
|
||||
seed ^= seed >> 9;
|
||||
seed ^= seed << 8;
|
||||
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(
|
||||
(seed >> 24) as u8,
|
||||
(seed >> 16) as u8,
|
||||
(seed >> 8) as u8,
|
||||
seed as u8,
|
||||
));
|
||||
let state_len = ((seed >> 8) as usize % 131_072).saturating_add(1);
|
||||
let scan_limit = ((seed >> 32) as usize % 512).saturating_add(1);
|
||||
let now = base + Duration::from_nanos(seed & 0xffff);
|
||||
let start =
|
||||
auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit);
|
||||
|
||||
assert!(
|
||||
start < state_len,
|
||||
"scan offset must stay inside state length"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,127 @@
|
|||
use super::*;
|
||||
use std::collections::HashSet;
|
||||
use std::net::{IpAddr, Ipv4Addr};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
#[test]
|
||||
fn positive_same_ip_moving_time_yields_diverse_scan_offsets() {
|
||||
let shared = ProxySharedState::new();
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 77));
|
||||
let base = Instant::now();
|
||||
let mut uniq = HashSet::new();
|
||||
|
||||
for i in 0..512u64 {
|
||||
let now = base + Duration::from_nanos(i);
|
||||
let offset = auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, 65_536, 16);
|
||||
uniq.insert(offset);
|
||||
}
|
||||
|
||||
assert!(
|
||||
uniq.len() >= 256,
|
||||
"offset randomization collapsed unexpectedly for same-ip moving-time samples (uniq={})",
|
||||
uniq.len()
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn adversarial_many_ips_same_time_spreads_offsets_without_bias_collapse() {
|
||||
let shared = ProxySharedState::new();
|
||||
let now = Instant::now();
|
||||
let mut uniq = HashSet::new();
|
||||
|
||||
for i in 0..1024u32 {
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(
|
||||
(i >> 16) as u8,
|
||||
(i >> 8) as u8,
|
||||
i as u8,
|
||||
(255 - (i as u8)),
|
||||
));
|
||||
uniq.insert(auth_probe_scan_start_offset_in(
|
||||
shared.as_ref(),
|
||||
ip,
|
||||
now,
|
||||
65_536,
|
||||
16,
|
||||
));
|
||||
}
|
||||
|
||||
assert!(
|
||||
uniq.len() >= 512,
|
||||
"scan offset distribution collapsed unexpectedly across adversarial peer set (uniq={})",
|
||||
uniq.len()
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||
async fn stress_parallel_failure_churn_under_saturation_remains_capped_and_live() {
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let start = Instant::now();
|
||||
let mut workers = Vec::new();
|
||||
for worker in 0..8u8 {
|
||||
let shared = shared.clone();
|
||||
workers.push(tokio::spawn(async move {
|
||||
for i in 0..8192u32 {
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(
|
||||
10,
|
||||
worker,
|
||||
((i >> 8) & 0xff) as u8,
|
||||
(i & 0xff) as u8,
|
||||
));
|
||||
auth_probe_record_failure_in(
|
||||
shared.as_ref(),
|
||||
ip,
|
||||
start + Duration::from_micros((i % 128) as u64),
|
||||
);
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
for worker in workers {
|
||||
worker.await.expect("saturation worker must not panic");
|
||||
}
|
||||
|
||||
assert!(
|
||||
auth_probe_state_for_testing_in_shared(shared.as_ref()).len()
|
||||
<= AUTH_PROBE_TRACK_MAX_ENTRIES,
|
||||
"state must remain hard-capped under parallel saturation churn"
|
||||
);
|
||||
|
||||
let probe = IpAddr::V4(Ipv4Addr::new(10, 4, 1, 1));
|
||||
let _ = auth_probe_should_apply_preauth_throttle_in(
|
||||
shared.as_ref(),
|
||||
probe,
|
||||
start + Duration::from_millis(1),
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn light_fuzz_scan_offset_stays_within_window_for_randomized_inputs() {
|
||||
let shared = ProxySharedState::new();
|
||||
let mut seed = 0xA55A_1357_2468_9BDFu64;
|
||||
let base = Instant::now();
|
||||
|
||||
for _ in 0..8192 {
|
||||
seed ^= seed << 7;
|
||||
seed ^= seed >> 9;
|
||||
seed ^= seed << 8;
|
||||
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(
|
||||
(seed >> 24) as u8,
|
||||
(seed >> 16) as u8,
|
||||
(seed >> 8) as u8,
|
||||
seed as u8,
|
||||
));
|
||||
let state_len = ((seed >> 8) as usize % 200_000).saturating_add(1);
|
||||
let scan_limit = ((seed >> 40) as usize % 1024).saturating_add(1);
|
||||
let now = base + Duration::from_nanos(seed & 0x1fff);
|
||||
|
||||
let offset =
|
||||
auth_probe_scan_start_offset_in(shared.as_ref(), ip, now, state_len, scan_limit);
|
||||
assert!(
|
||||
offset < state_len,
|
||||
"scan offset must always remain inside state length"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
@ -0,0 +1,237 @@
|
|||
use super::*;
|
||||
use crate::crypto::sha256_hmac;
|
||||
use crate::stats::ReplayChecker;
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
|
||||
use std::time::{Duration, Instant};
|
||||
use tokio::time::timeout;
|
||||
|
||||
fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig {
|
||||
let mut cfg = ProxyConfig::default();
|
||||
cfg.access.users.clear();
|
||||
cfg.access
|
||||
.users
|
||||
.insert("user".to_string(), secret_hex.to_string());
|
||||
cfg.access.ignore_time_skew = true;
|
||||
cfg.censorship.mask = true;
|
||||
cfg
|
||||
}
|
||||
|
||||
fn make_valid_tls_handshake(secret: &[u8], timestamp: u32) -> Vec<u8> {
|
||||
let session_id_len: usize = 32;
|
||||
let len = tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN + 1 + session_id_len;
|
||||
let mut handshake = vec![0x42u8; len];
|
||||
|
||||
handshake[tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN] = session_id_len as u8;
|
||||
handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN].fill(0);
|
||||
|
||||
let computed = sha256_hmac(secret, &handshake);
|
||||
let mut digest = computed;
|
||||
let ts = timestamp.to_le_bytes();
|
||||
for i in 0..4 {
|
||||
digest[28 + i] ^= ts[i];
|
||||
}
|
||||
|
||||
handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN]
|
||||
.copy_from_slice(&digest);
|
||||
handshake
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn handshake_baseline_probe_always_falls_back_to_masking() {
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let cfg = test_config_with_secret_hex("11111111111111111111111111111111");
|
||||
let replay_checker = ReplayChecker::new(64, Duration::from_secs(60));
|
||||
let rng = SecureRandom::new();
|
||||
let peer: SocketAddr = "198.51.100.210:44321".parse().unwrap();
|
||||
|
||||
let probe = b"not-a-tls-clienthello";
|
||||
let res = handle_tls_handshake(
|
||||
probe,
|
||||
tokio::io::empty(),
|
||||
tokio::io::sink(),
|
||||
peer,
|
||||
&cfg,
|
||||
&replay_checker,
|
||||
&rng,
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(matches!(res, HandshakeResult::BadClient { .. }));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn handshake_baseline_invalid_secret_triggers_fallback_not_error_response() {
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let good_secret = [0x22u8; 16];
|
||||
let bad_cfg = test_config_with_secret_hex("33333333333333333333333333333333");
|
||||
let replay_checker = ReplayChecker::new(64, Duration::from_secs(60));
|
||||
let rng = SecureRandom::new();
|
||||
let peer: SocketAddr = "198.51.100.211:44322".parse().unwrap();
|
||||
|
||||
let handshake = make_valid_tls_handshake(&good_secret, 0);
|
||||
let res = handle_tls_handshake(
|
||||
&handshake,
|
||||
tokio::io::empty(),
|
||||
tokio::io::sink(),
|
||||
peer,
|
||||
&bad_cfg,
|
||||
&replay_checker,
|
||||
&rng,
|
||||
None,
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(matches!(res, HandshakeResult::BadClient { .. }));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn handshake_baseline_auth_probe_streak_increments_per_ip() {
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let cfg = test_config_with_secret_hex("44444444444444444444444444444444");
|
||||
let replay_checker = ReplayChecker::new(64, Duration::from_secs(60));
|
||||
let rng = SecureRandom::new();
|
||||
|
||||
let peer: SocketAddr = "203.0.113.10:5555".parse().unwrap();
|
||||
let untouched_ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 11));
|
||||
let bad_probe = b"\x16\x03\x01\x00";
|
||||
|
||||
for expected in 1..=3 {
|
||||
let res = handle_tls_handshake_with_shared(
|
||||
bad_probe,
|
||||
tokio::io::empty(),
|
||||
tokio::io::sink(),
|
||||
peer,
|
||||
&cfg,
|
||||
&replay_checker,
|
||||
&rng,
|
||||
None,
|
||||
shared.as_ref(),
|
||||
)
|
||||
.await;
|
||||
assert!(matches!(res, HandshakeResult::BadClient { .. }));
|
||||
assert_eq!(
|
||||
auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), peer.ip()),
|
||||
Some(expected)
|
||||
);
|
||||
assert_eq!(
|
||||
auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), untouched_ip),
|
||||
None
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn handshake_baseline_saturation_fires_at_compile_time_threshold() {
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 33));
|
||||
let now = Instant::now();
|
||||
|
||||
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS.saturating_sub(1) {
|
||||
auth_probe_record_failure_in(shared.as_ref(), ip, now);
|
||||
}
|
||||
assert!(!auth_probe_is_throttled_in(shared.as_ref(), ip, now));
|
||||
|
||||
auth_probe_record_failure_in(shared.as_ref(), ip, now);
|
||||
assert!(auth_probe_is_throttled_in(shared.as_ref(), ip, now));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn handshake_baseline_repeated_probes_streak_monotonic() {
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 42));
|
||||
let now = Instant::now();
|
||||
let mut prev = 0u32;
|
||||
|
||||
for _ in 0..100 {
|
||||
auth_probe_record_failure_in(shared.as_ref(), ip, now);
|
||||
let current =
|
||||
auth_probe_fail_streak_for_testing_in_shared(shared.as_ref(), ip).unwrap_or(0);
|
||||
assert!(current >= prev, "streak must be monotonic");
|
||||
prev = current;
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn handshake_baseline_throttled_ip_incurs_backoff_delay() {
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(198, 51, 100, 44));
|
||||
let now = Instant::now();
|
||||
|
||||
for _ in 0..AUTH_PROBE_BACKOFF_START_FAILS {
|
||||
auth_probe_record_failure_in(shared.as_ref(), ip, now);
|
||||
}
|
||||
|
||||
let delay = auth_probe_backoff(AUTH_PROBE_BACKOFF_START_FAILS);
|
||||
assert!(delay >= Duration::from_millis(AUTH_PROBE_BACKOFF_BASE_MS));
|
||||
|
||||
let before_expiry = now + delay.saturating_sub(Duration::from_millis(1));
|
||||
let after_expiry = now + delay + Duration::from_millis(1);
|
||||
|
||||
assert!(auth_probe_is_throttled_in(
|
||||
shared.as_ref(),
|
||||
ip,
|
||||
before_expiry
|
||||
));
|
||||
assert!(!auth_probe_is_throttled_in(
|
||||
shared.as_ref(),
|
||||
ip,
|
||||
after_expiry
|
||||
));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn handshake_baseline_malformed_probe_frames_fail_closed_to_masking() {
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let cfg = test_config_with_secret_hex("55555555555555555555555555555555");
|
||||
let replay_checker = ReplayChecker::new(64, Duration::from_secs(60));
|
||||
let rng = SecureRandom::new();
|
||||
let peer: SocketAddr = "198.51.100.212:44323".parse().unwrap();
|
||||
|
||||
let corpus: Vec<Vec<u8>> = vec![
|
||||
vec![0x16, 0x03, 0x01],
|
||||
vec![0x16, 0x03, 0x01, 0xFF, 0xFF],
|
||||
vec![0x00; 128],
|
||||
(0..64u8).collect(),
|
||||
];
|
||||
|
||||
for probe in corpus {
|
||||
let res = timeout(
|
||||
Duration::from_millis(250),
|
||||
handle_tls_handshake(
|
||||
&probe,
|
||||
tokio::io::empty(),
|
||||
tokio::io::sink(),
|
||||
peer,
|
||||
&cfg,
|
||||
&replay_checker,
|
||||
&rng,
|
||||
None,
|
||||
),
|
||||
)
|
||||
.await
|
||||
.expect("malformed probe handling must complete in bounded time");
|
||||
|
||||
assert!(
|
||||
matches!(
|
||||
res,
|
||||
HandshakeResult::BadClient { .. } | HandshakeResult::Error(_)
|
||||
),
|
||||
"malformed probe must fail closed"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
@ -67,16 +67,10 @@ fn test_config_with_secret_hex(secret_hex: &str) -> ProxyConfig {
|
|||
cfg
|
||||
}
|
||||
|
||||
fn auth_probe_test_guard() -> MutexGuard<'static, ()> {
|
||||
auth_probe_test_lock()
|
||||
.lock()
|
||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn mtproto_handshake_duplicate_digest_is_replayed_on_second_attempt() {
|
||||
let _guard = auth_probe_test_guard();
|
||||
clear_auth_probe_state_for_testing();
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let secret_hex = "11223344556677889900aabbccddeeff";
|
||||
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 2);
|
||||
|
|
@ -110,13 +104,13 @@ async fn mtproto_handshake_duplicate_digest_is_replayed_on_second_attempt() {
|
|||
.await;
|
||||
assert!(matches!(second, HandshakeResult::BadClient { .. }));
|
||||
|
||||
clear_auth_probe_state_for_testing();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn mtproto_handshake_fuzz_corpus_never_panics_and_stays_fail_closed() {
|
||||
let _guard = auth_probe_test_guard();
|
||||
clear_auth_probe_state_for_testing();
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let secret_hex = "00112233445566778899aabbccddeeff";
|
||||
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 1);
|
||||
|
|
@ -178,13 +172,13 @@ async fn mtproto_handshake_fuzz_corpus_never_panics_and_stays_fail_closed() {
|
|||
);
|
||||
}
|
||||
|
||||
clear_auth_probe_state_for_testing();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn mtproto_handshake_mixed_corpus_never_panics_and_exact_duplicates_are_rejected() {
|
||||
let _guard = auth_probe_test_guard();
|
||||
clear_auth_probe_state_for_testing();
|
||||
let shared = ProxySharedState::new();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
|
||||
let secret_hex = "99887766554433221100ffeeddccbbaa";
|
||||
let base = make_valid_mtproto_handshake(secret_hex, ProtoTag::Secure, 4);
|
||||
|
|
@ -274,5 +268,5 @@ async fn mtproto_handshake_mixed_corpus_never_panics_and_exact_duplicates_are_re
|
|||
);
|
||||
}
|
||||
|
||||
clear_auth_probe_state_for_testing();
|
||||
clear_auth_probe_state_for_testing_in_shared(shared.as_ref());
|
||||
}
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue