mirror of
https://github.com/telemt/telemt.git
synced 2026-04-15 01:24:09 +03:00
Compare commits
276 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
042d4fd612 | ||
|
|
bbc69f945e | ||
|
|
03c9a2588f | ||
|
|
9de8b2f0bf | ||
|
|
76eb8634a4 | ||
|
|
4e5b67bae8 | ||
|
|
bb2f3b24ac | ||
|
|
73f218b62a | ||
|
|
9cbc625b9b | ||
|
|
13ff3af1db | ||
|
|
d3f32b5568 | ||
|
|
77f717e3d1 | ||
|
|
db3e246390 | ||
|
|
388e14d01f | ||
|
|
b74ba38d40 | ||
|
|
269fce839f | ||
|
|
5a4072c964 | ||
|
|
2d3c2807ab | ||
|
|
50ae16ddf7 | ||
|
|
de5c26b7d7 | ||
|
|
a95678988a | ||
|
|
b17482ede3 | ||
|
|
a059de9191 | ||
|
|
e7e763888b | ||
|
|
c0a3e43aa8 | ||
|
|
4c32370b25 | ||
|
|
a6c298b633 | ||
|
|
e7a1d26e6e | ||
|
|
b91c6cb339 | ||
|
|
e676633dcd | ||
|
|
c4e7f54cbe | ||
|
|
f85205d48d | ||
|
|
d767ec02ee | ||
|
|
51835c33f2 | ||
|
|
88a4c652b6 | ||
|
|
ea2d964502 | ||
|
|
bd7218c39c | ||
|
|
3055637571 | ||
|
|
19b84b9d73 | ||
|
|
165a1ede57 | ||
|
|
6ead8b1922 | ||
|
|
63aa1038c0 | ||
|
|
4473826303 | ||
|
|
d7bbb376c9 | ||
|
|
7a8f946029 | ||
|
|
f2e6dc1774 | ||
|
|
54d65dd124 | ||
|
|
24594e648e | ||
|
|
e8b38ea860 | ||
|
|
b14c2b0a9b | ||
|
|
c1ee43fbac | ||
|
|
c8632de5b6 | ||
|
|
b930ea1ec5 | ||
|
|
3b86a883b9 | ||
|
|
5933b5e821 | ||
|
|
8188fedf6a | ||
|
|
f3598cf309 | ||
|
|
f2335c211c | ||
|
|
246ca11b88 | ||
|
|
bb355e916f | ||
|
|
777b15b1da | ||
|
|
8814854ae4 | ||
|
|
44c65f9c60 | ||
|
|
1260217be9 | ||
|
|
ebd37932c5 | ||
|
|
43d7e6e991 | ||
|
|
0eca535955 | ||
|
|
3abde52de8 | ||
|
|
801f670827 | ||
|
|
99ba2f7bbc | ||
|
|
1689b8a5dc | ||
|
|
babd902d95 | ||
|
|
e14dd07220 | ||
|
|
d93a4fbd53 | ||
|
|
2798039ab8 | ||
|
|
9dce748679 | ||
|
|
79093679ab | ||
|
|
35a8f5b2e5 | ||
|
|
456c433875 | ||
|
|
8f1ffe8c25 | ||
|
|
342b0119dd | ||
|
|
2605929b93 | ||
|
|
36814b6355 | ||
|
|
269ba537ad | ||
|
|
5c0eb6dbe8 | ||
|
|
a78c3e3ebd | ||
|
|
a4b70405b8 | ||
|
|
3afc3e1775 | ||
|
|
512bee6a8d | ||
|
|
66867d3f5b | ||
|
|
db36945293 | ||
|
|
5c5fdcb124 | ||
|
|
0ded366199 | ||
|
|
84a34cea3d | ||
|
|
7dc3c3666d | ||
|
|
dd07fa9453 | ||
|
|
bb1a372ac4 | ||
|
|
6661401a34 | ||
|
|
cd65fb432b | ||
|
|
caf0717789 | ||
|
|
4a610d83a3 | ||
|
|
aba4205dcc | ||
|
|
ef9b7b1492 | ||
|
|
d112f15b90 | ||
|
|
b55b264345 | ||
|
|
f61d25ebe0 | ||
|
|
ed4d1167dd | ||
|
|
dc6948cf39 | ||
|
|
4f11aa0772 | ||
|
|
6ea8ba25c4 | ||
|
|
e40361b171 | ||
|
|
1c6c73beda | ||
|
|
3f3bf5bbd2 | ||
|
|
ec793f3065 | ||
|
|
e83d366518 | ||
|
|
5a4209fe00 | ||
|
|
e7daf51193 | ||
|
|
754e4db8a9 | ||
|
|
7416829e89 | ||
|
|
c07b600acb | ||
|
|
7b44496706 | ||
|
|
67dc1e8d18 | ||
|
|
ad8ada33c9 | ||
|
|
bbb201b433 | ||
|
|
8d1faece60 | ||
|
|
a603505f90 | ||
|
|
f8c42c324f | ||
|
|
dd8ef4d996 | ||
|
|
e6ad9e4c7f | ||
|
|
2a01ca2d6f | ||
|
|
dc3363aa0d | ||
|
|
f655924323 | ||
|
|
05c066c676 | ||
|
|
1e000c2e7e | ||
|
|
fa17e719f6 | ||
|
|
ae3ced8e7c | ||
|
|
3279f6d46a | ||
|
|
6f9aef7bb4 | ||
|
|
049db1196f | ||
|
|
c8ffc23cf7 | ||
|
|
f230f2ce0e | ||
|
|
bdac6e3480 | ||
|
|
a4e9746dc7 | ||
|
|
c47495d671 | ||
|
|
44376b5652 | ||
|
|
5ae3a90d5e | ||
|
|
c7cf37898b | ||
|
|
901a0b7c23 | ||
|
|
03891db0c9 | ||
|
|
89e5668c7e | ||
|
|
1935455256 | ||
|
|
20e205189c | ||
|
|
1544e3fcff | ||
|
|
85295a9961 | ||
|
|
a54f807a45 | ||
|
|
31f6258c47 | ||
|
|
062464175e | ||
|
|
a5983c17d3 | ||
|
|
def42f0baa | ||
|
|
30ba41eb47 | ||
|
|
42f946f29e | ||
|
|
c53d7951b5 | ||
|
|
f36e264093 | ||
|
|
a3bdf64353 | ||
|
|
2aa7ea5137 | ||
|
|
462c927da6 | ||
|
|
cb87b2eac3 | ||
|
|
3739f38440 | ||
|
|
97d4a1c5c8 | ||
|
|
c2443e6f1a | ||
|
|
a7cffb547e | ||
|
|
f0c37f233e | ||
|
|
8e96039a1c | ||
|
|
36b360dfb6 | ||
|
|
60953bcc2c | ||
|
|
2c06288b40 | ||
|
|
0284b9f9e3 | ||
|
|
4e3f42dce3 | ||
|
|
5dd0c47f14 | ||
|
|
50a827e7fd | ||
|
|
d81140ccec | ||
|
|
4739083f57 | ||
|
|
c540a6657f | ||
|
|
4808a30185 | ||
|
|
1357f3cc4c | ||
|
|
d9aa6f4956 | ||
|
|
37a31c13cb | ||
|
|
35bca7d4cc | ||
|
|
f39d317d93 | ||
|
|
d4d93aabf5 | ||
|
|
c9271d9083 | ||
|
|
4f55d08c51 | ||
|
|
9c9ba4becd | ||
|
|
93caab1aec | ||
|
|
0c6bb3a641 | ||
|
|
b2e15327fe | ||
|
|
2e8be87ccf | ||
|
|
d78360982c | ||
|
|
bd0cefdb12 | ||
|
|
e2ed1eb286 | ||
|
|
822bcbf7a5 | ||
|
|
b25ec97a43 | ||
|
|
a74def9561 | ||
|
|
8821e38013 | ||
|
|
a1caebbe6f | ||
|
|
e0d821c6b6 | ||
|
|
205fc88718 | ||
|
|
95c1306166 | ||
|
|
e1ef192c10 | ||
|
|
ee4d15fed6 | ||
|
|
0040e9b6da | ||
|
|
2c10560795 | ||
|
|
5eff38eb82 | ||
|
|
b6206a6dfe | ||
|
|
e4a50f9286 | ||
|
|
213ce4555a | ||
|
|
5a16e68487 | ||
|
|
6ffbc51fb0 | ||
|
|
4d8a5ca174 | ||
|
|
0ae67db492 | ||
|
|
c4f77814ee | ||
|
|
dcab19a64f | ||
|
|
f10ca192fa | ||
|
|
92972ab6bf | ||
|
|
c351e08c43 | ||
|
|
e29855c8c6 | ||
|
|
3634fbd7e8 | ||
|
|
bb29797bfb | ||
|
|
3d5af3d248 | ||
|
|
2d7df3da6c | ||
|
|
4abc0e5134 | ||
|
|
4028579068 | ||
|
|
58f26ba8a7 | ||
|
|
2be3e4ab7f | ||
|
|
3d43ff6e57 | ||
|
|
1294da586f | ||
|
|
ac0698b772 | ||
|
|
2bd9036908 | ||
|
|
dda31b3d2f | ||
|
|
7d5e1cb9e8 | ||
|
|
56e38e8d00 | ||
|
|
4677b43c6e | ||
|
|
4ddbb97908 | ||
|
|
8b0b47145d | ||
|
|
f7e3ddcdb6 | ||
|
|
af5cff3304 | ||
|
|
cb9144bdb3 | ||
|
|
fa82634faf | ||
|
|
37b1a0289e | ||
|
|
9be33bcf93 | ||
|
|
bc9f691284 | ||
|
|
58e5605f39 | ||
|
|
75a654c766 | ||
|
|
2b058f7df7 | ||
|
|
01af2999bb | ||
|
|
c12d27f08a | ||
|
|
5e3408e80b | ||
|
|
052110618d | ||
|
|
47b8f0f656 | ||
|
|
67b2e25e39 | ||
|
|
9a08b541ed | ||
|
|
04379b4374 | ||
|
|
5cfb05b1f4 | ||
|
|
aa68ce531e | ||
|
|
d4ce304a37 | ||
|
|
8a579d9bda | ||
|
|
70cc6f22aa | ||
|
|
1674ba36b2 | ||
|
|
0c1a5c24d5 | ||
|
|
5df08300e2 | ||
|
|
543a87e166 | ||
|
|
519c8d276b | ||
|
|
4dc733d3e3 | ||
|
|
4506f38bfb | ||
|
|
5073248911 | ||
|
|
ae72e6f356 |
15
.cargo/deny.toml
Normal file
15
.cargo/deny.toml
Normal file
@@ -0,0 +1,15 @@
|
||||
[bans]
|
||||
multiple-versions = "deny"
|
||||
wildcards = "allow"
|
||||
highlight = "all"
|
||||
|
||||
# Explicitly flag the weak cryptography so the agent is forced to justify its existence
|
||||
[[bans.skip]]
|
||||
name = "md-5"
|
||||
version = "*"
|
||||
reason = "MUST VERIFY: Only allowed for legacy checksums, never for security."
|
||||
|
||||
[[bans.skip]]
|
||||
name = "sha1"
|
||||
version = "*"
|
||||
reason = "MUST VERIFY: Only allowed for backwards compatibility."
|
||||
8
.dockerignore
Normal file
8
.dockerignore
Normal file
@@ -0,0 +1,8 @@
|
||||
.git
|
||||
.github
|
||||
target
|
||||
.kilocode
|
||||
cache
|
||||
tlsfront
|
||||
*.tar
|
||||
*.tar.gz
|
||||
242
.github/workflows/release.yml
vendored
242
.github/workflows/release.yml
vendored
@@ -6,36 +6,34 @@ on:
|
||||
- '[0-9]+.[0-9]+.[0-9]+'
|
||||
workflow_dispatch:
|
||||
|
||||
concurrency:
|
||||
group: release-${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
BINARY_NAME: telemt
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build ${{ matrix.target }}
|
||||
# ==========================
|
||||
# GNU / glibc
|
||||
# ==========================
|
||||
build-gnu:
|
||||
name: GNU ${{ matrix.target }}
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- target: x86_64-unknown-linux-gnu
|
||||
artifact_name: telemt
|
||||
asset_name: telemt-x86_64-linux-gnu
|
||||
asset: telemt-x86_64-linux-gnu
|
||||
- target: aarch64-unknown-linux-gnu
|
||||
artifact_name: telemt
|
||||
asset_name: telemt-aarch64-linux-gnu
|
||||
- target: x86_64-unknown-linux-musl
|
||||
artifact_name: telemt
|
||||
asset_name: telemt-x86_64-linux-musl
|
||||
- target: aarch64-unknown-linux-musl
|
||||
artifact_name: telemt
|
||||
asset_name: telemt-aarch64-linux-musl
|
||||
asset: telemt-aarch64-linux-gnu
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
@@ -43,12 +41,20 @@ jobs:
|
||||
- uses: dtolnay/rust-toolchain@v1
|
||||
with:
|
||||
toolchain: stable
|
||||
targets: ${{ matrix.target }}
|
||||
targets: |
|
||||
x86_64-unknown-linux-gnu
|
||||
aarch64-unknown-linux-gnu
|
||||
|
||||
- name: Install cross-compilation tools
|
||||
- name: Install deps
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y gcc-aarch64-linux-gnu
|
||||
sudo apt-get install -y \
|
||||
build-essential \
|
||||
clang \
|
||||
lld \
|
||||
pkg-config \
|
||||
gcc-aarch64-linux-gnu \
|
||||
g++-aarch64-linux-gnu
|
||||
|
||||
- uses: actions/cache@v4
|
||||
with:
|
||||
@@ -56,41 +62,173 @@ jobs:
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-${{ matrix.target }}-cargo-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-${{ matrix.target }}-cargo-
|
||||
key: gnu-${{ matrix.target }}-${{ hashFiles('**/Cargo.lock') }}
|
||||
|
||||
- name: Install cross
|
||||
run: cargo install cross --git https://github.com/cross-rs/cross
|
||||
|
||||
- name: Build Release
|
||||
env:
|
||||
RUSTFLAGS: ${{ contains(matrix.target, 'musl') && '-C target-feature=+crt-static' || '' }}
|
||||
run: cross build --release --target ${{ matrix.target }}
|
||||
|
||||
- name: Package binary
|
||||
- name: Build
|
||||
run: |
|
||||
cd target/${{ matrix.target }}/release
|
||||
tar -czvf ${{ matrix.asset_name }}.tar.gz ${{ matrix.artifact_name }}
|
||||
sha256sum ${{ matrix.asset_name }}.tar.gz > ${{ matrix.asset_name }}.sha256
|
||||
if [ "${{ matrix.target }}" = "aarch64-unknown-linux-gnu" ]; then
|
||||
export CC=aarch64-linux-gnu-gcc
|
||||
export CXX=aarch64-linux-gnu-g++
|
||||
export CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc
|
||||
export CXX_aarch64_unknown_linux_gnu=aarch64-linux-gnu-g++
|
||||
export RUSTFLAGS="-C linker=aarch64-linux-gnu-gcc"
|
||||
else
|
||||
export CC=clang
|
||||
export CXX=clang++
|
||||
export CC_x86_64_unknown_linux_gnu=clang
|
||||
export CXX_x86_64_unknown_linux_gnu=clang++
|
||||
export RUSTFLAGS="-C linker=clang -C link-arg=-fuse-ld=lld"
|
||||
fi
|
||||
|
||||
cargo build --release --target ${{ matrix.target }}
|
||||
|
||||
- name: Package
|
||||
run: |
|
||||
mkdir -p dist
|
||||
BIN=target/${{ matrix.target }}/release/${{ env.BINARY_NAME }}
|
||||
|
||||
cp "$BIN" dist/${{ env.BINARY_NAME }}-${{ matrix.target }}
|
||||
|
||||
cd dist
|
||||
tar -czf ${{ matrix.asset }}.tar.gz ${{ env.BINARY_NAME }}-${{ matrix.target }}
|
||||
sha256sum ${{ matrix.asset }}.tar.gz > ${{ matrix.asset }}.sha256
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.asset_name }}
|
||||
name: ${{ matrix.asset }}
|
||||
path: |
|
||||
target/${{ matrix.target }}/release/${{ matrix.asset_name }}.tar.gz
|
||||
target/${{ matrix.target }}/release/${{ matrix.asset_name }}.sha256
|
||||
dist/${{ matrix.asset }}.tar.gz
|
||||
dist/${{ matrix.asset }}.sha256
|
||||
|
||||
build-docker-image:
|
||||
needs: build
|
||||
# ==========================
|
||||
# MUSL
|
||||
# ==========================
|
||||
build-musl:
|
||||
name: MUSL ${{ matrix.target }}
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
container:
|
||||
image: rust:slim-bookworm
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- target: x86_64-unknown-linux-musl
|
||||
asset: telemt-x86_64-linux-musl
|
||||
- target: aarch64-unknown-linux-musl
|
||||
asset: telemt-aarch64-linux-musl
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- name: Install deps
|
||||
run: |
|
||||
apt-get update
|
||||
apt-get install -y \
|
||||
musl-tools \
|
||||
pkg-config \
|
||||
curl
|
||||
|
||||
- uses: actions/cache@v4
|
||||
if: matrix.target == 'aarch64-unknown-linux-musl'
|
||||
with:
|
||||
path: ~/.musl-aarch64
|
||||
key: musl-toolchain-aarch64-v1
|
||||
|
||||
- name: Install aarch64 musl toolchain
|
||||
if: matrix.target == 'aarch64-unknown-linux-musl'
|
||||
run: |
|
||||
set -e
|
||||
|
||||
TOOLCHAIN_DIR="$HOME/.musl-aarch64"
|
||||
ARCHIVE="aarch64-linux-musl-cross.tgz"
|
||||
URL="https://github.com/telemt/telemt/releases/download/toolchains/$ARCHIVE"
|
||||
|
||||
if [ -x "$TOOLCHAIN_DIR/bin/aarch64-linux-musl-gcc" ]; then
|
||||
echo "✅ MUSL toolchain already installed"
|
||||
else
|
||||
echo "⬇️ Downloading musl toolchain from Telemt GitHub Releases..."
|
||||
|
||||
curl -fL \
|
||||
--retry 5 \
|
||||
--retry-delay 3 \
|
||||
--connect-timeout 10 \
|
||||
--max-time 120 \
|
||||
-o "$ARCHIVE" "$URL"
|
||||
|
||||
mkdir -p "$TOOLCHAIN_DIR"
|
||||
tar -xzf "$ARCHIVE" --strip-components=1 -C "$TOOLCHAIN_DIR"
|
||||
fi
|
||||
|
||||
echo "$TOOLCHAIN_DIR/bin" >> $GITHUB_PATH
|
||||
|
||||
- name: Add rust target
|
||||
run: rustup target add ${{ matrix.target }}
|
||||
|
||||
- uses: actions/cache@v4
|
||||
with:
|
||||
path: |
|
||||
/usr/local/cargo/registry
|
||||
/usr/local/cargo/git
|
||||
target
|
||||
key: musl-${{ matrix.target }}-${{ hashFiles('**/Cargo.lock') }}
|
||||
|
||||
- name: Build
|
||||
run: |
|
||||
if [ "${{ matrix.target }}" = "aarch64-unknown-linux-musl" ]; then
|
||||
export CC=aarch64-linux-musl-gcc
|
||||
export CC_aarch64_unknown_linux_musl=aarch64-linux-musl-gcc
|
||||
export RUSTFLAGS="-C target-feature=+crt-static -C linker=aarch64-linux-musl-gcc"
|
||||
else
|
||||
export CC=musl-gcc
|
||||
export CC_x86_64_unknown_linux_musl=musl-gcc
|
||||
export RUSTFLAGS="-C target-feature=+crt-static"
|
||||
fi
|
||||
|
||||
cargo build --release --target ${{ matrix.target }}
|
||||
|
||||
- name: Package
|
||||
run: |
|
||||
mkdir -p dist
|
||||
BIN=target/${{ matrix.target }}/release/${{ env.BINARY_NAME }}
|
||||
|
||||
cp "$BIN" dist/${{ env.BINARY_NAME }}-${{ matrix.target }}
|
||||
|
||||
cd dist
|
||||
tar -czf ${{ matrix.asset }}.tar.gz ${{ env.BINARY_NAME }}-${{ matrix.target }}
|
||||
sha256sum ${{ matrix.asset }}.tar.gz > ${{ matrix.asset }}.sha256
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: ${{ matrix.asset }}
|
||||
path: |
|
||||
dist/${{ matrix.asset }}.tar.gz
|
||||
dist/${{ matrix.asset }}.sha256
|
||||
|
||||
# ==========================
|
||||
# Docker
|
||||
# ==========================
|
||||
docker:
|
||||
name: Docker
|
||||
runs-on: ubuntu-latest
|
||||
needs: [build-gnu, build-musl]
|
||||
continue-on-error: true
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: artifacts
|
||||
|
||||
- name: Extract binaries
|
||||
run: |
|
||||
mkdir dist
|
||||
find artifacts -name "*.tar.gz" -exec tar -xzf {} -C dist \;
|
||||
|
||||
cp dist/telemt-x86_64-unknown-linux-musl dist/telemt || true
|
||||
|
||||
- uses: docker/setup-qemu-action@v3
|
||||
- uses: docker/setup-buildx-action@v3
|
||||
|
||||
@@ -105,35 +243,43 @@ jobs:
|
||||
id: vars
|
||||
run: echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Build and push
|
||||
- name: Build & Push
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
push: true
|
||||
platforms: linux/amd64,linux/arm64
|
||||
tags: |
|
||||
ghcr.io/${{ github.repository }}:${{ steps.vars.outputs.VERSION }}
|
||||
ghcr.io/${{ github.repository }}:latest
|
||||
build-args: |
|
||||
BINARY=dist/telemt
|
||||
|
||||
# ==========================
|
||||
# Release
|
||||
# ==========================
|
||||
release:
|
||||
name: Create Release
|
||||
needs: build
|
||||
name: Release
|
||||
runs-on: ubuntu-latest
|
||||
needs: [build-gnu, build-musl]
|
||||
|
||||
permissions:
|
||||
contents: write
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: artifacts
|
||||
|
||||
- name: Flatten artifacts
|
||||
run: |
|
||||
mkdir dist
|
||||
find artifacts -type f -exec cp {} dist/ \;
|
||||
|
||||
- name: Create Release
|
||||
uses: softprops/action-gh-release@v2
|
||||
with:
|
||||
files: artifacts/**/*
|
||||
files: dist/*
|
||||
generate_release_notes: true
|
||||
draft: false
|
||||
prerelease: ${{ contains(github.ref, '-rc') || contains(github.ref, '-beta') || contains(github.ref, '-alpha') }}
|
||||
|
||||
12
.github/workflows/rust.yml
vendored
12
.github/workflows/rust.yml
vendored
@@ -45,6 +45,18 @@ jobs:
|
||||
- name: Run tests
|
||||
run: cargo test --verbose
|
||||
|
||||
- name: Stress quota-lock suites (PR only)
|
||||
if: github.event_name == 'pull_request'
|
||||
env:
|
||||
RUST_TEST_THREADS: 16
|
||||
run: |
|
||||
set -euo pipefail
|
||||
for i in $(seq 1 12); do
|
||||
echo "[quota-lock-stress] iteration ${i}/12"
|
||||
cargo test quota_lock_ --bin telemt -- --nocapture --test-threads 16
|
||||
cargo test relay_quota_wake --bin telemt -- --nocapture --test-threads 16
|
||||
done
|
||||
|
||||
# clippy dont fail on warnings because of active development of telemt
|
||||
# and many warnings
|
||||
- name: Run clippy
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -21,3 +21,4 @@ target
|
||||
#.idea/
|
||||
|
||||
proxy-secret
|
||||
coverage-html/
|
||||
22
AGENTS.md
22
AGENTS.md
@@ -5,6 +5,22 @@ Your responses are precise, minimal, and architecturally sound. You are working
|
||||
|
||||
---
|
||||
|
||||
### Context: The Telemt Project
|
||||
|
||||
You are working on **Telemt**, a high-performance, production-grade Telegram MTProxy implementation written in Rust. It is explicitly designed to operate in highly hostile network environments and evade advanced network censorship.
|
||||
|
||||
**Adversarial Threat Model:**
|
||||
The proxy operates under constant surveillance by DPI (Deep Packet Inspection) systems and active scanners (state firewalls, mobile operator fraud controls). These entities actively probe IPs, analyze protocol handshakes, and look for known proxy signatures to block or throttle traffic.
|
||||
|
||||
**Core Architectural Pillars:**
|
||||
1. **TLS-Fronting (TLS-F) & TCP-Splitting (TCP-S):** To the outside world, Telemt looks like a standard TLS server. If a client presents a valid MTProxy key, the connection is handled internally. If a censor's scanner, web browser, or unauthorized crawler connects, Telemt seamlessly splices the TCP connection (L4) to a real, legitimate HTTPS fallback server (e.g., Nginx) without modifying the `ClientHello` or terminating the TLS handshake.
|
||||
2. **Middle-End (ME) Orchestration:** A highly concurrent, generation-based pool managing upstream connections to Telegram Datacenters (DCs). It utilizes an **Adaptive Floor** (dynamically scaling writer connections based on traffic), **Hardswaps** (zero-downtime pool reconfiguration), and **STUN/NAT** reflection mechanisms.
|
||||
3. **Strict KDF Routing:** Cryptographic Key Derivation Functions (KDF) in this protocol strictly rely on the exact pairing of Source IP/Port and Destination IP/Port. Deviations or missing port logic will silently break the MTProto handshake.
|
||||
4. **Data Plane vs. Control Plane Isolation:** The Data Plane (readers, writers, payload relay, TCP splicing) must remain strictly non-blocking, zero-allocation in hot paths, and highly resilient to network backpressure. The Control Plane (API, metrics, pool generation swaps, config reloads) orchestrates the state asynchronously without stalling the Data Plane.
|
||||
|
||||
Any modification you make must preserve Telemt's invisibility to censors, its strict memory-safety invariants, and its hot-path throughput.
|
||||
|
||||
|
||||
### 0. Priority Resolution — Scope Control
|
||||
|
||||
This section resolves conflicts between code quality enforcement and scope limitation.
|
||||
@@ -374,6 +390,12 @@ you MUST explain why existing invariants remain valid.
|
||||
- Do not modify existing tests unless the task explicitly requires it.
|
||||
- Do not weaken assertions.
|
||||
- Preserve determinism in testable components.
|
||||
- Bug-first forces the discipline of proving you understand a bug before you fix it. Tests written after a fix almost always pass trivially and catch nothing new.
|
||||
- Invariants over scenarios is the core shift. The route_mode table alone would have caught both BUG-1 and BUG-2 before they were written — "snapshot equals watch state after any transition burst" is a two-line property test that fails immediately on the current diverged-atomics code.
|
||||
- Differential/model catches logic drift over time.
|
||||
- Scheduler pressure is specifically aimed at the concurrent state bugs that keep reappearing. A single-threaded happy-path test of set_mode will never find subtle bugs; 10,000 concurrent calls will find it on the first run.
|
||||
- Mutation gate answers your original complaint directly. It measures test power. If you can remove a bounds check and nothing breaks, the suite isn't covering that branch yet — it just says so explicitly.
|
||||
- Dead parameter is a code smell rule.
|
||||
|
||||
### 15. Security Constraints
|
||||
|
||||
|
||||
208
CODE_OF_CONDUCT.md
Normal file
208
CODE_OF_CONDUCT.md
Normal file
@@ -0,0 +1,208 @@
|
||||
# Code of Conduct
|
||||
|
||||
## 1. Purpose
|
||||
|
||||
Telemt exists to solve technical problems.
|
||||
|
||||
Telemt is open to contributors who want to learn, improve and build meaningful systems together.
|
||||
|
||||
It is a place for building, testing, reasoning, documenting, and improving systems.
|
||||
|
||||
Discussions that advance this work are in scope. Discussions that divert it are not.
|
||||
|
||||
Technology has consequences. Responsibility is inherent.
|
||||
|
||||
> **Zweck bestimmt die Form.**
|
||||
|
||||
> Purpose defines form.
|
||||
|
||||
---
|
||||
|
||||
## 2. Principles
|
||||
|
||||
* **Technical over emotional**
|
||||
Arguments are grounded in data, logs, reproducible cases, or clear reasoning.
|
||||
|
||||
* **Clarity over noise**
|
||||
Communication is structured, concise, and relevant.
|
||||
|
||||
* **Openness with standards**
|
||||
Participation is open. The work remains disciplined.
|
||||
|
||||
* **Independence of judgment**
|
||||
Claims are evaluated on technical merit, not affiliation or posture.
|
||||
|
||||
* **Responsibility over capability**
|
||||
Capability does not justify careless use.
|
||||
|
||||
* **Cooperation over friction**
|
||||
Progress depends on coordination, mutual support, and honest review.
|
||||
|
||||
* **Good intent, rigorous method**
|
||||
Assume good intent, but require rigor.
|
||||
|
||||
> **Aussagen gelten nach ihrer Begründung.**
|
||||
|
||||
> Claims are weighed by evidence.
|
||||
|
||||
---
|
||||
|
||||
## 3. Expected Behavior
|
||||
|
||||
Participants are expected to:
|
||||
|
||||
* Communicate directly and respectfully
|
||||
* Support claims with evidence
|
||||
* Stay within technical scope
|
||||
* Accept critique and provide it constructively
|
||||
* Reduce noise, duplication, and ambiguity
|
||||
* Help others reach correct and reproducible outcomes
|
||||
* Act in a way that improves the system as a whole
|
||||
|
||||
Precision is learned.
|
||||
|
||||
New contributors are welcome. They are expected to grow into these standards. Existing contributors are expected to make that growth possible.
|
||||
|
||||
> **Wer behauptet, belegt.**
|
||||
|
||||
> Whoever claims, proves.
|
||||
|
||||
---
|
||||
|
||||
## 4. Unacceptable Behavior
|
||||
|
||||
The following is not allowed:
|
||||
|
||||
* Personal attacks, insults, harassment, or intimidation
|
||||
* Repeatedly derailing discussion away from Telemt’s purpose
|
||||
* Spam, flooding, or repeated low-quality input
|
||||
* Misinformation presented as fact
|
||||
* Attempts to degrade, destabilize, or exhaust Telemt or its participants
|
||||
* Use of Telemt or its spaces to enable harm
|
||||
|
||||
Telemt is not a venue for disputes that displace technical work.
|
||||
Such discussions may be closed, removed, or redirected.
|
||||
|
||||
> **Störung ist kein Beitrag.**
|
||||
|
||||
> Disruption is not contribution.
|
||||
|
||||
---
|
||||
|
||||
## 5. Security and Misuse
|
||||
|
||||
Telemt is intended for responsible use.
|
||||
|
||||
* Do not use it to plan, coordinate, or execute harm
|
||||
* Do not publish vulnerabilities without responsible disclosure
|
||||
* Report security issues privately where possible
|
||||
|
||||
Security is both technical and behavioral.
|
||||
|
||||
> **Verantwortung endet nicht am Code.**
|
||||
|
||||
> Responsibility does not end at the code.
|
||||
|
||||
---
|
||||
|
||||
## 6. Openness
|
||||
|
||||
Telemt is open to contributors of different backgrounds, experience levels, and working styles.
|
||||
|
||||
Standards are public, legible, and applied to the work itself.
|
||||
|
||||
Questions are welcome. Careful disagreement is welcome. Honest correction is welcome.
|
||||
|
||||
Gatekeeping by obscurity, status signaling, or hostility is not.
|
||||
|
||||
---
|
||||
|
||||
## 7. Scope
|
||||
|
||||
This Code of Conduct applies to all official spaces:
|
||||
|
||||
* Source repositories (issues, pull requests, discussions)
|
||||
* Documentation
|
||||
* Communication channels associated with Telemt
|
||||
|
||||
---
|
||||
|
||||
## 8. Maintainer Stewardship
|
||||
|
||||
Maintainers are responsible for final decisions in matters of conduct, scope, and direction.
|
||||
|
||||
This responsibility is stewardship: preserving continuity, protecting signal, maintaining standards, and keeping Telemt workable for others.
|
||||
|
||||
Judgment should be exercised with restraint, consistency, and institutional responsibility.
|
||||
|
||||
Not every decision requires extended debate.
|
||||
Not every intervention requires public explanation.
|
||||
|
||||
All decisions are expected to serve the durability, clarity, and integrity of Telemt.
|
||||
|
||||
> **Ordnung ist Voraussetzung der Funktion.**
|
||||
|
||||
> Order is the precondition of function.
|
||||
|
||||
---
|
||||
|
||||
## 9. Enforcement
|
||||
|
||||
Maintainers may act to preserve the integrity of Telemt, including by:
|
||||
|
||||
* Removing content
|
||||
* Locking discussions
|
||||
* Rejecting contributions
|
||||
* Restricting or banning participants
|
||||
|
||||
Actions are taken to maintain function, continuity, and signal quality.
|
||||
|
||||
Where possible, correction is preferred to exclusion.
|
||||
|
||||
Where necessary, exclusion is preferred to decay.
|
||||
|
||||
---
|
||||
|
||||
## 10. Final
|
||||
|
||||
Telemt is built on discipline, structure, and shared intent.
|
||||
|
||||
Signal over noise.
|
||||
Facts over opinion.
|
||||
Systems over rhetoric.
|
||||
|
||||
Work is collective.
|
||||
Outcomes are shared.
|
||||
Responsibility is distributed.
|
||||
|
||||
Precision is learned.
|
||||
Rigor is expected.
|
||||
Help is part of the work.
|
||||
|
||||
> **Ordnung ist Voraussetzung der Freiheit.**
|
||||
|
||||
If you contribute — contribute with care.
|
||||
If you speak — speak with substance.
|
||||
If you engage — engage constructively.
|
||||
|
||||
---
|
||||
|
||||
## 11. After All
|
||||
|
||||
Systems outlive intentions.
|
||||
|
||||
What is built will be used.
|
||||
What is released will propagate.
|
||||
What is maintained will define the future state.
|
||||
|
||||
There is no neutral infrastructure, only infrastructure shaped well or poorly.
|
||||
|
||||
> **Jedes System trägt Verantwortung.**
|
||||
|
||||
> Every system carries responsibility.
|
||||
|
||||
Stability requires discipline.
|
||||
Freedom requires structure.
|
||||
Trust requires honesty.
|
||||
|
||||
In the end, the system reflects its contributors.
|
||||
1515
Cargo.lock
generated
1515
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
29
Cargo.toml
29
Cargo.toml
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "telemt"
|
||||
version = "3.3.17"
|
||||
version = "3.3.29"
|
||||
edition = "2024"
|
||||
|
||||
[dependencies]
|
||||
@@ -22,16 +22,19 @@ hmac = "0.12"
|
||||
crc32fast = "1.4"
|
||||
crc32c = "0.6"
|
||||
zeroize = { version = "1.8", features = ["derive"] }
|
||||
subtle = "2.6"
|
||||
static_assertions = "1.1"
|
||||
|
||||
# Network
|
||||
socket2 = { version = "0.5", features = ["all"] }
|
||||
nix = { version = "0.28", default-features = false, features = ["net"] }
|
||||
socket2 = { version = "0.6", features = ["all"] }
|
||||
nix = { version = "0.31", default-features = false, features = ["net", "fs"] }
|
||||
shadowsocks = { version = "1.24", features = ["aead-cipher-2022"] }
|
||||
|
||||
# Serialization
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
toml = "0.8"
|
||||
x509-parser = "0.15"
|
||||
toml = "1.0"
|
||||
x509-parser = "0.18"
|
||||
|
||||
# Utils
|
||||
bytes = "1.9"
|
||||
@@ -39,9 +42,10 @@ thiserror = "2.0"
|
||||
tracing = "0.1"
|
||||
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
|
||||
parking_lot = "0.12"
|
||||
dashmap = "5.5"
|
||||
dashmap = "6.1"
|
||||
arc-swap = "1.7"
|
||||
lru = "0.16"
|
||||
rand = "0.9"
|
||||
rand = "0.10"
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
hex = "0.4"
|
||||
base64 = "0.22"
|
||||
@@ -50,23 +54,24 @@ regex = "1.11"
|
||||
crossbeam-queue = "0.3"
|
||||
num-bigint = "0.4"
|
||||
num-traits = "0.2"
|
||||
x25519-dalek = "2"
|
||||
anyhow = "1.0"
|
||||
|
||||
# HTTP
|
||||
reqwest = { version = "0.12", features = ["rustls-tls"], default-features = false }
|
||||
notify = { version = "6", features = ["macos_fsevent"] }
|
||||
ipnetwork = "0.20"
|
||||
reqwest = { version = "0.13", features = ["rustls"], default-features = false }
|
||||
notify = "8.2"
|
||||
ipnetwork = { version = "0.21", features = ["serde"] }
|
||||
hyper = { version = "1", features = ["server", "http1"] }
|
||||
hyper-util = { version = "0.1", features = ["tokio", "server-auto"] }
|
||||
http-body-util = "0.1"
|
||||
httpdate = "1.0"
|
||||
tokio-rustls = { version = "0.26", default-features = false, features = ["tls12"] }
|
||||
rustls = { version = "0.23", default-features = false, features = ["std", "tls12", "ring"] }
|
||||
webpki-roots = "0.26"
|
||||
webpki-roots = "1.0"
|
||||
|
||||
[dev-dependencies]
|
||||
tokio-test = "0.4"
|
||||
criterion = "0.5"
|
||||
criterion = "0.8"
|
||||
proptest = "1.4"
|
||||
futures = "0.3"
|
||||
|
||||
|
||||
66
Dockerfile
66
Dockerfile
@@ -1,3 +1,5 @@
|
||||
# syntax=docker/dockerfile:1
|
||||
|
||||
# ==========================
|
||||
# Stage 1: Build
|
||||
# ==========================
|
||||
@@ -5,39 +7,91 @@ FROM rust:1.88-slim-bookworm AS builder
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
pkg-config \
|
||||
ca-certificates \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /build
|
||||
|
||||
# Depcache
|
||||
COPY Cargo.toml Cargo.lock* ./
|
||||
RUN mkdir src && echo 'fn main() {}' > src/main.rs && \
|
||||
cargo build --release 2>/dev/null || true && \
|
||||
rm -rf src
|
||||
|
||||
# Build
|
||||
COPY . .
|
||||
RUN cargo build --release && strip target/release/telemt
|
||||
|
||||
# ==========================
|
||||
# Stage 2: Runtime
|
||||
# Stage 2: Compress (strip + UPX)
|
||||
# ==========================
|
||||
FROM debian:bookworm-slim
|
||||
FROM debian:12-slim AS minimal
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
upx \
|
||||
binutils \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
COPY --from=builder /build/target/release/telemt /telemt
|
||||
|
||||
RUN strip /telemt || true
|
||||
RUN upx --best --lzma /telemt || true
|
||||
|
||||
# ==========================
|
||||
# Stage 3: Debug base
|
||||
# ==========================
|
||||
FROM debian:12-slim AS debug-base
|
||||
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
ca-certificates \
|
||||
tzdata \
|
||||
curl \
|
||||
iproute2 \
|
||||
busybox \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
RUN useradd -r -s /usr/sbin/nologin telemt
|
||||
# ==========================
|
||||
# Stage 4: Debug image
|
||||
# ==========================
|
||||
FROM debug-base AS debug
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY --from=builder /build/target/release/telemt /app/telemt
|
||||
COPY --from=minimal /telemt /app/telemt
|
||||
COPY config.toml /app/config.toml
|
||||
|
||||
RUN chown -R telemt:telemt /app
|
||||
USER telemt
|
||||
USER root
|
||||
|
||||
EXPOSE 443
|
||||
EXPOSE 9090
|
||||
EXPOSE 9091
|
||||
|
||||
ENTRYPOINT ["/app/telemt"]
|
||||
CMD ["config.toml"]
|
||||
|
||||
# ==========================
|
||||
# Stage 5: Production (distroless)
|
||||
# ==========================
|
||||
FROM gcr.io/distroless/base-debian12 AS prod
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
COPY --from=minimal /telemt /app/telemt
|
||||
COPY config.toml /app/config.toml
|
||||
|
||||
# TLS + timezone + shell
|
||||
COPY --from=debug-base /etc/ssl/certs /etc/ssl/certs
|
||||
COPY --from=debug-base /usr/share/zoneinfo /usr/share/zoneinfo
|
||||
COPY --from=debug-base /bin/busybox /bin/busybox
|
||||
|
||||
RUN ["/bin/busybox", "--install", "-s", "/bin"]
|
||||
|
||||
# distroless user
|
||||
USER nonroot:nonroot
|
||||
|
||||
EXPOSE 443
|
||||
EXPOSE 9090
|
||||
EXPOSE 9091
|
||||
|
||||
ENTRYPOINT ["/app/telemt"]
|
||||
CMD ["config.toml"]
|
||||
|
||||
165
LICENSE
Normal file
165
LICENSE
Normal file
@@ -0,0 +1,165 @@
|
||||
###### TELEMT Public License 3 ######
|
||||
##### Copyright (c) 2026 Telemt #####
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this Software and associated documentation files (the "Software"),
|
||||
to use, reproduce, modify, prepare derivative works of, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to permit
|
||||
persons to whom the Software is furnished to do so, provided that all
|
||||
copyright notices, license terms, and conditions set forth in this License
|
||||
are preserved and complied with.
|
||||
|
||||
### Official Translations
|
||||
|
||||
The canonical version of this License is the English version.
|
||||
Official translations are provided for informational purposes only
|
||||
and for convenience, and do not have legal force. In case of any
|
||||
discrepancy, the English version of this License shall prevail.
|
||||
Available versions:
|
||||
- English in Markdown: docs/LICENSE/LICENSE.md
|
||||
- German: docs/LICENSE/LICENSE.de.md
|
||||
- Russian: docs/LICENSE/LICENSE.ru.md
|
||||
|
||||
### License Versioning Policy
|
||||
|
||||
This License is version 3 of the TELEMT Public License.
|
||||
Each version of the Software is licensed under the License that
|
||||
accompanies its corresponding source code distribution.
|
||||
|
||||
Future versions of the Software may be distributed under a different
|
||||
version of the TELEMT Public License or under a different license,
|
||||
as determined by the Telemt maintainers.
|
||||
|
||||
Any such change of license applies only to the versions of the
|
||||
Software distributed with the new license and SHALL NOT retroactively
|
||||
affect any previously released versions of the Software.
|
||||
|
||||
Recipients of the Software are granted rights only under the License
|
||||
provided with the version of the Software they received.
|
||||
|
||||
Redistributions of the Software, including Modified Versions, MUST
|
||||
preserve the copyright notices, license text, and conditions of this
|
||||
License for all portions of the Software derived from Telemt.
|
||||
|
||||
Additional terms or licenses may be applied to modifications or
|
||||
additional code added by a redistributor, provided that such terms
|
||||
do not restrict or alter the rights granted under this License for
|
||||
the original Telemt Software.
|
||||
|
||||
Nothing in this section limits the rights granted under this License
|
||||
for versions of the Software already released.
|
||||
|
||||
### Definitions
|
||||
|
||||
For the purposes of this License:
|
||||
- "Software" means the Telemt software, including source code, documentation,
|
||||
and any associated files distributed under this License.
|
||||
- "Contributor" means any person or entity that submits code, patches,
|
||||
documentation, or other contributions to the Software that are accepted
|
||||
into the Software by the maintainers.
|
||||
- "Contribution" means any work of authorship intentionally submitted
|
||||
to the Software for inclusion in the Software.
|
||||
- "Modified Version" means any version of the Software that has been
|
||||
changed, adapted, extended, or otherwise modified from the original
|
||||
Software.
|
||||
- "Maintainers" means the individuals or entities responsible for
|
||||
the official Telemt project and its releases.
|
||||
|
||||
#### 1 Attribution
|
||||
|
||||
Redistributions of the Software, in source or binary form, MUST RETAIN the
|
||||
above copyright notice, this license text, and any existing attribution
|
||||
notices.
|
||||
|
||||
#### 2 Modification Notice
|
||||
|
||||
If you modify the Software, you MUST clearly state that the Software has been
|
||||
modified and include a brief description of the changes made.
|
||||
|
||||
Modified versions MUST NOT be presented as the original Telemt.
|
||||
|
||||
#### 3 Trademark and Branding
|
||||
|
||||
This license DOES NOT grant permission to use the name "Telemt",
|
||||
the Telemt logo, or any Telemt trademarks or branding.
|
||||
|
||||
Redistributed or modified versions of the Software MAY NOT use the Telemt
|
||||
name in a way that suggests endorsement or official origin without explicit
|
||||
permission from the Telemt maintainers.
|
||||
|
||||
Use of the name "Telemt" to describe a modified version of the Software
|
||||
is permitted only if the modified version is clearly identified as a
|
||||
modified or unofficial version.
|
||||
|
||||
Any distribution that could reasonably confuse users into believing that
|
||||
the software is an official Telemt release is prohibited.
|
||||
|
||||
#### 4 Binary Distribution Transparency
|
||||
|
||||
If you distribute compiled binaries of the Software,
|
||||
you are ENCOURAGED to provide access to the corresponding
|
||||
source code and build instructions where reasonably possible.
|
||||
|
||||
This helps preserve transparency and allows recipients to verify the
|
||||
integrity and reproducibility of distributed builds.
|
||||
|
||||
#### 5 Patent Grant and Defensive Termination Clause
|
||||
|
||||
Each contributor grants you a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Software.
|
||||
|
||||
This patent license applies only to those patent claims necessarily
|
||||
infringed by the contributor’s contribution alone or by combination of
|
||||
their contribution with the Software.
|
||||
|
||||
If you initiate or participate in any patent litigation, including
|
||||
cross-claims or counterclaims, alleging that the Software or any
|
||||
contribution incorporated within the Software constitutes patent
|
||||
infringement, then **all rights granted to you under this license shall
|
||||
terminate immediately** as of the date such litigation is filed.
|
||||
|
||||
Additionally, if you initiate legal action alleging that the
|
||||
Software itself infringes your patent or other intellectual
|
||||
property rights, then all rights granted to you under this
|
||||
license SHALL TERMINATE automatically.
|
||||
|
||||
#### 6 Contributions
|
||||
|
||||
Unless you explicitly state otherwise, any Contribution intentionally
|
||||
submitted for inclusion in the Software shall be licensed under the terms
|
||||
of this License.
|
||||
|
||||
By submitting a Contribution, you grant the Telemt maintainers and all
|
||||
recipients of the Software the rights described in this License with
|
||||
respect to that Contribution.
|
||||
|
||||
#### 7 Network Use Attribution
|
||||
|
||||
If the Software is used to provide a publicly accessible network service,
|
||||
the operator of such service SHOULD provide attribution to Telemt in at least
|
||||
one of the following locations:
|
||||
|
||||
- service documentation
|
||||
- service description
|
||||
- an "About" or similar informational page
|
||||
- other user-visible materials reasonably associated with the service
|
||||
|
||||
Such attribution MUST NOT imply endorsement by the Telemt project or its
|
||||
maintainers.
|
||||
|
||||
#### 8 Disclaimer of Warranty and Severability Clause
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||||
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
IF ANY PROVISION OF THIS LICENSE IS HELD TO BE INVALID OR UNENFORCEABLE,
|
||||
SUCH PROVISION SHALL BE INTERPRETED TO REFLECT THE ORIGINAL INTENT
|
||||
OF THE PARTIES AS CLOSELY AS POSSIBLE, AND THE REMAINING PROVISIONS
|
||||
SHALL REMAIN IN FULL FORCE AND EFFECT
|
||||
19
LICENSING.md
19
LICENSING.md
@@ -1,17 +1,12 @@
|
||||
# LICENSING
|
||||
## Licenses for Versions
|
||||
| Version | License |
|
||||
|---------|---------------|
|
||||
| 1.0 | NO LICENSE |
|
||||
| 1.1 | NO LICENSE |
|
||||
| 1.2 | NO LICENSE |
|
||||
| 2.0 | NO LICENSE |
|
||||
| 3.0 | TELEMT UL 1 |
|
||||
| Version ≥ | Version ≤ | License |
|
||||
|-----------|-----------|---------------|
|
||||
| 1.0 | 3.3.17 | NO LICENSE |
|
||||
| 3.3.18 | 3.4.0 | TELEMT PL 3 |
|
||||
|
||||
### License Types
|
||||
- **NO LICENSE** = ***ALL RIGHTS RESERVED***
|
||||
- **TELEMT UL1** - work in progress license for source code of `telemt`, which encourages:
|
||||
- fair use,
|
||||
- contributions,
|
||||
- distribution,
|
||||
- but requires that the authors always be credited
|
||||
- **TELEMT PL** - special Telemt Public License based on Apache License 2 principles
|
||||
|
||||
## [Telemt Public License 3](https://github.com/telemt/telemt/blob/main/LICENSE)
|
||||
|
||||
@@ -19,9 +19,9 @@
|
||||
|
||||
### 🇷🇺 RU
|
||||
|
||||
#### Релиз 3.3.16
|
||||
#### О релизах
|
||||
|
||||
[3.3.16](https://github.com/telemt/telemt/releases/tag/3.3.16)!
|
||||
[3.3.27](https://github.com/telemt/telemt/releases/tag/3.3.27) даёт баланс стабильности и передового функционала, а так же последние исправления по безопасности и багам
|
||||
|
||||
Будем рады вашему фидбеку и предложениям по улучшению — особенно в части **API**, **статистики**, **UX**
|
||||
|
||||
@@ -40,9 +40,9 @@
|
||||
|
||||
### 🇬🇧 EN
|
||||
|
||||
#### Release 3.3.16
|
||||
#### About releases
|
||||
|
||||
[3.3.16](https://github.com/telemt/telemt/releases/tag/3.3.16)
|
||||
[3.3.27](https://github.com/telemt/telemt/releases/tag/3.3.27) provides a balance of stability and advanced functionality, as well as the latest security and bug fixes
|
||||
|
||||
We are looking forward to your feedback and improvement proposals — especially regarding **API**, **statistics**, **UX**
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
// Cryptobench
|
||||
use criterion::{black_box, criterion_group, Criterion};
|
||||
use criterion::{Criterion, black_box, criterion_group};
|
||||
|
||||
fn bench_aes_ctr(c: &mut Criterion) {
|
||||
c.bench_function("aes_ctr_encrypt_64kb", |b| {
|
||||
@@ -9,4 +9,4 @@ fn bench_aes_ctr(c: &mut Criterion) {
|
||||
black_box(enc.encrypt(&data))
|
||||
})
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
697
config.full.toml
697
config.full.toml
@@ -1,697 +0,0 @@
|
||||
# ==============================================================================
|
||||
#
|
||||
# TELEMT — Advanced Rust-based Telegram MTProto Proxy
|
||||
# Full Configuration Reference
|
||||
#
|
||||
# This file is both a working config and a complete documentation.
|
||||
# Every parameter is explained. Read it top to bottom before deploying.
|
||||
#
|
||||
# Quick Start:
|
||||
# 1. Set [server].port to your desired port (443 recommended)
|
||||
# 2. Generate a secret: openssl rand -hex 16
|
||||
# 3. Put it in [access.users] under a name you choose
|
||||
# 4. Set [censorship].tls_domain to a popular unblocked HTTPS site
|
||||
# 5. Set your public IP in [general].middle_proxy_nat_ip
|
||||
# and [general.links].public_host
|
||||
# 6. Set announce IP in [[server.listeners]]
|
||||
# 7. Run Telemt. It prints a tg:// link. Send it to your users.
|
||||
#
|
||||
# Modes of Operation:
|
||||
# Direct Mode (use_middle_proxy = false)
|
||||
# Connects straight to Telegram DCs via TCP. Simple, fast, low overhead.
|
||||
# No ad_tag support. No CDN DC support (203, etc).
|
||||
#
|
||||
# Middle-Proxy Mode (use_middle_proxy = true)
|
||||
# Connects to Telegram Middle-End servers via RPC protocol.
|
||||
# Required for ad_tag monetization and CDN support.
|
||||
# Requires proxy_secret_path and a valid public IP.
|
||||
#
|
||||
# ==============================================================================
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# LEGACY TOP-LEVEL FIELDS
|
||||
# ==============================================================================
|
||||
|
||||
# Deprecated. Use [general.links].show instead.
|
||||
# Accepts "*" for all users, or an array like ["alice", "bob"].
|
||||
show_link = ["0"]
|
||||
|
||||
# Fallback Datacenter index (1-5) when a client requests an unknown DC ID.
|
||||
# DC 2 is Amsterdam (Europe), closest for most CIS users.
|
||||
# default_dc = 2
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# GENERAL SETTINGS
|
||||
# ==============================================================================
|
||||
|
||||
[general]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Core Protocol
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Coalesce the MTProto handshake and first data payload into a single TCP packet.
|
||||
# Significantly reduces connection latency. No reason to disable.
|
||||
fast_mode = true
|
||||
|
||||
# How the proxy connects to Telegram servers.
|
||||
# false = Direct TCP to Telegram DCs (simple, low overhead)
|
||||
# true = Middle-End RPC protocol (required for ad_tag and CDN DCs)
|
||||
use_middle_proxy = true
|
||||
|
||||
# 32-char hex Ad-Tag from @MTProxybot for sponsored channel injection.
|
||||
# Only works when use_middle_proxy = true.
|
||||
# Obtain yours: message @MTProxybot on Telegram, register your proxy.
|
||||
# ad_tag = "00000000000000000000000000000000"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Middle-End Authentication
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Path to the Telegram infrastructure AES key file.
|
||||
# Auto-downloaded from https://core.telegram.org/getProxySecret on first run.
|
||||
# This key authenticates your proxy with Middle-End servers.
|
||||
proxy_secret_path = "proxy-secret"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Public IP Configuration (Critical for Middle-Proxy Mode)
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Your server's PUBLIC IPv4 address.
|
||||
# Middle-End servers need this for the cryptographic Key Derivation Function.
|
||||
# If your server has a direct public IP, set it here.
|
||||
# If behind NAT (AWS, Docker, etc.), this MUST be your external IP.
|
||||
# If omitted, Telemt uses STUN to auto-detect (see middle_proxy_nat_probe).
|
||||
# middle_proxy_nat_ip = "203.0.113.10"
|
||||
|
||||
# Auto-detect public IP via STUN servers defined in [network].
|
||||
# Set to false if you hardcoded middle_proxy_nat_ip above.
|
||||
# Set to true if you want automatic detection.
|
||||
middle_proxy_nat_probe = true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Middle-End Connection Pool
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Number of persistent multiplexed RPC connections to ME servers.
|
||||
# All client traffic is routed through these "fat pipes".
|
||||
# 8 handles thousands of concurrent users comfortably.
|
||||
middle_proxy_pool_size = 8
|
||||
|
||||
# Legacy field. Connections kept initialized but idle as warm standby.
|
||||
middle_proxy_warm_standby = 16
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Middle-End Keepalive
|
||||
# Telegram ME servers aggressively kill idle TCP connections.
|
||||
# These settings send periodic RPC_PING frames to keep pipes alive.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
me_keepalive_enabled = true
|
||||
|
||||
# Base interval between pings in seconds.
|
||||
me_keepalive_interval_secs = 25
|
||||
|
||||
# Random jitter added to interval to prevent all connections pinging simultaneously.
|
||||
me_keepalive_jitter_secs = 5
|
||||
|
||||
# Randomize ping payload bytes to prevent DPI from fingerprinting ping patterns.
|
||||
me_keepalive_payload_random = true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Client-Side Limits
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Max buffered ciphertext per client (bytes) when upstream is slow.
|
||||
# Acts as backpressure to prevent memory exhaustion. 256KB is safe.
|
||||
crypto_pending_buffer = 262144
|
||||
|
||||
# Maximum single MTProto frame size from client. 16MB is protocol standard.
|
||||
max_client_frame = 16777216
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Crypto Desynchronization Logging
|
||||
# Desync errors usually mean DPI/GFW is tampering with connections.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# true = full forensics (trace ID, IP hash, hex dumps) for EVERY desync event
|
||||
# false = deduplicated logging, one entry per time window (prevents log spam)
|
||||
# Set true if you are actively debugging DPI interference.
|
||||
desync_all_full = true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Beobachten — Built-in Honeypot / Active Probe Tracker
|
||||
# Tracks IPs that fail handshakes or behave like TLS scanners.
|
||||
# Output file can be fed into fail2ban or iptables for auto-blocking.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
beobachten = true
|
||||
|
||||
# How long (minutes) to remember a suspicious IP before expiring it.
|
||||
beobachten_minutes = 30
|
||||
|
||||
# How often (seconds) to flush tracker state to disk.
|
||||
beobachten_flush_secs = 15
|
||||
|
||||
# File path for the tracker output.
|
||||
beobachten_file = "cache/beobachten.txt"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Hardswap — Zero-Downtime ME Pool Rotation
|
||||
# When Telegram updates ME server IPs, Hardswap creates a completely new pool,
|
||||
# waits until it is fully ready, migrates traffic, then kills the old pool.
|
||||
# Users experience zero interruption.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
hardswap = true
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# ME Pool Warmup Staggering
|
||||
# When creating a new pool, connections are opened one by one with delays
|
||||
# to avoid a burst of SYN packets that could trigger ISP flood protection.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
me_warmup_stagger_enabled = true
|
||||
|
||||
# Delay between each connection creation (milliseconds).
|
||||
me_warmup_step_delay_ms = 500
|
||||
|
||||
# Random jitter added to the delay (milliseconds).
|
||||
me_warmup_step_jitter_ms = 300
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# ME Reconnect Backoff
|
||||
# If an ME server drops the connection, Telemt retries with this strategy.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Max simultaneous reconnect attempts per DC.
|
||||
me_reconnect_max_concurrent_per_dc = 8
|
||||
|
||||
# Exponential backoff base (milliseconds).
|
||||
me_reconnect_backoff_base_ms = 500
|
||||
|
||||
# Backoff ceiling (milliseconds). Will never wait longer than this.
|
||||
me_reconnect_backoff_cap_ms = 30000
|
||||
|
||||
# Number of instant retries before switching to exponential backoff.
|
||||
me_reconnect_fast_retry_count = 12
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# NAT Mismatch Behavior
|
||||
# If STUN-detected IP differs from local interface IP (you are behind NAT).
|
||||
# false = abort ME mode (safe default)
|
||||
# true = force ME mode anyway (use if you know your NAT setup is correct)
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
stun_iface_mismatch_ignore = false
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Logging
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# File to log unknown DC requests (DC IDs outside standard 1-5).
|
||||
unknown_dc_log_path = "unknown-dc.txt"
|
||||
|
||||
# Verbosity: "debug" | "verbose" | "normal" | "silent"
|
||||
log_level = "normal"
|
||||
|
||||
# Disable ANSI color codes in log output (useful for file logging).
|
||||
disable_colors = false
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# FakeTLS Record Sizing
|
||||
# Buffer small MTProto packets into larger TLS records to mimic real HTTPS.
|
||||
# Real HTTPS servers send records close to MTU size (~1400 bytes).
|
||||
# A stream of tiny TLS records is a strong DPI signal.
|
||||
# Set to 0 to disable. Set to 1400 for realistic HTTPS emulation.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
fast_mode_min_tls_record = 1400
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Periodic Updates
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# How often (seconds) to re-fetch ME server lists and proxy secrets
|
||||
# from core.telegram.org. Keeps your proxy in sync with Telegram infrastructure.
|
||||
update_every = 300
|
||||
|
||||
# How often (seconds) to force a Hardswap even if the ME map is unchanged.
|
||||
# Shorter intervals mean shorter-lived TCP flows, harder for DPI to profile.
|
||||
me_reinit_every_secs = 600
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Hardswap Warmup Tuning
|
||||
# Fine-grained control over how the new pool is warmed up before traffic switch.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
me_hardswap_warmup_delay_min_ms = 1000
|
||||
me_hardswap_warmup_delay_max_ms = 2000
|
||||
me_hardswap_warmup_extra_passes = 3
|
||||
me_hardswap_warmup_pass_backoff_base_ms = 500
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Config Update Debouncing
|
||||
# Telegram sometimes pushes transient/broken configs. Debouncing requires
|
||||
# N consecutive identical fetches before applying a change.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# ME server list must be identical for this many fetches before applying.
|
||||
me_config_stable_snapshots = 2
|
||||
|
||||
# Minimum seconds between config applications.
|
||||
me_config_apply_cooldown_secs = 300
|
||||
|
||||
# Proxy secret must be identical for this many fetches before applying.
|
||||
proxy_secret_stable_snapshots = 2
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Proxy Secret Rotation
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Apply newly downloaded secrets at runtime without restart.
|
||||
proxy_secret_rotate_runtime = true
|
||||
|
||||
# Maximum acceptable secret length (bytes). Rejects abnormally large secrets.
|
||||
proxy_secret_len_max = 256
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Hardswap Drain Settings
|
||||
# Controls graceful shutdown of old ME connections during pool rotation.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Seconds to keep old connections alive for in-flight data before force-closing.
|
||||
me_pool_drain_ttl_secs = 90
|
||||
|
||||
# Minimum ratio of healthy connections in new pool before draining old pool.
|
||||
# 0.8 = at least 80% of new pool must be ready.
|
||||
me_pool_min_fresh_ratio = 0.8
|
||||
|
||||
# Maximum seconds to wait for drain to complete before force-killing.
|
||||
me_reinit_drain_timeout_secs = 120
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# NTP Clock Check
|
||||
# MTProto uses timestamps. Clock drift > 30 seconds breaks handshakes.
|
||||
# Telemt checks on startup and warns if out of sync.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
ntp_check = true
|
||||
ntp_servers = ["pool.ntp.org"]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Auto-Degradation
|
||||
# If ME servers become completely unreachable (ISP blocking),
|
||||
# automatically fall back to Direct Mode so users stay connected.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
auto_degradation_enabled = true
|
||||
|
||||
# Number of DC groups that must be unreachable before triggering fallback.
|
||||
degradation_min_unavailable_dc_groups = 2
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# ALLOWED CLIENT PROTOCOLS
|
||||
# Only enable what you need. In censored regions, TLS-only is safest.
|
||||
# ==============================================================================
|
||||
|
||||
[general.modes]
|
||||
|
||||
# Classic MTProto. Unobfuscated length prefixes. Trivially detected by DPI.
|
||||
# No reason to enable unless you have ancient clients.
|
||||
classic = false
|
||||
|
||||
# Obfuscated MTProto with randomized padding. Better than classic, but
|
||||
# still detectable by statistical analysis of packet sizes.
|
||||
secure = false
|
||||
|
||||
# FakeTLS (ee-secrets). Wraps MTProto in TLS 1.3 framing.
|
||||
# To DPI, it looks like a normal HTTPS connection.
|
||||
# This should be the ONLY enabled mode in censored environments.
|
||||
tls = true
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# STARTUP LINK GENERATION
|
||||
# Controls what tg:// invite links are printed to console on startup.
|
||||
# ==============================================================================
|
||||
|
||||
[general.links]
|
||||
|
||||
# Which users to generate links for.
|
||||
# "*" = all users, or an array like ["alice", "bob"].
|
||||
show = "*"
|
||||
|
||||
# IP or domain to embed in the tg:// link.
|
||||
# If omitted, Telemt uses STUN to auto-detect.
|
||||
# Set this to your server's public IP or domain for reliable links.
|
||||
# public_host = "proxy.example.com"
|
||||
|
||||
# Port to embed in the tg:// link.
|
||||
# If omitted, uses [server].port.
|
||||
# public_port = 443
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# NETWORK & IP RESOLUTION
|
||||
# ==============================================================================
|
||||
|
||||
[network]
|
||||
|
||||
# Enable IPv4 for outbound connections to Telegram.
|
||||
ipv4 = true
|
||||
|
||||
# Enable IPv6 for outbound connections to Telegram.
|
||||
ipv6 = false
|
||||
|
||||
# Prefer IPv4 (4) or IPv6 (6) when both are available.
|
||||
prefer = 4
|
||||
|
||||
# Experimental: use both IPv4 and IPv6 ME servers simultaneously.
|
||||
# May improve reliability but doubles connection count.
|
||||
multipath = false
|
||||
|
||||
# STUN servers for external IP discovery.
|
||||
# Used for Middle-Proxy KDF (if nat_probe=true) and link generation.
|
||||
stun_servers = [
|
||||
"stun.l.google.com:5349",
|
||||
"stun1.l.google.com:3478",
|
||||
"stun.gmx.net:3478",
|
||||
"stun.l.google.com:19302"
|
||||
]
|
||||
|
||||
# If UDP STUN is blocked, attempt TCP-based STUN as fallback.
|
||||
stun_tcp_fallback = true
|
||||
|
||||
# If all STUN fails, use HTTP APIs to discover public IP.
|
||||
http_ip_detect_urls = [
|
||||
"https://ifconfig.me/ip",
|
||||
"https://api.ipify.org"
|
||||
]
|
||||
|
||||
# Cache discovered public IP to this file to survive restarts.
|
||||
cache_public_ip_path = "cache/public_ip.txt"
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# SERVER BINDING & METRICS
|
||||
# ==============================================================================
|
||||
|
||||
[server]
|
||||
|
||||
# TCP port to listen on.
|
||||
# 443 is recommended (looks like normal HTTPS traffic).
|
||||
port = 443
|
||||
|
||||
# IPv4 bind address. "0.0.0.0" = all interfaces.
|
||||
listen_addr_ipv4 = "0.0.0.0"
|
||||
|
||||
# IPv6 bind address. "::" = all interfaces.
|
||||
listen_addr_ipv6 = "::"
|
||||
|
||||
# Unix socket listener (for reverse proxy setups with Nginx/HAProxy).
|
||||
# listen_unix_sock = "/var/run/telemt.sock"
|
||||
# listen_unix_sock_perm = "0660"
|
||||
|
||||
# Enable PROXY protocol header parsing.
|
||||
# Set true ONLY if Telemt is behind HAProxy/Nginx that injects PROXY headers.
|
||||
# If enabled without a proxy in front, clients will fail to connect.
|
||||
proxy_protocol = false
|
||||
|
||||
# Prometheus metrics HTTP endpoint port.
|
||||
# Uncomment to enable. Access at http://your-server:9090/metrics
|
||||
# metrics_port = 9090
|
||||
|
||||
# IP ranges allowed to access the metrics endpoint.
|
||||
metrics_whitelist = [
|
||||
"127.0.0.1/32",
|
||||
"::1/128"
|
||||
]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Listener Overrides
|
||||
# Define explicit listeners with specific bind IPs and announce IPs.
|
||||
# The announce IP is what gets embedded in tg:// links and sent to ME servers.
|
||||
# You MUST set announce to your server's public IP for ME mode to work.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# [[server.listeners]]
|
||||
# ip = "0.0.0.0"
|
||||
# announce = "203.0.113.10"
|
||||
# reuse_allow = false
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# TIMEOUTS (seconds unless noted)
|
||||
# ==============================================================================
|
||||
|
||||
[timeouts]
|
||||
|
||||
# Maximum time for client to complete FakeTLS + MTProto handshake.
|
||||
client_handshake = 15
|
||||
|
||||
# Maximum time to establish TCP connection to upstream Telegram DC.
|
||||
tg_connect = 10
|
||||
|
||||
# TCP keepalive interval for client connections.
|
||||
client_keepalive = 60
|
||||
|
||||
# Maximum client inactivity before dropping the connection.
|
||||
client_ack = 300
|
||||
|
||||
# Instant retry count for a single ME endpoint before giving up on it.
|
||||
me_one_retry = 3
|
||||
|
||||
# Timeout (milliseconds) for a single ME endpoint connection attempt.
|
||||
me_one_timeout_ms = 1500
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# ANTI-CENSORSHIP / FAKETLS / MASKING
|
||||
# This is where Telemt becomes invisible to Deep Packet Inspection.
|
||||
# ==============================================================================
|
||||
|
||||
[censorship]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# TLS Domain Fronting
|
||||
# The SNI (Server Name Indication) your proxy presents to connecting clients.
|
||||
# Must be a popular, unblocked HTTPS website in your target country.
|
||||
# DPI sees traffic to this domain. Choose carefully.
|
||||
# Good choices: major CDNs, banks, government sites, search engines.
|
||||
# Bad choices: obscure sites, already-blocked domains.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
tls_domain = "www.google.com"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Active Probe Masking
|
||||
# When someone connects but fails the MTProto handshake (wrong secret),
|
||||
# they might be an ISP active prober testing if this is a proxy.
|
||||
#
|
||||
# mask = false: drop the connection (prober knows something is here)
|
||||
# mask = true: transparently proxy them to mask_host (prober sees a real website)
|
||||
#
|
||||
# With mask enabled, your server is indistinguishable from a real web server
|
||||
# to anyone who doesn't have the correct secret.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
mask = true
|
||||
|
||||
# The real web server to forward failed handshakes to.
|
||||
# If omitted, defaults to tls_domain.
|
||||
# mask_host = "www.google.com"
|
||||
|
||||
# Port on the mask host to connect to.
|
||||
mask_port = 443
|
||||
|
||||
# Inject PROXY protocol header when forwarding to mask host.
|
||||
# 0 = disabled, 1 = v1, 2 = v2. Leave disabled unless mask_host expects it.
|
||||
# mask_proxy_protocol = 0
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# TLS Certificate Emulation
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Size (bytes) of the locally generated fake TLS certificate.
|
||||
# Only used when tls_emulation is disabled.
|
||||
fake_cert_len = 2048
|
||||
|
||||
# KILLER FEATURE: Real-Time TLS Emulation.
|
||||
# Telemt connects to tls_domain, fetches its actual TLS 1.3 certificate chain,
|
||||
# and exactly replicates the byte sizes of ServerHello and Certificate records.
|
||||
# Defeats DPI that uses TLS record length heuristics to detect proxies.
|
||||
# Strongly recommended in censored environments.
|
||||
tls_emulation = true
|
||||
|
||||
# Directory to cache fetched TLS certificates.
|
||||
tls_front_dir = "tlsfront"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# ServerHello Timing
|
||||
# Real web servers take 30-150ms to respond to ClientHello due to network
|
||||
# latency and crypto processing. A proxy responding in <1ms is suspicious.
|
||||
# These settings add realistic delay to mimic genuine server behavior.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Minimum delay before sending ServerHello (milliseconds).
|
||||
server_hello_delay_min_ms = 50
|
||||
|
||||
# Maximum delay before sending ServerHello (milliseconds).
|
||||
server_hello_delay_max_ms = 150
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# TLS Session Tickets
|
||||
# Real TLS 1.3 servers send 1-2 NewSessionTicket messages after handshake.
|
||||
# A server that sends zero tickets is anomalous and may trigger DPI flags.
|
||||
# Set this to match your tls_domain's behavior (usually 2).
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# tls_new_session_tickets = 0
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Full Certificate Frequency
|
||||
# When tls_emulation is enabled, this controls how often (per client IP)
|
||||
# to send the complete emulated certificate chain.
|
||||
#
|
||||
# > 0: Subsequent connections within TTL seconds get a smaller cached version.
|
||||
# Saves bandwidth but creates a detectable size difference between
|
||||
# first and repeat connections.
|
||||
#
|
||||
# = 0: Every connection gets the full certificate. More bandwidth but
|
||||
# perfectly consistent behavior, no anomalies for DPI to detect.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
tls_full_cert_ttl_secs = 0
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# ALPN Enforcement
|
||||
# Ensure ServerHello responds with the exact ALPN protocol the client requested.
|
||||
# Mismatched ALPN (e.g., client asks h2, server says http/1.1) is a DPI red flag.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
alpn_enforce = true
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# ACCESS CONTROL & USERS
|
||||
# ==============================================================================
|
||||
|
||||
[access]
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Replay Attack Protection
|
||||
# DPI can record a legitimate user's handshake and replay it later to probe
|
||||
# whether the server is a proxy. Telemt remembers recent handshake nonces
|
||||
# and rejects duplicates.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# Number of nonce slots in the replay detection buffer.
|
||||
replay_check_len = 65536
|
||||
|
||||
# How long (seconds) to remember nonces before expiring them.
|
||||
replay_window_secs = 1800
|
||||
|
||||
# Allow clients with incorrect system clocks to connect.
|
||||
# false = reject clients with significant time skew (more secure)
|
||||
# true = accept anyone regardless of clock (more permissive)
|
||||
ignore_time_skew = false
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# User Secrets
|
||||
# Each user needs a unique 32-character hex string as their secret.
|
||||
# Generate with: openssl rand -hex 16
|
||||
#
|
||||
# This secret is embedded in the tg:// link. Anyone with it can connect.
|
||||
# Format: username = "hex_secret"
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
[access.users]
|
||||
# alice = "0123456789abcdef0123456789abcdef"
|
||||
# bob = "fedcba9876543210fedcba9876543210"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Per-User Connection Limits
|
||||
# Limits concurrent TCP connections per user to prevent secret sharing.
|
||||
# Uncomment and set for each user as needed.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
[access.user_max_tcp_conns]
|
||||
# alice = 100
|
||||
# bob = 50
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Per-User Expiration Dates
|
||||
# Automatically revoke access after the specified date (ISO 8601 format).
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
[access.user_expirations]
|
||||
# alice = "2025-12-31T23:59:59Z"
|
||||
# bob = "2026-06-15T00:00:00Z"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Per-User Data Quotas
|
||||
# Maximum total bytes transferred per user. Connection refused after limit.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
[access.user_data_quota]
|
||||
# alice = 107374182400
|
||||
# bob = 53687091200
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Per-User Unique IP Limits
|
||||
# Maximum number of different IP addresses that can use this secret
|
||||
# at the same time. Highly effective against secret leaking/sharing.
|
||||
# Set to 1 for single-device, 2-3 for phone+desktop, etc.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
[access.user_max_unique_ips]
|
||||
# alice = 3
|
||||
# bob = 2
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# UPSTREAM ROUTING
|
||||
# Controls how Telemt connects to Telegram servers (or ME servers).
|
||||
# If omitted entirely, uses the OS default route.
|
||||
# ==============================================================================
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# Direct upstream: use the server's own network interface.
|
||||
# You can optionally bind to a specific interface or local IP.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# [[upstreams]]
|
||||
# type = "direct"
|
||||
# interface = "eth0"
|
||||
# bind_addresses = ["192.0.2.10"]
|
||||
# weight = 1
|
||||
# enabled = true
|
||||
# scopes = "*"
|
||||
|
||||
# ------------------------------------------------------------------------------
|
||||
# SOCKS5 upstream: route Telegram traffic through a SOCKS5 proxy.
|
||||
# Useful if your server's IP is blocked from reaching Telegram DCs.
|
||||
# ------------------------------------------------------------------------------
|
||||
|
||||
# [[upstreams]]
|
||||
# type = "socks5"
|
||||
# address = "198.51.100.30:1080"
|
||||
# username = "proxy-user"
|
||||
# password = "proxy-pass"
|
||||
# weight = 1
|
||||
# enabled = true
|
||||
|
||||
|
||||
# ==============================================================================
|
||||
# DATACENTER OVERRIDES
|
||||
# Force specific DC IDs to route to specific IP:Port combinations.
|
||||
# DC 203 (CDN) is auto-injected by Telemt if not specified here.
|
||||
# ==============================================================================
|
||||
|
||||
# [dc_overrides]
|
||||
# "201" = "149.154.175.50:443"
|
||||
# "202" = ["149.154.167.51:443", "149.154.175.100:443"]
|
||||
@@ -32,6 +32,7 @@ show = "*"
|
||||
port = 443
|
||||
# proxy_protocol = false # Enable if behind HAProxy/nginx with PROXY protocol
|
||||
# metrics_port = 9090
|
||||
# metrics_listen = "0.0.0.0:9090" # Listen address for metrics (overrides metrics_port)
|
||||
# metrics_whitelist = ["127.0.0.1", "::1", "0.0.0.0/0"]
|
||||
|
||||
[server.api]
|
||||
|
||||
3
contrib/systemd/system-user-telemt.conf
Normal file
3
contrib/systemd/system-user-telemt.conf
Normal file
@@ -0,0 +1,3 @@
|
||||
u telemt - "telemt user" /var/lib/telemt -
|
||||
g telemt - -
|
||||
m telemt telemt
|
||||
21
contrib/systemd/telemt.service
Normal file
21
contrib/systemd/telemt.service
Normal file
@@ -0,0 +1,21 @@
|
||||
[Unit]
|
||||
Description=Telemt
|
||||
Wants=network-online.target
|
||||
After=network.target network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=telemt
|
||||
Group=telemt
|
||||
WorkingDirectory=/var/lib/telemt
|
||||
ExecStart=/usr/bin/telemt /etc/telemt/telemt.toml
|
||||
Restart=on-failure
|
||||
RestartSec=10
|
||||
LimitNOFILE=65536
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
|
||||
NoNewPrivileges=true
|
||||
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
1
contrib/systemd/tmpfiles-telemt.conf
Normal file
1
contrib/systemd/tmpfiles-telemt.conf
Normal file
@@ -0,0 +1 @@
|
||||
d /var/lib/telemt 0700 telemt telemt -
|
||||
@@ -7,6 +7,7 @@ services:
|
||||
ports:
|
||||
- "443:443"
|
||||
- "127.0.0.1:9090:9090"
|
||||
- "127.0.0.1:9091:9091"
|
||||
# Allow caching 'proxy-secret' in read-only container
|
||||
working_dir: /run/telemt
|
||||
volumes:
|
||||
|
||||
10
docs/API.md
10
docs/API.md
@@ -497,13 +497,14 @@ Note: the request contract is defined, but the corresponding route currently ret
|
||||
| `direct_total` | `usize` | Direct-route upstream entries. |
|
||||
| `socks4_total` | `usize` | SOCKS4 upstream entries. |
|
||||
| `socks5_total` | `usize` | SOCKS5 upstream entries. |
|
||||
| `shadowsocks_total` | `usize` | Shadowsocks upstream entries. |
|
||||
|
||||
#### `RuntimeUpstreamQualityUpstreamData`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `upstream_id` | `usize` | Runtime upstream index. |
|
||||
| `route_kind` | `string` | `direct`, `socks4`, `socks5`. |
|
||||
| `address` | `string` | Upstream address (`direct` literal for direct route kind). |
|
||||
| `route_kind` | `string` | `direct`, `socks4`, `socks5`, `shadowsocks`. |
|
||||
| `address` | `string` | Upstream address (`direct` literal for direct route kind, `host:port` only for proxied upstreams). |
|
||||
| `weight` | `u16` | Selection weight. |
|
||||
| `scopes` | `string` | Configured scope selector. |
|
||||
| `healthy` | `bool` | Current health flag. |
|
||||
@@ -757,13 +758,14 @@ Note: the request contract is defined, but the corresponding route currently ret
|
||||
| `direct_total` | `usize` | Number of direct upstream entries. |
|
||||
| `socks4_total` | `usize` | Number of SOCKS4 upstream entries. |
|
||||
| `socks5_total` | `usize` | Number of SOCKS5 upstream entries. |
|
||||
| `shadowsocks_total` | `usize` | Number of Shadowsocks upstream entries. |
|
||||
|
||||
#### `UpstreamStatus`
|
||||
| Field | Type | Description |
|
||||
| --- | --- | --- |
|
||||
| `upstream_id` | `usize` | Runtime upstream index. |
|
||||
| `route_kind` | `string` | Upstream route kind: `direct`, `socks4`, `socks5`. |
|
||||
| `address` | `string` | Upstream address (`direct` for direct route kind). Authentication fields are intentionally omitted. |
|
||||
| `route_kind` | `string` | Upstream route kind: `direct`, `socks4`, `socks5`, `shadowsocks`. |
|
||||
| `address` | `string` | Upstream address (`direct` for direct route kind, `host:port` for Shadowsocks). Authentication fields are intentionally omitted. |
|
||||
| `weight` | `u16` | Selection weight. |
|
||||
| `scopes` | `string` | Configured scope selector string. |
|
||||
| `healthy` | `bool` | Current health flag. |
|
||||
|
||||
417
docs/CONFIG_PARAMS.en.md
Normal file
417
docs/CONFIG_PARAMS.en.md
Normal file
@@ -0,0 +1,417 @@
|
||||
# Telemt Config Parameters Reference
|
||||
|
||||
This document lists all configuration keys accepted by `config.toml`.
|
||||
|
||||
> [!WARNING]
|
||||
>
|
||||
> The configuration parameters detailed in this document are intended for advanced users and fine-tuning purposes. Modifying these settings without a clear understanding of their function may lead to application instability or other unexpected behavior. Please proceed with caution and at your own risk.
|
||||
|
||||
## Top-level keys
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| include | `String` (special directive) | `null` | — | Includes another TOML file with `include = "relative/or/absolute/path.toml"`; includes are processed recursively before parsing. |
|
||||
| show_link | `"*" \| String[]` | `[]` (`ShowLink::None`) | — | Legacy top-level link visibility selector (`"*"` for all users or explicit usernames list). |
|
||||
| dc_overrides | `Map<String, String[]>` | `{}` | — | Overrides DC endpoints for non-standard DCs; key is DC id string, value is `ip:port` list. |
|
||||
| default_dc | `u8 \| null` | `null` (effective fallback: `2` in ME routing) | — | Default DC index used for unmapped non-standard DCs. |
|
||||
|
||||
## [general]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| data_path | `String \| null` | `null` | — | Optional runtime data directory path. |
|
||||
| prefer_ipv6 | `bool` | `false` | — | Prefer IPv6 where applicable in runtime logic. |
|
||||
| fast_mode | `bool` | `true` | — | Enables fast-path optimizations for traffic processing. |
|
||||
| use_middle_proxy | `bool` | `true` | none | Enables ME transport mode; if `false`, runtime falls back to direct DC routing. |
|
||||
| proxy_secret_path | `String \| null` | `"proxy-secret"` | Path may be `null`. | Path to Telegram infrastructure proxy-secret file used by ME handshake logic. |
|
||||
| proxy_config_v4_cache_path | `String \| null` | `"cache/proxy-config-v4.txt"` | — | Optional cache path for raw `getProxyConfig` (IPv4) snapshot. |
|
||||
| proxy_config_v6_cache_path | `String \| null` | `"cache/proxy-config-v6.txt"` | — | Optional cache path for raw `getProxyConfigV6` (IPv6) snapshot. |
|
||||
| ad_tag | `String \| null` | `null` | — | Global fallback ad tag (32 hex characters). |
|
||||
| middle_proxy_nat_ip | `IpAddr \| null` | `null` | Must be a valid IP when set. | Manual public NAT IP override used as ME address material when set. |
|
||||
| middle_proxy_nat_probe | `bool` | `true` | Auto-forced to `true` when `use_middle_proxy = true`. | Enables ME NAT probing; runtime may force it on when ME mode is active. |
|
||||
| middle_proxy_nat_stun | `String \| null` | `null` | Deprecated. Use `network.stun_servers`. | Deprecated legacy single STUN server for NAT probing. |
|
||||
| middle_proxy_nat_stun_servers | `String[]` | `[]` | Deprecated. Use `network.stun_servers`. | Deprecated legacy STUN list for NAT probing fallback. |
|
||||
| stun_nat_probe_concurrency | `usize` | `8` | Must be `> 0`. | Maximum number of parallel STUN probes during NAT/public endpoint discovery. |
|
||||
| middle_proxy_pool_size | `usize` | `8` | none | Target size of active ME writer pool. |
|
||||
| middle_proxy_warm_standby | `usize` | `16` | none | Reserved compatibility field in current runtime revision. |
|
||||
| me_init_retry_attempts | `u32` | `0` | `0..=1_000_000`. | Startup retries for ME pool initialization (`0` means unlimited). |
|
||||
| me2dc_fallback | `bool` | `true` | — | Allows fallback from ME mode to direct DC when ME startup fails. |
|
||||
| me_keepalive_enabled | `bool` | `true` | none | Enables periodic ME keepalive/ping traffic. |
|
||||
| me_keepalive_interval_secs | `u64` | `8` | none | Base ME keepalive interval in seconds. |
|
||||
| me_keepalive_jitter_secs | `u64` | `2` | none | Keepalive jitter in seconds to reduce synchronized bursts. |
|
||||
| me_keepalive_payload_random | `bool` | `true` | none | Randomizes keepalive payload bytes instead of fixed zero payload. |
|
||||
| rpc_proxy_req_every | `u64` | `0` | `0` or `10..=300`. | Interval for service `RPC_PROXY_REQ` activity signals (`0` disables). |
|
||||
| me_writer_cmd_channel_capacity | `usize` | `4096` | Must be `> 0`. | Capacity of per-writer command channel. |
|
||||
| me_route_channel_capacity | `usize` | `768` | Must be `> 0`. | Capacity of per-connection ME response route channel. |
|
||||
| me_c2me_channel_capacity | `usize` | `1024` | Must be `> 0`. | Capacity of per-client command queue (client reader -> ME sender). |
|
||||
| me_reader_route_data_wait_ms | `u64` | `2` | `0..=20`. | Bounded wait for routing ME DATA to per-connection queue (`0` = no wait). |
|
||||
| me_d2c_flush_batch_max_frames | `usize` | `32` | `1..=512`. | Max ME->client frames coalesced before flush. |
|
||||
| me_d2c_flush_batch_max_bytes | `usize` | `131072` | `4096..=2_097_152`. | Max ME->client payload bytes coalesced before flush. |
|
||||
| me_d2c_flush_batch_max_delay_us | `u64` | `500` | `0..=5000`. | Max microsecond wait for coalescing more ME->client frames (`0` disables timed coalescing). |
|
||||
| me_d2c_ack_flush_immediate | `bool` | `true` | — | Flushes client writer immediately after quick-ack write. |
|
||||
| direct_relay_copy_buf_c2s_bytes | `usize` | `65536` | `4096..=1_048_576`. | Copy buffer size for client->DC direction in direct relay. |
|
||||
| direct_relay_copy_buf_s2c_bytes | `usize` | `262144` | `8192..=2_097_152`. | Copy buffer size for DC->client direction in direct relay. |
|
||||
| crypto_pending_buffer | `usize` | `262144` | — | Max pending ciphertext buffer per client writer (bytes). |
|
||||
| max_client_frame | `usize` | `16777216` | — | Maximum allowed client MTProto frame size (bytes). |
|
||||
| desync_all_full | `bool` | `false` | — | Emits full crypto-desync forensic logs for every event. |
|
||||
| beobachten | `bool` | `true` | — | Enables per-IP forensic observation buckets. |
|
||||
| beobachten_minutes | `u64` | `10` | Must be `> 0`. | Retention window (minutes) for per-IP observation buckets. |
|
||||
| beobachten_flush_secs | `u64` | `15` | Must be `> 0`. | Snapshot flush interval (seconds) for observation output file. |
|
||||
| beobachten_file | `String` | `"cache/beobachten.txt"` | — | Observation snapshot output file path. |
|
||||
| hardswap | `bool` | `true` | none | Enables generation-based ME hardswap strategy. |
|
||||
| me_warmup_stagger_enabled | `bool` | `true` | none | Staggers extra ME warmup dials to avoid connection spikes. |
|
||||
| me_warmup_step_delay_ms | `u64` | `500` | none | Base delay in milliseconds between warmup dial steps. |
|
||||
| me_warmup_step_jitter_ms | `u64` | `300` | none | Additional random delay in milliseconds for warmup steps. |
|
||||
| me_reconnect_max_concurrent_per_dc | `u32` | `8` | none | Limits concurrent reconnect workers per DC during health recovery. |
|
||||
| me_reconnect_backoff_base_ms | `u64` | `500` | none | Initial reconnect backoff in milliseconds. |
|
||||
| me_reconnect_backoff_cap_ms | `u64` | `30000` | none | Maximum reconnect backoff cap in milliseconds. |
|
||||
| me_reconnect_fast_retry_count | `u32` | `16` | none | Immediate retry budget before long backoff behavior applies. |
|
||||
| me_single_endpoint_shadow_writers | `u8` | `2` | `0..=32`. | Additional reserve writers for one-endpoint DC groups. |
|
||||
| me_single_endpoint_outage_mode_enabled | `bool` | `true` | — | Enables aggressive outage recovery for one-endpoint DC groups. |
|
||||
| me_single_endpoint_outage_disable_quarantine | `bool` | `true` | — | Ignores endpoint quarantine in one-endpoint outage mode. |
|
||||
| me_single_endpoint_outage_backoff_min_ms | `u64` | `250` | Must be `> 0`; also `<= me_single_endpoint_outage_backoff_max_ms`. | Minimum reconnect backoff in outage mode (ms). |
|
||||
| me_single_endpoint_outage_backoff_max_ms | `u64` | `3000` | Must be `> 0`; also `>= me_single_endpoint_outage_backoff_min_ms`. | Maximum reconnect backoff in outage mode (ms). |
|
||||
| me_single_endpoint_shadow_rotate_every_secs | `u64` | `900` | — | Periodic shadow writer rotation interval (`0` disables). |
|
||||
| me_floor_mode | `"static" \| "adaptive"` | `"adaptive"` | — | Writer floor policy mode. |
|
||||
| me_adaptive_floor_idle_secs | `u64` | `90` | — | Idle time before adaptive floor may reduce one-endpoint target. |
|
||||
| me_adaptive_floor_min_writers_single_endpoint | `u8` | `1` | `1..=32`. | Minimum adaptive writer target for one-endpoint DC groups. |
|
||||
| me_adaptive_floor_min_writers_multi_endpoint | `u8` | `1` | `1..=32`. | Minimum adaptive writer target for multi-endpoint DC groups. |
|
||||
| me_adaptive_floor_recover_grace_secs | `u64` | `180` | — | Grace period to hold static floor after activity. |
|
||||
| me_adaptive_floor_writers_per_core_total | `u16` | `48` | Must be `> 0`. | Global writer budget per logical CPU core in adaptive mode. |
|
||||
| me_adaptive_floor_cpu_cores_override | `u16` | `0` | — | Manual CPU core count override (`0` uses auto-detection). |
|
||||
| me_adaptive_floor_max_extra_writers_single_per_core | `u16` | `1` | — | Per-core max extra writers above base floor for one-endpoint DCs. |
|
||||
| me_adaptive_floor_max_extra_writers_multi_per_core | `u16` | `2` | — | Per-core max extra writers above base floor for multi-endpoint DCs. |
|
||||
| me_adaptive_floor_max_active_writers_per_core | `u16` | `64` | Must be `> 0`. | Hard cap for active ME writers per logical CPU core. |
|
||||
| me_adaptive_floor_max_warm_writers_per_core | `u16` | `64` | Must be `> 0`. | Hard cap for warm ME writers per logical CPU core. |
|
||||
| me_adaptive_floor_max_active_writers_global | `u32` | `256` | Must be `> 0`. | Hard global cap for active ME writers. |
|
||||
| me_adaptive_floor_max_warm_writers_global | `u32` | `256` | Must be `> 0`. | Hard global cap for warm ME writers. |
|
||||
| upstream_connect_retry_attempts | `u32` | `2` | Must be `> 0`. | Connect attempts for selected upstream before error/fallback. |
|
||||
| upstream_connect_retry_backoff_ms | `u64` | `100` | — | Delay between upstream connect attempts (ms). |
|
||||
| upstream_connect_budget_ms | `u64` | `3000` | Must be `> 0`. | Total wall-clock budget for one upstream connect request (ms). |
|
||||
| upstream_unhealthy_fail_threshold | `u32` | `5` | Must be `> 0`. | Consecutive failed requests before upstream is marked unhealthy. |
|
||||
| upstream_connect_failfast_hard_errors | `bool` | `false` | — | Skips additional retries for hard non-transient connect errors. |
|
||||
| stun_iface_mismatch_ignore | `bool` | `false` | none | Reserved compatibility flag in current runtime revision. |
|
||||
| unknown_dc_log_path | `String \| null` | `"unknown-dc.txt"` | — | File path for unknown-DC request logging (`null` disables file path). |
|
||||
| unknown_dc_file_log_enabled | `bool` | `false` | — | Enables unknown-DC file logging. |
|
||||
| log_level | `"debug" \| "verbose" \| "normal" \| "silent"` | `"normal"` | — | Runtime logging verbosity. |
|
||||
| disable_colors | `bool` | `false` | — | Disables ANSI colors in logs. |
|
||||
| me_socks_kdf_policy | `"strict" \| "compat"` | `"strict"` | — | SOCKS-bound KDF fallback policy for ME handshake. |
|
||||
| me_route_backpressure_base_timeout_ms | `u64` | `25` | Must be `> 0`. | Base backpressure timeout for route-channel send (ms). |
|
||||
| me_route_backpressure_high_timeout_ms | `u64` | `120` | Must be `>= me_route_backpressure_base_timeout_ms`. | High backpressure timeout when queue occupancy exceeds watermark (ms). |
|
||||
| me_route_backpressure_high_watermark_pct | `u8` | `80` | `1..=100`. | Queue occupancy threshold (%) for high timeout mode. |
|
||||
| me_health_interval_ms_unhealthy | `u64` | `1000` | Must be `> 0`. | Health monitor interval while writer coverage is degraded (ms). |
|
||||
| me_health_interval_ms_healthy | `u64` | `3000` | Must be `> 0`. | Health monitor interval while writer coverage is healthy (ms). |
|
||||
| me_admission_poll_ms | `u64` | `1000` | Must be `> 0`. | Poll interval for conditional-admission checks (ms). |
|
||||
| me_warn_rate_limit_ms | `u64` | `5000` | Must be `> 0`. | Cooldown for repetitive ME warning logs (ms). |
|
||||
| me_route_no_writer_mode | `"async_recovery_failfast" \| "inline_recovery_legacy" \| "hybrid_async_persistent"` | `"hybrid_async_persistent"` | — | Route behavior when no writer is immediately available. |
|
||||
| me_route_no_writer_wait_ms | `u64` | `250` | `10..=5000`. | Max wait in async-recovery failfast mode (ms). |
|
||||
| me_route_inline_recovery_attempts | `u32` | `3` | Must be `> 0`. | Inline recovery attempts in legacy mode. |
|
||||
| me_route_inline_recovery_wait_ms | `u64` | `3000` | `10..=30000`. | Max inline recovery wait in legacy mode (ms). |
|
||||
| fast_mode_min_tls_record | `usize` | `0` | — | Minimum TLS record size when fast-mode coalescing is enabled (`0` disables). |
|
||||
| update_every | `u64 \| null` | `300` | If set: must be `> 0`; if `null`: legacy fallback path is used. | Unified refresh interval for ME config and proxy-secret updater tasks. |
|
||||
| me_reinit_every_secs | `u64` | `900` | Must be `> 0`. | Periodic interval for zero-downtime ME reinit cycle. |
|
||||
| me_hardswap_warmup_delay_min_ms | `u64` | `1000` | Must be `<= me_hardswap_warmup_delay_max_ms`. | Lower bound for hardswap warmup dial spacing. |
|
||||
| me_hardswap_warmup_delay_max_ms | `u64` | `2000` | Must be `> 0`. | Upper bound for hardswap warmup dial spacing. |
|
||||
| me_hardswap_warmup_extra_passes | `u8` | `3` | Must be within `[0, 10]`. | Additional warmup passes after the base pass in one hardswap cycle. |
|
||||
| me_hardswap_warmup_pass_backoff_base_ms | `u64` | `500` | Must be `> 0`. | Base backoff between extra hardswap warmup passes. |
|
||||
| me_config_stable_snapshots | `u8` | `2` | Must be `> 0`. | Number of identical ME config snapshots required before apply. |
|
||||
| me_config_apply_cooldown_secs | `u64` | `300` | none | Cooldown between applied ME endpoint-map updates. |
|
||||
| me_snapshot_require_http_2xx | `bool` | `true` | — | Requires 2xx HTTP responses for applying config snapshots. |
|
||||
| me_snapshot_reject_empty_map | `bool` | `true` | — | Rejects empty config snapshots. |
|
||||
| me_snapshot_min_proxy_for_lines | `u32` | `1` | Must be `> 0`. | Minimum parsed `proxy_for` rows required to accept snapshot. |
|
||||
| proxy_secret_stable_snapshots | `u8` | `2` | Must be `> 0`. | Number of identical proxy-secret snapshots required before rotation. |
|
||||
| proxy_secret_rotate_runtime | `bool` | `true` | none | Enables runtime proxy-secret rotation from updater snapshots. |
|
||||
| me_secret_atomic_snapshot | `bool` | `true` | — | Keeps selector and secret bytes from the same snapshot atomically. |
|
||||
| proxy_secret_len_max | `usize` | `256` | Must be within `[32, 4096]`. | Upper length limit for accepted proxy-secret bytes. |
|
||||
| me_pool_drain_ttl_secs | `u64` | `90` | none | Time window where stale writers remain fallback-eligible after map change. |
|
||||
| me_pool_drain_threshold | `u64` | `128` | — | Max draining stale writers before batch force-close (`0` disables threshold cleanup). |
|
||||
| me_pool_drain_soft_evict_enabled | `bool` | `true` | — | Enables gradual soft-eviction of stale writers during drain/reinit instead of immediate hard close. |
|
||||
| me_pool_drain_soft_evict_grace_secs | `u64` | `30` | `0..=3600`. | Grace period before stale writers become soft-evict candidates. |
|
||||
| me_pool_drain_soft_evict_per_writer | `u8` | `1` | `1..=16`. | Maximum stale routes soft-evicted per writer in one eviction pass. |
|
||||
| me_pool_drain_soft_evict_budget_per_core | `u16` | `8` | `1..=64`. | Per-core budget limiting aggregate soft-eviction work per pass. |
|
||||
| me_pool_drain_soft_evict_cooldown_ms | `u64` | `5000` | Must be `> 0`. | Cooldown between consecutive soft-eviction passes (ms). |
|
||||
| me_bind_stale_mode | `"never" \| "ttl" \| "always"` | `"ttl"` | — | Policy for new binds on stale draining writers. |
|
||||
| me_bind_stale_ttl_secs | `u64` | `90` | — | TTL for stale bind allowance when stale mode is `ttl`. |
|
||||
| me_pool_min_fresh_ratio | `f32` | `0.8` | Must be within `[0.0, 1.0]`. | Minimum fresh desired-DC coverage ratio before stale writers are drained. |
|
||||
| me_reinit_drain_timeout_secs | `u64` | `120` | `0` disables force-close; if `> 0` and `< me_pool_drain_ttl_secs`, runtime bumps it to TTL. | Force-close timeout for draining stale writers (`0` keeps indefinite draining). |
|
||||
| proxy_secret_auto_reload_secs | `u64` | `3600` | Deprecated. Use `general.update_every`. | Deprecated legacy secret reload interval (fallback when `update_every` is not set). |
|
||||
| proxy_config_auto_reload_secs | `u64` | `3600` | Deprecated. Use `general.update_every`. | Deprecated legacy config reload interval (fallback when `update_every` is not set). |
|
||||
| me_reinit_singleflight | `bool` | `true` | — | Serializes ME reinit cycles across trigger sources. |
|
||||
| me_reinit_trigger_channel | `usize` | `64` | Must be `> 0`. | Trigger queue capacity for reinit scheduler. |
|
||||
| me_reinit_coalesce_window_ms | `u64` | `200` | — | Trigger coalescing window before starting reinit (ms). |
|
||||
| me_deterministic_writer_sort | `bool` | `true` | — | Enables deterministic candidate sort for writer binding path. |
|
||||
| me_writer_pick_mode | `"sorted_rr" \| "p2c"` | `"p2c"` | — | Writer selection mode for route bind path. |
|
||||
| me_writer_pick_sample_size | `u8` | `3` | `2..=4`. | Number of candidates sampled by picker in `p2c` mode. |
|
||||
| ntp_check | `bool` | `true` | — | Enables NTP drift check at startup. |
|
||||
| ntp_servers | `String[]` | `["pool.ntp.org"]` | — | NTP servers used for drift check. |
|
||||
| auto_degradation_enabled | `bool` | `true` | none | Reserved compatibility flag in current runtime revision. |
|
||||
| degradation_min_unavailable_dc_groups | `u8` | `2` | none | Reserved compatibility threshold in current runtime revision. |
|
||||
|
||||
## [general.modes]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| classic | `bool` | `false` | — | Enables classic MTProxy mode. |
|
||||
| secure | `bool` | `false` | — | Enables secure mode. |
|
||||
| tls | `bool` | `true` | — | Enables TLS mode. |
|
||||
|
||||
## [general.links]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| show | `"*" \| String[]` | `"*"` | — | Selects users whose tg:// links are shown at startup. |
|
||||
| public_host | `String \| null` | `null` | — | Public hostname/IP override for generated tg:// links. |
|
||||
| public_port | `u16 \| null` | `null` | — | Public port override for generated tg:// links. |
|
||||
|
||||
## [general.telemetry]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| core_enabled | `bool` | `true` | — | Enables core hot-path telemetry counters. |
|
||||
| user_enabled | `bool` | `true` | — | Enables per-user telemetry counters. |
|
||||
| me_level | `"silent" \| "normal" \| "debug"` | `"normal"` | — | Middle-End telemetry verbosity level. |
|
||||
|
||||
## [network]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| ipv4 | `bool` | `true` | — | Enables IPv4 networking. |
|
||||
| ipv6 | `bool` | `false` | — | Enables IPv6 networking. |
|
||||
| prefer | `u8` | `4` | Must be `4` or `6`. | Preferred IP family for selection (`4` or `6`). |
|
||||
| multipath | `bool` | `false` | — | Enables multipath behavior where supported. |
|
||||
| stun_use | `bool` | `true` | none | Global STUN switch; when `false`, STUN probing path is disabled. |
|
||||
| stun_servers | `String[]` | Built-in STUN list (13 hosts) | Deduplicated; empty values are removed. | Primary STUN server list for NAT/public endpoint discovery. |
|
||||
| stun_tcp_fallback | `bool` | `true` | none | Enables TCP fallback for STUN when UDP path is blocked. |
|
||||
| http_ip_detect_urls | `String[]` | `["https://ifconfig.me/ip", "https://api.ipify.org"]` | none | HTTP fallback endpoints for public IP detection when STUN is unavailable. |
|
||||
| cache_public_ip_path | `String` | `"cache/public_ip.txt"` | — | File path for caching detected public IP. |
|
||||
| dns_overrides | `String[]` | `[]` | Must match `host:port:ip`; IPv6 must be bracketed. | Runtime DNS overrides in `host:port:ip` format. |
|
||||
|
||||
## [server]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| port | `u16` | `443` | — | Main proxy listen port. |
|
||||
| listen_addr_ipv4 | `String \| null` | `"0.0.0.0"` | — | IPv4 bind address for TCP listener. |
|
||||
| listen_addr_ipv6 | `String \| null` | `"::"` | — | IPv6 bind address for TCP listener. |
|
||||
| listen_unix_sock | `String \| null` | `null` | — | Unix socket path for listener. |
|
||||
| listen_unix_sock_perm | `String \| null` | `null` | — | Unix socket permissions in octal string (e.g., `"0666"`). |
|
||||
| listen_tcp | `bool \| null` | `null` (auto) | — | Explicit TCP listener enable/disable override. |
|
||||
| proxy_protocol | `bool` | `false` | — | Enables HAProxy PROXY protocol parsing on incoming client connections. |
|
||||
| proxy_protocol_header_timeout_ms | `u64` | `500` | Must be `> 0`. | Timeout for PROXY protocol header read/parse (ms). |
|
||||
| metrics_port | `u16 \| null` | `null` | — | Metrics endpoint port (enables metrics listener). |
|
||||
| metrics_listen | `String \| null` | `null` | — | Full metrics bind address (`IP:PORT`), overrides `metrics_port`. |
|
||||
| metrics_whitelist | `IpNetwork[]` | `["127.0.0.1/32", "::1/128"]` | — | CIDR whitelist for metrics endpoint access. |
|
||||
| max_connections | `u32` | `10000` | — | Max concurrent client connections (`0` = unlimited). |
|
||||
|
||||
## [server.api]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| enabled | `bool` | `true` | — | Enables control-plane REST API. |
|
||||
| listen | `String` | `"0.0.0.0:9091"` | Must be valid `IP:PORT`. | API bind address in `IP:PORT` format. |
|
||||
| whitelist | `IpNetwork[]` | `["127.0.0.0/8"]` | — | CIDR whitelist allowed to access API. |
|
||||
| auth_header | `String` | `""` | — | Exact expected `Authorization` header value (empty = disabled). |
|
||||
| request_body_limit_bytes | `usize` | `65536` | Must be `> 0`. | Maximum accepted HTTP request body size. |
|
||||
| minimal_runtime_enabled | `bool` | `true` | — | Enables minimal runtime snapshots endpoint logic. |
|
||||
| minimal_runtime_cache_ttl_ms | `u64` | `1000` | `0..=60000`. | Cache TTL for minimal runtime snapshots (ms; `0` disables cache). |
|
||||
| runtime_edge_enabled | `bool` | `false` | — | Enables runtime edge endpoints. |
|
||||
| runtime_edge_cache_ttl_ms | `u64` | `1000` | `0..=60000`. | Cache TTL for runtime edge aggregation payloads (ms). |
|
||||
| runtime_edge_top_n | `usize` | `10` | `1..=1000`. | Top-N size for edge connection leaderboard. |
|
||||
| runtime_edge_events_capacity | `usize` | `256` | `16..=4096`. | Ring-buffer capacity for runtime edge events. |
|
||||
| read_only | `bool` | `false` | — | Rejects mutating API endpoints when enabled. |
|
||||
|
||||
## [[server.listeners]]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| ip | `IpAddr` | — | — | Listener bind IP. |
|
||||
| announce | `String \| null` | — | — | Public IP/domain announced in proxy links (priority over `announce_ip`). |
|
||||
| announce_ip | `IpAddr \| null` | — | — | Deprecated legacy announce IP (migrated to `announce` if needed). |
|
||||
| proxy_protocol | `bool \| null` | `null` | — | Per-listener override for PROXY protocol enable flag. |
|
||||
| reuse_allow | `bool` | `false` | — | Enables `SO_REUSEPORT` for multi-instance bind sharing. |
|
||||
|
||||
## [timeouts]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| client_handshake | `u64` | `30` | — | Client handshake timeout. |
|
||||
| tg_connect | `u64` | `10` | — | Upstream Telegram connect timeout. |
|
||||
| client_keepalive | `u64` | `15` | — | Client keepalive timeout. |
|
||||
| client_ack | `u64` | `90` | — | Client ACK timeout. |
|
||||
| me_one_retry | `u8` | `12` | none | Fast reconnect attempts budget for single-endpoint DC scenarios. |
|
||||
| me_one_timeout_ms | `u64` | `1200` | none | Timeout in milliseconds for each quick single-endpoint reconnect attempt. |
|
||||
|
||||
## [censorship]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| tls_domain | `String` | `"petrovich.ru"` | — | Primary TLS domain used in fake TLS handshake profile. |
|
||||
| tls_domains | `String[]` | `[]` | — | Additional TLS domains for generating multiple links. |
|
||||
| mask | `bool` | `true` | — | Enables masking/fronting relay mode. |
|
||||
| mask_host | `String \| null` | `null` | — | Upstream mask host for TLS fronting relay. |
|
||||
| mask_port | `u16` | `443` | — | Upstream mask port for TLS fronting relay. |
|
||||
| mask_unix_sock | `String \| null` | `null` | — | Unix socket path for mask backend instead of TCP host/port. |
|
||||
| fake_cert_len | `usize` | `2048` | — | Length of synthetic certificate payload when emulation data is unavailable. |
|
||||
| tls_emulation | `bool` | `true` | — | Enables certificate/TLS behavior emulation from cached real fronts. |
|
||||
| tls_front_dir | `String` | `"tlsfront"` | — | Directory path for TLS front cache storage. |
|
||||
| server_hello_delay_min_ms | `u64` | `0` | — | Minimum server_hello delay for anti-fingerprint behavior (ms). |
|
||||
| server_hello_delay_max_ms | `u64` | `0` | — | Maximum server_hello delay for anti-fingerprint behavior (ms). |
|
||||
| tls_new_session_tickets | `u8` | `0` | — | Number of `NewSessionTicket` messages to emit after handshake. |
|
||||
| tls_full_cert_ttl_secs | `u64` | `90` | — | TTL for sending full cert payload per (domain, client IP) tuple. |
|
||||
| alpn_enforce | `bool` | `true` | — | Enforces ALPN echo behavior based on client preference. |
|
||||
| mask_proxy_protocol | `u8` | `0` | — | PROXY protocol mode for mask backend (`0` disabled, `1` v1, `2` v2). |
|
||||
| mask_shape_hardening | `bool` | `true` | — | Enables client->mask shape-channel hardening by applying controlled tail padding to bucket boundaries on mask relay shutdown. |
|
||||
| mask_shape_hardening_aggressive_mode | `bool` | `false` | Requires `mask_shape_hardening = true`. | Opt-in aggressive shaping profile: allows shaping on backend-silent non-EOF paths and switches above-cap blur to strictly positive random tail. |
|
||||
| mask_shape_bucket_floor_bytes | `usize` | `512` | Must be `> 0`; should be `<= mask_shape_bucket_cap_bytes`. | Minimum bucket size used by shape-channel hardening. |
|
||||
| mask_shape_bucket_cap_bytes | `usize` | `4096` | Must be `>= mask_shape_bucket_floor_bytes`. | Maximum bucket size used by shape-channel hardening; traffic above cap is not padded further. |
|
||||
| mask_shape_above_cap_blur | `bool` | `false` | Requires `mask_shape_hardening = true`; requires `mask_shape_above_cap_blur_max_bytes > 0`. | Adds bounded randomized tail bytes even when forwarded size already exceeds cap. |
|
||||
| mask_shape_above_cap_blur_max_bytes | `usize` | `512` | Must be `<= 1048576`; must be `> 0` when `mask_shape_above_cap_blur = true`. | Maximum randomized extra bytes appended above cap. |
|
||||
| mask_timing_normalization_enabled | `bool` | `false` | Requires `mask_timing_normalization_floor_ms > 0`; requires `ceiling >= floor`. | Enables timing envelope normalization on masking outcomes. |
|
||||
| mask_timing_normalization_floor_ms | `u64` | `0` | Must be `> 0` when timing normalization is enabled; must be `<= ceiling`. | Lower bound (ms) for masking outcome normalization target. |
|
||||
| mask_timing_normalization_ceiling_ms | `u64` | `0` | Must be `>= floor`; must be `<= 60000`. | Upper bound (ms) for masking outcome normalization target. |
|
||||
|
||||
### Shape-channel hardening notes (`[censorship]`)
|
||||
|
||||
These parameters are designed to reduce one specific fingerprint source during masking: the exact number of bytes sent from proxy to `mask_host` for invalid or probing traffic.
|
||||
|
||||
Without hardening, a censor can often correlate probe input length with backend-observed length very precisely (for example: `5 + body_sent` on early TLS reject paths). That creates a length-based classifier signal.
|
||||
|
||||
When `mask_shape_hardening = true`, Telemt pads the **client->mask** stream tail to a bucket boundary at relay shutdown:
|
||||
|
||||
- Total bytes sent to mask are first measured.
|
||||
- A bucket is selected using powers of two starting from `mask_shape_bucket_floor_bytes`.
|
||||
- Padding is added only if total bytes are below `mask_shape_bucket_cap_bytes`.
|
||||
- If bytes already exceed cap, no extra padding is added.
|
||||
|
||||
This means multiple nearby probe sizes collapse into the same backend-observed size class, making active classification harder.
|
||||
|
||||
What each parameter changes in practice:
|
||||
|
||||
- `mask_shape_hardening`
|
||||
Enables or disables this entire length-shaping stage on the fallback path.
|
||||
When `false`, backend-observed length stays close to the real forwarded probe length.
|
||||
When `true`, clean relay shutdown can append random padding bytes to move the total into a bucket.
|
||||
|
||||
- `mask_shape_bucket_floor_bytes`
|
||||
Sets the first bucket boundary used for small probes.
|
||||
Example: with floor `512`, a malformed probe that would otherwise forward `37` bytes can be expanded to `512` bytes on clean EOF.
|
||||
Larger floor values hide very small probes better, but increase egress cost.
|
||||
|
||||
- `mask_shape_bucket_cap_bytes`
|
||||
Sets the largest bucket Telemt will pad up to with bucket logic.
|
||||
Example: with cap `4096`, a forwarded total of `1800` bytes may be padded to `2048` or `4096` depending on the bucket ladder, but a total already above `4096` will not be bucket-padded further.
|
||||
Larger cap values increase the range over which size classes are collapsed, but also increase worst-case overhead.
|
||||
|
||||
- Clean EOF matters in conservative mode
|
||||
In the default profile, shape padding is intentionally conservative: it is applied on clean relay shutdown, not on every timeout/drip path.
|
||||
This avoids introducing new timeout-tail artifacts that some backends or tests interpret as a separate fingerprint.
|
||||
|
||||
Practical trade-offs:
|
||||
|
||||
- Better anti-fingerprinting on size/shape channel.
|
||||
- Slightly higher egress overhead for small probes due to padding.
|
||||
- Behavior is intentionally conservative and enabled by default.
|
||||
|
||||
Recommended starting profile:
|
||||
|
||||
- `mask_shape_hardening = true` (default)
|
||||
- `mask_shape_bucket_floor_bytes = 512`
|
||||
- `mask_shape_bucket_cap_bytes = 4096`
|
||||
|
||||
### Aggressive mode notes (`[censorship]`)
|
||||
|
||||
`mask_shape_hardening_aggressive_mode` is an opt-in profile for higher anti-classifier pressure.
|
||||
|
||||
- Default is `false` to preserve conservative timeout/no-tail behavior.
|
||||
- Requires `mask_shape_hardening = true`.
|
||||
- When enabled, backend-silent non-EOF masking paths may be shaped.
|
||||
- When enabled together with above-cap blur, the random extra tail uses `[1, max]` instead of `[0, max]`.
|
||||
|
||||
What changes when aggressive mode is enabled:
|
||||
|
||||
- Backend-silent timeout paths can be shaped
|
||||
In default mode, a client that keeps the socket half-open and times out will usually not receive shape padding on that path.
|
||||
In aggressive mode, Telemt may still shape that backend-silent session if no backend bytes were returned.
|
||||
This is specifically aimed at active probes that try to avoid EOF in order to preserve an exact backend-observed length.
|
||||
|
||||
- Above-cap blur always adds at least one byte
|
||||
In default mode, above-cap blur may choose `0`, so some oversized probes still land on their exact base forwarded length.
|
||||
In aggressive mode, that exact-base sample is removed by construction.
|
||||
|
||||
- Tradeoff
|
||||
Aggressive mode improves resistance to active length classifiers, but it is more opinionated and less conservative.
|
||||
If your deployment prioritizes strict compatibility with timeout/no-tail semantics, leave it disabled.
|
||||
If your threat model includes repeated active probing by a censor, this mode is the stronger profile.
|
||||
|
||||
Use this mode only when your threat model prioritizes classifier resistance over strict compatibility with conservative masking semantics.
|
||||
|
||||
### Above-cap blur notes (`[censorship]`)
|
||||
|
||||
`mask_shape_above_cap_blur` adds a second-stage blur for very large probes that are already above `mask_shape_bucket_cap_bytes`.
|
||||
|
||||
- A random tail in `[0, mask_shape_above_cap_blur_max_bytes]` is appended in default mode.
|
||||
- In aggressive mode, the random tail becomes strictly positive: `[1, mask_shape_above_cap_blur_max_bytes]`.
|
||||
- This reduces exact-size leakage above cap at bounded overhead.
|
||||
- Keep `mask_shape_above_cap_blur_max_bytes` conservative to avoid unnecessary egress growth.
|
||||
|
||||
Operational meaning:
|
||||
|
||||
- Without above-cap blur
|
||||
A probe that forwards `5005` bytes will still look like `5005` bytes to the backend if it is already above cap.
|
||||
|
||||
- With above-cap blur enabled
|
||||
That same probe may look like any value in a bounded window above its base length.
|
||||
Example with `mask_shape_above_cap_blur_max_bytes = 64`:
|
||||
backend-observed size becomes `5005..5069` in default mode, or `5006..5069` in aggressive mode.
|
||||
|
||||
- Choosing `mask_shape_above_cap_blur_max_bytes`
|
||||
Small values reduce cost but preserve more separability between far-apart oversized classes.
|
||||
Larger values blur oversized classes more aggressively, but add more egress overhead and more output variance.
|
||||
|
||||
### Timing normalization envelope notes (`[censorship]`)
|
||||
|
||||
`mask_timing_normalization_enabled` smooths timing differences between masking outcomes by applying a target duration envelope.
|
||||
|
||||
- A random target is selected in `[mask_timing_normalization_floor_ms, mask_timing_normalization_ceiling_ms]`.
|
||||
- Fast paths are delayed up to the selected target.
|
||||
- Slow paths are not forced to finish by the ceiling (the envelope is best-effort shaping, not truncation).
|
||||
|
||||
Recommended starting profile for timing shaping:
|
||||
|
||||
- `mask_timing_normalization_enabled = true`
|
||||
- `mask_timing_normalization_floor_ms = 180`
|
||||
- `mask_timing_normalization_ceiling_ms = 320`
|
||||
|
||||
If your backend or network is very bandwidth-constrained, reduce cap first. If probes are still too distinguishable in your environment, increase floor gradually.
|
||||
|
||||
## [access]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | TOML shape example | Description |
|
||||
|---|---|---|---|---|---|
|
||||
| users | `Map<String, String>` | `{"default": "000…000"}` | Secret must be 32 hex characters. | `[access.users]`<br>`user = "32-hex secret"`<br>`user2 = "32-hex secret"` | User credentials map used for client authentication. |
|
||||
| user_ad_tags | `Map<String, String>` | `{}` | Every value must be exactly 32 hex characters. | `[access.user_ad_tags]`<br>`user = "32-hex ad_tag"` | Per-user ad tags used as override over `general.ad_tag`. |
|
||||
| user_max_tcp_conns | `Map<String, usize>` | `{}` | — | `[access.user_max_tcp_conns]`<br>`user = 500` | Per-user maximum concurrent TCP connections. |
|
||||
| user_expirations | `Map<String, DateTime<Utc>>` | `{}` | Timestamp must be valid RFC3339/ISO-8601 datetime. | `[access.user_expirations]`<br>`user = "2026-12-31T23:59:59Z"` | Per-user account expiration timestamps. |
|
||||
| user_data_quota | `Map<String, u64>` | `{}` | — | `[access.user_data_quota]`<br>`user = 1073741824` | Per-user traffic quota in bytes. |
|
||||
| user_max_unique_ips | `Map<String, usize>` | `{}` | — | `[access.user_max_unique_ips]`<br>`user = 16` | Per-user unique source IP limits. |
|
||||
| user_max_unique_ips_global_each | `usize` | `0` | — | `user_max_unique_ips_global_each = 0` | Global fallback used when `[access.user_max_unique_ips]` has no per-user override. |
|
||||
| user_max_unique_ips_mode | `"active_window" \| "time_window" \| "combined"` | `"active_window"` | — | `user_max_unique_ips_mode = "active_window"` | Unique source IP limit accounting mode. |
|
||||
| user_max_unique_ips_window_secs | `u64` | `30` | Must be `> 0`. | `user_max_unique_ips_window_secs = 30` | Window size (seconds) used by unique-IP accounting modes that use time windows. |
|
||||
| replay_check_len | `usize` | `65536` | — | `replay_check_len = 65536` | Replay-protection storage length. |
|
||||
| replay_window_secs | `u64` | `1800` | — | `replay_window_secs = 1800` | Replay-protection window in seconds. |
|
||||
| ignore_time_skew | `bool` | `false` | — | `ignore_time_skew = false` | Disables client/server timestamp skew checks in replay validation when enabled. |
|
||||
|
||||
## [[upstreams]]
|
||||
|
||||
| Parameter | Type | Default | Constraints / validation | Description |
|
||||
|---|---|---|---|---|
|
||||
| type | `"direct" \| "socks4" \| "socks5"` | — | Required field. | Upstream transport type selector. |
|
||||
| weight | `u16` | `1` | — | Base weight used by weighted-random upstream selection. |
|
||||
| enabled | `bool` | `true` | — | Disabled entries are excluded from upstream selection at runtime. |
|
||||
| scopes | `String` | `""` | none | Comma-separated scope tags used for request-level upstream filtering. |
|
||||
| interface | `String \| null` | `null` | Optional; type-specific runtime rules apply. | Optional outbound interface/local bind hint (supported with type-specific rules). |
|
||||
| bind_addresses | `String[] \| null` | `null` | Applies to `type = "direct"`. | Optional explicit local source bind addresses for `type = "direct"`. |
|
||||
| address | `String` | — | Required for `type = "socks4"` and `type = "socks5"`. | SOCKS server endpoint (`host:port` or `ip:port`) for SOCKS upstream types. |
|
||||
| user_id | `String \| null` | `null` | Only for `type = "socks4"`. | SOCKS4 CONNECT user ID (`type = "socks4"` only). |
|
||||
| username | `String \| null` | `null` | Only for `type = "socks5"`. | SOCKS5 username (`type = "socks5"` only). |
|
||||
| password | `String \| null` | `null` | Only for `type = "socks5"`. | SOCKS5 password (`type = "socks5"` only). |
|
||||
@@ -55,7 +55,10 @@ user2 = "00000000000000000000000000000002"
|
||||
user3 = "00000000000000000000000000000003"
|
||||
```
|
||||
4. Save the config. Ctrl+S -> Ctrl+X. You don't need to restart telemt.
|
||||
5. Get the links via `journalctl -u telemt -n -g "links" --no-pager -o cat | tac`
|
||||
5. Get the links via
|
||||
```bash
|
||||
curl -s http://127.0.0.1:9091/v1/users | jq
|
||||
```
|
||||
|
||||
## How to view metrics
|
||||
|
||||
@@ -80,6 +83,13 @@ To specify a domain in the links, add to the `[general.links]` section of the co
|
||||
public_host = "proxy.example.com"
|
||||
```
|
||||
|
||||
### Server connection limit
|
||||
Limits the total number of open connections to the server:
|
||||
```toml
|
||||
[server]
|
||||
max_connections = 10000 # 0 - unlimited, 10000 - default
|
||||
```
|
||||
|
||||
### Upstream Manager
|
||||
To specify an upstream, add to the `[[upstreams]]` section of the config.toml file:
|
||||
#### Binding to IP
|
||||
@@ -110,3 +120,17 @@ password = "pass" # Password for Auth on SOCKS-server
|
||||
weight = 1 # Set Weight for Scenarios
|
||||
enabled = true
|
||||
```
|
||||
|
||||
#### Shadowsocks as Upstream
|
||||
Requires `use_middle_proxy = false`.
|
||||
|
||||
```toml
|
||||
[general]
|
||||
use_middle_proxy = false
|
||||
|
||||
[[upstreams]]
|
||||
type = "shadowsocks"
|
||||
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@1.2.3.4:8388"
|
||||
weight = 1
|
||||
enabled = true
|
||||
```
|
||||
|
||||
@@ -55,7 +55,10 @@ user2 = "00000000000000000000000000000002"
|
||||
user3 = "00000000000000000000000000000003"
|
||||
```
|
||||
4. Сохранить конфиг. Ctrl+S -> Ctrl+X. Перезапускать telemt не нужно.
|
||||
5. Получить ссылки через `journalctl -u telemt -n -g "links" --no-pager -o cat | tac`
|
||||
5. Получить ссылки через
|
||||
```bash
|
||||
curl -s http://127.0.0.1:9091/v1/users | jq
|
||||
```
|
||||
|
||||
## Как посмотреть метрики
|
||||
|
||||
@@ -80,6 +83,13 @@ metrics_whitelist = ["127.0.0.1/32", "::1/128", "0.0.0.0/0"]
|
||||
public_host = "proxy.example.com"
|
||||
```
|
||||
|
||||
### Общий лимит подключений к серверу
|
||||
Ограничивает общее число открытых подключений к серверу:
|
||||
```toml
|
||||
[server]
|
||||
max_connections = 10000 # 0 - unlimited, 10000 - default
|
||||
```
|
||||
|
||||
### Upstream Manager
|
||||
Чтобы указать апстрим, добавьте в секцию `[[upstreams]]` файла config.toml:
|
||||
#### Привязка к IP
|
||||
@@ -110,3 +120,17 @@ password = "pass" # Password for Auth on SOCKS-server
|
||||
weight = 1 # Set Weight for Scenarios
|
||||
enabled = true
|
||||
```
|
||||
|
||||
#### Shadowsocks как Upstream
|
||||
Требует `use_middle_proxy = false`.
|
||||
|
||||
```toml
|
||||
[general]
|
||||
use_middle_proxy = false
|
||||
|
||||
[[upstreams]]
|
||||
type = "shadowsocks"
|
||||
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@1.2.3.4:8388"
|
||||
weight = 1
|
||||
enabled = true
|
||||
```
|
||||
|
||||
92
docs/LICENSE/LICENSE.de.md
Normal file
92
docs/LICENSE/LICENSE.de.md
Normal file
@@ -0,0 +1,92 @@
|
||||
# Öffentliche TELEMT-Lizenz 3
|
||||
|
||||
***Alle Rechte vorbehalten (c) 2026 Telemt***
|
||||
|
||||
Hiermit wird jeder Person, die eine Kopie dieser Software und der dazugehörigen Dokumentation (nachfolgend "Software") erhält, unentgeltlich die Erlaubnis erteilt, die Software ohne Einschränkungen zu nutzen, einschließlich des Rechts, die Software zu verwenden, zu vervielfältigen, zu ändern, abgeleitete Werke zu erstellen, zu verbinden, zu veröffentlichen, zu verbreiten, zu unterlizenzieren und/oder Kopien der Software zu verkaufen sowie diese Rechte auch denjenigen einzuräumen, denen die Software zur Verfügung gestellt wird, vorausgesetzt, dass sämtliche Urheberrechtshinweise sowie die Bedingungen und Bestimmungen dieser Lizenz eingehalten werden.
|
||||
|
||||
### Begriffsbestimmungen
|
||||
|
||||
Für die Zwecke dieser Lizenz gelten die folgenden Definitionen:
|
||||
|
||||
**"Software" (Software)** — die Telemt-Software einschließlich Quellcode, Dokumentation und sämtlicher zugehöriger Dateien, die unter den Bedingungen dieser Lizenz verbreitet werden.
|
||||
|
||||
**"Contributor" (Contributor)** — jede natürliche oder juristische Person, die Code, Patches, Dokumentation oder andere Materialien eingereicht hat, die von den Maintainers des Projekts angenommen und in die Software aufgenommen wurden.
|
||||
|
||||
**"Beitrag" (Contribution)** — jedes urheberrechtlich geschützte Werk, das bewusst zur Aufnahme in die Software eingereicht wurde.
|
||||
|
||||
**"Modifizierte Version" (Modified Version)** — jede Version der Software, die gegenüber der ursprünglichen Software geändert, angepasst, erweitert oder anderweitig modifiziert wurde.
|
||||
|
||||
**"Maintainers" (Maintainers)** — natürliche oder juristische Personen, die für das offizielle Telemt-Projekt und dessen offizielle Veröffentlichungen verantwortlich sind.
|
||||
|
||||
### 1 Urheberrechtshinweis (Attribution)
|
||||
|
||||
Bei der Weitergabe der Software, sowohl in Form des Quellcodes als auch in binärer Form, MÜSSEN folgende Elemente erhalten bleiben:
|
||||
|
||||
- der oben genannte Urheberrechtshinweis;
|
||||
- der vollständige Text dieser Lizenz;
|
||||
- sämtliche bestehenden Hinweise auf Urheberschaft.
|
||||
|
||||
### 2 Hinweis auf Modifikationen
|
||||
|
||||
Wenn Änderungen an der Software vorgenommen werden, MUSS die Person, die diese Änderungen vorgenommen hat, eindeutig darauf hinweisen, dass die Software modifiziert wurde, und eine kurze Beschreibung der vorgenommenen Änderungen beifügen.
|
||||
|
||||
Modifizierte Versionen der Software DÜRFEN NICHT als die originale Version von Telemt dargestellt werden.
|
||||
|
||||
### 3 Marken und Bezeichnungen
|
||||
|
||||
Diese Lizenz GEWÄHRT KEINE Rechte zur Nutzung der Bezeichnung **"Telemt"**, des Telemt-Logos oder sonstiger Marken, Kennzeichen oder Branding-Elemente von Telemt.
|
||||
|
||||
Weiterverbreitete oder modifizierte Versionen der Software DÜRFEN die Bezeichnung Telemt nicht in einer Weise verwenden, die bei Nutzern den Eindruck eines offiziellen Ursprungs oder einer Billigung durch das Telemt-Projekt erwecken könnte, sofern hierfür keine ausdrückliche Genehmigung der Maintainers vorliegt.
|
||||
|
||||
Die Verwendung der Bezeichnung **Telemt** zur Beschreibung einer modifizierten Version der Software ist nur zulässig, wenn diese Version eindeutig als modifiziert oder inoffiziell gekennzeichnet ist.
|
||||
|
||||
Jegliche Verbreitung, die Nutzer vernünftigerweise darüber täuschen könnte, dass es sich um eine offizielle Veröffentlichung von Telemt handelt, ist untersagt.
|
||||
|
||||
### 4 Transparenz bei der Verbreitung von Binärversionen
|
||||
|
||||
Im Falle der Verbreitung kompilierter Binärversionen der Software wird der Verbreiter HIERMIT ERMUTIGT (encouraged), soweit dies vernünftigerweise möglich ist, Zugang zum entsprechenden Quellcode sowie zu den Build-Anweisungen bereitzustellen.
|
||||
|
||||
Diese Praxis trägt zur Transparenz bei und ermöglicht es Empfängern, die Integrität und Reproduzierbarkeit der verbreiteten Builds zu überprüfen.
|
||||
|
||||
### 5 Gewährung einer Patentlizenz und Beendigung von Rechten
|
||||
|
||||
Jeder Contributor gewährt den Empfängern der Software eine unbefristete, weltweite, nicht-exklusive, unentgeltliche, lizenzgebührenfreie und unwiderrufliche Patentlizenz für:
|
||||
|
||||
- die Herstellung,
|
||||
- die Beauftragung der Herstellung,
|
||||
- die Nutzung,
|
||||
- das Anbieten zum Verkauf,
|
||||
- den Verkauf,
|
||||
- den Import,
|
||||
- sowie jede sonstige Verbreitung der Software.
|
||||
|
||||
Diese Patentlizenz erstreckt sich ausschließlich auf solche Patentansprüche, die notwendigerweise durch den jeweiligen Beitrag des Contributors allein oder in Kombination mit der Software verletzt würden.
|
||||
|
||||
Leitet eine Person ein Patentverfahren ein oder beteiligt sich daran, einschließlich Gegenklagen oder Kreuzklagen, mit der Behauptung, dass die Software oder ein darin enthaltener Beitrag ein Patent verletzt, **erlöschen sämtliche durch diese Lizenz gewährten Rechte für diese Person unmittelbar mit Einreichung der Klage**.
|
||||
|
||||
Darüber hinaus erlöschen alle durch diese Lizenz gewährten Rechte **automatisch**, wenn eine Person ein gerichtliches Verfahren einleitet, in dem behauptet wird, dass die Software selbst ein Patent oder andere Rechte des geistigen Eigentums verletzt.
|
||||
|
||||
### 6 Beteiligung und Beiträge zur Entwicklung
|
||||
|
||||
Sofern ein Contributor nicht ausdrücklich etwas anderes erklärt, gilt jeder Beitrag, der bewusst zur Aufnahme in die Software eingereicht wird, als unter den Bedingungen dieser Lizenz lizenziert.
|
||||
|
||||
Durch die Einreichung eines Beitrags gewährt der Contributor den Maintainers des Telemt-Projekts sowie allen Empfängern der Software die in dieser Lizenz beschriebenen Rechte in Bezug auf diesen Beitrag.
|
||||
|
||||
### 7 Urheberhinweis bei Netzwerk- und Servicenutzung
|
||||
|
||||
Wird die Software zur Bereitstellung eines öffentlich zugänglichen Netzwerkdienstes verwendet, MUSS der Betreiber dieses Dienstes einen Hinweis auf die Urheberschaft von Telemt an mindestens einer der folgenden Stellen anbringen:
|
||||
|
||||
* in der Servicedokumentation;
|
||||
* in der Dienstbeschreibung;
|
||||
* auf einer Seite "Über" oder einer vergleichbaren Informationsseite;
|
||||
* in anderen für Nutzer zugänglichen Materialien, die in angemessenem Zusammenhang mit dem Dienst stehen.
|
||||
|
||||
Ein solcher Hinweis DARF NICHT den Eindruck erwecken, dass der Dienst vom Telemt-Projekt oder dessen Maintainers unterstützt oder offiziell gebilligt wird.
|
||||
|
||||
### 8 Haftungsausschluss und salvatorische Klausel
|
||||
|
||||
DIE SOFTWARE WIRD "WIE BESEHEN" BEREITGESTELLT, OHNE JEGLICHE AUSDRÜCKLICHE ODER STILLSCHWEIGENDE GEWÄHRLEISTUNG, EINSCHLIESSLICH, ABER NICHT BESCHRÄNKT AUF GEWÄHRLEISTUNGEN DER MARKTGÄNGIGKEIT, DER EIGNUNG FÜR EINEN BESTIMMTEN ZWECK UND DER NICHTVERLETZUNG VON RECHTEN.
|
||||
|
||||
IN KEINEM FALL HAFTEN DIE AUTOREN ODER RECHTEINHABER FÜR IRGENDWELCHE ANSPRÜCHE, SCHÄDEN ODER SONSTIGE HAFTUNG, DIE AUS VERTRAG, UNERLAUBTER HANDLUNG ODER AUF ANDERE WEISE AUS DER SOFTWARE ODER DER NUTZUNG DER SOFTWARE ENTSTEHEN.
|
||||
|
||||
SOLLTE EINE BESTIMMUNG DIESER LIZENZ ALS UNWIRKSAM ODER NICHT DURCHSETZBAR ANGESEHEN WERDEN, IST DIESE BESTIMMUNG SO AUSZULEGEN, DASS SIE DEM URSPRÜNGLICHEN WILLEN DER PARTEIEN MÖGLICHST NAHEKOMMT; DIE ÜBRIGEN BESTIMMUNGEN BLEIBEN DAVON UNBERÜHRT UND IN VOLLER WIRKUNG.
|
||||
143
docs/LICENSE/LICENSE.en.md
Normal file
143
docs/LICENSE/LICENSE.en.md
Normal file
@@ -0,0 +1,143 @@
|
||||
###### TELEMT Public License 3 ######
|
||||
##### Copyright (c) 2026 Telemt #####
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this Software and associated documentation files (the "Software"),
|
||||
to use, reproduce, modify, prepare derivative works of, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to permit
|
||||
persons to whom the Software is furnished to do so, provided that all
|
||||
copyright notices, license terms, and conditions set forth in this License
|
||||
are preserved and complied with.
|
||||
|
||||
### Official Translations
|
||||
|
||||
The canonical version of this License is the English version.
|
||||
|
||||
Official translations are provided for informational purposes only
|
||||
and for convenience, and do not have legal force. In case of any
|
||||
discrepancy, the English version of this License shall prevail.
|
||||
|
||||
Available versions:
|
||||
- English in Markdown: docs/LICENSE/LICENSE.md
|
||||
- German: docs/LICENSE/LICENSE.de.md
|
||||
- Russian: docs/LICENSE/LICENSE.ru.md
|
||||
|
||||
### Definitions
|
||||
|
||||
For the purposes of this License:
|
||||
|
||||
"Software" means the Telemt software, including source code, documentation,
|
||||
and any associated files distributed under this License.
|
||||
|
||||
"Contributor" means any person or entity that submits code, patches,
|
||||
documentation, or other contributions to the Software that are accepted
|
||||
into the Software by the maintainers.
|
||||
|
||||
"Contribution" means any work of authorship intentionally submitted
|
||||
to the Software for inclusion in the Software.
|
||||
|
||||
"Modified Version" means any version of the Software that has been
|
||||
changed, adapted, extended, or otherwise modified from the original
|
||||
Software.
|
||||
|
||||
"Maintainers" means the individuals or entities responsible for
|
||||
the official Telemt project and its releases.
|
||||
|
||||
#### 1 Attribution
|
||||
|
||||
Redistributions of the Software, in source or binary form, MUST RETAIN the
|
||||
above copyright notice, this license text, and any existing attribution
|
||||
notices.
|
||||
|
||||
#### 2 Modification Notice
|
||||
|
||||
If you modify the Software, you MUST clearly state that the Software has been
|
||||
modified and include a brief description of the changes made.
|
||||
|
||||
Modified versions MUST NOT be presented as the original Telemt.
|
||||
|
||||
#### 3 Trademark and Branding
|
||||
|
||||
This license DOES NOT grant permission to use the name "Telemt",
|
||||
the Telemt logo, or any Telemt trademarks or branding.
|
||||
|
||||
Redistributed or modified versions of the Software MAY NOT use the Telemt
|
||||
name in a way that suggests endorsement or official origin without explicit
|
||||
permission from the Telemt maintainers.
|
||||
|
||||
Use of the name "Telemt" to describe a modified version of the Software
|
||||
is permitted only if the modified version is clearly identified as a
|
||||
modified or unofficial version.
|
||||
|
||||
Any distribution that could reasonably confuse users into believing that
|
||||
the software is an official Telemt release is prohibited.
|
||||
|
||||
#### 4 Binary Distribution Transparency
|
||||
|
||||
If you distribute compiled binaries of the Software,
|
||||
you are ENCOURAGED to provide access to the corresponding
|
||||
source code and build instructions where reasonably possible.
|
||||
|
||||
This helps preserve transparency and allows recipients to verify the
|
||||
integrity and reproducibility of distributed builds.
|
||||
|
||||
#### 5 Patent Grant and Defensive Termination Clause
|
||||
|
||||
Each contributor grants you a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Software.
|
||||
|
||||
This patent license applies only to those patent claims necessarily
|
||||
infringed by the contributor’s contribution alone or by combination of
|
||||
their contribution with the Software.
|
||||
|
||||
If you initiate or participate in any patent litigation, including
|
||||
cross-claims or counterclaims, alleging that the Software or any
|
||||
contribution incorporated within the Software constitutes patent
|
||||
infringement, then **all rights granted to you under this license shall
|
||||
terminate immediately** as of the date such litigation is filed.
|
||||
|
||||
Additionally, if you initiate legal action alleging that the
|
||||
Software itself infringes your patent or other intellectual
|
||||
property rights, then all rights granted to you under this
|
||||
license SHALL TERMINATE automatically.
|
||||
|
||||
#### 6 Contributions
|
||||
|
||||
Unless you explicitly state otherwise, any Contribution intentionally
|
||||
submitted for inclusion in the Software shall be licensed under the terms
|
||||
of this License.
|
||||
|
||||
By submitting a Contribution, you grant the Telemt maintainers and all
|
||||
recipients of the Software the rights described in this License with
|
||||
respect to that Contribution.
|
||||
|
||||
#### 7 Network Use Attribution
|
||||
|
||||
If the Software is used to provide a publicly accessible network service,
|
||||
the operator of such service MUST provide attribution to Telemt in at least
|
||||
one of the following locations:
|
||||
|
||||
- service documentation
|
||||
- service description
|
||||
- an "About" or similar informational page
|
||||
- other user-visible materials reasonably associated with the service
|
||||
|
||||
Such attribution MUST NOT imply endorsement by the Telemt project or its
|
||||
maintainers.
|
||||
|
||||
#### 8 Disclaimer of Warranty and Severability Clause
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
|
||||
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
|
||||
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
|
||||
USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
IF ANY PROVISION OF THIS LICENSE IS HELD TO BE INVALID OR UNENFORCEABLE,
|
||||
SUCH PROVISION SHALL BE INTERPRETED TO REFLECT THE ORIGINAL INTENT
|
||||
OF THE PARTIES AS CLOSELY AS POSSIBLE, AND THE REMAINING PROVISIONS
|
||||
SHALL REMAIN IN FULL FORCE AND EFFECT.
|
||||
90
docs/LICENSE/LICENSE.ru.md
Normal file
90
docs/LICENSE/LICENSE.ru.md
Normal file
@@ -0,0 +1,90 @@
|
||||
# Публичная лицензия TELEMT 3
|
||||
|
||||
***Все права защищены (c) 2026 Telemt***
|
||||
|
||||
Настоящим любому лицу, получившему копию данного программного обеспечения и сопутствующей документации (далее — "Программное обеспечение"), безвозмездно предоставляется разрешение использовать Программное обеспечение без ограничений, включая право использовать, воспроизводить, изменять, создавать производные произведения, объединять, публиковать, распространять, сублицензировать и (или) продавать копии Программного обеспечения, а также предоставлять такие права лицам, которым предоставляется Программное обеспечение, при условии соблюдения всех уведомлений об авторских правах, условий и положений настоящей Лицензии.
|
||||
|
||||
### Определения
|
||||
|
||||
Для целей настоящей Лицензии применяются следующие определения:
|
||||
|
||||
**"Программное обеспечение" (Software)** — программное обеспечение Telemt, включая исходный код, документацию и любые связанные файлы, распространяемые на условиях настоящей Лицензии.
|
||||
|
||||
**"Контрибьютор" (Contributor)** — любое физическое или юридическое лицо, направившее код, исправления (патчи), документацию или иные материалы, которые были приняты мейнтейнерами проекта и включены в состав Программного обеспечения.
|
||||
|
||||
**"Вклад" (Contribution)** — любое произведение авторского права, намеренно представленное для включения в состав Программного обеспечения.
|
||||
|
||||
**"Модифицированная версия" (Modified Version)** — любая версия Программного обеспечения, которая была изменена, адаптирована, расширена или иным образом модифицирована по сравнению с исходным Программным обеспечением.
|
||||
|
||||
**"Мейнтейнеры" (Maintainers)** — физические или юридические лица, ответственные за официальный проект Telemt и его официальные релизы.
|
||||
|
||||
### 1 Указание авторства
|
||||
|
||||
При распространении Программного обеспечения, как в форме исходного кода, так и в бинарной форме, ДОЛЖНЫ СОХРАНЯТЬСЯ:
|
||||
|
||||
- указанное выше уведомление об авторских правах;
|
||||
- текст настоящей Лицензии;
|
||||
- любые существующие уведомления об авторстве.
|
||||
|
||||
### 2 Уведомление о модификации
|
||||
|
||||
В случае внесения изменений в Программное обеспечение лицо, осуществившее такие изменения, ОБЯЗАНО явно указать, что Программное обеспечение было модифицировано, а также включить краткое описание внесённых изменений.
|
||||
|
||||
Модифицированные версии Программного обеспечения НЕ ДОЛЖНЫ представляться как оригинальная версия Telemt.
|
||||
|
||||
### 3 Товарные знаки и обозначения
|
||||
|
||||
Настоящая Лицензия НЕ ПРЕДОСТАВЛЯЕТ права использовать наименование **"Telemt"**, логотип Telemt, а также любые товарные знаки, фирменные обозначения или элементы бренда Telemt.
|
||||
|
||||
Распространяемые или модифицированные версии Программного обеспечения НЕ ДОЛЖНЫ использовать наименование Telemt таким образом, который может создавать у пользователей впечатление официального происхождения либо одобрения со стороны проекта Telemt без явного разрешения мейнтейнеров проекта.
|
||||
|
||||
Использование наименования **Telemt** для описания модифицированной версии Программного обеспечения допускается только при условии, что такая версия ясно обозначена как модифицированная или неофициальная.
|
||||
|
||||
Запрещается любое распространение, которое может разумно вводить пользователей в заблуждение относительно того, что программное обеспечение является официальным релизом Telemt.
|
||||
|
||||
### 4 Прозрачность распространения бинарных версий
|
||||
|
||||
В случае распространения скомпилированных бинарных версий Программного обеспечения распространитель НАСТОЯЩИМ ПОБУЖДАЕТСЯ предоставлять доступ к соответствующему исходному коду и инструкциям по сборке, если это разумно возможно.
|
||||
|
||||
Такая практика способствует прозрачности распространения и позволяет получателям проверять целостность и воспроизводимость распространяемых сборок.
|
||||
|
||||
### 5 Предоставление патентной лицензии и прекращение прав
|
||||
|
||||
Каждый контрибьютор предоставляет получателям Программного обеспечения бессрочную, всемирную, неисключительную, безвозмездную, не требующую выплаты роялти и безотзывную патентную лицензию на:
|
||||
|
||||
- изготовление,
|
||||
- поручение изготовления,
|
||||
- использование,
|
||||
- предложение к продаже,
|
||||
- продажу,
|
||||
- импорт,
|
||||
- и иное распространение Программного обеспечения.
|
||||
|
||||
Такая патентная лицензия распространяется исключительно на те патентные требования, которые неизбежно нарушаются соответствующим вкладом контрибьютора как таковым либо его сочетанием с Программным обеспечением.
|
||||
|
||||
Если лицо инициирует либо участвует в каком-либо судебном разбирательстве по патентному спору, включая встречные или перекрёстные иски, утверждая, что Программное обеспечение либо любой вклад, включённый в него, нарушает патент, **все права, предоставленные такому лицу настоящей Лицензией, немедленно прекращаются** с даты подачи соответствующего иска.
|
||||
|
||||
Кроме того, если лицо инициирует судебное разбирательство, утверждая, что само Программное обеспечение нарушает его патентные либо иные права интеллектуальной собственности, все права, предоставленные настоящей Лицензией, **автоматически прекращаются**.
|
||||
|
||||
### 6 Участие и вклад в разработку
|
||||
|
||||
Если контрибьютор явно не указал иное, любой Вклад, намеренно представленный для включения в Программное обеспечение, считается лицензированным на условиях настоящей Лицензии.
|
||||
Путём предоставления Вклада контрибьютор предоставляет мейнтейнерам проекта Telemt и всем получателям Программного обеспечения права, предусмотренные настоящей Лицензией, в отношении такого Вклада.
|
||||
|
||||
### 7 Указание авторства при сетевом и сервисном использовании
|
||||
|
||||
В случае использования Программного обеспечения для предоставления публично доступного сетевого сервиса оператор такого сервиса ОБЯЗАН обеспечить указание авторства Telemt как минимум в одном из следующих мест:
|
||||
- документация сервиса;
|
||||
- описание сервиса;
|
||||
- страница "О программе" или аналогичная информационная страница;
|
||||
- иные материалы, доступные пользователям и разумно связанные с данным сервисом.
|
||||
|
||||
Такое указание авторства НЕ ДОЛЖНО создавать впечатление одобрения или официальной поддержки со стороны проекта Telemt либо его мейнтейнеров.
|
||||
|
||||
### 8 Отказ от гарантий и делимость положений
|
||||
|
||||
ПРОГРАММНОЕ ОБЕСПЕЧЕНИЕ ПРЕДОСТАВЛЯЕТСЯ "КАК ЕСТЬ", БЕЗ КАКИХ-ЛИБО ГАРАНТИЙ, ЯВНЫХ ИЛИ ПОДРАЗУМЕВАЕМЫХ, ВКЛЮЧАЯ, НО НЕ ОГРАНИЧИВАЯСЬ ГАРАНТИЯМИ КОММЕРЧЕСКОЙ ПРИГОДНОСТИ, ПРИГОДНОСТИ ДЛЯ КОНКРЕТНОЙ ЦЕЛИ И НЕНАРУШЕНИЯ ПРАВ.
|
||||
|
||||
НИ ПРИ КАКИХ ОБСТОЯТЕЛЬСТВАХ АВТОРЫ ИЛИ ПРАВООБЛАДАТЕЛИ НЕ НЕСУТ ОТВЕТСТВЕННОСТИ ПО КАКИМ-ЛИБО ТРЕБОВАНИЯМ, УБЫТКАМ ИЛИ ИНОЙ ОТВЕТСТВЕННОСТИ, ВОЗНИКАЮЩЕЙ В РЕЗУЛЬТАТЕ ДОГОВОРА, ДЕЛИКТА ИЛИ ИНЫМ ОБРАЗОМ, СВЯЗАННЫМ С ПРОГРАММНЫМ ОБЕСПЕЧЕНИЕМ ИЛИ ЕГО ИСПОЛЬЗОВАНИЕМ.
|
||||
|
||||
В СЛУЧАЕ ЕСЛИ КАКОЕ-ЛИБО ПОЛОЖЕНИЕ НАСТОЯЩЕЙ ЛИЦЕНЗИИ ПРИЗНАЁТСЯ НЕДЕЙСТВИТЕЛЬНЫМ ИЛИ НЕПРИМЕНИМЫМ, ТАКОЕ ПОЛОЖЕНИЕ ПОДЛЕЖИТ ТОЛКОВАНИЮ МАКСИМАЛЬНО БЛИЗКО К ИСХОДНОМУ НАМЕРЕНИЮ СТОРОН, ПРИ ЭТОМ ОСТАЛЬНЫЕ ПОЛОЖЕНИЯ СОХРАНЯЮТ ПОЛНУЮ ЮРИДИЧЕСКУЮ СИЛУ.
|
||||
@@ -72,6 +72,9 @@ classic = false
|
||||
secure = false
|
||||
tls = true
|
||||
|
||||
[server]
|
||||
port = 443
|
||||
|
||||
[server.api]
|
||||
enabled = true
|
||||
# listen = "127.0.0.1:9091"
|
||||
@@ -178,6 +181,8 @@ docker compose down
|
||||
docker build -t telemt:local .
|
||||
docker run --name telemt --restart unless-stopped \
|
||||
-p 443:443 \
|
||||
-p 9090:9090 \
|
||||
-p 9091:9091 \
|
||||
-e RUST_LOG=info \
|
||||
-v "$PWD/config.toml:/app/config.toml:ro" \
|
||||
--read-only \
|
||||
|
||||
@@ -72,6 +72,9 @@ classic = false
|
||||
secure = false
|
||||
tls = true
|
||||
|
||||
[server]
|
||||
port = 443
|
||||
|
||||
[server.api]
|
||||
enabled = true
|
||||
# listen = "127.0.0.1:9091"
|
||||
@@ -175,11 +178,13 @@ docker compose down
|
||||
> - По умолчанию публикуются порты 443:443, а контейнер запускается со сброшенными привилегиями (добавлена только `NET_BIND_SERVICE`)
|
||||
> - Если вам действительно нужна сеть хоста (обычно это требуется только для некоторых конфигураций IPv6), раскомментируйте `network_mode: host`
|
||||
|
||||
**Запуск в Docker Compose**
|
||||
**Запуск без Docker Compose**
|
||||
```bash
|
||||
docker build -t telemt:local .
|
||||
docker run --name telemt --restart unless-stopped \
|
||||
-p 443:443 \
|
||||
-p 9090:9090 \
|
||||
-p 9091:9091 \
|
||||
-e RUST_LOG=info \
|
||||
-v "$PWD/config.toml:/app/config.toml:ro" \
|
||||
--read-only \
|
||||
|
||||
@@ -82,7 +82,7 @@ Die unten angegebenen `Default`-Werte sind Code-Defaults (bei fehlendem Schlüss
|
||||
|
||||
| Feld | Gilt für | Typ | Pflicht | Default | Bedeutung |
|
||||
|---|---|---|---|---|---|
|
||||
| `[[upstreams]].type` | alle Upstreams | `"direct" \| "socks4" \| "socks5"` | ja | n/a | Upstream-Transporttyp. |
|
||||
| `[[upstreams]].type` | alle Upstreams | `"direct" \| "socks4" \| "socks5" \| "shadowsocks"` | ja | n/a | Upstream-Transporttyp. |
|
||||
| `[[upstreams]].weight` | alle Upstreams | `u16` | nein | `1` | Basisgewicht für weighted-random Auswahl. |
|
||||
| `[[upstreams]].enabled` | alle Upstreams | `bool` | nein | `true` | Deaktivierte Einträge werden beim Start ignoriert. |
|
||||
| `[[upstreams]].scopes` | alle Upstreams | `String` | nein | `""` | Komma-separierte Scope-Tags für Request-Routing. |
|
||||
@@ -95,6 +95,8 @@ Die unten angegebenen `Default`-Werte sind Code-Defaults (bei fehlendem Schlüss
|
||||
| `interface` | `socks5` | `Option<String>` | nein | `null` | Wird nur genutzt, wenn `address` als `ip:port` angegeben ist. |
|
||||
| `username` | `socks5` | `Option<String>` | nein | `null` | SOCKS5 Benutzername. |
|
||||
| `password` | `socks5` | `Option<String>` | nein | `null` | SOCKS5 Passwort. |
|
||||
| `url` | `shadowsocks` | `String` | ja | n/a | Shadowsocks-SIP002-URL (`ss://...`). In Runtime-APIs wird nur `host:port` offengelegt. |
|
||||
| `interface` | `shadowsocks` | `Option<String>` | nein | `null` | Optionales ausgehendes Bind-Interface oder lokale Literal-IP. |
|
||||
|
||||
### Runtime-Regeln (wichtig)
|
||||
|
||||
@@ -115,6 +117,7 @@ Die unten angegebenen `Default`-Werte sind Code-Defaults (bei fehlendem Schlüss
|
||||
8. Im ME-Modus wird der gewählte Upstream auch für den ME-TCP-Dial-Pfad verwendet.
|
||||
9. Im ME-Modus ist bei `direct` mit bind/interface die STUN-Reflection bind-aware für KDF-Adressmaterial.
|
||||
10. Im ME-Modus werden bei SOCKS-Upstream `BND.ADDR/BND.PORT` für KDF verwendet, wenn gültig/öffentlich und gleiche IP-Familie.
|
||||
11. `shadowsocks`-Upstreams erfordern `general.use_middle_proxy = false`. Mit aktiviertem ME-Modus schlägt das Laden der Config sofort fehl.
|
||||
|
||||
## Upstream-Konfigurationsbeispiele
|
||||
|
||||
@@ -150,7 +153,20 @@ weight = 2
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Beispiel 4: Gemischte Upstreams mit Scopes
|
||||
### Beispiel 4: Shadowsocks-Upstream
|
||||
|
||||
```toml
|
||||
[general]
|
||||
use_middle_proxy = false
|
||||
|
||||
[[upstreams]]
|
||||
type = "shadowsocks"
|
||||
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@198.51.100.50:8388"
|
||||
weight = 2
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Beispiel 5: Gemischte Upstreams mit Scopes
|
||||
|
||||
```toml
|
||||
[[upstreams]]
|
||||
|
||||
@@ -82,7 +82,7 @@ Defaults below are code defaults (used when a key is omitted), not necessarily v
|
||||
|
||||
| Field | Applies to | Type | Required | Default | Meaning |
|
||||
|---|---|---|---|---|---|
|
||||
| `[[upstreams]].type` | all upstreams | `"direct" \| "socks4" \| "socks5"` | yes | n/a | Upstream transport type. |
|
||||
| `[[upstreams]].type` | all upstreams | `"direct" \| "socks4" \| "socks5" \| "shadowsocks"` | yes | n/a | Upstream transport type. |
|
||||
| `[[upstreams]].weight` | all upstreams | `u16` | no | `1` | Base weight for weighted-random selection. |
|
||||
| `[[upstreams]].enabled` | all upstreams | `bool` | no | `true` | Disabled entries are ignored at startup. |
|
||||
| `[[upstreams]].scopes` | all upstreams | `String` | no | `""` | Comma-separated scope tags for request-level routing. |
|
||||
@@ -95,6 +95,8 @@ Defaults below are code defaults (used when a key is omitted), not necessarily v
|
||||
| `interface` | `socks5` | `Option<String>` | no | `null` | Used only for SOCKS server `ip:port` dial path. |
|
||||
| `username` | `socks5` | `Option<String>` | no | `null` | SOCKS5 username auth. |
|
||||
| `password` | `socks5` | `Option<String>` | no | `null` | SOCKS5 password auth. |
|
||||
| `url` | `shadowsocks` | `String` | yes | n/a | Shadowsocks SIP002 URL (`ss://...`). Only `host:port` is exposed in runtime APIs. |
|
||||
| `interface` | `shadowsocks` | `Option<String>` | no | `null` | Optional outgoing bind interface or literal local IP. |
|
||||
|
||||
### Runtime rules (important)
|
||||
|
||||
@@ -115,6 +117,7 @@ Defaults below are code defaults (used when a key is omitted), not necessarily v
|
||||
8. In ME mode, the selected upstream is also used for ME TCP dial path.
|
||||
9. In ME mode for `direct` upstream with bind/interface, STUN reflection logic is bind-aware for KDF source material.
|
||||
10. In ME mode for SOCKS upstream, SOCKS `BND.ADDR/BND.PORT` is used for KDF when it is valid/public for the same family.
|
||||
11. `shadowsocks` upstreams require `general.use_middle_proxy = false`. Config load fails fast if ME mode is enabled.
|
||||
|
||||
## Upstream Configuration Examples
|
||||
|
||||
@@ -150,7 +153,20 @@ weight = 2
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Example 4: Mixed upstreams with scopes
|
||||
### Example 4: Shadowsocks upstream
|
||||
|
||||
```toml
|
||||
[general]
|
||||
use_middle_proxy = false
|
||||
|
||||
[[upstreams]]
|
||||
type = "shadowsocks"
|
||||
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@198.51.100.50:8388"
|
||||
weight = 2
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Example 5: Mixed upstreams with scopes
|
||||
|
||||
```toml
|
||||
[[upstreams]]
|
||||
|
||||
@@ -82,7 +82,7 @@
|
||||
|
||||
| Поле | Применимость | Тип | Обязательно | Default | Назначение |
|
||||
|---|---|---|---|---|---|
|
||||
| `[[upstreams]].type` | все upstream | `"direct" \| "socks4" \| "socks5"` | да | n/a | Тип upstream транспорта. |
|
||||
| `[[upstreams]].type` | все upstream | `"direct" \| "socks4" \| "socks5" \| "shadowsocks"` | да | n/a | Тип upstream транспорта. |
|
||||
| `[[upstreams]].weight` | все upstream | `u16` | нет | `1` | Базовый вес в weighted-random выборе. |
|
||||
| `[[upstreams]].enabled` | все upstream | `bool` | нет | `true` | Выключенные записи игнорируются на старте. |
|
||||
| `[[upstreams]].scopes` | все upstream | `String` | нет | `""` | Список scope-токенов через запятую для маршрутизации. |
|
||||
@@ -95,6 +95,8 @@
|
||||
| `interface` | `socks5` | `Option<String>` | нет | `null` | Используется только если `address` задан как `ip:port`. |
|
||||
| `username` | `socks5` | `Option<String>` | нет | `null` | Логин SOCKS5 auth. |
|
||||
| `password` | `socks5` | `Option<String>` | нет | `null` | Пароль SOCKS5 auth. |
|
||||
| `url` | `shadowsocks` | `String` | да | n/a | Shadowsocks SIP002 URL (`ss://...`). В runtime API раскрывается только `host:port`. |
|
||||
| `interface` | `shadowsocks` | `Option<String>` | нет | `null` | Необязательный исходящий bind-интерфейс или literal локальный IP. |
|
||||
|
||||
### Runtime-правила
|
||||
|
||||
@@ -115,6 +117,7 @@
|
||||
8. В ME-режиме выбранный upstream также используется для ME TCP dial path.
|
||||
9. В ME-режиме для `direct` upstream с bind/interface STUN-рефлексия выполняется bind-aware для KDF материала.
|
||||
10. В ME-режиме для SOCKS upstream используются `BND.ADDR/BND.PORT` для KDF, если адрес валиден/публичен и соответствует IP family.
|
||||
11. `shadowsocks` upstream требует `general.use_middle_proxy = false`. При включенном ME-режиме конфиг отклоняется при загрузке.
|
||||
|
||||
## Примеры конфигурации Upstreams
|
||||
|
||||
@@ -150,7 +153,20 @@ weight = 2
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Пример 4: смешанные upstream с scopes
|
||||
### Пример 4: Shadowsocks upstream
|
||||
|
||||
```toml
|
||||
[general]
|
||||
use_middle_proxy = false
|
||||
|
||||
[[upstreams]]
|
||||
type = "shadowsocks"
|
||||
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@198.51.100.50:8388"
|
||||
weight = 2
|
||||
enabled = true
|
||||
```
|
||||
|
||||
### Пример 5: смешанные upstream с scopes
|
||||
|
||||
```toml
|
||||
[[upstreams]]
|
||||
|
||||
278
docs/fronting-splitting/TLS-F-TCP-S.ru.md
Normal file
278
docs/fronting-splitting/TLS-F-TCP-S.ru.md
Normal file
@@ -0,0 +1,278 @@
|
||||
# TLS-F и TCP-S в Telemt
|
||||
|
||||
## Общая архитектура
|
||||
|
||||
**Telemt** - это прежде всего реализация **MTProxy**, через которую проходит payload Telegram
|
||||
|
||||
Подсистема **TLS-Fronting / TCP-Splitting** служит **маскировочным транспортным слоем**, задача которого - сделать MTProxy-соединение внешне похожим на обычное TLS-подключение к легитимному сайту
|
||||
|
||||
Таким образом:
|
||||
|
||||
- **MTProxy** - основной функциональный слой Telemt для обработки Telegram-трафика
|
||||
- **TLS-Fronting / TCP-Splitting** - подсистема маскировки транспорта
|
||||
|
||||
С точки зрения сети Telemt ведёт себя как **TLS-сервер**, но фактически:
|
||||
|
||||
- валидные MTProxy-клиенты остаются внутри контура Telemt
|
||||
- любые другие TLS-клиенты проксируются на обычный HTTPS-сервер-заглушку
|
||||
|
||||
# Базовый сценарий / Best-practice
|
||||
|
||||
Предположим, у вас есть домен:
|
||||
|
||||
```
|
||||
umweltschutz.de
|
||||
```
|
||||
|
||||
### 1 DNS
|
||||
|
||||
Вы создаёте A-запись:
|
||||
|
||||
```
|
||||
umweltschutz.de -> A-запись 198.18.88.88
|
||||
```
|
||||
|
||||
где `198.18.88.88` - IP вашего сервера с telemt
|
||||
|
||||
### 2 TLS-домен
|
||||
|
||||
В конфигурации Telemt:
|
||||
|
||||
```toml
|
||||
[censorship]
|
||||
tls_domain = "umweltschutz.de"
|
||||
```
|
||||
|
||||
Этот домен используется клиентом как SNI в ClientHello
|
||||
|
||||
### 3 Сервер-заглушка
|
||||
|
||||
Вы поднимаете обычный HTTPS-сервер, например **nginx**, с сертификатом для этого домена.
|
||||
|
||||
Он может работать:
|
||||
|
||||
- на том же сервере
|
||||
- на другом сервере
|
||||
- на другом порту
|
||||
|
||||
В конфигурации Telemt:
|
||||
|
||||
```toml
|
||||
[censorship]
|
||||
mask_host = "127.0.0.1"
|
||||
mask_port = 8443
|
||||
```
|
||||
|
||||
где `127.0.0.1` - IP сервера-заглушки, а 8443 - порт, который он слушает
|
||||
|
||||
Этот сервер нужен **для обработки любых non-MTProxy запросов**
|
||||
|
||||
### 4 Работа Telemt
|
||||
|
||||
После запуска Telemt действует следующим образом:
|
||||
|
||||
1) принимает входящее TCP-соединение
|
||||
2) анализирует TLS-ClientHello
|
||||
3) пытается определить, является ли соединение валидным **MTProxy FakeTLS**
|
||||
|
||||
Далее работают два варианта логики:
|
||||
|
||||
---
|
||||
|
||||
# Сценарий 1 - MTProxy клиент с валидным ключом
|
||||
|
||||
Если клиент предъявил **валидный MTProxy-ключ**:
|
||||
|
||||
- соединение **остаётся внутри Telemt**
|
||||
- TLS используется только как **транспортная маскировка**
|
||||
- далее запускается обычная логика **MTProxy**
|
||||
|
||||
Для внешнего наблюдателя это выглядит как:
|
||||
|
||||
```
|
||||
TLS connection -> umweltschutz.de
|
||||
```
|
||||
|
||||
Хотя внутри передаётся **MTProto-трафик Telegram**
|
||||
|
||||
# Сценарий 2 - обычный TLS-клиент - crawler / scanner / browser
|
||||
|
||||
Если Telemt не обнаруживает валидный MTProxy-ключ:
|
||||
|
||||
соединение **переключается в режим TCP-Splitting / TCP-Splicing**.
|
||||
|
||||
В этом режиме Telemt:
|
||||
|
||||
1. открывает новое TCP-соединение к
|
||||
|
||||
```
|
||||
mask_host:mask_port
|
||||
```
|
||||
|
||||
2. начинает **проксировать TCP-трафик**
|
||||
|
||||
Важно:
|
||||
|
||||
* клиентский TLS-запрос **НЕ модифицируется**
|
||||
* **ClientHello передаётся "как есть", без изменений**
|
||||
* **SNI остаётся неизменным**
|
||||
* Telemt **не завершает TLS-рукопожатие**, а только перенаправляет его на более низком уровне сетевого стека - L4
|
||||
|
||||
Таким образом upstream-сервер получает **оригинальное TLS-соединение клиента**:
|
||||
|
||||
- если это nginx-заглушка, он просто отдаёт обычный сайт
|
||||
- для внешнего наблюдателя это выглядит как обычный HTTPS-сервер
|
||||
|
||||
# TCP-S / TCP-Splitting / TCP-Splicing
|
||||
|
||||
Ключевые свойства механизма:
|
||||
|
||||
**Telemt работает как TCP-переключатель:**
|
||||
|
||||
1) принимает соединение
|
||||
2) определяет тип клиента
|
||||
3) либо:
|
||||
|
||||
- обрабатывает MTProxy внутри
|
||||
- либо проксирует TCP-поток
|
||||
|
||||
При проксировании:
|
||||
|
||||
- Telemt **разрешает `mask_host` в IP**
|
||||
- устанавливает TCP-соединение
|
||||
- начинает **bidirectional TCP relay**
|
||||
|
||||
При этом:
|
||||
|
||||
- TLS-рукопожатие происходит **между клиентом и `mask_host`**
|
||||
- Telemt выступает только **на уровне L4 - как TCP-релей**, такой же как HAProxy в TCP-режиме
|
||||
|
||||
# Использование чужого домена
|
||||
|
||||
Можно использовать и внешний сайт.
|
||||
|
||||
Например:
|
||||
|
||||
```toml
|
||||
[censorship]
|
||||
tls_domain = "github.com"
|
||||
mask_host = "github.com"
|
||||
mask_port = 443
|
||||
```
|
||||
|
||||
или
|
||||
|
||||
```toml
|
||||
[censorship]
|
||||
mask_host = "140.82.121.4"
|
||||
```
|
||||
|
||||
В этом случае:
|
||||
|
||||
- цензор видит **TLS-подключение к github.com**
|
||||
- обычные клиенты/краулер действительно получают **настоящий GitHub**
|
||||
|
||||
Telemt просто **проксирует TCP-соединение на GitHub**
|
||||
|
||||
# Что видит анализатор трафика?
|
||||
|
||||
Для DPI это выглядит так:
|
||||
|
||||
```
|
||||
client -> TLS -> github.com
|
||||
```
|
||||
|
||||
или
|
||||
|
||||
```
|
||||
client -> TLS -> umweltschutz.de
|
||||
```
|
||||
|
||||
TLS-handshake выглядит валидным, SNI соответствует домену, сертификат корректный - от целевого `mask_host:mask_port`
|
||||
|
||||
# Что видит сканер / краулер?
|
||||
|
||||
Если сканер попытается подключиться:
|
||||
|
||||
```
|
||||
openssl s_client -connect 198.18.88.88:443 -servername umweltschutz.de
|
||||
```
|
||||
|
||||
он получит **обычный HTTPS-сайт-заглушку**
|
||||
|
||||
Потому что:
|
||||
|
||||
- он не предъявил MTProxy-ключ
|
||||
- Telemt отправил соединение на `mask_host:mask_port`, на котором находится nginx
|
||||
|
||||
# Какую проблему решает TLS-Fronting / TCP-Splitting?
|
||||
|
||||
Эта архитектура решает сразу несколько проблем обхода цензуры.
|
||||
|
||||
## 1 Закрытие плоскости MTProxy от активного сканирования
|
||||
|
||||
Многие цензоры:
|
||||
|
||||
- сканируют IP-адреса
|
||||
- проверяют известные сигнатуры прокси
|
||||
|
||||
Telemt отвечает на такие проверки **обычным HTTPS-сайтом**, поэтому прокси невозможно обнаружить простым сканированием
|
||||
|
||||
---
|
||||
|
||||
## 2 Маскировка трафика под легитимный TLS
|
||||
|
||||
Для DPI-систем соединение выглядит как:
|
||||
|
||||
```
|
||||
обычный TLS-трафик к популярному домену
|
||||
```
|
||||
|
||||
Это делает блокировку значительно сложнее и непредсказуемее
|
||||
|
||||
---
|
||||
|
||||
## 3 Устойчивость к протокольному анализу
|
||||
|
||||
MTProxy трафик проходит **внутри TLS-like-потока**, поэтому:
|
||||
|
||||
- не видны характерные сигнатуры MTProto
|
||||
- соединение выглядит как обычный HTTPS
|
||||
|
||||
---
|
||||
|
||||
## 4 Правдоподобное поведение сервера
|
||||
|
||||
Даже если краулер:
|
||||
|
||||
- подключится сам
|
||||
- выполнит TLS-handshake
|
||||
- попытается получить HTTP-ответ
|
||||
|
||||
он увидит **реальный сайт**, а не telemt
|
||||
|
||||
Это устраняет один из главных признаков для антифрод-краулеров мобильных операторов
|
||||
|
||||
# Схема
|
||||
|
||||
```text
|
||||
Client
|
||||
│
|
||||
│ TCP
|
||||
│
|
||||
V
|
||||
Telemt
|
||||
│
|
||||
├── valid MTProxy key
|
||||
│ │
|
||||
│ V
|
||||
│ MTProxy logic
|
||||
│
|
||||
└── обычный TLS клиент
|
||||
│
|
||||
V
|
||||
TCP-Splitting
|
||||
│
|
||||
V
|
||||
mask_host:mask_port
|
||||
```
|
||||
BIN
docs/model/FakeTLS.png
Normal file
BIN
docs/model/FakeTLS.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 650 KiB |
BIN
docs/model/architecture.png
Normal file
BIN
docs/model/architecture.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 838 KiB |
595
install.sh
595
install.sh
@@ -3,113 +3,554 @@ set -eu
|
||||
|
||||
REPO="${REPO:-telemt/telemt}"
|
||||
BIN_NAME="${BIN_NAME:-telemt}"
|
||||
VERSION="${1:-${VERSION:-latest}}"
|
||||
INSTALL_DIR="${INSTALL_DIR:-/usr/local/bin}"
|
||||
INSTALL_DIR="${INSTALL_DIR:-/bin}"
|
||||
CONFIG_DIR="${CONFIG_DIR:-/etc/telemt}"
|
||||
CONFIG_FILE="${CONFIG_FILE:-${CONFIG_DIR}/telemt.toml}"
|
||||
WORK_DIR="${WORK_DIR:-/opt/telemt}"
|
||||
TLS_DOMAIN="${TLS_DOMAIN:-petrovich.ru}"
|
||||
SERVICE_NAME="telemt"
|
||||
TEMP_DIR=""
|
||||
SUDO=""
|
||||
CONFIG_PARENT_DIR=""
|
||||
SERVICE_START_FAILED=0
|
||||
|
||||
ACTION="install"
|
||||
TARGET_VERSION="${VERSION:-latest}"
|
||||
|
||||
while [ $# -gt 0 ]; do
|
||||
case "$1" in
|
||||
-h|--help) ACTION="help"; shift ;;
|
||||
uninstall|--uninstall)
|
||||
if [ "$ACTION" != "purge" ]; then ACTION="uninstall"; fi
|
||||
shift ;;
|
||||
purge|--purge) ACTION="purge"; shift ;;
|
||||
install|--install) ACTION="install"; shift ;;
|
||||
-*) printf '[ERROR] Unknown option: %s\n' "$1" >&2; exit 1 ;;
|
||||
*)
|
||||
if [ "$ACTION" = "install" ]; then TARGET_VERSION="$1"
|
||||
else printf '[WARNING] Ignoring extra argument: %s\n' "$1" >&2; fi
|
||||
shift ;;
|
||||
esac
|
||||
done
|
||||
|
||||
say() {
|
||||
printf '%s\n' "$*"
|
||||
if [ "$#" -eq 0 ] || [ -z "${1:-}" ]; then
|
||||
printf '\n'
|
||||
else
|
||||
printf '[INFO] %s\n' "$*"
|
||||
fi
|
||||
}
|
||||
die() { printf '[ERROR] %s\n' "$*" >&2; exit 1; }
|
||||
|
||||
write_root() { $SUDO sh -c 'cat > "$1"' _ "$1"; }
|
||||
|
||||
cleanup() {
|
||||
if [ -n "${TEMP_DIR:-}" ] && [ -d "$TEMP_DIR" ]; then
|
||||
rm -rf -- "$TEMP_DIR"
|
||||
fi
|
||||
}
|
||||
trap cleanup EXIT INT TERM
|
||||
|
||||
show_help() {
|
||||
say "Usage: $0 [ <version> | install | uninstall | purge | --help ]"
|
||||
say " <version> Install specific version (e.g. 3.3.15, default: latest)"
|
||||
say " install Install the latest version"
|
||||
say " uninstall Remove the binary and service (keeps config and user)"
|
||||
say " purge Remove everything including configuration, data, and user"
|
||||
exit 0
|
||||
}
|
||||
|
||||
die() {
|
||||
printf 'Error: %s\n' "$*" >&2
|
||||
exit 1
|
||||
check_os_entity() {
|
||||
if command -v getent >/dev/null 2>&1; then getent "$1" "$2" >/dev/null 2>&1
|
||||
else grep -q "^${2}:" "/etc/$1" 2>/dev/null; fi
|
||||
}
|
||||
|
||||
need_cmd() {
|
||||
command -v "$1" >/dev/null 2>&1 || die "required command not found: $1"
|
||||
normalize_path() {
|
||||
printf '%s\n' "$1" | tr -s '/' | sed 's|/$||; s|^$|/|'
|
||||
}
|
||||
|
||||
detect_os() {
|
||||
os="$(uname -s)"
|
||||
case "$os" in
|
||||
Linux) printf 'linux\n' ;;
|
||||
OpenBSD) printf 'openbsd\n' ;;
|
||||
*) printf '%s\n' "$os" ;;
|
||||
get_realpath() {
|
||||
path_in="$1"
|
||||
case "$path_in" in /*) ;; *) path_in="$(pwd)/$path_in" ;; esac
|
||||
|
||||
if command -v realpath >/dev/null 2>&1; then
|
||||
if realpath_out="$(realpath -m "$path_in" 2>/dev/null)"; then
|
||||
printf '%s\n' "$realpath_out"
|
||||
return
|
||||
fi
|
||||
fi
|
||||
|
||||
if command -v readlink >/dev/null 2>&1; then
|
||||
resolved_path="$(readlink -f "$path_in" 2>/dev/null || true)"
|
||||
if [ -n "$resolved_path" ]; then
|
||||
printf '%s\n' "$resolved_path"
|
||||
return
|
||||
fi
|
||||
fi
|
||||
|
||||
d="${path_in%/*}"; b="${path_in##*/}"
|
||||
if [ -z "$d" ]; then d="/"; fi
|
||||
if [ "$d" = "$path_in" ]; then d="/"; b="$path_in"; fi
|
||||
|
||||
if [ -d "$d" ]; then
|
||||
abs_d="$(cd "$d" >/dev/null 2>&1 && pwd || true)"
|
||||
if [ -n "$abs_d" ]; then
|
||||
if [ "$b" = "." ] || [ -z "$b" ]; then printf '%s\n' "$abs_d"
|
||||
elif [ "$abs_d" = "/" ]; then printf '/%s\n' "$b"
|
||||
else printf '%s/%s\n' "$abs_d" "$b"; fi
|
||||
else
|
||||
normalize_path "$path_in"
|
||||
fi
|
||||
else
|
||||
normalize_path "$path_in"
|
||||
fi
|
||||
}
|
||||
|
||||
get_svc_mgr() {
|
||||
if command -v systemctl >/dev/null 2>&1 && [ -d /run/systemd/system ]; then echo "systemd"
|
||||
elif command -v rc-service >/dev/null 2>&1; then echo "openrc"
|
||||
else echo "none"; fi
|
||||
}
|
||||
|
||||
verify_common() {
|
||||
[ -n "$BIN_NAME" ] || die "BIN_NAME cannot be empty."
|
||||
[ -n "$INSTALL_DIR" ] || die "INSTALL_DIR cannot be empty."
|
||||
[ -n "$CONFIG_DIR" ] || die "CONFIG_DIR cannot be empty."
|
||||
[ -n "$CONFIG_FILE" ] || die "CONFIG_FILE cannot be empty."
|
||||
|
||||
case "${INSTALL_DIR}${CONFIG_DIR}${WORK_DIR}${CONFIG_FILE}" in
|
||||
*[!a-zA-Z0-9_./-]*) die "Invalid characters in paths. Only alphanumeric, _, ., -, and / allowed." ;;
|
||||
esac
|
||||
|
||||
case "$TARGET_VERSION" in *[!a-zA-Z0-9_.-]*) die "Invalid characters in version." ;; esac
|
||||
case "$BIN_NAME" in *[!a-zA-Z0-9_-]*) die "Invalid characters in BIN_NAME." ;; esac
|
||||
|
||||
INSTALL_DIR="$(get_realpath "$INSTALL_DIR")"
|
||||
CONFIG_DIR="$(get_realpath "$CONFIG_DIR")"
|
||||
WORK_DIR="$(get_realpath "$WORK_DIR")"
|
||||
CONFIG_FILE="$(get_realpath "$CONFIG_FILE")"
|
||||
|
||||
CONFIG_PARENT_DIR="${CONFIG_FILE%/*}"
|
||||
if [ -z "$CONFIG_PARENT_DIR" ]; then CONFIG_PARENT_DIR="/"; fi
|
||||
if [ "$CONFIG_PARENT_DIR" = "$CONFIG_FILE" ]; then CONFIG_PARENT_DIR="."; fi
|
||||
|
||||
if [ "$(id -u)" -eq 0 ]; then
|
||||
SUDO=""
|
||||
else
|
||||
command -v sudo >/dev/null 2>&1 || die "This script requires root or sudo. Neither found."
|
||||
SUDO="sudo"
|
||||
if ! sudo -n true 2>/dev/null; then
|
||||
if ! [ -t 0 ]; then
|
||||
die "sudo requires a password, but no TTY detected. Aborting to prevent hang."
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ -n "$SUDO" ]; then
|
||||
if $SUDO sh -c '[ -d "$1" ]' _ "$CONFIG_FILE"; then
|
||||
die "Safety check failed: CONFIG_FILE '$CONFIG_FILE' is a directory."
|
||||
fi
|
||||
elif [ -d "$CONFIG_FILE" ]; then
|
||||
die "Safety check failed: CONFIG_FILE '$CONFIG_FILE' is a directory."
|
||||
fi
|
||||
|
||||
for path in "$CONFIG_DIR" "$CONFIG_PARENT_DIR" "$WORK_DIR"; do
|
||||
check_path="$(get_realpath "$path")"
|
||||
case "$check_path" in
|
||||
/|/bin|/sbin|/usr|/usr/bin|/usr/sbin|/usr/local|/usr/local/bin|/usr/local/sbin|/usr/local/etc|/usr/local/share|/etc|/var|/var/lib|/var/log|/var/run|/home|/root|/tmp|/lib|/lib64|/opt|/run|/boot|/dev|/sys|/proc)
|
||||
die "Safety check failed: '$path' (resolved to '$check_path') is a critical system directory." ;;
|
||||
esac
|
||||
done
|
||||
|
||||
check_install_dir="$(get_realpath "$INSTALL_DIR")"
|
||||
case "$check_install_dir" in
|
||||
/|/etc|/var|/home|/root|/tmp|/usr|/usr/local|/opt|/boot|/dev|/sys|/proc|/run)
|
||||
die "Safety check failed: INSTALL_DIR '$INSTALL_DIR' is a critical system directory." ;;
|
||||
esac
|
||||
|
||||
for cmd in id uname grep find rm chown chmod mv mktemp mkdir tr dd sed ps head sleep cat tar gzip rmdir; do
|
||||
command -v "$cmd" >/dev/null 2>&1 || die "Required command not found: $cmd"
|
||||
done
|
||||
}
|
||||
|
||||
verify_install_deps() {
|
||||
command -v curl >/dev/null 2>&1 || command -v wget >/dev/null 2>&1 || die "Neither curl nor wget is installed."
|
||||
command -v cp >/dev/null 2>&1 || command -v install >/dev/null 2>&1 || die "Need cp or install"
|
||||
|
||||
if ! command -v setcap >/dev/null 2>&1; then
|
||||
if command -v apk >/dev/null 2>&1; then
|
||||
$SUDO apk add --no-cache libcap-utils >/dev/null 2>&1 || $SUDO apk add --no-cache libcap >/dev/null 2>&1 || true
|
||||
elif command -v apt-get >/dev/null 2>&1; then
|
||||
$SUDO apt-get update -q >/dev/null 2>&1 || true
|
||||
$SUDO apt-get install -y -q libcap2-bin >/dev/null 2>&1 || true
|
||||
elif command -v dnf >/dev/null 2>&1; then $SUDO dnf install -y -q libcap >/dev/null 2>&1 || true
|
||||
elif command -v yum >/dev/null 2>&1; then $SUDO yum install -y -q libcap >/dev/null 2>&1 || true
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
detect_arch() {
|
||||
arch="$(uname -m)"
|
||||
case "$arch" in
|
||||
x86_64|amd64) printf 'x86_64\n' ;;
|
||||
aarch64|arm64) printf 'aarch64\n' ;;
|
||||
*) die "unsupported architecture: $arch" ;;
|
||||
sys_arch="$(uname -m)"
|
||||
case "$sys_arch" in
|
||||
x86_64|amd64) echo "x86_64" ;;
|
||||
aarch64|arm64) echo "aarch64" ;;
|
||||
*) die "Unsupported architecture: $sys_arch" ;;
|
||||
esac
|
||||
}
|
||||
|
||||
detect_libc() {
|
||||
case "$(ldd --version 2>&1 || true)" in
|
||||
*musl*) printf 'musl\n' ;;
|
||||
*) printf 'gnu\n' ;;
|
||||
esac
|
||||
for f in /lib/ld-musl-*.so.* /lib64/ld-musl-*.so.*; do
|
||||
if [ -e "$f" ]; then echo "musl"; return 0; fi
|
||||
done
|
||||
if grep -qE '^ID="?alpine"?' /etc/os-release 2>/dev/null; then echo "musl"; return 0; fi
|
||||
if command -v ldd >/dev/null 2>&1 && (ldd --version 2>&1 || true) | grep -qi musl; then echo "musl"; return 0; fi
|
||||
echo "gnu"
|
||||
}
|
||||
|
||||
fetch_to_stdout() {
|
||||
url="$1"
|
||||
if command -v curl >/dev/null 2>&1; then
|
||||
curl -fsSL "$url"
|
||||
elif command -v wget >/dev/null 2>&1; then
|
||||
wget -qO- "$url"
|
||||
else
|
||||
die "neither curl nor wget is installed"
|
||||
fetch_file() {
|
||||
if command -v curl >/dev/null 2>&1; then curl -fsSL "$1" -o "$2"
|
||||
else wget -q -O "$2" "$1"; fi
|
||||
}
|
||||
|
||||
ensure_user_group() {
|
||||
nologin_bin="$(command -v nologin 2>/dev/null || command -v false 2>/dev/null || echo /bin/false)"
|
||||
|
||||
if ! check_os_entity group telemt; then
|
||||
if command -v groupadd >/dev/null 2>&1; then $SUDO groupadd -r telemt
|
||||
elif command -v addgroup >/dev/null 2>&1; then $SUDO addgroup -S telemt
|
||||
else die "Cannot create group"; fi
|
||||
fi
|
||||
|
||||
if ! check_os_entity passwd telemt; then
|
||||
if command -v useradd >/dev/null 2>&1; then
|
||||
$SUDO useradd -r -g telemt -d "$WORK_DIR" -s "$nologin_bin" -c "Telemt Proxy" telemt
|
||||
elif command -v adduser >/dev/null 2>&1; then
|
||||
if adduser --help 2>&1 | grep -q -- '-S'; then
|
||||
$SUDO adduser -S -D -H -h "$WORK_DIR" -s "$nologin_bin" -G telemt telemt
|
||||
else
|
||||
$SUDO adduser --system --home "$WORK_DIR" --shell "$nologin_bin" --no-create-home --ingroup telemt --disabled-password telemt
|
||||
fi
|
||||
else die "Cannot create user"; fi
|
||||
fi
|
||||
}
|
||||
|
||||
setup_dirs() {
|
||||
$SUDO mkdir -p "$WORK_DIR" "$CONFIG_DIR" "$CONFIG_PARENT_DIR" || die "Failed to create directories"
|
||||
|
||||
$SUDO chown telemt:telemt "$WORK_DIR" && $SUDO chmod 750 "$WORK_DIR"
|
||||
$SUDO chown root:telemt "$CONFIG_DIR" && $SUDO chmod 750 "$CONFIG_DIR"
|
||||
|
||||
if [ "$CONFIG_PARENT_DIR" != "$CONFIG_DIR" ] && [ "$CONFIG_PARENT_DIR" != "." ] && [ "$CONFIG_PARENT_DIR" != "/" ]; then
|
||||
$SUDO chown root:telemt "$CONFIG_PARENT_DIR" && $SUDO chmod 750 "$CONFIG_PARENT_DIR"
|
||||
fi
|
||||
}
|
||||
|
||||
stop_service() {
|
||||
svc="$(get_svc_mgr)"
|
||||
if [ "$svc" = "systemd" ] && systemctl is-active --quiet "$SERVICE_NAME" 2>/dev/null; then
|
||||
$SUDO systemctl stop "$SERVICE_NAME" 2>/dev/null || true
|
||||
elif [ "$svc" = "openrc" ] && rc-service "$SERVICE_NAME" status >/dev/null 2>&1; then
|
||||
$SUDO rc-service "$SERVICE_NAME" stop 2>/dev/null || true
|
||||
fi
|
||||
}
|
||||
|
||||
install_binary() {
|
||||
src="$1"
|
||||
dst="$2"
|
||||
bin_src="$1"; bin_dst="$2"
|
||||
if [ -e "$INSTALL_DIR" ] && [ ! -d "$INSTALL_DIR" ]; then
|
||||
die "'$INSTALL_DIR' is not a directory."
|
||||
fi
|
||||
|
||||
if [ -w "$INSTALL_DIR" ] || { [ ! -e "$INSTALL_DIR" ] && [ -w "$(dirname "$INSTALL_DIR")" ]; }; then
|
||||
mkdir -p "$INSTALL_DIR"
|
||||
install -m 0755 "$src" "$dst"
|
||||
elif command -v sudo >/dev/null 2>&1; then
|
||||
sudo mkdir -p "$INSTALL_DIR"
|
||||
sudo install -m 0755 "$src" "$dst"
|
||||
$SUDO mkdir -p "$INSTALL_DIR" || die "Failed to create install directory"
|
||||
if command -v install >/dev/null 2>&1; then
|
||||
$SUDO install -m 0755 "$bin_src" "$bin_dst" || die "Failed to install binary"
|
||||
else
|
||||
die "cannot write to $INSTALL_DIR and sudo is not available"
|
||||
$SUDO rm -f "$bin_dst" 2>/dev/null || true
|
||||
$SUDO cp "$bin_src" "$bin_dst" && $SUDO chmod 0755 "$bin_dst" || die "Failed to copy binary"
|
||||
fi
|
||||
|
||||
$SUDO sh -c '[ -x "$1" ]' _ "$bin_dst" || die "Binary not executable: $bin_dst"
|
||||
|
||||
if command -v setcap >/dev/null 2>&1; then
|
||||
$SUDO setcap cap_net_bind_service=+ep "$bin_dst" 2>/dev/null || true
|
||||
fi
|
||||
}
|
||||
|
||||
need_cmd uname
|
||||
need_cmd tar
|
||||
need_cmd mktemp
|
||||
need_cmd grep
|
||||
need_cmd install
|
||||
generate_secret() {
|
||||
secret="$(command -v openssl >/dev/null 2>&1 && openssl rand -hex 16 2>/dev/null || true)"
|
||||
if [ -z "$secret" ] || [ "${#secret}" -ne 32 ]; then
|
||||
if command -v od >/dev/null 2>&1; then secret="$(dd if=/dev/urandom bs=16 count=1 2>/dev/null | od -An -tx1 | tr -d ' \n')"
|
||||
elif command -v hexdump >/dev/null 2>&1; then secret="$(dd if=/dev/urandom bs=16 count=1 2>/dev/null | hexdump -e '1/1 "%02x"')"
|
||||
elif command -v xxd >/dev/null 2>&1; then secret="$(dd if=/dev/urandom bs=16 count=1 2>/dev/null | xxd -p | tr -d '\n')"
|
||||
fi
|
||||
fi
|
||||
if [ "${#secret}" -eq 32 ]; then echo "$secret"; else return 1; fi
|
||||
}
|
||||
|
||||
ARCH="$(detect_arch)"
|
||||
OS="$(detect_os)"
|
||||
generate_config_content() {
|
||||
escaped_tls_domain="$(printf '%s\n' "$TLS_DOMAIN" | tr -d '[:cntrl:]' | sed 's/\\/\\\\/g; s/"/\\"/g')"
|
||||
|
||||
if [ "$OS" != "linux" ]; then
|
||||
case "$OS" in
|
||||
openbsd)
|
||||
die "install.sh installs only Linux release artifacts. On OpenBSD, build from source (see docs/OPENBSD.en.md)."
|
||||
;;
|
||||
*)
|
||||
die "unsupported operating system for install.sh: $OS"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
cat <<EOF
|
||||
[general]
|
||||
use_middle_proxy = false
|
||||
|
||||
LIBC="$(detect_libc)"
|
||||
[general.modes]
|
||||
classic = false
|
||||
secure = false
|
||||
tls = true
|
||||
|
||||
case "$VERSION" in
|
||||
latest)
|
||||
URL="https://github.com/$REPO/releases/latest/download/${BIN_NAME}-${ARCH}-linux-${LIBC}.tar.gz"
|
||||
;;
|
||||
*)
|
||||
URL="https://github.com/$REPO/releases/download/${VERSION}/${BIN_NAME}-${ARCH}-linux-${LIBC}.tar.gz"
|
||||
[server]
|
||||
port = 443
|
||||
|
||||
[server.api]
|
||||
enabled = true
|
||||
listen = "127.0.0.1:9091"
|
||||
whitelist = ["127.0.0.1/32"]
|
||||
|
||||
[censorship]
|
||||
tls_domain = "${escaped_tls_domain}"
|
||||
|
||||
[access.users]
|
||||
hello = "$1"
|
||||
EOF
|
||||
}
|
||||
|
||||
install_config() {
|
||||
if [ -n "$SUDO" ]; then
|
||||
if $SUDO sh -c '[ -f "$1" ]' _ "$CONFIG_FILE"; then
|
||||
say " -> Config already exists at $CONFIG_FILE. Skipping creation."
|
||||
return 0
|
||||
fi
|
||||
elif [ -f "$CONFIG_FILE" ]; then
|
||||
say " -> Config already exists at $CONFIG_FILE. Skipping creation."
|
||||
return 0
|
||||
fi
|
||||
|
||||
toml_secret="$(generate_secret)" || die "Failed to generate secret."
|
||||
|
||||
generate_config_content "$toml_secret" | write_root "$CONFIG_FILE" || die "Failed to install config"
|
||||
$SUDO chown root:telemt "$CONFIG_FILE" && $SUDO chmod 640 "$CONFIG_FILE"
|
||||
|
||||
say " -> Config created successfully."
|
||||
say " -> Generated secret for default user 'hello': $toml_secret"
|
||||
}
|
||||
|
||||
generate_systemd_content() {
|
||||
cat <<EOF
|
||||
[Unit]
|
||||
Description=Telemt
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
[Service]
|
||||
Type=simple
|
||||
User=telemt
|
||||
Group=telemt
|
||||
WorkingDirectory=$WORK_DIR
|
||||
ExecStart="${INSTALL_DIR}/${BIN_NAME}" "${CONFIG_FILE}"
|
||||
Restart=on-failure
|
||||
LimitNOFILE=65536
|
||||
AmbientCapabilities=CAP_NET_BIND_SERVICE
|
||||
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
|
||||
|
||||
[Install]
|
||||
WantedBy=multi-user.target
|
||||
EOF
|
||||
}
|
||||
|
||||
generate_openrc_content() {
|
||||
cat <<EOF
|
||||
#!/sbin/openrc-run
|
||||
name="$SERVICE_NAME"
|
||||
description="Telemt Proxy Service"
|
||||
command="${INSTALL_DIR}/${BIN_NAME}"
|
||||
command_args="${CONFIG_FILE}"
|
||||
command_background=true
|
||||
command_user="telemt:telemt"
|
||||
pidfile="/run/\${RC_SVCNAME}.pid"
|
||||
directory="${WORK_DIR}"
|
||||
rc_ulimit="-n 65536"
|
||||
depend() { need net; use logger; }
|
||||
EOF
|
||||
}
|
||||
|
||||
install_service() {
|
||||
svc="$(get_svc_mgr)"
|
||||
if [ "$svc" = "systemd" ]; then
|
||||
generate_systemd_content | write_root "/etc/systemd/system/${SERVICE_NAME}.service"
|
||||
$SUDO chown root:root "/etc/systemd/system/${SERVICE_NAME}.service" && $SUDO chmod 644 "/etc/systemd/system/${SERVICE_NAME}.service"
|
||||
|
||||
$SUDO systemctl daemon-reload || true
|
||||
$SUDO systemctl enable "$SERVICE_NAME" || true
|
||||
|
||||
if ! $SUDO systemctl start "$SERVICE_NAME"; then
|
||||
say "[WARNING] Failed to start service"
|
||||
SERVICE_START_FAILED=1
|
||||
fi
|
||||
elif [ "$svc" = "openrc" ]; then
|
||||
generate_openrc_content | write_root "/etc/init.d/${SERVICE_NAME}"
|
||||
$SUDO chown root:root "/etc/init.d/${SERVICE_NAME}" && $SUDO chmod 0755 "/etc/init.d/${SERVICE_NAME}"
|
||||
|
||||
$SUDO rc-update add "$SERVICE_NAME" default 2>/dev/null || true
|
||||
|
||||
if ! $SUDO rc-service "$SERVICE_NAME" start 2>/dev/null; then
|
||||
say "[WARNING] Failed to start service"
|
||||
SERVICE_START_FAILED=1
|
||||
fi
|
||||
else
|
||||
cmd="\"${INSTALL_DIR}/${BIN_NAME}\" \"${CONFIG_FILE}\""
|
||||
if [ -n "$SUDO" ]; then
|
||||
say " -> Service manager not found. Start manually: sudo -u telemt $cmd"
|
||||
else
|
||||
say " -> Service manager not found. Start manually: su -s /bin/sh telemt -c '$cmd'"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
kill_user_procs() {
|
||||
if command -v pkill >/dev/null 2>&1; then
|
||||
$SUDO pkill -u telemt "$BIN_NAME" 2>/dev/null || true
|
||||
sleep 1
|
||||
$SUDO pkill -9 -u telemt "$BIN_NAME" 2>/dev/null || true
|
||||
else
|
||||
if command -v pgrep >/dev/null 2>&1; then
|
||||
pids="$(pgrep -u telemt 2>/dev/null || true)"
|
||||
else
|
||||
pids="$(ps -u telemt -o pid= 2>/dev/null || true)"
|
||||
fi
|
||||
|
||||
if [ -n "$pids" ]; then
|
||||
for pid in $pids; do
|
||||
case "$pid" in ''|*[!0-9]*) continue ;; *) $SUDO kill "$pid" 2>/dev/null || true ;; esac
|
||||
done
|
||||
sleep 1
|
||||
for pid in $pids; do
|
||||
case "$pid" in ''|*[!0-9]*) continue ;; *) $SUDO kill -9 "$pid" 2>/dev/null || true ;; esac
|
||||
done
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
uninstall() {
|
||||
say "Starting uninstallation of $BIN_NAME..."
|
||||
|
||||
say ">>> Stage 1: Stopping services"
|
||||
stop_service
|
||||
|
||||
say ">>> Stage 2: Removing service configuration"
|
||||
svc="$(get_svc_mgr)"
|
||||
if [ "$svc" = "systemd" ]; then
|
||||
$SUDO systemctl disable "$SERVICE_NAME" 2>/dev/null || true
|
||||
$SUDO rm -f "/etc/systemd/system/${SERVICE_NAME}.service"
|
||||
$SUDO systemctl daemon-reload 2>/dev/null || true
|
||||
elif [ "$svc" = "openrc" ]; then
|
||||
$SUDO rc-update del "$SERVICE_NAME" 2>/dev/null || true
|
||||
$SUDO rm -f "/etc/init.d/${SERVICE_NAME}"
|
||||
fi
|
||||
|
||||
say ">>> Stage 3: Terminating user processes"
|
||||
kill_user_procs
|
||||
|
||||
say ">>> Stage 4: Removing binary"
|
||||
$SUDO rm -f "${INSTALL_DIR}/${BIN_NAME}"
|
||||
|
||||
if [ "$ACTION" = "purge" ]; then
|
||||
say ">>> Stage 5: Purging configuration, data, and user"
|
||||
$SUDO rm -rf "$CONFIG_DIR" "$WORK_DIR"
|
||||
$SUDO rm -f "$CONFIG_FILE"
|
||||
if [ "$CONFIG_PARENT_DIR" != "$CONFIG_DIR" ] && [ "$CONFIG_PARENT_DIR" != "." ] && [ "$CONFIG_PARENT_DIR" != "/" ]; then
|
||||
$SUDO rmdir "$CONFIG_PARENT_DIR" 2>/dev/null || true
|
||||
fi
|
||||
$SUDO userdel telemt 2>/dev/null || $SUDO deluser telemt 2>/dev/null || true
|
||||
$SUDO groupdel telemt 2>/dev/null || $SUDO delgroup telemt 2>/dev/null || true
|
||||
else
|
||||
say "Note: Configuration and user kept. Run with 'purge' to remove completely."
|
||||
fi
|
||||
|
||||
printf '\n====================================================================\n'
|
||||
printf ' UNINSTALLATION COMPLETE\n'
|
||||
printf '====================================================================\n\n'
|
||||
exit 0
|
||||
}
|
||||
|
||||
case "$ACTION" in
|
||||
help) show_help ;;
|
||||
uninstall|purge) verify_common; uninstall ;;
|
||||
install)
|
||||
say "Starting installation of $BIN_NAME (Version: $TARGET_VERSION)"
|
||||
|
||||
say ">>> Stage 1: Verifying environment and dependencies"
|
||||
verify_common; verify_install_deps
|
||||
|
||||
if [ "$TARGET_VERSION" != "latest" ]; then
|
||||
TARGET_VERSION="${TARGET_VERSION#v}"
|
||||
fi
|
||||
|
||||
ARCH="$(detect_arch)"; LIBC="$(detect_libc)"
|
||||
FILE_NAME="${BIN_NAME}-${ARCH}-linux-${LIBC}.tar.gz"
|
||||
|
||||
if [ "$TARGET_VERSION" = "latest" ]; then
|
||||
DL_URL="https://github.com/${REPO}/releases/latest/download/${FILE_NAME}"
|
||||
else
|
||||
DL_URL="https://github.com/${REPO}/releases/download/${TARGET_VERSION}/${FILE_NAME}"
|
||||
fi
|
||||
|
||||
say ">>> Stage 2: Downloading archive"
|
||||
TEMP_DIR="$(mktemp -d)" || die "Temp directory creation failed"
|
||||
if [ -z "$TEMP_DIR" ] || [ ! -d "$TEMP_DIR" ]; then
|
||||
die "Temp directory is invalid or was not created"
|
||||
fi
|
||||
|
||||
fetch_file "$DL_URL" "${TEMP_DIR}/${FILE_NAME}" || die "Download failed"
|
||||
|
||||
say ">>> Stage 3: Extracting archive"
|
||||
if ! gzip -dc "${TEMP_DIR}/${FILE_NAME}" | tar -xf - -C "$TEMP_DIR" 2>/dev/null; then
|
||||
die "Extraction failed (downloaded archive might be invalid or 404)."
|
||||
fi
|
||||
|
||||
EXTRACTED_BIN="$(find "$TEMP_DIR" -type f -name "$BIN_NAME" -print 2>/dev/null | head -n 1 || true)"
|
||||
[ -n "$EXTRACTED_BIN" ] || die "Binary '$BIN_NAME' not found in archive"
|
||||
|
||||
say ">>> Stage 4: Setting up environment (User, Group, Directories)"
|
||||
ensure_user_group; setup_dirs; stop_service
|
||||
|
||||
say ">>> Stage 5: Installing binary"
|
||||
install_binary "$EXTRACTED_BIN" "${INSTALL_DIR}/${BIN_NAME}"
|
||||
|
||||
say ">>> Stage 6: Generating configuration"
|
||||
install_config
|
||||
|
||||
say ">>> Stage 7: Installing and starting service"
|
||||
install_service
|
||||
|
||||
if [ "${SERVICE_START_FAILED:-0}" -eq 1 ]; then
|
||||
printf '\n====================================================================\n'
|
||||
printf ' INSTALLATION COMPLETED WITH WARNINGS\n'
|
||||
printf '====================================================================\n\n'
|
||||
printf 'The service was installed but failed to start automatically.\n'
|
||||
printf 'Please check the logs to determine the issue.\n\n'
|
||||
else
|
||||
printf '\n====================================================================\n'
|
||||
printf ' INSTALLATION SUCCESS\n'
|
||||
printf '====================================================================\n\n'
|
||||
fi
|
||||
|
||||
svc="$(get_svc_mgr)"
|
||||
if [ "$svc" = "systemd" ]; then
|
||||
printf 'To check the status of your proxy service, run:\n'
|
||||
printf ' systemctl status %s\n\n' "$SERVICE_NAME"
|
||||
elif [ "$svc" = "openrc" ]; then
|
||||
printf 'To check the status of your proxy service, run:\n'
|
||||
printf ' rc-service %s status\n\n' "$SERVICE_NAME"
|
||||
fi
|
||||
|
||||
printf 'To get your user connection links (for Telegram), run:\n'
|
||||
if command -v jq >/dev/null 2>&1; then
|
||||
printf ' curl -s http://127.0.0.1:9091/v1/users | jq -r '\''.data[] | "User: \\(.username)\\n\\(.links.tls[0] // empty)\\n"'\''\n'
|
||||
else
|
||||
printf ' curl -s http://127.0.0.1:9091/v1/users\n'
|
||||
printf ' (Tip: Install '\''jq'\'' for a much cleaner output)\n'
|
||||
fi
|
||||
|
||||
printf '\n====================================================================\n'
|
||||
;;
|
||||
esac
|
||||
|
||||
TMPDIR="$(mktemp -d)"
|
||||
trap 'rm -rf "$TMPDIR"' EXIT INT TERM
|
||||
|
||||
say "Installing $BIN_NAME ($VERSION) for $ARCH-linux-$LIBC..."
|
||||
fetch_to_stdout "$URL" | tar -xzf - -C "$TMPDIR"
|
||||
|
||||
[ -f "$TMPDIR/$BIN_NAME" ] || die "archive did not contain $BIN_NAME"
|
||||
|
||||
install_binary "$TMPDIR/$BIN_NAME" "$INSTALL_DIR/$BIN_NAME"
|
||||
|
||||
say "Installed: $INSTALL_DIR/$BIN_NAME"
|
||||
"$INSTALL_DIR/$BIN_NAME" --version 2>/dev/null || true
|
||||
|
||||
@@ -24,10 +24,7 @@ pub(super) fn success_response<T: Serialize>(
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub(super) fn error_response(
|
||||
request_id: u64,
|
||||
failure: ApiFailure,
|
||||
) -> hyper::Response<Full<Bytes>> {
|
||||
pub(super) fn error_response(request_id: u64, failure: ApiFailure) -> hyper::Response<Full<Bytes>> {
|
||||
let payload = ErrorResponse {
|
||||
ok: false,
|
||||
error: ErrorBody {
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
#![allow(clippy::too_many_arguments)]
|
||||
|
||||
use std::convert::Infallible;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::path::PathBuf;
|
||||
@@ -19,8 +21,8 @@ use crate::ip_tracker::UserIpTracker;
|
||||
use crate::proxy::route_mode::RouteRuntimeController;
|
||||
use crate::startup::StartupTracker;
|
||||
use crate::stats::Stats;
|
||||
use crate::transport::middle_proxy::MePool;
|
||||
use crate::transport::UpstreamManager;
|
||||
use crate::transport::middle_proxy::MePool;
|
||||
|
||||
mod config_store;
|
||||
mod events;
|
||||
@@ -36,8 +38,8 @@ mod runtime_zero;
|
||||
mod users;
|
||||
|
||||
use config_store::{current_revision, parse_if_match};
|
||||
use http_utils::{error_response, read_json, read_optional_json, success_response};
|
||||
use events::ApiEventStore;
|
||||
use http_utils::{error_response, read_json, read_optional_json, success_response};
|
||||
use model::{
|
||||
ApiFailure, CreateUserRequest, HealthData, PatchUserRequest, RotateSecretRequest, SummaryData,
|
||||
};
|
||||
@@ -55,11 +57,11 @@ use runtime_stats::{
|
||||
MinimalCacheEntry, build_dcs_data, build_me_writers_data, build_minimal_all_data,
|
||||
build_upstreams_data, build_zero_all_data,
|
||||
};
|
||||
use runtime_watch::spawn_runtime_watchers;
|
||||
use runtime_zero::{
|
||||
build_limits_effective_data, build_runtime_gates_data, build_security_posture_data,
|
||||
build_system_info_data,
|
||||
};
|
||||
use runtime_watch::spawn_runtime_watchers;
|
||||
use users::{create_user, delete_user, patch_user, rotate_secret, users_from_config};
|
||||
|
||||
pub(super) struct ApiRuntimeState {
|
||||
@@ -208,15 +210,15 @@ async fn handle(
|
||||
));
|
||||
}
|
||||
|
||||
if !api_cfg.whitelist.is_empty()
|
||||
&& !api_cfg
|
||||
.whitelist
|
||||
.iter()
|
||||
.any(|net| net.contains(peer.ip()))
|
||||
if !api_cfg.whitelist.is_empty() && !api_cfg.whitelist.iter().any(|net| net.contains(peer.ip()))
|
||||
{
|
||||
return Ok(error_response(
|
||||
request_id,
|
||||
ApiFailure::new(StatusCode::FORBIDDEN, "forbidden", "Source IP is not allowed"),
|
||||
ApiFailure::new(
|
||||
StatusCode::FORBIDDEN,
|
||||
"forbidden",
|
||||
"Source IP is not allowed",
|
||||
),
|
||||
));
|
||||
}
|
||||
|
||||
@@ -347,7 +349,8 @@ async fn handle(
|
||||
}
|
||||
("GET", "/v1/runtime/connections/summary") => {
|
||||
let revision = current_revision(&shared.config_path).await?;
|
||||
let data = build_runtime_connections_summary_data(shared.as_ref(), cfg.as_ref()).await;
|
||||
let data =
|
||||
build_runtime_connections_summary_data(shared.as_ref(), cfg.as_ref()).await;
|
||||
Ok(success_response(StatusCode::OK, data, revision))
|
||||
}
|
||||
("GET", "/v1/runtime/events/recent") => {
|
||||
@@ -389,13 +392,16 @@ async fn handle(
|
||||
let (data, revision) = match result {
|
||||
Ok(ok) => ok,
|
||||
Err(error) => {
|
||||
shared.runtime_events.record("api.user.create.failed", error.code);
|
||||
shared
|
||||
.runtime_events
|
||||
.record("api.user.create.failed", error.code);
|
||||
return Err(error);
|
||||
}
|
||||
};
|
||||
shared
|
||||
.runtime_events
|
||||
.record("api.user.create.ok", format!("username={}", data.user.username));
|
||||
shared.runtime_events.record(
|
||||
"api.user.create.ok",
|
||||
format!("username={}", data.user.username),
|
||||
);
|
||||
Ok(success_response(StatusCode::CREATED, data, revision))
|
||||
}
|
||||
_ => {
|
||||
@@ -414,7 +420,8 @@ async fn handle(
|
||||
detected_ip_v6,
|
||||
)
|
||||
.await;
|
||||
if let Some(user_info) = users.into_iter().find(|entry| entry.username == user)
|
||||
if let Some(user_info) =
|
||||
users.into_iter().find(|entry| entry.username == user)
|
||||
{
|
||||
return Ok(success_response(StatusCode::OK, user_info, revision));
|
||||
}
|
||||
@@ -435,7 +442,8 @@ async fn handle(
|
||||
));
|
||||
}
|
||||
let expected_revision = parse_if_match(req.headers());
|
||||
let body = read_json::<PatchUserRequest>(req.into_body(), body_limit).await?;
|
||||
let body =
|
||||
read_json::<PatchUserRequest>(req.into_body(), body_limit).await?;
|
||||
let result = patch_user(user, body, expected_revision, &shared).await;
|
||||
let (data, revision) = match result {
|
||||
Ok(ok) => ok,
|
||||
@@ -475,10 +483,9 @@ async fn handle(
|
||||
return Err(error);
|
||||
}
|
||||
};
|
||||
shared.runtime_events.record(
|
||||
"api.user.delete.ok",
|
||||
format!("username={}", deleted_user),
|
||||
);
|
||||
shared
|
||||
.runtime_events
|
||||
.record("api.user.delete.ok", format!("username={}", deleted_user));
|
||||
return Ok(success_response(StatusCode::OK, deleted_user, revision));
|
||||
}
|
||||
if method == Method::POST
|
||||
|
||||
@@ -1,10 +1,12 @@
|
||||
use std::net::IpAddr;
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use hyper::StatusCode;
|
||||
use rand::Rng;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
use crate::crypto::SecureRandom;
|
||||
|
||||
const MAX_USERNAME_LEN: usize = 64;
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -134,6 +136,7 @@ pub(super) struct UpstreamSummaryData {
|
||||
pub(super) direct_total: usize,
|
||||
pub(super) socks4_total: usize,
|
||||
pub(super) socks5_total: usize,
|
||||
pub(super) shadowsocks_total: usize,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Clone)]
|
||||
@@ -236,6 +239,8 @@ pub(super) struct MeWritersSummary {
|
||||
pub(super) required_writers: usize,
|
||||
pub(super) alive_writers: usize,
|
||||
pub(super) coverage_pct: f64,
|
||||
pub(super) fresh_alive_writers: usize,
|
||||
pub(super) fresh_coverage_pct: f64,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Clone)]
|
||||
@@ -250,6 +255,12 @@ pub(super) struct MeWriterStatus {
|
||||
pub(super) bound_clients: usize,
|
||||
pub(super) idle_for_secs: Option<u64>,
|
||||
pub(super) rtt_ema_ms: Option<f64>,
|
||||
pub(super) matches_active_generation: bool,
|
||||
pub(super) in_desired_map: bool,
|
||||
pub(super) allow_drain_fallback: bool,
|
||||
pub(super) drain_started_at_epoch_secs: Option<u64>,
|
||||
pub(super) drain_deadline_epoch_secs: Option<u64>,
|
||||
pub(super) drain_over_ttl: bool,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Clone)]
|
||||
@@ -276,6 +287,8 @@ pub(super) struct DcStatus {
|
||||
pub(super) floor_capped: bool,
|
||||
pub(super) alive_writers: usize,
|
||||
pub(super) coverage_pct: f64,
|
||||
pub(super) fresh_alive_writers: usize,
|
||||
pub(super) fresh_coverage_pct: f64,
|
||||
pub(super) rtt_ms: Option<f64>,
|
||||
pub(super) load: usize,
|
||||
}
|
||||
@@ -471,7 +484,9 @@ pub(super) fn is_valid_username(user: &str) -> bool {
|
||||
}
|
||||
|
||||
pub(super) fn random_user_secret() -> String {
|
||||
static API_SECRET_RNG: OnceLock<SecureRandom> = OnceLock::new();
|
||||
let rng = API_SECRET_RNG.get_or_init(SecureRandom::new);
|
||||
let mut bytes = [0u8; 16];
|
||||
rand::rng().fill(&mut bytes);
|
||||
rng.fill(&mut bytes);
|
||||
hex::encode(bytes)
|
||||
}
|
||||
|
||||
@@ -167,11 +167,7 @@ async fn current_me_pool_stage_progress(shared: &ApiShared) -> Option<f64> {
|
||||
let pool = shared.me_pool.read().await.clone()?;
|
||||
let status = pool.api_status_snapshot().await;
|
||||
let configured_dc_groups = status.configured_dc_groups;
|
||||
let covered_dc_groups = status
|
||||
.dcs
|
||||
.iter()
|
||||
.filter(|dc| dc.alive_writers > 0)
|
||||
.count();
|
||||
let covered_dc_groups = status.dcs.iter().filter(|dc| dc.alive_writers > 0).count();
|
||||
|
||||
let dc_coverage = ratio_01(covered_dc_groups, configured_dc_groups);
|
||||
let writer_coverage = ratio_01(status.alive_writers, status.required_writers);
|
||||
|
||||
@@ -107,6 +107,25 @@ pub(super) struct RuntimeMeQualityRouteDropData {
|
||||
pub(super) queue_full_high_total: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMeQualityFamilyStateData {
|
||||
pub(super) family: &'static str,
|
||||
pub(super) state: &'static str,
|
||||
pub(super) state_since_epoch_secs: u64,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub(super) suppressed_until_epoch_secs: Option<u64>,
|
||||
pub(super) fail_streak: u32,
|
||||
pub(super) recover_success_streak: u32,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMeQualityDrainGateData {
|
||||
pub(super) route_quorum_ok: bool,
|
||||
pub(super) redundancy_ok: bool,
|
||||
pub(super) block_reason: &'static str,
|
||||
pub(super) updated_at_epoch_secs: u64,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct RuntimeMeQualityDcRttData {
|
||||
pub(super) dc: i16,
|
||||
@@ -120,6 +139,8 @@ pub(super) struct RuntimeMeQualityDcRttData {
|
||||
pub(super) struct RuntimeMeQualityPayload {
|
||||
pub(super) counters: RuntimeMeQualityCountersData,
|
||||
pub(super) route_drops: RuntimeMeQualityRouteDropData,
|
||||
pub(super) family_states: Vec<RuntimeMeQualityFamilyStateData>,
|
||||
pub(super) drain_gate: RuntimeMeQualityDrainGateData,
|
||||
pub(super) dc_rtt: Vec<RuntimeMeQualityDcRttData>,
|
||||
}
|
||||
|
||||
@@ -158,6 +179,7 @@ pub(super) struct RuntimeUpstreamQualitySummaryData {
|
||||
pub(super) direct_total: usize,
|
||||
pub(super) socks4_total: usize,
|
||||
pub(super) socks5_total: usize,
|
||||
pub(super) shadowsocks_total: usize,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
@@ -360,6 +382,19 @@ pub(super) async fn build_runtime_me_quality_data(shared: &ApiShared) -> Runtime
|
||||
};
|
||||
|
||||
let status = pool.api_status_snapshot().await;
|
||||
let family_states = pool
|
||||
.api_family_state_snapshot()
|
||||
.into_iter()
|
||||
.map(|entry| RuntimeMeQualityFamilyStateData {
|
||||
family: entry.family,
|
||||
state: entry.state,
|
||||
state_since_epoch_secs: entry.state_since_epoch_secs,
|
||||
suppressed_until_epoch_secs: entry.suppressed_until_epoch_secs,
|
||||
fail_streak: entry.fail_streak,
|
||||
recover_success_streak: entry.recover_success_streak,
|
||||
})
|
||||
.collect();
|
||||
let drain_gate_snapshot = pool.api_drain_gate_snapshot();
|
||||
RuntimeMeQualityData {
|
||||
enabled: true,
|
||||
reason: None,
|
||||
@@ -380,6 +415,13 @@ pub(super) async fn build_runtime_me_quality_data(shared: &ApiShared) -> Runtime
|
||||
queue_full_base_total: shared.stats.get_me_route_drop_queue_full_base(),
|
||||
queue_full_high_total: shared.stats.get_me_route_drop_queue_full_high(),
|
||||
},
|
||||
family_states,
|
||||
drain_gate: RuntimeMeQualityDrainGateData {
|
||||
route_quorum_ok: drain_gate_snapshot.route_quorum_ok,
|
||||
redundancy_ok: drain_gate_snapshot.redundancy_ok,
|
||||
block_reason: drain_gate_snapshot.block_reason,
|
||||
updated_at_epoch_secs: drain_gate_snapshot.updated_at_epoch_secs,
|
||||
},
|
||||
dc_rtt: status
|
||||
.dcs
|
||||
.into_iter()
|
||||
@@ -404,7 +446,9 @@ pub(super) async fn build_runtime_upstream_quality_data(
|
||||
connect_attempt_total: shared.stats.get_upstream_connect_attempt_total(),
|
||||
connect_success_total: shared.stats.get_upstream_connect_success_total(),
|
||||
connect_fail_total: shared.stats.get_upstream_connect_fail_total(),
|
||||
connect_failfast_hard_error_total: shared.stats.get_upstream_connect_failfast_hard_error_total(),
|
||||
connect_failfast_hard_error_total: shared
|
||||
.stats
|
||||
.get_upstream_connect_failfast_hard_error_total(),
|
||||
};
|
||||
|
||||
let Some(snapshot) = shared.upstream_manager.try_api_snapshot() else {
|
||||
@@ -444,6 +488,7 @@ pub(super) async fn build_runtime_upstream_quality_data(
|
||||
direct_total: snapshot.summary.direct_total,
|
||||
socks4_total: snapshot.summary.socks4_total,
|
||||
socks5_total: snapshot.summary.socks5_total,
|
||||
shadowsocks_total: snapshot.summary.shadowsocks_total,
|
||||
}),
|
||||
upstreams: Some(
|
||||
snapshot
|
||||
@@ -455,6 +500,7 @@ pub(super) async fn build_runtime_upstream_quality_data(
|
||||
crate::transport::UpstreamRouteKind::Direct => "direct",
|
||||
crate::transport::UpstreamRouteKind::Socks4 => "socks4",
|
||||
crate::transport::UpstreamRouteKind::Socks5 => "socks5",
|
||||
crate::transport::UpstreamRouteKind::Shadowsocks => "shadowsocks",
|
||||
},
|
||||
address: upstream.address,
|
||||
weight: upstream.weight,
|
||||
@@ -474,7 +520,9 @@ pub(super) async fn build_runtime_upstream_quality_data(
|
||||
crate::transport::upstream::IpPreference::PreferV6 => "prefer_v6",
|
||||
crate::transport::upstream::IpPreference::PreferV4 => "prefer_v4",
|
||||
crate::transport::upstream::IpPreference::BothWork => "both_work",
|
||||
crate::transport::upstream::IpPreference::Unavailable => "unavailable",
|
||||
crate::transport::upstream::IpPreference::Unavailable => {
|
||||
"unavailable"
|
||||
}
|
||||
},
|
||||
})
|
||||
.collect(),
|
||||
@@ -512,14 +560,18 @@ pub(super) async fn build_runtime_nat_stun_data(shared: &ApiShared) -> RuntimeNa
|
||||
live_total: snapshot.live_servers.len(),
|
||||
},
|
||||
reflection: RuntimeNatStunReflectionBlockData {
|
||||
v4: snapshot.reflection_v4.map(|entry| RuntimeNatStunReflectionData {
|
||||
addr: entry.addr.to_string(),
|
||||
age_secs: entry.age_secs,
|
||||
}),
|
||||
v6: snapshot.reflection_v6.map(|entry| RuntimeNatStunReflectionData {
|
||||
addr: entry.addr.to_string(),
|
||||
age_secs: entry.age_secs,
|
||||
}),
|
||||
v4: snapshot
|
||||
.reflection_v4
|
||||
.map(|entry| RuntimeNatStunReflectionData {
|
||||
addr: entry.addr.to_string(),
|
||||
age_secs: entry.age_secs,
|
||||
}),
|
||||
v6: snapshot
|
||||
.reflection_v6
|
||||
.map(|entry| RuntimeNatStunReflectionData {
|
||||
addr: entry.addr.to_string(),
|
||||
age_secs: entry.age_secs,
|
||||
}),
|
||||
},
|
||||
stun_backoff_remaining_ms: snapshot.stun_backoff_remaining_ms,
|
||||
}),
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
use std::net::IpAddr;
|
||||
use std::collections::HashMap;
|
||||
use std::net::IpAddr;
|
||||
use std::sync::{Mutex, OnceLock};
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
@@ -7,8 +7,8 @@ use serde::Serialize;
|
||||
|
||||
use crate::config::{ProxyConfig, UpstreamType};
|
||||
use crate::network::probe::{detect_interface_ipv4, detect_interface_ipv6, is_bogon};
|
||||
use crate::transport::middle_proxy::{bnd_snapshot, timeskew_snapshot, upstream_bnd_snapshots};
|
||||
use crate::transport::UpstreamRouteKind;
|
||||
use crate::transport::middle_proxy::{bnd_snapshot, timeskew_snapshot, upstream_bnd_snapshots};
|
||||
|
||||
use super::ApiShared;
|
||||
|
||||
@@ -262,8 +262,8 @@ fn update_kdf_ewma(now_epoch_secs: u64, total_errors: u64) -> f64 {
|
||||
let delta_errors = total_errors.saturating_sub(guard.last_total_errors);
|
||||
let instant_rate_per_min = (delta_errors as f64) * 60.0 / (dt_secs as f64);
|
||||
let alpha = 1.0 - f64::exp(-(dt_secs as f64) / KDF_EWMA_TAU_SECS);
|
||||
guard.ewma_errors_per_min = guard.ewma_errors_per_min
|
||||
+ alpha * (instant_rate_per_min - guard.ewma_errors_per_min);
|
||||
guard.ewma_errors_per_min =
|
||||
guard.ewma_errors_per_min + alpha * (instant_rate_per_min - guard.ewma_errors_per_min);
|
||||
guard.last_epoch_secs = now_epoch_secs;
|
||||
guard.last_total_errors = total_errors;
|
||||
guard.ewma_errors_per_min
|
||||
@@ -284,6 +284,7 @@ fn map_route_kind(value: UpstreamRouteKind) -> &'static str {
|
||||
UpstreamRouteKind::Direct => "direct",
|
||||
UpstreamRouteKind::Socks4 => "socks4",
|
||||
UpstreamRouteKind::Socks5 => "socks5",
|
||||
UpstreamRouteKind::Shadowsocks => "shadowsocks",
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -2,8 +2,8 @@ use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
|
||||
|
||||
use crate::config::ApiConfig;
|
||||
use crate::stats::Stats;
|
||||
use crate::transport::upstream::IpPreference;
|
||||
use crate::transport::UpstreamRouteKind;
|
||||
use crate::transport::upstream::IpPreference;
|
||||
|
||||
use super::ApiShared;
|
||||
use super::model::{
|
||||
@@ -136,7 +136,8 @@ fn build_zero_upstream_data(stats: &Stats) -> ZeroUpstreamData {
|
||||
.get_upstream_connect_duration_success_bucket_501_1000ms(),
|
||||
connect_duration_success_bucket_gt_1000ms: stats
|
||||
.get_upstream_connect_duration_success_bucket_gt_1000ms(),
|
||||
connect_duration_fail_bucket_le_100ms: stats.get_upstream_connect_duration_fail_bucket_le_100ms(),
|
||||
connect_duration_fail_bucket_le_100ms: stats
|
||||
.get_upstream_connect_duration_fail_bucket_le_100ms(),
|
||||
connect_duration_fail_bucket_101_500ms: stats
|
||||
.get_upstream_connect_duration_fail_bucket_101_500ms(),
|
||||
connect_duration_fail_bucket_501_1000ms: stats
|
||||
@@ -178,6 +179,7 @@ pub(super) fn build_upstreams_data(shared: &ApiShared, api_cfg: &ApiConfig) -> U
|
||||
direct_total: snapshot.summary.direct_total,
|
||||
socks4_total: snapshot.summary.socks4_total,
|
||||
socks5_total: snapshot.summary.socks5_total,
|
||||
shadowsocks_total: snapshot.summary.shadowsocks_total,
|
||||
};
|
||||
let upstreams = snapshot
|
||||
.upstreams
|
||||
@@ -314,6 +316,8 @@ async fn get_minimal_payload_cached(
|
||||
required_writers: status.required_writers,
|
||||
alive_writers: status.alive_writers,
|
||||
coverage_pct: status.coverage_pct,
|
||||
fresh_alive_writers: status.fresh_alive_writers,
|
||||
fresh_coverage_pct: status.fresh_coverage_pct,
|
||||
},
|
||||
writers: status
|
||||
.writers
|
||||
@@ -329,6 +333,12 @@ async fn get_minimal_payload_cached(
|
||||
bound_clients: entry.bound_clients,
|
||||
idle_for_secs: entry.idle_for_secs,
|
||||
rtt_ema_ms: entry.rtt_ema_ms,
|
||||
matches_active_generation: entry.matches_active_generation,
|
||||
in_desired_map: entry.in_desired_map,
|
||||
allow_drain_fallback: entry.allow_drain_fallback,
|
||||
drain_started_at_epoch_secs: entry.drain_started_at_epoch_secs,
|
||||
drain_deadline_epoch_secs: entry.drain_deadline_epoch_secs,
|
||||
drain_over_ttl: entry.drain_over_ttl,
|
||||
})
|
||||
.collect(),
|
||||
};
|
||||
@@ -363,6 +373,8 @@ async fn get_minimal_payload_cached(
|
||||
floor_capped: entry.floor_capped,
|
||||
alive_writers: entry.alive_writers,
|
||||
coverage_pct: entry.coverage_pct,
|
||||
fresh_alive_writers: entry.fresh_alive_writers,
|
||||
fresh_coverage_pct: entry.fresh_coverage_pct,
|
||||
rtt_ms: entry.rtt_ms,
|
||||
load: entry.load,
|
||||
})
|
||||
@@ -381,8 +393,7 @@ async fn get_minimal_payload_cached(
|
||||
adaptive_floor_min_writers_multi_endpoint: runtime
|
||||
.adaptive_floor_min_writers_multi_endpoint,
|
||||
adaptive_floor_recover_grace_secs: runtime.adaptive_floor_recover_grace_secs,
|
||||
adaptive_floor_writers_per_core_total: runtime
|
||||
.adaptive_floor_writers_per_core_total,
|
||||
adaptive_floor_writers_per_core_total: runtime.adaptive_floor_writers_per_core_total,
|
||||
adaptive_floor_cpu_cores_override: runtime.adaptive_floor_cpu_cores_override,
|
||||
adaptive_floor_max_extra_writers_single_per_core: runtime
|
||||
.adaptive_floor_max_extra_writers_single_per_core,
|
||||
@@ -390,12 +401,9 @@ async fn get_minimal_payload_cached(
|
||||
.adaptive_floor_max_extra_writers_multi_per_core,
|
||||
adaptive_floor_max_active_writers_per_core: runtime
|
||||
.adaptive_floor_max_active_writers_per_core,
|
||||
adaptive_floor_max_warm_writers_per_core: runtime
|
||||
.adaptive_floor_max_warm_writers_per_core,
|
||||
adaptive_floor_max_active_writers_global: runtime
|
||||
.adaptive_floor_max_active_writers_global,
|
||||
adaptive_floor_max_warm_writers_global: runtime
|
||||
.adaptive_floor_max_warm_writers_global,
|
||||
adaptive_floor_max_warm_writers_per_core: runtime.adaptive_floor_max_warm_writers_per_core,
|
||||
adaptive_floor_max_active_writers_global: runtime.adaptive_floor_max_active_writers_global,
|
||||
adaptive_floor_max_warm_writers_global: runtime.adaptive_floor_max_warm_writers_global,
|
||||
adaptive_floor_cpu_cores_detected: runtime.adaptive_floor_cpu_cores_detected,
|
||||
adaptive_floor_cpu_cores_effective: runtime.adaptive_floor_cpu_cores_effective,
|
||||
adaptive_floor_global_cap_raw: runtime.adaptive_floor_global_cap_raw,
|
||||
@@ -486,6 +494,8 @@ fn disabled_me_writers(now_epoch_secs: u64, reason: &'static str) -> MeWritersDa
|
||||
required_writers: 0,
|
||||
alive_writers: 0,
|
||||
coverage_pct: 0.0,
|
||||
fresh_alive_writers: 0,
|
||||
fresh_coverage_pct: 0.0,
|
||||
},
|
||||
writers: Vec::new(),
|
||||
}
|
||||
@@ -505,6 +515,7 @@ fn map_route_kind(value: UpstreamRouteKind) -> &'static str {
|
||||
UpstreamRouteKind::Direct => "direct",
|
||||
UpstreamRouteKind::Socks4 => "socks4",
|
||||
UpstreamRouteKind::Socks5 => "socks5",
|
||||
UpstreamRouteKind::Shadowsocks => "shadowsocks",
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -90,6 +90,7 @@ pub(super) struct EffectiveMiddleProxyLimits {
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub(super) struct EffectiveUserIpPolicyLimits {
|
||||
pub(super) global_each: usize,
|
||||
pub(super) mode: &'static str,
|
||||
pub(super) window_secs: u64,
|
||||
}
|
||||
@@ -127,7 +128,8 @@ pub(super) fn build_system_info_data(
|
||||
.runtime_state
|
||||
.last_config_reload_epoch_secs
|
||||
.load(Ordering::Relaxed);
|
||||
let last_config_reload_epoch_secs = (last_reload_epoch_secs > 0).then_some(last_reload_epoch_secs);
|
||||
let last_config_reload_epoch_secs =
|
||||
(last_reload_epoch_secs > 0).then_some(last_reload_epoch_secs);
|
||||
|
||||
let git_commit = option_env!("TELEMT_GIT_COMMIT")
|
||||
.or(option_env!("VERGEN_GIT_SHA"))
|
||||
@@ -152,7 +154,10 @@ pub(super) fn build_system_info_data(
|
||||
uptime_seconds: shared.stats.uptime_secs(),
|
||||
config_path: shared.config_path.display().to_string(),
|
||||
config_hash: revision.to_string(),
|
||||
config_reload_count: shared.runtime_state.config_reload_count.load(Ordering::Relaxed),
|
||||
config_reload_count: shared
|
||||
.runtime_state
|
||||
.config_reload_count
|
||||
.load(Ordering::Relaxed),
|
||||
last_config_reload_epoch_secs,
|
||||
}
|
||||
}
|
||||
@@ -232,9 +237,7 @@ pub(super) fn build_limits_effective_data(cfg: &ProxyConfig) -> EffectiveLimitsD
|
||||
adaptive_floor_writers_per_core_total: cfg
|
||||
.general
|
||||
.me_adaptive_floor_writers_per_core_total,
|
||||
adaptive_floor_cpu_cores_override: cfg
|
||||
.general
|
||||
.me_adaptive_floor_cpu_cores_override,
|
||||
adaptive_floor_cpu_cores_override: cfg.general.me_adaptive_floor_cpu_cores_override,
|
||||
adaptive_floor_max_extra_writers_single_per_core: cfg
|
||||
.general
|
||||
.me_adaptive_floor_max_extra_writers_single_per_core,
|
||||
@@ -262,6 +265,7 @@ pub(super) fn build_limits_effective_data(cfg: &ProxyConfig) -> EffectiveLimitsD
|
||||
me2dc_fallback: cfg.general.me2dc_fallback,
|
||||
},
|
||||
user_ip_policy: EffectiveUserIpPolicyLimits {
|
||||
global_each: cfg.access.user_max_unique_ips_global_each,
|
||||
mode: user_max_unique_ips_mode_label(cfg.access.user_max_unique_ips_mode),
|
||||
window_secs: cfg.access.user_max_unique_ips_window_secs,
|
||||
},
|
||||
|
||||
@@ -46,7 +46,9 @@ pub(super) async fn create_user(
|
||||
None => random_user_secret(),
|
||||
};
|
||||
|
||||
if let Some(ad_tag) = body.user_ad_tag.as_ref() && !is_valid_ad_tag(ad_tag) {
|
||||
if let Some(ad_tag) = body.user_ad_tag.as_ref()
|
||||
&& !is_valid_ad_tag(ad_tag)
|
||||
{
|
||||
return Err(ApiFailure::bad_request(
|
||||
"user_ad_tag must be exactly 32 hex characters",
|
||||
));
|
||||
@@ -65,12 +67,18 @@ pub(super) async fn create_user(
|
||||
));
|
||||
}
|
||||
|
||||
cfg.access.users.insert(body.username.clone(), secret.clone());
|
||||
cfg.access
|
||||
.users
|
||||
.insert(body.username.clone(), secret.clone());
|
||||
if let Some(ad_tag) = body.user_ad_tag {
|
||||
cfg.access.user_ad_tags.insert(body.username.clone(), ad_tag);
|
||||
cfg.access
|
||||
.user_ad_tags
|
||||
.insert(body.username.clone(), ad_tag);
|
||||
}
|
||||
if let Some(limit) = body.max_tcp_conns {
|
||||
cfg.access.user_max_tcp_conns.insert(body.username.clone(), limit);
|
||||
cfg.access
|
||||
.user_max_tcp_conns
|
||||
.insert(body.username.clone(), limit);
|
||||
}
|
||||
if let Some(expiration) = expiration {
|
||||
cfg.access
|
||||
@@ -78,7 +86,9 @@ pub(super) async fn create_user(
|
||||
.insert(body.username.clone(), expiration);
|
||||
}
|
||||
if let Some(quota) = body.data_quota_bytes {
|
||||
cfg.access.user_data_quota.insert(body.username.clone(), quota);
|
||||
cfg.access
|
||||
.user_data_quota
|
||||
.insert(body.username.clone(), quota);
|
||||
}
|
||||
|
||||
let updated_limit = body.max_unique_ips;
|
||||
@@ -108,11 +118,15 @@ pub(super) async fn create_user(
|
||||
touched_sections.push(AccessSection::UserMaxUniqueIps);
|
||||
}
|
||||
|
||||
let revision = save_access_sections_to_disk(&shared.config_path, &cfg, &touched_sections).await?;
|
||||
let revision =
|
||||
save_access_sections_to_disk(&shared.config_path, &cfg, &touched_sections).await?;
|
||||
drop(_guard);
|
||||
|
||||
if let Some(limit) = updated_limit {
|
||||
shared.ip_tracker.set_user_limit(&body.username, limit).await;
|
||||
shared
|
||||
.ip_tracker
|
||||
.set_user_limit(&body.username, limit)
|
||||
.await;
|
||||
}
|
||||
let (detected_ip_v4, detected_ip_v6) = shared.detected_link_ips();
|
||||
|
||||
@@ -140,12 +154,7 @@ pub(super) async fn create_user(
|
||||
recent_unique_ips: 0,
|
||||
recent_unique_ips_list: Vec::new(),
|
||||
total_octets: 0,
|
||||
links: build_user_links(
|
||||
&cfg,
|
||||
&secret,
|
||||
detected_ip_v4,
|
||||
detected_ip_v6,
|
||||
),
|
||||
links: build_user_links(&cfg, &secret, detected_ip_v4, detected_ip_v6),
|
||||
});
|
||||
|
||||
Ok((CreateUserResponse { user, secret }, revision))
|
||||
@@ -157,12 +166,16 @@ pub(super) async fn patch_user(
|
||||
expected_revision: Option<String>,
|
||||
shared: &ApiShared,
|
||||
) -> Result<(UserInfo, String), ApiFailure> {
|
||||
if let Some(secret) = body.secret.as_ref() && !is_valid_user_secret(secret) {
|
||||
if let Some(secret) = body.secret.as_ref()
|
||||
&& !is_valid_user_secret(secret)
|
||||
{
|
||||
return Err(ApiFailure::bad_request(
|
||||
"secret must be exactly 32 hex characters",
|
||||
));
|
||||
}
|
||||
if let Some(ad_tag) = body.user_ad_tag.as_ref() && !is_valid_ad_tag(ad_tag) {
|
||||
if let Some(ad_tag) = body.user_ad_tag.as_ref()
|
||||
&& !is_valid_ad_tag(ad_tag)
|
||||
{
|
||||
return Err(ApiFailure::bad_request(
|
||||
"user_ad_tag must be exactly 32 hex characters",
|
||||
));
|
||||
@@ -187,10 +200,14 @@ pub(super) async fn patch_user(
|
||||
cfg.access.user_ad_tags.insert(user.to_string(), ad_tag);
|
||||
}
|
||||
if let Some(limit) = body.max_tcp_conns {
|
||||
cfg.access.user_max_tcp_conns.insert(user.to_string(), limit);
|
||||
cfg.access
|
||||
.user_max_tcp_conns
|
||||
.insert(user.to_string(), limit);
|
||||
}
|
||||
if let Some(expiration) = expiration {
|
||||
cfg.access.user_expirations.insert(user.to_string(), expiration);
|
||||
cfg.access
|
||||
.user_expirations
|
||||
.insert(user.to_string(), expiration);
|
||||
}
|
||||
if let Some(quota) = body.data_quota_bytes {
|
||||
cfg.access.user_data_quota.insert(user.to_string(), quota);
|
||||
@@ -198,7 +215,9 @@ pub(super) async fn patch_user(
|
||||
|
||||
let mut updated_limit = None;
|
||||
if let Some(limit) = body.max_unique_ips {
|
||||
cfg.access.user_max_unique_ips.insert(user.to_string(), limit);
|
||||
cfg.access
|
||||
.user_max_unique_ips
|
||||
.insert(user.to_string(), limit);
|
||||
updated_limit = Some(limit);
|
||||
}
|
||||
|
||||
@@ -263,7 +282,8 @@ pub(super) async fn rotate_secret(
|
||||
AccessSection::UserDataQuota,
|
||||
AccessSection::UserMaxUniqueIps,
|
||||
];
|
||||
let revision = save_access_sections_to_disk(&shared.config_path, &cfg, &touched_sections).await?;
|
||||
let revision =
|
||||
save_access_sections_to_disk(&shared.config_path, &cfg, &touched_sections).await?;
|
||||
drop(_guard);
|
||||
|
||||
let (detected_ip_v4, detected_ip_v6) = shared.detected_link_ips();
|
||||
@@ -330,7 +350,8 @@ pub(super) async fn delete_user(
|
||||
AccessSection::UserDataQuota,
|
||||
AccessSection::UserMaxUniqueIps,
|
||||
];
|
||||
let revision = save_access_sections_to_disk(&shared.config_path, &cfg, &touched_sections).await?;
|
||||
let revision =
|
||||
save_access_sections_to_disk(&shared.config_path, &cfg, &touched_sections).await?;
|
||||
drop(_guard);
|
||||
shared.ip_tracker.remove_user_limit(user).await;
|
||||
shared.ip_tracker.clear_user_ips(user).await;
|
||||
@@ -365,12 +386,7 @@ pub(super) async fn users_from_config(
|
||||
.users
|
||||
.get(&username)
|
||||
.map(|secret| {
|
||||
build_user_links(
|
||||
cfg,
|
||||
secret,
|
||||
startup_detected_ip_v4,
|
||||
startup_detected_ip_v6,
|
||||
)
|
||||
build_user_links(cfg, secret, startup_detected_ip_v4, startup_detected_ip_v6)
|
||||
})
|
||||
.unwrap_or(UserLinks {
|
||||
classic: Vec::new(),
|
||||
@@ -386,7 +402,14 @@ pub(super) async fn users_from_config(
|
||||
.get(&username)
|
||||
.map(chrono::DateTime::<chrono::Utc>::to_rfc3339),
|
||||
data_quota_bytes: cfg.access.user_data_quota.get(&username).copied(),
|
||||
max_unique_ips: cfg.access.user_max_unique_ips.get(&username).copied(),
|
||||
max_unique_ips: cfg
|
||||
.access
|
||||
.user_max_unique_ips
|
||||
.get(&username)
|
||||
.copied()
|
||||
.filter(|limit| *limit > 0)
|
||||
.or((cfg.access.user_max_unique_ips_global_each > 0)
|
||||
.then_some(cfg.access.user_max_unique_ips_global_each)),
|
||||
current_connections: stats.get_user_curr_connects(&username),
|
||||
active_unique_ips: active_ip_list.len(),
|
||||
active_unique_ips_list: active_ip_list,
|
||||
@@ -472,11 +495,11 @@ fn resolve_link_hosts(
|
||||
push_unique_host(&mut hosts, host);
|
||||
continue;
|
||||
}
|
||||
if let Some(ip) = listener.announce_ip {
|
||||
if !ip.is_unspecified() {
|
||||
push_unique_host(&mut hosts, &ip.to_string());
|
||||
continue;
|
||||
}
|
||||
if let Some(ip) = listener.announce_ip
|
||||
&& !ip.is_unspecified()
|
||||
{
|
||||
push_unique_host(&mut hosts, &ip.to_string());
|
||||
continue;
|
||||
}
|
||||
if listener.ip.is_unspecified() {
|
||||
let detected_ip = if listener.ip.is_ipv4() {
|
||||
|
||||
69
src/cli.rs
69
src/cli.rs
@@ -1,9 +1,9 @@
|
||||
//! CLI commands: --init (fire-and-forget setup)
|
||||
|
||||
use rand::RngExt;
|
||||
use std::fs;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::Command;
|
||||
use rand::Rng;
|
||||
|
||||
/// Options for the init command
|
||||
pub struct InitOptions {
|
||||
@@ -35,10 +35,10 @@ pub fn parse_init_args(args: &[String]) -> Option<InitOptions> {
|
||||
if !args.iter().any(|a| a == "--init") {
|
||||
return None;
|
||||
}
|
||||
|
||||
|
||||
let mut opts = InitOptions::default();
|
||||
let mut i = 0;
|
||||
|
||||
|
||||
while i < args.len() {
|
||||
match args[i].as_str() {
|
||||
"--port" => {
|
||||
@@ -78,7 +78,7 @@ pub fn parse_init_args(args: &[String]) -> Option<InitOptions> {
|
||||
}
|
||||
i += 1;
|
||||
}
|
||||
|
||||
|
||||
Some(opts)
|
||||
}
|
||||
|
||||
@@ -86,7 +86,7 @@ pub fn parse_init_args(args: &[String]) -> Option<InitOptions> {
|
||||
pub fn run_init(opts: InitOptions) -> Result<(), Box<dyn std::error::Error>> {
|
||||
eprintln!("[telemt] Fire-and-forget setup");
|
||||
eprintln!();
|
||||
|
||||
|
||||
// 1. Generate or validate secret
|
||||
let secret = match opts.secret {
|
||||
Some(s) => {
|
||||
@@ -98,28 +98,28 @@ pub fn run_init(opts: InitOptions) -> Result<(), Box<dyn std::error::Error>> {
|
||||
}
|
||||
None => generate_secret(),
|
||||
};
|
||||
|
||||
|
||||
eprintln!("[+] Secret: {}", secret);
|
||||
eprintln!("[+] User: {}", opts.username);
|
||||
eprintln!("[+] Port: {}", opts.port);
|
||||
eprintln!("[+] Domain: {}", opts.domain);
|
||||
|
||||
|
||||
// 2. Create config directory
|
||||
fs::create_dir_all(&opts.config_dir)?;
|
||||
let config_path = opts.config_dir.join("config.toml");
|
||||
|
||||
|
||||
// 3. Write config
|
||||
let config_content = generate_config(&opts.username, &secret, opts.port, &opts.domain);
|
||||
fs::write(&config_path, &config_content)?;
|
||||
eprintln!("[+] Config written to {}", config_path.display());
|
||||
|
||||
|
||||
// 4. Write systemd unit
|
||||
let exe_path = std::env::current_exe()
|
||||
.unwrap_or_else(|_| PathBuf::from("/usr/local/bin/telemt"));
|
||||
|
||||
let exe_path =
|
||||
std::env::current_exe().unwrap_or_else(|_| PathBuf::from("/usr/local/bin/telemt"));
|
||||
|
||||
let unit_path = Path::new("/etc/systemd/system/telemt.service");
|
||||
let unit_content = generate_systemd_unit(&exe_path, &config_path);
|
||||
|
||||
|
||||
match fs::write(unit_path, &unit_content) {
|
||||
Ok(()) => {
|
||||
eprintln!("[+] Systemd unit written to {}", unit_path.display());
|
||||
@@ -128,31 +128,31 @@ pub fn run_init(opts: InitOptions) -> Result<(), Box<dyn std::error::Error>> {
|
||||
eprintln!("[!] Cannot write systemd unit (run as root?): {}", e);
|
||||
eprintln!("[!] Manual unit file content:");
|
||||
eprintln!("{}", unit_content);
|
||||
|
||||
|
||||
// Still print links and config
|
||||
print_links(&opts.username, &secret, opts.port, &opts.domain);
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// 5. Reload systemd
|
||||
run_cmd("systemctl", &["daemon-reload"]);
|
||||
|
||||
|
||||
// 6. Enable service
|
||||
run_cmd("systemctl", &["enable", "telemt.service"]);
|
||||
eprintln!("[+] Service enabled");
|
||||
|
||||
|
||||
// 7. Start service (unless --no-start)
|
||||
if !opts.no_start {
|
||||
run_cmd("systemctl", &["start", "telemt.service"]);
|
||||
eprintln!("[+] Service started");
|
||||
|
||||
|
||||
// Brief delay then check status
|
||||
std::thread::sleep(std::time::Duration::from_secs(1));
|
||||
let status = Command::new("systemctl")
|
||||
.args(["is-active", "telemt.service"])
|
||||
.output();
|
||||
|
||||
|
||||
match status {
|
||||
Ok(out) if out.status.success() => {
|
||||
eprintln!("[+] Service is running");
|
||||
@@ -166,12 +166,12 @@ pub fn run_init(opts: InitOptions) -> Result<(), Box<dyn std::error::Error>> {
|
||||
eprintln!("[+] Service not started (--no-start)");
|
||||
eprintln!("[+] Start manually: systemctl start telemt.service");
|
||||
}
|
||||
|
||||
|
||||
eprintln!();
|
||||
|
||||
|
||||
// 8. Print links
|
||||
print_links(&opts.username, &secret, opts.port, &opts.domain);
|
||||
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -183,7 +183,7 @@ fn generate_secret() -> String {
|
||||
|
||||
fn generate_config(username: &str, secret: &str, port: u16, domain: &str) -> String {
|
||||
format!(
|
||||
r#"# Telemt MTProxy — auto-generated config
|
||||
r#"# Telemt MTProxy — auto-generated config
|
||||
# Re-run `telemt --init` to regenerate
|
||||
|
||||
show_link = ["{username}"]
|
||||
@@ -198,8 +198,15 @@ desync_all_full = false
|
||||
update_every = 43200
|
||||
hardswap = false
|
||||
me_pool_drain_ttl_secs = 90
|
||||
me_instadrain = false
|
||||
me_pool_drain_threshold = 32
|
||||
me_pool_drain_soft_evict_grace_secs = 10
|
||||
me_pool_drain_soft_evict_per_writer = 2
|
||||
me_pool_drain_soft_evict_budget_per_core = 16
|
||||
me_pool_drain_soft_evict_cooldown_ms = 1000
|
||||
me_bind_stale_mode = "never"
|
||||
me_pool_min_fresh_ratio = 0.8
|
||||
me_reinit_drain_timeout_secs = 120
|
||||
me_reinit_drain_timeout_secs = 90
|
||||
|
||||
[network]
|
||||
ipv4 = true
|
||||
@@ -239,7 +246,7 @@ tls_full_cert_ttl_secs = 90
|
||||
|
||||
[access]
|
||||
replay_check_len = 65536
|
||||
replay_window_secs = 1800
|
||||
replay_window_secs = 120
|
||||
ignore_time_skew = false
|
||||
|
||||
[access.users]
|
||||
@@ -259,9 +266,9 @@ weight = 10
|
||||
|
||||
fn generate_systemd_unit(exe_path: &Path, config_path: &Path) -> String {
|
||||
format!(
|
||||
r#"[Unit]
|
||||
r#"[Unit]
|
||||
Description=Telemt MTProxy
|
||||
Documentation=https://github.com/nicepkg/telemt
|
||||
Documentation=https://github.com/telemt/telemt
|
||||
After=network-online.target
|
||||
Wants=network-online.target
|
||||
|
||||
@@ -302,11 +309,13 @@ fn run_cmd(cmd: &str, args: &[&str]) {
|
||||
|
||||
fn print_links(username: &str, secret: &str, port: u16, domain: &str) {
|
||||
let domain_hex = hex::encode(domain);
|
||||
|
||||
|
||||
println!("=== Proxy Links ===");
|
||||
println!("[{}]", username);
|
||||
println!(" EE-TLS: tg://proxy?server=YOUR_SERVER_IP&port={}&secret=ee{}{}",
|
||||
port, secret, domain_hex);
|
||||
println!(
|
||||
" EE-TLS: tg://proxy?server=YOUR_SERVER_IP&port={}&secret=ee{}{}",
|
||||
port, secret, domain_hex
|
||||
);
|
||||
println!();
|
||||
println!("Replace YOUR_SERVER_IP with your server's public IP.");
|
||||
println!("The proxy will auto-detect and display the correct link on startup.");
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use std::collections::HashMap;
|
||||
use ipnetwork::IpNetwork;
|
||||
use serde::Deserialize;
|
||||
use std::collections::HashMap;
|
||||
|
||||
// Helper defaults kept private to the config module.
|
||||
const DEFAULT_NETWORK_IPV6: Option<bool> = Some(false);
|
||||
@@ -27,8 +27,8 @@ const DEFAULT_ME_C2ME_CHANNEL_CAPACITY: usize = 1024;
|
||||
const DEFAULT_ME_READER_ROUTE_DATA_WAIT_MS: u64 = 2;
|
||||
const DEFAULT_ME_D2C_FLUSH_BATCH_MAX_FRAMES: usize = 32;
|
||||
const DEFAULT_ME_D2C_FLUSH_BATCH_MAX_BYTES: usize = 128 * 1024;
|
||||
const DEFAULT_ME_D2C_FLUSH_BATCH_MAX_DELAY_US: u64 = 1500;
|
||||
const DEFAULT_ME_D2C_ACK_FLUSH_IMMEDIATE: bool = false;
|
||||
const DEFAULT_ME_D2C_FLUSH_BATCH_MAX_DELAY_US: u64 = 500;
|
||||
const DEFAULT_ME_D2C_ACK_FLUSH_IMMEDIATE: bool = true;
|
||||
const DEFAULT_DIRECT_RELAY_COPY_BUF_C2S_BYTES: usize = 64 * 1024;
|
||||
const DEFAULT_DIRECT_RELAY_COPY_BUF_S2C_BYTES: usize = 256 * 1024;
|
||||
const DEFAULT_ME_WRITER_PICK_SAMPLE_SIZE: u8 = 3;
|
||||
@@ -36,7 +36,16 @@ const DEFAULT_ME_HEALTH_INTERVAL_MS_UNHEALTHY: u64 = 1000;
|
||||
const DEFAULT_ME_HEALTH_INTERVAL_MS_HEALTHY: u64 = 3000;
|
||||
const DEFAULT_ME_ADMISSION_POLL_MS: u64 = 1000;
|
||||
const DEFAULT_ME_WARN_RATE_LIMIT_MS: u64 = 5000;
|
||||
const DEFAULT_ME_ROUTE_HYBRID_MAX_WAIT_MS: u64 = 3000;
|
||||
const DEFAULT_ME_ROUTE_BLOCKING_SEND_TIMEOUT_MS: u64 = 250;
|
||||
const DEFAULT_ME_C2ME_SEND_TIMEOUT_MS: u64 = 4000;
|
||||
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_ENABLED: bool = true;
|
||||
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_GRACE_SECS: u64 = 10;
|
||||
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_PER_WRITER: u8 = 2;
|
||||
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_BUDGET_PER_CORE: u16 = 16;
|
||||
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_COOLDOWN_MS: u64 = 1000;
|
||||
const DEFAULT_USER_MAX_UNIQUE_IPS_WINDOW_SECS: u64 = 30;
|
||||
const DEFAULT_ACCEPT_PERMIT_TIMEOUT_MS: u64 = 250;
|
||||
const DEFAULT_UPSTREAM_CONNECT_RETRY_ATTEMPTS: u32 = 2;
|
||||
const DEFAULT_UPSTREAM_UNHEALTHY_FAIL_THRESHOLD: u32 = 5;
|
||||
const DEFAULT_UPSTREAM_CONNECT_BUDGET_MS: u64 = 3000;
|
||||
@@ -56,6 +65,10 @@ pub(crate) fn default_tls_domain() -> String {
|
||||
"petrovich.ru".to_string()
|
||||
}
|
||||
|
||||
pub(crate) fn default_tls_fetch_scope() -> String {
|
||||
String::new()
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_port() -> u16 {
|
||||
443
|
||||
}
|
||||
@@ -73,23 +86,41 @@ pub(crate) fn default_replay_check_len() -> usize {
|
||||
}
|
||||
|
||||
pub(crate) fn default_replay_window_secs() -> u64 {
|
||||
1800
|
||||
// Keep replay cache TTL tight by default to reduce replay surface.
|
||||
// Deployments with higher RTT or longer reconnect jitter can override this in config.
|
||||
120
|
||||
}
|
||||
|
||||
pub(crate) fn default_handshake_timeout() -> u64 {
|
||||
30
|
||||
}
|
||||
|
||||
pub(crate) fn default_relay_idle_policy_v2_enabled() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_relay_client_idle_soft_secs() -> u64 {
|
||||
120
|
||||
}
|
||||
|
||||
pub(crate) fn default_relay_client_idle_hard_secs() -> u64 {
|
||||
360
|
||||
}
|
||||
|
||||
pub(crate) fn default_relay_idle_grace_after_downstream_activity_secs() -> u64 {
|
||||
30
|
||||
}
|
||||
|
||||
pub(crate) fn default_connect_timeout() -> u64 {
|
||||
10
|
||||
}
|
||||
|
||||
pub(crate) fn default_keepalive() -> u64 {
|
||||
60
|
||||
15
|
||||
}
|
||||
|
||||
pub(crate) fn default_ack_timeout() -> u64 {
|
||||
300
|
||||
90
|
||||
}
|
||||
pub(crate) fn default_me_one_retry() -> u8 {
|
||||
12
|
||||
@@ -112,10 +143,7 @@ pub(crate) fn default_weight() -> u16 {
|
||||
}
|
||||
|
||||
pub(crate) fn default_metrics_whitelist() -> Vec<IpNetwork> {
|
||||
vec![
|
||||
"127.0.0.1/32".parse().unwrap(),
|
||||
"::1/128".parse().unwrap(),
|
||||
]
|
||||
vec!["127.0.0.1/32".parse().unwrap(), "::1/128".parse().unwrap()]
|
||||
}
|
||||
|
||||
pub(crate) fn default_api_listen() -> String {
|
||||
@@ -138,15 +166,31 @@ pub(crate) fn default_api_minimal_runtime_cache_ttl_ms() -> u64 {
|
||||
1000
|
||||
}
|
||||
|
||||
pub(crate) fn default_api_runtime_edge_enabled() -> bool { false }
|
||||
pub(crate) fn default_api_runtime_edge_cache_ttl_ms() -> u64 { 1000 }
|
||||
pub(crate) fn default_api_runtime_edge_top_n() -> usize { 10 }
|
||||
pub(crate) fn default_api_runtime_edge_events_capacity() -> usize { 256 }
|
||||
pub(crate) fn default_api_runtime_edge_enabled() -> bool {
|
||||
false
|
||||
}
|
||||
pub(crate) fn default_api_runtime_edge_cache_ttl_ms() -> u64 {
|
||||
1000
|
||||
}
|
||||
pub(crate) fn default_api_runtime_edge_top_n() -> usize {
|
||||
10
|
||||
}
|
||||
pub(crate) fn default_api_runtime_edge_events_capacity() -> usize {
|
||||
256
|
||||
}
|
||||
|
||||
pub(crate) fn default_proxy_protocol_header_timeout_ms() -> u64 {
|
||||
500
|
||||
}
|
||||
|
||||
pub(crate) fn default_server_max_connections() -> u32 {
|
||||
10_000
|
||||
}
|
||||
|
||||
pub(crate) fn default_accept_permit_timeout_ms() -> u64 {
|
||||
DEFAULT_ACCEPT_PERMIT_TIMEOUT_MS
|
||||
}
|
||||
|
||||
pub(crate) fn default_prefer_4() -> u8 {
|
||||
4
|
||||
}
|
||||
@@ -371,6 +415,18 @@ pub(crate) fn default_me_warn_rate_limit_ms() -> u64 {
|
||||
DEFAULT_ME_WARN_RATE_LIMIT_MS
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_route_hybrid_max_wait_ms() -> u64 {
|
||||
DEFAULT_ME_ROUTE_HYBRID_MAX_WAIT_MS
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_route_blocking_send_timeout_ms() -> u64 {
|
||||
DEFAULT_ME_ROUTE_BLOCKING_SEND_TIMEOUT_MS
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_c2me_send_timeout_ms() -> u64 {
|
||||
DEFAULT_ME_C2ME_SEND_TIMEOUT_MS
|
||||
}
|
||||
|
||||
pub(crate) fn default_upstream_connect_retry_attempts() -> u32 {
|
||||
DEFAULT_UPSTREAM_CONNECT_RETRY_ATTEMPTS
|
||||
}
|
||||
@@ -452,17 +508,53 @@ pub(crate) fn default_tls_full_cert_ttl_secs() -> u64 {
|
||||
}
|
||||
|
||||
pub(crate) fn default_server_hello_delay_min_ms() -> u64 {
|
||||
0
|
||||
8
|
||||
}
|
||||
|
||||
pub(crate) fn default_server_hello_delay_max_ms() -> u64 {
|
||||
0
|
||||
24
|
||||
}
|
||||
|
||||
pub(crate) fn default_alpn_enforce() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_shape_hardening() -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_shape_hardening_aggressive_mode() -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_shape_bucket_floor_bytes() -> usize {
|
||||
512
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_shape_bucket_cap_bytes() -> usize {
|
||||
4096
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_shape_above_cap_blur() -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_shape_above_cap_blur_max_bytes() -> usize {
|
||||
512
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_timing_normalization_enabled() -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_timing_normalization_floor_ms() -> u64 {
|
||||
0
|
||||
}
|
||||
|
||||
pub(crate) fn default_mask_timing_normalization_ceiling_ms() -> u64 {
|
||||
0
|
||||
}
|
||||
|
||||
pub(crate) fn default_stun_servers() -> Vec<String> {
|
||||
vec![
|
||||
"stun.l.google.com:5349".to_string(),
|
||||
@@ -577,13 +669,41 @@ pub(crate) fn default_proxy_secret_len_max() -> usize {
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_reinit_drain_timeout_secs() -> u64 {
|
||||
120
|
||||
90
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_pool_drain_ttl_secs() -> u64 {
|
||||
90
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_instadrain() -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_pool_drain_threshold() -> u64 {
|
||||
32
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_pool_drain_soft_evict_enabled() -> bool {
|
||||
DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_ENABLED
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_pool_drain_soft_evict_grace_secs() -> u64 {
|
||||
DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_GRACE_SECS
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_pool_drain_soft_evict_per_writer() -> u8 {
|
||||
DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_PER_WRITER
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_pool_drain_soft_evict_budget_per_core() -> u16 {
|
||||
DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_BUDGET_PER_CORE
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_pool_drain_soft_evict_cooldown_ms() -> u64 {
|
||||
DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_COOLDOWN_MS
|
||||
}
|
||||
|
||||
pub(crate) fn default_me_bind_stale_ttl_secs() -> u64 {
|
||||
default_me_pool_drain_ttl_secs()
|
||||
}
|
||||
@@ -635,6 +755,10 @@ pub(crate) fn default_user_max_unique_ips_window_secs() -> u64 {
|
||||
DEFAULT_USER_MAX_UNIQUE_IPS_WINDOW_SECS
|
||||
}
|
||||
|
||||
pub(crate) fn default_user_max_unique_ips_global_each() -> usize {
|
||||
0
|
||||
}
|
||||
|
||||
// Custom deserializer helpers
|
||||
|
||||
#[derive(Deserialize)]
|
||||
|
||||
@@ -21,35 +21,40 @@
|
||||
//! `network.*`, `use_middle_proxy`) are **not** applied; a warning is emitted.
|
||||
//! Non-hot changes are never mixed into the runtime config snapshot.
|
||||
|
||||
use std::collections::BTreeSet;
|
||||
use std::net::IpAddr;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::{Arc, RwLock as StdRwLock};
|
||||
use std::time::Duration;
|
||||
|
||||
use notify::{EventKind, RecursiveMode, Watcher, recommended_watcher};
|
||||
use tokio::sync::{mpsc, watch};
|
||||
use tracing::{error, info, warn};
|
||||
|
||||
use super::load::{LoadedConfig, ProxyConfig};
|
||||
use crate::config::{
|
||||
LogLevel, MeBindStaleMode, MeFloorMode, MeSocksKdfPolicy, MeTelemetryLevel,
|
||||
MeWriterPickMode,
|
||||
LogLevel, MeBindStaleMode, MeFloorMode, MeSocksKdfPolicy, MeTelemetryLevel, MeWriterPickMode,
|
||||
};
|
||||
use super::load::ProxyConfig;
|
||||
|
||||
const HOT_RELOAD_DEBOUNCE: Duration = Duration::from_millis(50);
|
||||
|
||||
// ── Hot fields ────────────────────────────────────────────────────────────────
|
||||
|
||||
/// Fields that are safe to swap without restarting listeners.
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
pub struct HotFields {
|
||||
pub log_level: LogLevel,
|
||||
pub ad_tag: Option<String>,
|
||||
pub dns_overrides: Vec<String>,
|
||||
pub desync_all_full: bool,
|
||||
pub update_every_secs: u64,
|
||||
pub me_reinit_every_secs: u64,
|
||||
pub me_reinit_singleflight: bool,
|
||||
pub log_level: LogLevel,
|
||||
pub ad_tag: Option<String>,
|
||||
pub dns_overrides: Vec<String>,
|
||||
pub desync_all_full: bool,
|
||||
pub update_every_secs: u64,
|
||||
pub me_reinit_every_secs: u64,
|
||||
pub me_reinit_singleflight: bool,
|
||||
pub me_reinit_coalesce_window_ms: u64,
|
||||
pub hardswap: bool,
|
||||
pub me_pool_drain_ttl_secs: u64,
|
||||
pub hardswap: bool,
|
||||
pub me_pool_drain_ttl_secs: u64,
|
||||
pub me_instadrain: bool,
|
||||
pub me_pool_drain_threshold: u64,
|
||||
pub me_pool_min_fresh_ratio: f32,
|
||||
pub me_reinit_drain_timeout_secs: u64,
|
||||
pub me_hardswap_warmup_delay_min_ms: u64,
|
||||
@@ -107,12 +112,13 @@ pub struct HotFields {
|
||||
pub me_health_interval_ms_healthy: u64,
|
||||
pub me_admission_poll_ms: u64,
|
||||
pub me_warn_rate_limit_ms: u64,
|
||||
pub users: std::collections::HashMap<String, String>,
|
||||
pub user_ad_tags: std::collections::HashMap<String, String>,
|
||||
pub user_max_tcp_conns: std::collections::HashMap<String, usize>,
|
||||
pub user_expirations: std::collections::HashMap<String, chrono::DateTime<chrono::Utc>>,
|
||||
pub user_data_quota: std::collections::HashMap<String, u64>,
|
||||
pub user_max_unique_ips: std::collections::HashMap<String, usize>,
|
||||
pub users: std::collections::HashMap<String, String>,
|
||||
pub user_ad_tags: std::collections::HashMap<String, String>,
|
||||
pub user_max_tcp_conns: std::collections::HashMap<String, usize>,
|
||||
pub user_expirations: std::collections::HashMap<String, chrono::DateTime<chrono::Utc>>,
|
||||
pub user_data_quota: std::collections::HashMap<String, u64>,
|
||||
pub user_max_unique_ips: std::collections::HashMap<String, usize>,
|
||||
pub user_max_unique_ips_global_each: usize,
|
||||
pub user_max_unique_ips_mode: crate::config::UserMaxUniqueIpsMode,
|
||||
pub user_max_unique_ips_window_secs: u64,
|
||||
}
|
||||
@@ -120,16 +126,18 @@ pub struct HotFields {
|
||||
impl HotFields {
|
||||
pub fn from_config(cfg: &ProxyConfig) -> Self {
|
||||
Self {
|
||||
log_level: cfg.general.log_level.clone(),
|
||||
ad_tag: cfg.general.ad_tag.clone(),
|
||||
dns_overrides: cfg.network.dns_overrides.clone(),
|
||||
desync_all_full: cfg.general.desync_all_full,
|
||||
update_every_secs: cfg.general.effective_update_every_secs(),
|
||||
me_reinit_every_secs: cfg.general.me_reinit_every_secs,
|
||||
me_reinit_singleflight: cfg.general.me_reinit_singleflight,
|
||||
log_level: cfg.general.log_level.clone(),
|
||||
ad_tag: cfg.general.ad_tag.clone(),
|
||||
dns_overrides: cfg.network.dns_overrides.clone(),
|
||||
desync_all_full: cfg.general.desync_all_full,
|
||||
update_every_secs: cfg.general.effective_update_every_secs(),
|
||||
me_reinit_every_secs: cfg.general.me_reinit_every_secs,
|
||||
me_reinit_singleflight: cfg.general.me_reinit_singleflight,
|
||||
me_reinit_coalesce_window_ms: cfg.general.me_reinit_coalesce_window_ms,
|
||||
hardswap: cfg.general.hardswap,
|
||||
me_pool_drain_ttl_secs: cfg.general.me_pool_drain_ttl_secs,
|
||||
hardswap: cfg.general.hardswap,
|
||||
me_pool_drain_ttl_secs: cfg.general.me_pool_drain_ttl_secs,
|
||||
me_instadrain: cfg.general.me_instadrain,
|
||||
me_pool_drain_threshold: cfg.general.me_pool_drain_threshold,
|
||||
me_pool_min_fresh_ratio: cfg.general.me_pool_min_fresh_ratio,
|
||||
me_reinit_drain_timeout_secs: cfg.general.me_reinit_drain_timeout_secs,
|
||||
me_hardswap_warmup_delay_min_ms: cfg.general.me_hardswap_warmup_delay_min_ms,
|
||||
@@ -180,15 +188,11 @@ impl HotFields {
|
||||
me_adaptive_floor_min_writers_multi_endpoint: cfg
|
||||
.general
|
||||
.me_adaptive_floor_min_writers_multi_endpoint,
|
||||
me_adaptive_floor_recover_grace_secs: cfg
|
||||
.general
|
||||
.me_adaptive_floor_recover_grace_secs,
|
||||
me_adaptive_floor_recover_grace_secs: cfg.general.me_adaptive_floor_recover_grace_secs,
|
||||
me_adaptive_floor_writers_per_core_total: cfg
|
||||
.general
|
||||
.me_adaptive_floor_writers_per_core_total,
|
||||
me_adaptive_floor_cpu_cores_override: cfg
|
||||
.general
|
||||
.me_adaptive_floor_cpu_cores_override,
|
||||
me_adaptive_floor_cpu_cores_override: cfg.general.me_adaptive_floor_cpu_cores_override,
|
||||
me_adaptive_floor_max_extra_writers_single_per_core: cfg
|
||||
.general
|
||||
.me_adaptive_floor_max_extra_writers_single_per_core,
|
||||
@@ -207,9 +211,15 @@ impl HotFields {
|
||||
me_adaptive_floor_max_warm_writers_global: cfg
|
||||
.general
|
||||
.me_adaptive_floor_max_warm_writers_global,
|
||||
me_route_backpressure_base_timeout_ms: cfg.general.me_route_backpressure_base_timeout_ms,
|
||||
me_route_backpressure_high_timeout_ms: cfg.general.me_route_backpressure_high_timeout_ms,
|
||||
me_route_backpressure_high_watermark_pct: cfg.general.me_route_backpressure_high_watermark_pct,
|
||||
me_route_backpressure_base_timeout_ms: cfg
|
||||
.general
|
||||
.me_route_backpressure_base_timeout_ms,
|
||||
me_route_backpressure_high_timeout_ms: cfg
|
||||
.general
|
||||
.me_route_backpressure_high_timeout_ms,
|
||||
me_route_backpressure_high_watermark_pct: cfg
|
||||
.general
|
||||
.me_route_backpressure_high_watermark_pct,
|
||||
me_reader_route_data_wait_ms: cfg.general.me_reader_route_data_wait_ms,
|
||||
me_d2c_flush_batch_max_frames: cfg.general.me_d2c_flush_batch_max_frames,
|
||||
me_d2c_flush_batch_max_bytes: cfg.general.me_d2c_flush_batch_max_bytes,
|
||||
@@ -221,12 +231,13 @@ impl HotFields {
|
||||
me_health_interval_ms_healthy: cfg.general.me_health_interval_ms_healthy,
|
||||
me_admission_poll_ms: cfg.general.me_admission_poll_ms,
|
||||
me_warn_rate_limit_ms: cfg.general.me_warn_rate_limit_ms,
|
||||
users: cfg.access.users.clone(),
|
||||
user_ad_tags: cfg.access.user_ad_tags.clone(),
|
||||
user_max_tcp_conns: cfg.access.user_max_tcp_conns.clone(),
|
||||
user_expirations: cfg.access.user_expirations.clone(),
|
||||
user_data_quota: cfg.access.user_data_quota.clone(),
|
||||
user_max_unique_ips: cfg.access.user_max_unique_ips.clone(),
|
||||
users: cfg.access.users.clone(),
|
||||
user_ad_tags: cfg.access.user_ad_tags.clone(),
|
||||
user_max_tcp_conns: cfg.access.user_max_tcp_conns.clone(),
|
||||
user_expirations: cfg.access.user_expirations.clone(),
|
||||
user_data_quota: cfg.access.user_data_quota.clone(),
|
||||
user_max_unique_ips: cfg.access.user_max_unique_ips.clone(),
|
||||
user_max_unique_ips_global_each: cfg.access.user_max_unique_ips_global_each,
|
||||
user_max_unique_ips_mode: cfg.access.user_max_unique_ips_mode,
|
||||
user_max_unique_ips_window_secs: cfg.access.user_max_unique_ips_window_secs,
|
||||
}
|
||||
@@ -287,6 +298,129 @@ fn listeners_equal(
|
||||
})
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Default, PartialEq, Eq)]
|
||||
struct WatchManifest {
|
||||
files: BTreeSet<PathBuf>,
|
||||
dirs: BTreeSet<PathBuf>,
|
||||
}
|
||||
|
||||
impl WatchManifest {
|
||||
fn from_source_files(source_files: &[PathBuf]) -> Self {
|
||||
let mut files = BTreeSet::new();
|
||||
let mut dirs = BTreeSet::new();
|
||||
|
||||
for path in source_files {
|
||||
let normalized = normalize_watch_path(path);
|
||||
files.insert(normalized.clone());
|
||||
if let Some(parent) = normalized.parent() {
|
||||
dirs.insert(parent.to_path_buf());
|
||||
}
|
||||
}
|
||||
|
||||
Self { files, dirs }
|
||||
}
|
||||
|
||||
fn matches_event_paths(&self, event_paths: &[PathBuf]) -> bool {
|
||||
event_paths
|
||||
.iter()
|
||||
.map(|path| normalize_watch_path(path))
|
||||
.any(|path| self.files.contains(&path))
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
struct ReloadState {
|
||||
applied_snapshot_hash: Option<u64>,
|
||||
}
|
||||
|
||||
impl ReloadState {
|
||||
fn new(applied_snapshot_hash: Option<u64>) -> Self {
|
||||
Self {
|
||||
applied_snapshot_hash,
|
||||
}
|
||||
}
|
||||
|
||||
fn is_applied(&self, hash: u64) -> bool {
|
||||
self.applied_snapshot_hash == Some(hash)
|
||||
}
|
||||
|
||||
fn mark_applied(&mut self, hash: u64) {
|
||||
self.applied_snapshot_hash = Some(hash);
|
||||
}
|
||||
}
|
||||
|
||||
fn normalize_watch_path(path: &Path) -> PathBuf {
|
||||
path.canonicalize().unwrap_or_else(|_| {
|
||||
if path.is_absolute() {
|
||||
path.to_path_buf()
|
||||
} else {
|
||||
std::env::current_dir()
|
||||
.map(|cwd| cwd.join(path))
|
||||
.unwrap_or_else(|_| path.to_path_buf())
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn sync_watch_paths<W: Watcher>(
|
||||
watcher: &mut W,
|
||||
current: &BTreeSet<PathBuf>,
|
||||
next: &BTreeSet<PathBuf>,
|
||||
recursive_mode: RecursiveMode,
|
||||
kind: &str,
|
||||
) {
|
||||
for path in current.difference(next) {
|
||||
if let Err(e) = watcher.unwatch(path) {
|
||||
warn!(path = %path.display(), error = %e, "config watcher: failed to unwatch {kind}");
|
||||
}
|
||||
}
|
||||
|
||||
for path in next.difference(current) {
|
||||
if let Err(e) = watcher.watch(path, recursive_mode) {
|
||||
warn!(path = %path.display(), error = %e, "config watcher: failed to watch {kind}");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn apply_watch_manifest<W1: Watcher, W2: Watcher>(
|
||||
notify_watcher: Option<&mut W1>,
|
||||
poll_watcher: Option<&mut W2>,
|
||||
manifest_state: &Arc<StdRwLock<WatchManifest>>,
|
||||
next_manifest: WatchManifest,
|
||||
) {
|
||||
let current_manifest = manifest_state
|
||||
.read()
|
||||
.map(|manifest| manifest.clone())
|
||||
.unwrap_or_default();
|
||||
|
||||
if current_manifest == next_manifest {
|
||||
return;
|
||||
}
|
||||
|
||||
if let Some(watcher) = notify_watcher {
|
||||
sync_watch_paths(
|
||||
watcher,
|
||||
¤t_manifest.dirs,
|
||||
&next_manifest.dirs,
|
||||
RecursiveMode::NonRecursive,
|
||||
"config directory",
|
||||
);
|
||||
}
|
||||
|
||||
if let Some(watcher) = poll_watcher {
|
||||
sync_watch_paths(
|
||||
watcher,
|
||||
¤t_manifest.files,
|
||||
&next_manifest.files,
|
||||
RecursiveMode::NonRecursive,
|
||||
"config file",
|
||||
);
|
||||
}
|
||||
|
||||
if let Ok(mut manifest) = manifest_state.write() {
|
||||
*manifest = next_manifest;
|
||||
}
|
||||
}
|
||||
|
||||
fn overlay_hot_fields(old: &ProxyConfig, new: &ProxyConfig) -> ProxyConfig {
|
||||
let mut cfg = old.clone();
|
||||
|
||||
@@ -302,6 +436,8 @@ fn overlay_hot_fields(old: &ProxyConfig, new: &ProxyConfig) -> ProxyConfig {
|
||||
cfg.general.me_reinit_coalesce_window_ms = new.general.me_reinit_coalesce_window_ms;
|
||||
cfg.general.hardswap = new.general.hardswap;
|
||||
cfg.general.me_pool_drain_ttl_secs = new.general.me_pool_drain_ttl_secs;
|
||||
cfg.general.me_instadrain = new.general.me_instadrain;
|
||||
cfg.general.me_pool_drain_threshold = new.general.me_pool_drain_threshold;
|
||||
cfg.general.me_pool_min_fresh_ratio = new.general.me_pool_min_fresh_ratio;
|
||||
cfg.general.me_reinit_drain_timeout_secs = new.general.me_reinit_drain_timeout_secs;
|
||||
cfg.general.me_hardswap_warmup_delay_min_ms = new.general.me_hardswap_warmup_delay_min_ms;
|
||||
@@ -348,10 +484,14 @@ fn overlay_hot_fields(old: &ProxyConfig, new: &ProxyConfig) -> ProxyConfig {
|
||||
new.general.me_adaptive_floor_writers_per_core_total;
|
||||
cfg.general.me_adaptive_floor_cpu_cores_override =
|
||||
new.general.me_adaptive_floor_cpu_cores_override;
|
||||
cfg.general.me_adaptive_floor_max_extra_writers_single_per_core =
|
||||
new.general.me_adaptive_floor_max_extra_writers_single_per_core;
|
||||
cfg.general.me_adaptive_floor_max_extra_writers_multi_per_core =
|
||||
new.general.me_adaptive_floor_max_extra_writers_multi_per_core;
|
||||
cfg.general
|
||||
.me_adaptive_floor_max_extra_writers_single_per_core = new
|
||||
.general
|
||||
.me_adaptive_floor_max_extra_writers_single_per_core;
|
||||
cfg.general
|
||||
.me_adaptive_floor_max_extra_writers_multi_per_core = new
|
||||
.general
|
||||
.me_adaptive_floor_max_extra_writers_multi_per_core;
|
||||
cfg.general.me_adaptive_floor_max_active_writers_per_core =
|
||||
new.general.me_adaptive_floor_max_active_writers_per_core;
|
||||
cfg.general.me_adaptive_floor_max_warm_writers_per_core =
|
||||
@@ -384,6 +524,7 @@ fn overlay_hot_fields(old: &ProxyConfig, new: &ProxyConfig) -> ProxyConfig {
|
||||
cfg.access.user_expirations = new.access.user_expirations.clone();
|
||||
cfg.access.user_data_quota = new.access.user_data_quota.clone();
|
||||
cfg.access.user_max_unique_ips = new.access.user_max_unique_ips.clone();
|
||||
cfg.access.user_max_unique_ips_global_each = new.access.user_max_unique_ips_global_each;
|
||||
cfg.access.user_max_unique_ips_mode = new.access.user_max_unique_ips_mode;
|
||||
cfg.access.user_max_unique_ips_window_secs = new.access.user_max_unique_ips_window_secs;
|
||||
|
||||
@@ -409,8 +550,7 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
|
||||
|| old.server.api.minimal_runtime_cache_ttl_ms
|
||||
!= new.server.api.minimal_runtime_cache_ttl_ms
|
||||
|| old.server.api.runtime_edge_enabled != new.server.api.runtime_edge_enabled
|
||||
|| old.server.api.runtime_edge_cache_ttl_ms
|
||||
!= new.server.api.runtime_edge_cache_ttl_ms
|
||||
|| old.server.api.runtime_edge_cache_ttl_ms != new.server.api.runtime_edge_cache_ttl_ms
|
||||
|| old.server.api.runtime_edge_top_n != new.server.api.runtime_edge_top_n
|
||||
|| old.server.api.runtime_edge_events_capacity
|
||||
!= new.server.api.runtime_edge_events_capacity
|
||||
@@ -432,6 +572,7 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
|
||||
}
|
||||
if old.censorship.tls_domain != new.censorship.tls_domain
|
||||
|| old.censorship.tls_domains != new.censorship.tls_domains
|
||||
|| old.censorship.tls_fetch_scope != new.censorship.tls_fetch_scope
|
||||
|| old.censorship.mask != new.censorship.mask
|
||||
|| old.censorship.mask_host != new.censorship.mask_host
|
||||
|| old.censorship.mask_port != new.censorship.mask_port
|
||||
@@ -445,6 +586,19 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
|
||||
|| old.censorship.tls_full_cert_ttl_secs != new.censorship.tls_full_cert_ttl_secs
|
||||
|| old.censorship.alpn_enforce != new.censorship.alpn_enforce
|
||||
|| old.censorship.mask_proxy_protocol != new.censorship.mask_proxy_protocol
|
||||
|| old.censorship.mask_shape_hardening != new.censorship.mask_shape_hardening
|
||||
|| old.censorship.mask_shape_bucket_floor_bytes
|
||||
!= new.censorship.mask_shape_bucket_floor_bytes
|
||||
|| old.censorship.mask_shape_bucket_cap_bytes != new.censorship.mask_shape_bucket_cap_bytes
|
||||
|| old.censorship.mask_shape_above_cap_blur != new.censorship.mask_shape_above_cap_blur
|
||||
|| old.censorship.mask_shape_above_cap_blur_max_bytes
|
||||
!= new.censorship.mask_shape_above_cap_blur_max_bytes
|
||||
|| old.censorship.mask_timing_normalization_enabled
|
||||
!= new.censorship.mask_timing_normalization_enabled
|
||||
|| old.censorship.mask_timing_normalization_floor_ms
|
||||
!= new.censorship.mask_timing_normalization_floor_ms
|
||||
|| old.censorship.mask_timing_normalization_ceiling_ms
|
||||
!= new.censorship.mask_timing_normalization_ceiling_ms
|
||||
{
|
||||
warned = true;
|
||||
warn!("config reload: censorship settings changed; restart required");
|
||||
@@ -674,6 +828,19 @@ fn log_changes(
|
||||
old_hot.me_pool_drain_ttl_secs, new_hot.me_pool_drain_ttl_secs,
|
||||
);
|
||||
}
|
||||
if old_hot.me_instadrain != new_hot.me_instadrain {
|
||||
info!(
|
||||
"config reload: me_instadrain: {} → {}",
|
||||
old_hot.me_instadrain, new_hot.me_instadrain,
|
||||
);
|
||||
}
|
||||
|
||||
if old_hot.me_pool_drain_threshold != new_hot.me_pool_drain_threshold {
|
||||
info!(
|
||||
"config reload: me_pool_drain_threshold: {} → {}",
|
||||
old_hot.me_pool_drain_threshold, new_hot.me_pool_drain_threshold,
|
||||
);
|
||||
}
|
||||
|
||||
if (old_hot.me_pool_min_fresh_ratio - new_hot.me_pool_min_fresh_ratio).abs() > f32::EPSILON {
|
||||
info!(
|
||||
@@ -707,8 +874,7 @@ fn log_changes(
|
||||
{
|
||||
info!(
|
||||
"config reload: me_bind_stale: mode={:?} ttl={}s",
|
||||
new_hot.me_bind_stale_mode,
|
||||
new_hot.me_bind_stale_ttl_secs
|
||||
new_hot.me_bind_stale_mode, new_hot.me_bind_stale_ttl_secs
|
||||
);
|
||||
}
|
||||
if old_hot.me_secret_atomic_snapshot != new_hot.me_secret_atomic_snapshot
|
||||
@@ -788,8 +954,7 @@ fn log_changes(
|
||||
if old_hot.me_socks_kdf_policy != new_hot.me_socks_kdf_policy {
|
||||
info!(
|
||||
"config reload: me_socks_kdf_policy: {:?} → {:?}",
|
||||
old_hot.me_socks_kdf_policy,
|
||||
new_hot.me_socks_kdf_policy,
|
||||
old_hot.me_socks_kdf_policy, new_hot.me_socks_kdf_policy,
|
||||
);
|
||||
}
|
||||
|
||||
@@ -843,8 +1008,7 @@ fn log_changes(
|
||||
|| old_hot.me_route_backpressure_high_watermark_pct
|
||||
!= new_hot.me_route_backpressure_high_watermark_pct
|
||||
|| old_hot.me_reader_route_data_wait_ms != new_hot.me_reader_route_data_wait_ms
|
||||
|| old_hot.me_health_interval_ms_unhealthy
|
||||
!= new_hot.me_health_interval_ms_unhealthy
|
||||
|| old_hot.me_health_interval_ms_unhealthy != new_hot.me_health_interval_ms_unhealthy
|
||||
|| old_hot.me_health_interval_ms_healthy != new_hot.me_health_interval_ms_healthy
|
||||
|| old_hot.me_admission_poll_ms != new_hot.me_admission_poll_ms
|
||||
|| old_hot.me_warn_rate_limit_ms != new_hot.me_warn_rate_limit_ms
|
||||
@@ -881,19 +1045,27 @@ fn log_changes(
|
||||
}
|
||||
|
||||
if old_hot.users != new_hot.users {
|
||||
let mut added: Vec<&String> = new_hot.users.keys()
|
||||
let mut added: Vec<&String> = new_hot
|
||||
.users
|
||||
.keys()
|
||||
.filter(|u| !old_hot.users.contains_key(*u))
|
||||
.collect();
|
||||
added.sort();
|
||||
|
||||
let mut removed: Vec<&String> = old_hot.users.keys()
|
||||
let mut removed: Vec<&String> = old_hot
|
||||
.users
|
||||
.keys()
|
||||
.filter(|u| !new_hot.users.contains_key(*u))
|
||||
.collect();
|
||||
removed.sort();
|
||||
|
||||
let mut changed: Vec<&String> = new_hot.users.keys()
|
||||
let mut changed: Vec<&String> = new_hot
|
||||
.users
|
||||
.keys()
|
||||
.filter(|u| {
|
||||
old_hot.users.get(*u)
|
||||
old_hot
|
||||
.users
|
||||
.get(*u)
|
||||
.map(|s| s != &new_hot.users[*u])
|
||||
.unwrap_or(false)
|
||||
})
|
||||
@@ -903,10 +1075,18 @@ fn log_changes(
|
||||
if !added.is_empty() {
|
||||
info!(
|
||||
"config reload: users added: [{}]",
|
||||
added.iter().map(|s| s.as_str()).collect::<Vec<_>>().join(", ")
|
||||
added
|
||||
.iter()
|
||||
.map(|s| s.as_str())
|
||||
.collect::<Vec<_>>()
|
||||
.join(", ")
|
||||
);
|
||||
let host = resolve_link_host(new_cfg, detected_ip_v4, detected_ip_v6);
|
||||
let port = new_cfg.general.links.public_port.unwrap_or(new_cfg.server.port);
|
||||
let port = new_cfg
|
||||
.general
|
||||
.links
|
||||
.public_port
|
||||
.unwrap_or(new_cfg.server.port);
|
||||
for user in &added {
|
||||
if let Some(secret) = new_hot.users.get(*user) {
|
||||
print_user_links(user, secret, &host, port, new_cfg);
|
||||
@@ -916,13 +1096,21 @@ fn log_changes(
|
||||
if !removed.is_empty() {
|
||||
info!(
|
||||
"config reload: users removed: [{}]",
|
||||
removed.iter().map(|s| s.as_str()).collect::<Vec<_>>().join(", ")
|
||||
removed
|
||||
.iter()
|
||||
.map(|s| s.as_str())
|
||||
.collect::<Vec<_>>()
|
||||
.join(", ")
|
||||
);
|
||||
}
|
||||
if !changed.is_empty() {
|
||||
info!(
|
||||
"config reload: users secret changed: [{}]",
|
||||
changed.iter().map(|s| s.as_str()).collect::<Vec<_>>().join(", ")
|
||||
changed
|
||||
.iter()
|
||||
.map(|s| s.as_str())
|
||||
.collect::<Vec<_>>()
|
||||
.join(", ")
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -951,12 +1139,13 @@ fn log_changes(
|
||||
new_hot.user_max_unique_ips.len()
|
||||
);
|
||||
}
|
||||
if old_hot.user_max_unique_ips_mode != new_hot.user_max_unique_ips_mode
|
||||
|| old_hot.user_max_unique_ips_window_secs
|
||||
!= new_hot.user_max_unique_ips_window_secs
|
||||
if old_hot.user_max_unique_ips_global_each != new_hot.user_max_unique_ips_global_each
|
||||
|| old_hot.user_max_unique_ips_mode != new_hot.user_max_unique_ips_mode
|
||||
|| old_hot.user_max_unique_ips_window_secs != new_hot.user_max_unique_ips_window_secs
|
||||
{
|
||||
info!(
|
||||
"config reload: user_max_unique_ips policy mode={:?} window={}s",
|
||||
"config reload: user_max_unique_ips policy global_each={} mode={:?} window={}s",
|
||||
new_hot.user_max_unique_ips_global_each,
|
||||
new_hot.user_max_unique_ips_mode,
|
||||
new_hot.user_max_unique_ips_window_secs
|
||||
);
|
||||
@@ -970,18 +1159,32 @@ fn reload_config(
|
||||
log_tx: &watch::Sender<LogLevel>,
|
||||
detected_ip_v4: Option<IpAddr>,
|
||||
detected_ip_v6: Option<IpAddr>,
|
||||
) {
|
||||
let new_cfg = match ProxyConfig::load(config_path) {
|
||||
Ok(c) => c,
|
||||
reload_state: &mut ReloadState,
|
||||
) -> Option<WatchManifest> {
|
||||
let loaded = match ProxyConfig::load_with_metadata(config_path) {
|
||||
Ok(loaded) => loaded,
|
||||
Err(e) => {
|
||||
error!("config reload: failed to parse {:?}: {}", config_path, e);
|
||||
return;
|
||||
return None;
|
||||
}
|
||||
};
|
||||
let LoadedConfig {
|
||||
config: new_cfg,
|
||||
source_files,
|
||||
rendered_hash,
|
||||
} = loaded;
|
||||
let next_manifest = WatchManifest::from_source_files(&source_files);
|
||||
|
||||
if let Err(e) = new_cfg.validate() {
|
||||
error!("config reload: validation failed: {}; keeping old config", e);
|
||||
return;
|
||||
error!(
|
||||
"config reload: validation failed: {}; keeping old config",
|
||||
e
|
||||
);
|
||||
return Some(next_manifest);
|
||||
}
|
||||
|
||||
if reload_state.is_applied(rendered_hash) {
|
||||
return Some(next_manifest);
|
||||
}
|
||||
|
||||
let old_cfg = config_tx.borrow().clone();
|
||||
@@ -996,7 +1199,8 @@ fn reload_config(
|
||||
}
|
||||
|
||||
if !hot_changed {
|
||||
return;
|
||||
reload_state.mark_applied(rendered_hash);
|
||||
return Some(next_manifest);
|
||||
}
|
||||
|
||||
if old_hot.dns_overrides != applied_hot.dns_overrides
|
||||
@@ -1006,7 +1210,7 @@ fn reload_config(
|
||||
"config reload: invalid network.dns_overrides: {}; keeping old config",
|
||||
e
|
||||
);
|
||||
return;
|
||||
return Some(next_manifest);
|
||||
}
|
||||
|
||||
log_changes(
|
||||
@@ -1018,6 +1222,8 @@ fn reload_config(
|
||||
detected_ip_v6,
|
||||
);
|
||||
config_tx.send(Arc::new(applied_cfg)).ok();
|
||||
reload_state.mark_applied(rendered_hash);
|
||||
Some(next_manifest)
|
||||
}
|
||||
|
||||
// ── Public API ────────────────────────────────────────────────────────────────
|
||||
@@ -1038,82 +1244,95 @@ pub fn spawn_config_watcher(
|
||||
) -> (watch::Receiver<Arc<ProxyConfig>>, watch::Receiver<LogLevel>) {
|
||||
let initial_level = initial.general.log_level.clone();
|
||||
let (config_tx, config_rx) = watch::channel(initial);
|
||||
let (log_tx, log_rx) = watch::channel(initial_level);
|
||||
let (log_tx, log_rx) = watch::channel(initial_level);
|
||||
|
||||
// Bridge: sync notify callbacks → async task via mpsc.
|
||||
let (notify_tx, mut notify_rx) = mpsc::channel::<()>(4);
|
||||
let config_path = normalize_watch_path(&config_path);
|
||||
let initial_loaded = ProxyConfig::load_with_metadata(&config_path).ok();
|
||||
let initial_manifest = initial_loaded
|
||||
.as_ref()
|
||||
.map(|loaded| WatchManifest::from_source_files(&loaded.source_files))
|
||||
.unwrap_or_else(|| WatchManifest::from_source_files(std::slice::from_ref(&config_path)));
|
||||
let initial_snapshot_hash = initial_loaded.as_ref().map(|loaded| loaded.rendered_hash);
|
||||
|
||||
// Canonicalize so path matches what notify returns (absolute) in events.
|
||||
let config_path = match config_path.canonicalize() {
|
||||
Ok(p) => p,
|
||||
Err(_) => config_path.to_path_buf(),
|
||||
};
|
||||
|
||||
// Watch the parent directory rather than the file itself, because many
|
||||
// editors (vim, nano) and systemd write via rename, which would cause
|
||||
// inotify to lose track of the original inode.
|
||||
let watch_dir = config_path
|
||||
.parent()
|
||||
.unwrap_or_else(|| std::path::Path::new("."))
|
||||
.to_path_buf();
|
||||
|
||||
// ── inotify watcher (instant on local fs) ────────────────────────────
|
||||
let config_file = config_path.clone();
|
||||
let tx_inotify = notify_tx.clone();
|
||||
let inotify_ok = match recommended_watcher(move |res: notify::Result<notify::Event>| {
|
||||
let Ok(event) = res else { return };
|
||||
let is_our_file = event.paths.iter().any(|p| p == &config_file);
|
||||
if !is_our_file { return; }
|
||||
if matches!(event.kind, EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_)) {
|
||||
let _ = tx_inotify.try_send(());
|
||||
}
|
||||
}) {
|
||||
Ok(mut w) => match w.watch(&watch_dir, RecursiveMode::NonRecursive) {
|
||||
Ok(()) => {
|
||||
info!("config watcher: inotify active on {:?}", config_path);
|
||||
Box::leak(Box::new(w));
|
||||
true
|
||||
}
|
||||
Err(e) => { warn!("config watcher: inotify watch failed: {}", e); false }
|
||||
},
|
||||
Err(e) => { warn!("config watcher: inotify unavailable: {}", e); false }
|
||||
};
|
||||
|
||||
// ── poll watcher (always active, fixes Docker bind mounts / NFS) ─────
|
||||
// inotify does not receive events for files mounted from the host into
|
||||
// a container. PollWatcher compares file contents every 3 s and fires
|
||||
// on any change regardless of the underlying fs.
|
||||
let config_file2 = config_path.clone();
|
||||
let tx_poll = notify_tx.clone();
|
||||
match notify::poll::PollWatcher::new(
|
||||
move |res: notify::Result<notify::Event>| {
|
||||
let Ok(event) = res else { return };
|
||||
let is_our_file = event.paths.iter().any(|p| p == &config_file2);
|
||||
if !is_our_file { return; }
|
||||
if matches!(event.kind, EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_)) {
|
||||
let _ = tx_poll.try_send(());
|
||||
}
|
||||
},
|
||||
notify::Config::default()
|
||||
.with_poll_interval(std::time::Duration::from_secs(3))
|
||||
.with_compare_contents(true),
|
||||
) {
|
||||
Ok(mut w) => match w.watch(&config_path, RecursiveMode::NonRecursive) {
|
||||
Ok(()) => {
|
||||
if inotify_ok {
|
||||
info!("config watcher: poll watcher also active (Docker/NFS safe)");
|
||||
} else {
|
||||
info!("config watcher: poll watcher active on {:?} (3s interval)", config_path);
|
||||
}
|
||||
Box::leak(Box::new(w));
|
||||
}
|
||||
Err(e) => warn!("config watcher: poll watch failed: {}", e),
|
||||
},
|
||||
Err(e) => warn!("config watcher: poll watcher unavailable: {}", e),
|
||||
}
|
||||
|
||||
// ── event loop ───────────────────────────────────────────────────────
|
||||
tokio::spawn(async move {
|
||||
let (notify_tx, mut notify_rx) = mpsc::channel::<()>(4);
|
||||
let manifest_state = Arc::new(StdRwLock::new(WatchManifest::default()));
|
||||
let mut reload_state = ReloadState::new(initial_snapshot_hash);
|
||||
|
||||
let tx_inotify = notify_tx.clone();
|
||||
let manifest_for_inotify = manifest_state.clone();
|
||||
let mut inotify_watcher =
|
||||
match recommended_watcher(move |res: notify::Result<notify::Event>| {
|
||||
let Ok(event) = res else { return };
|
||||
if !matches!(
|
||||
event.kind,
|
||||
EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_)
|
||||
) {
|
||||
return;
|
||||
}
|
||||
let is_our_file = manifest_for_inotify
|
||||
.read()
|
||||
.map(|manifest| manifest.matches_event_paths(&event.paths))
|
||||
.unwrap_or(false);
|
||||
if is_our_file {
|
||||
let _ = tx_inotify.try_send(());
|
||||
}
|
||||
}) {
|
||||
Ok(watcher) => Some(watcher),
|
||||
Err(e) => {
|
||||
warn!("config watcher: inotify unavailable: {}", e);
|
||||
None
|
||||
}
|
||||
};
|
||||
apply_watch_manifest(
|
||||
inotify_watcher.as_mut(),
|
||||
Option::<&mut notify::poll::PollWatcher>::None,
|
||||
&manifest_state,
|
||||
initial_manifest.clone(),
|
||||
);
|
||||
if inotify_watcher.is_some() {
|
||||
info!("config watcher: inotify active on {:?}", config_path);
|
||||
}
|
||||
|
||||
let tx_poll = notify_tx.clone();
|
||||
let manifest_for_poll = manifest_state.clone();
|
||||
let mut poll_watcher = match notify::poll::PollWatcher::new(
|
||||
move |res: notify::Result<notify::Event>| {
|
||||
let Ok(event) = res else { return };
|
||||
if !matches!(
|
||||
event.kind,
|
||||
EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_)
|
||||
) {
|
||||
return;
|
||||
}
|
||||
let is_our_file = manifest_for_poll
|
||||
.read()
|
||||
.map(|manifest| manifest.matches_event_paths(&event.paths))
|
||||
.unwrap_or(false);
|
||||
if is_our_file {
|
||||
let _ = tx_poll.try_send(());
|
||||
}
|
||||
},
|
||||
notify::Config::default()
|
||||
.with_poll_interval(Duration::from_secs(3))
|
||||
.with_compare_contents(true),
|
||||
) {
|
||||
Ok(watcher) => Some(watcher),
|
||||
Err(e) => {
|
||||
warn!("config watcher: poll watcher unavailable: {}", e);
|
||||
None
|
||||
}
|
||||
};
|
||||
apply_watch_manifest(
|
||||
Option::<&mut notify::RecommendedWatcher>::None,
|
||||
poll_watcher.as_mut(),
|
||||
&manifest_state,
|
||||
initial_manifest.clone(),
|
||||
);
|
||||
if poll_watcher.is_some() {
|
||||
info!("config watcher: poll watcher active (Docker/NFS safe)");
|
||||
}
|
||||
|
||||
#[cfg(unix)]
|
||||
let mut sighup = {
|
||||
use tokio::signal::unix::{SignalKind, signal};
|
||||
@@ -1131,13 +1350,43 @@ pub fn spawn_config_watcher(
|
||||
}
|
||||
}
|
||||
#[cfg(not(unix))]
|
||||
if notify_rx.recv().await.is_none() { break; }
|
||||
if notify_rx.recv().await.is_none() {
|
||||
break;
|
||||
}
|
||||
|
||||
// Debounce: drain extra events that arrive within 50 ms.
|
||||
tokio::time::sleep(std::time::Duration::from_millis(50)).await;
|
||||
// Debounce: drain extra events that arrive within a short quiet window.
|
||||
tokio::time::sleep(HOT_RELOAD_DEBOUNCE).await;
|
||||
while notify_rx.try_recv().is_ok() {}
|
||||
|
||||
reload_config(&config_path, &config_tx, &log_tx, detected_ip_v4, detected_ip_v6);
|
||||
let mut next_manifest = reload_config(
|
||||
&config_path,
|
||||
&config_tx,
|
||||
&log_tx,
|
||||
detected_ip_v4,
|
||||
detected_ip_v6,
|
||||
&mut reload_state,
|
||||
);
|
||||
if next_manifest.is_none() {
|
||||
tokio::time::sleep(HOT_RELOAD_DEBOUNCE).await;
|
||||
while notify_rx.try_recv().is_ok() {}
|
||||
next_manifest = reload_config(
|
||||
&config_path,
|
||||
&config_tx,
|
||||
&log_tx,
|
||||
detected_ip_v4,
|
||||
detected_ip_v6,
|
||||
&mut reload_state,
|
||||
);
|
||||
}
|
||||
|
||||
if let Some(next_manifest) = next_manifest {
|
||||
apply_watch_manifest(
|
||||
inotify_watcher.as_mut(),
|
||||
poll_watcher.as_mut(),
|
||||
&manifest_state,
|
||||
next_manifest,
|
||||
);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
@@ -1152,6 +1401,40 @@ mod tests {
|
||||
ProxyConfig::default()
|
||||
}
|
||||
|
||||
fn write_reload_config(path: &Path, ad_tag: Option<&str>, server_port: Option<u16>) {
|
||||
let mut config = String::from(
|
||||
r#"
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#,
|
||||
);
|
||||
|
||||
if ad_tag.is_some() {
|
||||
config.push_str("\n[general]\n");
|
||||
if let Some(tag) = ad_tag {
|
||||
config.push_str(&format!("ad_tag = \"{tag}\"\n"));
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(port) = server_port {
|
||||
config.push_str("\n[server]\n");
|
||||
config.push_str(&format!("port = {port}\n"));
|
||||
}
|
||||
|
||||
std::fs::write(path, config).unwrap();
|
||||
}
|
||||
|
||||
fn temp_config_path(prefix: &str) -> PathBuf {
|
||||
let nonce = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_nanos();
|
||||
std::env::temp_dir().join(format!("{prefix}_{nonce}.toml"))
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn overlay_applies_hot_and_preserves_non_hot() {
|
||||
let old = sample_config();
|
||||
@@ -1171,7 +1454,10 @@ mod tests {
|
||||
new.server.port = old.server.port.saturating_add(1);
|
||||
|
||||
let applied = overlay_hot_fields(&old, &new);
|
||||
assert_eq!(HotFields::from_config(&old), HotFields::from_config(&applied));
|
||||
assert_eq!(
|
||||
HotFields::from_config(&old),
|
||||
HotFields::from_config(&applied)
|
||||
);
|
||||
assert_eq!(applied.server.port, old.server.port);
|
||||
}
|
||||
|
||||
@@ -1190,7 +1476,10 @@ mod tests {
|
||||
applied.general.me_bind_stale_mode,
|
||||
new.general.me_bind_stale_mode
|
||||
);
|
||||
assert_ne!(HotFields::from_config(&old), HotFields::from_config(&applied));
|
||||
assert_ne!(
|
||||
HotFields::from_config(&old),
|
||||
HotFields::from_config(&applied)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1204,7 +1493,10 @@ mod tests {
|
||||
applied.general.me_keepalive_interval_secs,
|
||||
old.general.me_keepalive_interval_secs
|
||||
);
|
||||
assert_eq!(HotFields::from_config(&old), HotFields::from_config(&applied));
|
||||
assert_eq!(
|
||||
HotFields::from_config(&old),
|
||||
HotFields::from_config(&applied)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -1216,7 +1508,92 @@ mod tests {
|
||||
|
||||
let applied = overlay_hot_fields(&old, &new);
|
||||
assert_eq!(applied.general.hardswap, new.general.hardswap);
|
||||
assert_eq!(applied.general.use_middle_proxy, old.general.use_middle_proxy);
|
||||
assert_eq!(
|
||||
applied.general.use_middle_proxy,
|
||||
old.general.use_middle_proxy
|
||||
);
|
||||
assert!(!config_equal(&applied, &new));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reload_applies_hot_change_on_first_observed_snapshot() {
|
||||
let initial_tag = "11111111111111111111111111111111";
|
||||
let final_tag = "22222222222222222222222222222222";
|
||||
let path = temp_config_path("telemt_hot_reload_stable");
|
||||
|
||||
write_reload_config(&path, Some(initial_tag), None);
|
||||
let initial_cfg = Arc::new(ProxyConfig::load(&path).unwrap());
|
||||
let initial_hash = ProxyConfig::load_with_metadata(&path)
|
||||
.unwrap()
|
||||
.rendered_hash;
|
||||
let (config_tx, _config_rx) = watch::channel(initial_cfg.clone());
|
||||
let (log_tx, _log_rx) = watch::channel(initial_cfg.general.log_level.clone());
|
||||
let mut reload_state = ReloadState::new(Some(initial_hash));
|
||||
|
||||
write_reload_config(&path, Some(final_tag), None);
|
||||
reload_config(&path, &config_tx, &log_tx, None, None, &mut reload_state).unwrap();
|
||||
assert_eq!(
|
||||
config_tx.borrow().general.ad_tag.as_deref(),
|
||||
Some(final_tag)
|
||||
);
|
||||
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reload_keeps_hot_apply_when_non_hot_fields_change() {
|
||||
let initial_tag = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
|
||||
let final_tag = "bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb";
|
||||
let path = temp_config_path("telemt_hot_reload_mixed");
|
||||
|
||||
write_reload_config(&path, Some(initial_tag), None);
|
||||
let initial_cfg = Arc::new(ProxyConfig::load(&path).unwrap());
|
||||
let initial_hash = ProxyConfig::load_with_metadata(&path)
|
||||
.unwrap()
|
||||
.rendered_hash;
|
||||
let (config_tx, _config_rx) = watch::channel(initial_cfg.clone());
|
||||
let (log_tx, _log_rx) = watch::channel(initial_cfg.general.log_level.clone());
|
||||
let mut reload_state = ReloadState::new(Some(initial_hash));
|
||||
|
||||
write_reload_config(&path, Some(final_tag), Some(initial_cfg.server.port + 1));
|
||||
reload_config(&path, &config_tx, &log_tx, None, None, &mut reload_state).unwrap();
|
||||
|
||||
let applied = config_tx.borrow().clone();
|
||||
assert_eq!(applied.general.ad_tag.as_deref(), Some(final_tag));
|
||||
assert_eq!(applied.server.port, initial_cfg.server.port);
|
||||
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn reload_recovers_after_parse_error_on_next_attempt() {
|
||||
let initial_tag = "cccccccccccccccccccccccccccccccc";
|
||||
let final_tag = "dddddddddddddddddddddddddddddddd";
|
||||
let path = temp_config_path("telemt_hot_reload_parse_recovery");
|
||||
|
||||
write_reload_config(&path, Some(initial_tag), None);
|
||||
let initial_cfg = Arc::new(ProxyConfig::load(&path).unwrap());
|
||||
let initial_hash = ProxyConfig::load_with_metadata(&path)
|
||||
.unwrap()
|
||||
.rendered_hash;
|
||||
let (config_tx, _config_rx) = watch::channel(initial_cfg.clone());
|
||||
let (log_tx, _log_rx) = watch::channel(initial_cfg.general.log_level.clone());
|
||||
let mut reload_state = ReloadState::new(Some(initial_hash));
|
||||
|
||||
std::fs::write(&path, "[access.users\nuser = \"broken\"\n").unwrap();
|
||||
assert!(reload_config(&path, &config_tx, &log_tx, None, None, &mut reload_state).is_none());
|
||||
assert_eq!(
|
||||
config_tx.borrow().general.ad_tag.as_deref(),
|
||||
Some(initial_tag)
|
||||
);
|
||||
|
||||
write_reload_config(&path, Some(final_tag), None);
|
||||
reload_config(&path, &config_tx, &log_tx, None, None, &mut reload_state).unwrap();
|
||||
assert_eq!(
|
||||
config_tx.borrow().general.ad_tag.as_deref(),
|
||||
Some(final_tag)
|
||||
);
|
||||
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,19 +1,51 @@
|
||||
#![allow(deprecated)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::collections::{BTreeSet, HashMap};
|
||||
use std::hash::{DefaultHasher, Hash, Hasher};
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::path::Path;
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use rand::Rng;
|
||||
use rand::RngExt;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use shadowsocks::config::ServerConfig as ShadowsocksServerConfig;
|
||||
use tracing::warn;
|
||||
use serde::{Serialize, Deserialize};
|
||||
|
||||
use crate::error::{ProxyError, Result};
|
||||
|
||||
use super::defaults::*;
|
||||
use super::types::*;
|
||||
|
||||
fn preprocess_includes(content: &str, base_dir: &Path, depth: u8) -> Result<String> {
|
||||
#[derive(Debug, Clone)]
|
||||
pub(crate) struct LoadedConfig {
|
||||
pub(crate) config: ProxyConfig,
|
||||
pub(crate) source_files: Vec<PathBuf>,
|
||||
pub(crate) rendered_hash: u64,
|
||||
}
|
||||
|
||||
fn normalize_config_path(path: &Path) -> PathBuf {
|
||||
path.canonicalize().unwrap_or_else(|_| {
|
||||
if path.is_absolute() {
|
||||
path.to_path_buf()
|
||||
} else {
|
||||
std::env::current_dir()
|
||||
.map(|cwd| cwd.join(path))
|
||||
.unwrap_or_else(|_| path.to_path_buf())
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
fn hash_rendered_snapshot(rendered: &str) -> u64 {
|
||||
let mut hasher = DefaultHasher::new();
|
||||
rendered.hash(&mut hasher);
|
||||
hasher.finish()
|
||||
}
|
||||
|
||||
fn preprocess_includes(
|
||||
content: &str,
|
||||
base_dir: &Path,
|
||||
depth: u8,
|
||||
source_files: &mut BTreeSet<PathBuf>,
|
||||
) -> Result<String> {
|
||||
if depth > 10 {
|
||||
return Err(ProxyError::Config("Include depth > 10".into()));
|
||||
}
|
||||
@@ -25,10 +57,16 @@ fn preprocess_includes(content: &str, base_dir: &Path, depth: u8) -> Result<Stri
|
||||
if let Some(rest) = rest.strip_prefix('=') {
|
||||
let path_str = rest.trim().trim_matches('"');
|
||||
let resolved = base_dir.join(path_str);
|
||||
source_files.insert(normalize_config_path(&resolved));
|
||||
let included = std::fs::read_to_string(&resolved)
|
||||
.map_err(|e| ProxyError::Config(e.to_string()))?;
|
||||
let included_dir = resolved.parent().unwrap_or(base_dir);
|
||||
output.push_str(&preprocess_includes(&included, included_dir, depth + 1)?);
|
||||
output.push_str(&preprocess_includes(
|
||||
&included,
|
||||
included_dir,
|
||||
depth + 1,
|
||||
source_files,
|
||||
)?);
|
||||
output.push('\n');
|
||||
continue;
|
||||
}
|
||||
@@ -85,13 +123,37 @@ fn sanitize_ad_tag(ad_tag: &mut Option<String>) {
|
||||
};
|
||||
|
||||
if !is_valid_ad_tag(tag) {
|
||||
warn!(
|
||||
"Invalid general.ad_tag value, expected exactly 32 hex chars; ad_tag is disabled"
|
||||
);
|
||||
warn!("Invalid general.ad_tag value, expected exactly 32 hex chars; ad_tag is disabled");
|
||||
*ad_tag = None;
|
||||
}
|
||||
}
|
||||
|
||||
fn validate_upstreams(config: &ProxyConfig) -> Result<()> {
|
||||
let has_enabled_shadowsocks = config.upstreams.iter().any(|upstream| {
|
||||
upstream.enabled && matches!(upstream.upstream_type, UpstreamType::Shadowsocks { .. })
|
||||
});
|
||||
|
||||
if has_enabled_shadowsocks && config.general.use_middle_proxy {
|
||||
return Err(ProxyError::Config(
|
||||
"shadowsocks upstreams require general.use_middle_proxy = false".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
for upstream in &config.upstreams {
|
||||
if let UpstreamType::Shadowsocks { url, .. } = &upstream.upstream_type {
|
||||
let parsed = ShadowsocksServerConfig::from_url(url)
|
||||
.map_err(|error| ProxyError::Config(format!("invalid shadowsocks url: {error}")))?;
|
||||
if parsed.plugin().is_some() {
|
||||
return Err(ProxyError::Config(
|
||||
"shadowsocks plugins are not supported".to_string(),
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// ============= Main Config =============
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
|
||||
@@ -138,10 +200,17 @@ pub struct ProxyConfig {
|
||||
|
||||
impl ProxyConfig {
|
||||
pub fn load<P: AsRef<Path>>(path: P) -> Result<Self> {
|
||||
Self::load_with_metadata(path).map(|loaded| loaded.config)
|
||||
}
|
||||
|
||||
pub(crate) fn load_with_metadata<P: AsRef<Path>>(path: P) -> Result<LoadedConfig> {
|
||||
let path = path.as_ref();
|
||||
let content =
|
||||
std::fs::read_to_string(&path).map_err(|e| ProxyError::Config(e.to_string()))?;
|
||||
let base_dir = path.as_ref().parent().unwrap_or(Path::new("."));
|
||||
let processed = preprocess_includes(&content, base_dir, 0)?;
|
||||
std::fs::read_to_string(path).map_err(|e| ProxyError::Config(e.to_string()))?;
|
||||
let base_dir = path.parent().unwrap_or(Path::new("."));
|
||||
let mut source_files = BTreeSet::new();
|
||||
source_files.insert(normalize_config_path(path));
|
||||
let processed = preprocess_includes(&content, base_dir, 0, &mut source_files)?;
|
||||
|
||||
let parsed_toml: toml::Value =
|
||||
toml::from_str(&processed).map_err(|e| ProxyError::Config(e.to_string()))?;
|
||||
@@ -164,15 +233,17 @@ impl ProxyConfig {
|
||||
.map(|table| table.contains_key("stun_servers"))
|
||||
.unwrap_or(false);
|
||||
|
||||
let mut config: ProxyConfig =
|
||||
parsed_toml.try_into().map_err(|e| ProxyError::Config(e.to_string()))?;
|
||||
let mut config: ProxyConfig = parsed_toml
|
||||
.try_into()
|
||||
.map_err(|e| ProxyError::Config(e.to_string()))?;
|
||||
|
||||
if !update_every_is_explicit && (legacy_secret_is_explicit || legacy_config_is_explicit) {
|
||||
config.general.update_every = None;
|
||||
}
|
||||
|
||||
let legacy_nat_stun = config.general.middle_proxy_nat_stun.take();
|
||||
let legacy_nat_stun_servers = std::mem::take(&mut config.general.middle_proxy_nat_stun_servers);
|
||||
let legacy_nat_stun_servers =
|
||||
std::mem::take(&mut config.general.middle_proxy_nat_stun_servers);
|
||||
let legacy_nat_stun_used = legacy_nat_stun.is_some() || !legacy_nat_stun_servers.is_empty();
|
||||
if stun_servers_is_explicit {
|
||||
let mut explicit_stun_servers = Vec::new();
|
||||
@@ -182,7 +253,9 @@ impl ProxyConfig {
|
||||
config.network.stun_servers = explicit_stun_servers;
|
||||
|
||||
if legacy_nat_stun_used {
|
||||
warn!("general.middle_proxy_nat_stun and general.middle_proxy_nat_stun_servers are ignored because network.stun_servers is explicitly set");
|
||||
warn!(
|
||||
"general.middle_proxy_nat_stun and general.middle_proxy_nat_stun_servers are ignored because network.stun_servers is explicitly set"
|
||||
);
|
||||
}
|
||||
} else {
|
||||
// Keep the default STUN pool unless network.stun_servers is explicitly overridden.
|
||||
@@ -197,7 +270,9 @@ impl ProxyConfig {
|
||||
config.network.stun_servers = unified_stun_servers;
|
||||
|
||||
if legacy_nat_stun_used {
|
||||
warn!("general.middle_proxy_nat_stun and general.middle_proxy_nat_stun_servers are deprecated; use network.stun_servers");
|
||||
warn!(
|
||||
"general.middle_proxy_nat_stun and general.middle_proxy_nat_stun_servers are deprecated; use network.stun_servers"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -285,6 +360,131 @@ impl ProxyConfig {
|
||||
));
|
||||
}
|
||||
|
||||
if config.timeouts.client_handshake == 0 {
|
||||
return Err(ProxyError::Config(
|
||||
"timeouts.client_handshake must be > 0".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let handshake_timeout_ms = config
|
||||
.timeouts
|
||||
.client_handshake
|
||||
.checked_mul(1000)
|
||||
.ok_or_else(|| {
|
||||
ProxyError::Config(
|
||||
"timeouts.client_handshake is too large to validate milliseconds budget"
|
||||
.to_string(),
|
||||
)
|
||||
})?;
|
||||
|
||||
if config.censorship.server_hello_delay_max_ms >= handshake_timeout_ms {
|
||||
return Err(ProxyError::Config(
|
||||
"censorship.server_hello_delay_max_ms must be < timeouts.client_handshake * 1000"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.censorship.mask_shape_bucket_floor_bytes == 0 {
|
||||
return Err(ProxyError::Config(
|
||||
"censorship.mask_shape_bucket_floor_bytes must be > 0".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.censorship.mask_shape_bucket_cap_bytes
|
||||
< config.censorship.mask_shape_bucket_floor_bytes
|
||||
{
|
||||
return Err(ProxyError::Config(
|
||||
"censorship.mask_shape_bucket_cap_bytes must be >= censorship.mask_shape_bucket_floor_bytes"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.censorship.mask_shape_above_cap_blur && !config.censorship.mask_shape_hardening {
|
||||
return Err(ProxyError::Config(
|
||||
"censorship.mask_shape_above_cap_blur requires censorship.mask_shape_hardening = true"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.censorship.mask_shape_hardening_aggressive_mode
|
||||
&& !config.censorship.mask_shape_hardening
|
||||
{
|
||||
return Err(ProxyError::Config(
|
||||
"censorship.mask_shape_hardening_aggressive_mode requires censorship.mask_shape_hardening = true"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.censorship.mask_shape_above_cap_blur
|
||||
&& config.censorship.mask_shape_above_cap_blur_max_bytes == 0
|
||||
{
|
||||
return Err(ProxyError::Config(
|
||||
"censorship.mask_shape_above_cap_blur_max_bytes must be > 0 when censorship.mask_shape_above_cap_blur is enabled"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.censorship.mask_shape_above_cap_blur_max_bytes > 1_048_576 {
|
||||
return Err(ProxyError::Config(
|
||||
"censorship.mask_shape_above_cap_blur_max_bytes must be <= 1048576".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.censorship.mask_timing_normalization_ceiling_ms
|
||||
< config.censorship.mask_timing_normalization_floor_ms
|
||||
{
|
||||
return Err(ProxyError::Config(
|
||||
"censorship.mask_timing_normalization_ceiling_ms must be >= censorship.mask_timing_normalization_floor_ms"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.censorship.mask_timing_normalization_enabled
|
||||
&& config.censorship.mask_timing_normalization_floor_ms == 0
|
||||
{
|
||||
return Err(ProxyError::Config(
|
||||
"censorship.mask_timing_normalization_floor_ms must be > 0 when censorship.mask_timing_normalization_enabled is true"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.censorship.mask_timing_normalization_ceiling_ms > 60_000 {
|
||||
return Err(ProxyError::Config(
|
||||
"censorship.mask_timing_normalization_ceiling_ms must be <= 60000".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.timeouts.relay_client_idle_soft_secs == 0 {
|
||||
return Err(ProxyError::Config(
|
||||
"timeouts.relay_client_idle_soft_secs must be > 0".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.timeouts.relay_client_idle_hard_secs == 0 {
|
||||
return Err(ProxyError::Config(
|
||||
"timeouts.relay_client_idle_hard_secs must be > 0".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.timeouts.relay_client_idle_hard_secs < config.timeouts.relay_client_idle_soft_secs
|
||||
{
|
||||
return Err(ProxyError::Config(
|
||||
"timeouts.relay_client_idle_hard_secs must be >= timeouts.relay_client_idle_soft_secs"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config
|
||||
.timeouts
|
||||
.relay_idle_grace_after_downstream_activity_secs
|
||||
> config.timeouts.relay_client_idle_hard_secs
|
||||
{
|
||||
return Err(ProxyError::Config(
|
||||
"timeouts.relay_idle_grace_after_downstream_activity_secs must be <= timeouts.relay_client_idle_hard_secs"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.general.me_writer_cmd_channel_capacity == 0 {
|
||||
return Err(ProxyError::Config(
|
||||
"general.me_writer_cmd_channel_capacity must be > 0".to_string(),
|
||||
@@ -303,6 +503,12 @@ impl ProxyConfig {
|
||||
));
|
||||
}
|
||||
|
||||
if config.general.me_c2me_send_timeout_ms > 60_000 {
|
||||
return Err(ProxyError::Config(
|
||||
"general.me_c2me_send_timeout_ms must be within [0, 60000]".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.general.me_reader_route_data_wait_ms > 20 {
|
||||
return Err(ProxyError::Config(
|
||||
"general.me_reader_route_data_wait_ms must be within [0, 20]".to_string(),
|
||||
@@ -329,13 +535,15 @@ impl ProxyConfig {
|
||||
|
||||
if !(4096..=1024 * 1024).contains(&config.general.direct_relay_copy_buf_c2s_bytes) {
|
||||
return Err(ProxyError::Config(
|
||||
"general.direct_relay_copy_buf_c2s_bytes must be within [4096, 1048576]".to_string(),
|
||||
"general.direct_relay_copy_buf_c2s_bytes must be within [4096, 1048576]"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if !(8192..=2 * 1024 * 1024).contains(&config.general.direct_relay_copy_buf_s2c_bytes) {
|
||||
return Err(ProxyError::Config(
|
||||
"general.direct_relay_copy_buf_s2c_bytes must be within [8192, 2097152]".to_string(),
|
||||
"general.direct_relay_copy_buf_s2c_bytes must be within [8192, 2097152]"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
@@ -363,6 +571,35 @@ impl ProxyConfig {
|
||||
));
|
||||
}
|
||||
|
||||
if config.general.me_pool_drain_soft_evict_grace_secs > 3600 {
|
||||
return Err(ProxyError::Config(
|
||||
"general.me_pool_drain_soft_evict_grace_secs must be within [0, 3600]".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.general.me_pool_drain_soft_evict_per_writer == 0
|
||||
|| config.general.me_pool_drain_soft_evict_per_writer > 16
|
||||
{
|
||||
return Err(ProxyError::Config(
|
||||
"general.me_pool_drain_soft_evict_per_writer must be within [1, 16]".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.general.me_pool_drain_soft_evict_budget_per_core == 0
|
||||
|| config.general.me_pool_drain_soft_evict_budget_per_core > 64
|
||||
{
|
||||
return Err(ProxyError::Config(
|
||||
"general.me_pool_drain_soft_evict_budget_per_core must be within [1, 64]"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.general.me_pool_drain_soft_evict_cooldown_ms == 0 {
|
||||
return Err(ProxyError::Config(
|
||||
"general.me_pool_drain_soft_evict_cooldown_ms must be > 0".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.access.user_max_unique_ips_window_secs == 0 {
|
||||
return Err(ProxyError::Config(
|
||||
"access.user_max_unique_ips_window_secs must be > 0".to_string(),
|
||||
@@ -534,6 +771,12 @@ impl ProxyConfig {
|
||||
"general.me_route_backpressure_base_timeout_ms must be > 0".to_string(),
|
||||
));
|
||||
}
|
||||
if config.general.me_route_backpressure_base_timeout_ms > 5000 {
|
||||
return Err(ProxyError::Config(
|
||||
"general.me_route_backpressure_base_timeout_ms must be within [1, 5000]"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.general.me_route_backpressure_high_timeout_ms
|
||||
< config.general.me_route_backpressure_base_timeout_ms
|
||||
@@ -542,10 +785,17 @@ impl ProxyConfig {
|
||||
"general.me_route_backpressure_high_timeout_ms must be >= general.me_route_backpressure_base_timeout_ms".to_string(),
|
||||
));
|
||||
}
|
||||
if config.general.me_route_backpressure_high_timeout_ms > 5000 {
|
||||
return Err(ProxyError::Config(
|
||||
"general.me_route_backpressure_high_timeout_ms must be within [1, 5000]"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if !(1..=100).contains(&config.general.me_route_backpressure_high_watermark_pct) {
|
||||
return Err(ProxyError::Config(
|
||||
"general.me_route_backpressure_high_watermark_pct must be within [1, 100]".to_string(),
|
||||
"general.me_route_backpressure_high_watermark_pct must be within [1, 100]"
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
@@ -555,6 +805,18 @@ impl ProxyConfig {
|
||||
));
|
||||
}
|
||||
|
||||
if !(50..=60_000).contains(&config.general.me_route_hybrid_max_wait_ms) {
|
||||
return Err(ProxyError::Config(
|
||||
"general.me_route_hybrid_max_wait_ms must be within [50, 60000]".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.general.me_route_blocking_send_timeout_ms > 5000 {
|
||||
return Err(ProxyError::Config(
|
||||
"general.me_route_blocking_send_timeout_ms must be within [0, 5000]".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if !(2..=4).contains(&config.general.me_writer_pick_sample_size) {
|
||||
return Err(ProxyError::Config(
|
||||
"general.me_writer_pick_sample_size must be within [2, 4]".to_string(),
|
||||
@@ -615,6 +877,12 @@ impl ProxyConfig {
|
||||
));
|
||||
}
|
||||
|
||||
if config.server.accept_permit_timeout_ms > 60_000 {
|
||||
return Err(ProxyError::Config(
|
||||
"server.accept_permit_timeout_ms must be within [0, 60000]".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if config.general.effective_me_pool_force_close_secs() > 0
|
||||
&& config.general.effective_me_pool_force_close_secs()
|
||||
< config.general.me_pool_drain_ttl_secs
|
||||
@@ -673,6 +941,9 @@ impl ProxyConfig {
|
||||
config.censorship.mask_host = Some(config.censorship.tls_domain.clone());
|
||||
}
|
||||
|
||||
// Normalize optional TLS fetch scope: whitespace-only values disable scoped routing.
|
||||
config.censorship.tls_fetch_scope = config.censorship.tls_fetch_scope.trim().to_string();
|
||||
|
||||
// Merge primary + extra TLS domains, deduplicate (primary always first).
|
||||
if !config.censorship.tls_domains.is_empty() {
|
||||
let mut all = Vec::with_capacity(1 + config.censorship.tls_domains.len());
|
||||
@@ -707,12 +978,16 @@ impl ProxyConfig {
|
||||
crate::network::dns_overrides::validate_entries(&config.network.dns_overrides)?;
|
||||
|
||||
if config.general.use_middle_proxy && config.network.ipv6 == Some(true) {
|
||||
warn!("IPv6 with Middle Proxy is experimental and may cause KDF address mismatch; consider disabling IPv6 or ME");
|
||||
warn!(
|
||||
"IPv6 with Middle Proxy is experimental and may cause KDF address mismatch; consider disabling IPv6 or ME"
|
||||
);
|
||||
}
|
||||
|
||||
// Random fake_cert_len only when default is in use.
|
||||
if !config.censorship.tls_emulation && config.censorship.fake_cert_len == default_fake_cert_len() {
|
||||
config.censorship.fake_cert_len = rand::rng().gen_range(1024..4096);
|
||||
if !config.censorship.tls_emulation
|
||||
&& config.censorship.fake_cert_len == default_fake_cert_len()
|
||||
{
|
||||
config.censorship.fake_cert_len = rand::rng().random_range(1024..4096);
|
||||
}
|
||||
|
||||
// Resolve listen_tcp: explicit value wins, otherwise auto-detect.
|
||||
@@ -721,8 +996,7 @@ impl ProxyConfig {
|
||||
let listen_tcp = config.server.listen_tcp.unwrap_or_else(|| {
|
||||
if config.server.listen_unix_sock.is_some() {
|
||||
// Unix socket present: TCP only if user explicitly set addresses or listeners.
|
||||
config.server.listen_addr_ipv4.is_some()
|
||||
|| !config.server.listeners.is_empty()
|
||||
config.server.listen_addr_ipv4.is_some() || !config.server.listeners.is_empty()
|
||||
} else {
|
||||
true
|
||||
}
|
||||
@@ -730,7 +1004,9 @@ impl ProxyConfig {
|
||||
|
||||
// Migration: Populate listeners if empty (skip when listen_tcp = false).
|
||||
if config.server.listeners.is_empty() && listen_tcp {
|
||||
let ipv4_str = config.server.listen_addr_ipv4
|
||||
let ipv4_str = config
|
||||
.server
|
||||
.listen_addr_ipv4
|
||||
.as_deref()
|
||||
.unwrap_or("0.0.0.0");
|
||||
if let Ok(ipv4) = ipv4_str.parse::<IpAddr>() {
|
||||
@@ -772,7 +1048,10 @@ impl ProxyConfig {
|
||||
// Migration: Populate upstreams if empty (Default Direct).
|
||||
if config.upstreams.is_empty() {
|
||||
config.upstreams.push(UpstreamConfig {
|
||||
upstream_type: UpstreamType::Direct { interface: None, bind_addresses: None },
|
||||
upstream_type: UpstreamType::Direct {
|
||||
interface: None,
|
||||
bind_addresses: None,
|
||||
},
|
||||
weight: 1,
|
||||
enabled: true,
|
||||
scopes: String::new(),
|
||||
@@ -786,7 +1065,13 @@ impl ProxyConfig {
|
||||
.entry("203".to_string())
|
||||
.or_insert_with(|| vec!["91.105.192.100:443".to_string()]);
|
||||
|
||||
Ok(config)
|
||||
validate_upstreams(&config)?;
|
||||
|
||||
Ok(LoadedConfig {
|
||||
config,
|
||||
source_files: source_files.into_iter().collect(),
|
||||
rendered_hash: hash_rendered_snapshot(&processed),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn validate(&self) -> Result<()> {
|
||||
@@ -824,10 +1109,25 @@ impl ProxyConfig {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/load_idle_policy_tests.rs"]
|
||||
mod load_idle_policy_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/load_security_tests.rs"]
|
||||
mod load_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/load_mask_shape_security_tests.rs"]
|
||||
mod load_mask_shape_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
const TEST_SHADOWSOCKS_URL: &str =
|
||||
"ss://2022-blake3-aes-256-gcm:MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDE=@127.0.0.1:8388";
|
||||
|
||||
#[test]
|
||||
fn serde_defaults_remain_unchanged_for_present_sections() {
|
||||
let toml = r#"
|
||||
@@ -857,10 +1157,7 @@ mod tests {
|
||||
cfg.general.me_init_retry_attempts,
|
||||
default_me_init_retry_attempts()
|
||||
);
|
||||
assert_eq!(
|
||||
cfg.general.me2dc_fallback,
|
||||
default_me2dc_fallback()
|
||||
);
|
||||
assert_eq!(cfg.general.me2dc_fallback, default_me2dc_fallback());
|
||||
assert_eq!(
|
||||
cfg.general.proxy_config_v4_cache_path,
|
||||
default_proxy_config_v4_cache_path()
|
||||
@@ -1111,6 +1408,48 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_with_metadata_collects_include_files() {
|
||||
let nonce = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_nanos();
|
||||
let dir = std::env::temp_dir().join(format!("telemt_load_metadata_{nonce}"));
|
||||
std::fs::create_dir_all(&dir).unwrap();
|
||||
let main_path = dir.join("config.toml");
|
||||
let include_path = dir.join("included.toml");
|
||||
|
||||
std::fs::write(
|
||||
&include_path,
|
||||
r#"
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#,
|
||||
)
|
||||
.unwrap();
|
||||
std::fs::write(
|
||||
&main_path,
|
||||
r#"
|
||||
include = "included.toml"
|
||||
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
"#,
|
||||
)
|
||||
.unwrap();
|
||||
|
||||
let loaded = ProxyConfig::load_with_metadata(&main_path).unwrap();
|
||||
let main_normalized = normalize_config_path(&main_path);
|
||||
let include_normalized = normalize_config_path(&include_path);
|
||||
|
||||
assert!(loaded.source_files.contains(&main_normalized));
|
||||
assert!(loaded.source_files.contains(&include_normalized));
|
||||
|
||||
let _ = std::fs::remove_file(main_path);
|
||||
let _ = std::fs::remove_file(include_path);
|
||||
let _ = std::fs::remove_dir(dir);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn dc_overrides_inject_dc203_default() {
|
||||
let toml = r#"
|
||||
@@ -1127,11 +1466,12 @@ mod tests {
|
||||
let path = dir.join("telemt_dc_override_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let cfg = ProxyConfig::load(&path).unwrap();
|
||||
assert!(cfg
|
||||
.dc_overrides
|
||||
.get("203")
|
||||
.map(|v| v.contains(&"91.105.192.100:443".to_string()))
|
||||
.unwrap_or(false));
|
||||
assert!(
|
||||
cfg.dc_overrides
|
||||
.get("203")
|
||||
.map(|v| v.contains(&"91.105.192.100:443".to_string()))
|
||||
.unwrap_or(false)
|
||||
);
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
@@ -1318,11 +1658,9 @@ mod tests {
|
||||
let path = dir.join("telemt_me_adaptive_floor_min_writers_out_of_range_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let err = ProxyConfig::load(&path).unwrap_err().to_string();
|
||||
assert!(
|
||||
err.contains(
|
||||
"general.me_adaptive_floor_min_writers_single_endpoint must be within [1, 32]"
|
||||
)
|
||||
);
|
||||
assert!(err.contains(
|
||||
"general.me_adaptive_floor_min_writers_single_endpoint must be within [1, 32]"
|
||||
));
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
@@ -1482,6 +1820,51 @@ mod tests {
|
||||
let _ = std::fs::remove_file(path_valid);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn me_route_backpressure_base_timeout_ms_out_of_range_is_rejected() {
|
||||
let toml = r#"
|
||||
[general]
|
||||
me_route_backpressure_base_timeout_ms = 5001
|
||||
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_me_route_backpressure_base_timeout_ms_out_of_range_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let err = ProxyConfig::load(&path).unwrap_err().to_string();
|
||||
assert!(
|
||||
err.contains("general.me_route_backpressure_base_timeout_ms must be within [1, 5000]")
|
||||
);
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn me_route_backpressure_high_timeout_ms_out_of_range_is_rejected() {
|
||||
let toml = r#"
|
||||
[general]
|
||||
me_route_backpressure_base_timeout_ms = 100
|
||||
me_route_backpressure_high_timeout_ms = 5001
|
||||
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_me_route_backpressure_high_timeout_ms_out_of_range_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let err = ProxyConfig::load(&path).unwrap_err().to_string();
|
||||
assert!(
|
||||
err.contains("general.me_route_backpressure_high_timeout_ms must be within [1, 5000]")
|
||||
);
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn me_route_no_writer_wait_ms_out_of_range_is_rejected() {
|
||||
let toml = r#"
|
||||
@@ -1844,6 +2227,45 @@ mod tests {
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn force_close_default_matches_drain_ttl() {
|
||||
let toml = r#"
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_force_close_default_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let cfg = ProxyConfig::load(&path).unwrap();
|
||||
assert_eq!(cfg.general.me_reinit_drain_timeout_secs, 90);
|
||||
assert_eq!(cfg.general.effective_me_pool_force_close_secs(), 90);
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn force_close_zero_uses_runtime_safety_fallback() {
|
||||
let toml = r#"
|
||||
[general]
|
||||
me_reinit_drain_timeout_secs = 0
|
||||
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_force_close_zero_fallback_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let cfg = ProxyConfig::load(&path).unwrap();
|
||||
assert_eq!(cfg.general.me_reinit_drain_timeout_secs, 0);
|
||||
assert_eq!(cfg.general.effective_me_pool_force_close_secs(), 300);
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn force_close_bumped_when_below_drain_ttl() {
|
||||
let toml = r#"
|
||||
@@ -1865,6 +2287,59 @@ mod tests {
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tls_fetch_scope_default_is_empty() {
|
||||
let toml = r#"
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_tls_fetch_scope_default_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let cfg = ProxyConfig::load(&path).unwrap();
|
||||
assert!(cfg.censorship.tls_fetch_scope.is_empty());
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tls_fetch_scope_is_trimmed_during_load() {
|
||||
let toml = r#"
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
tls_fetch_scope = " me "
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_tls_fetch_scope_trim_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let cfg = ProxyConfig::load(&path).unwrap();
|
||||
assert_eq!(cfg.censorship.tls_fetch_scope, "me");
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn tls_fetch_scope_whitespace_becomes_empty() {
|
||||
let toml = r#"
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
tls_fetch_scope = " "
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_tls_fetch_scope_blank_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let cfg = ProxyConfig::load(&path).unwrap();
|
||||
assert!(cfg.censorship.tls_fetch_scope.is_empty());
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_ad_tag_is_disabled_during_load() {
|
||||
let toml = r#"
|
||||
@@ -1908,6 +2383,124 @@ mod tests {
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn shadowsocks_upstream_url_loads_successfully() {
|
||||
let toml = format!(
|
||||
r#"
|
||||
[general]
|
||||
use_middle_proxy = false
|
||||
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
|
||||
[[upstreams]]
|
||||
type = "shadowsocks"
|
||||
url = "{url}"
|
||||
interface = "127.0.0.2"
|
||||
"#,
|
||||
url = TEST_SHADOWSOCKS_URL,
|
||||
);
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_shadowsocks_valid_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let cfg = ProxyConfig::load(&path).unwrap();
|
||||
|
||||
assert!(matches!(
|
||||
&cfg.upstreams[0].upstream_type,
|
||||
UpstreamType::Shadowsocks { url, interface }
|
||||
if url == TEST_SHADOWSOCKS_URL && interface.as_deref() == Some("127.0.0.2")
|
||||
));
|
||||
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn shadowsocks_requires_direct_mode() {
|
||||
let toml = format!(
|
||||
r#"
|
||||
[general]
|
||||
use_middle_proxy = true
|
||||
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
|
||||
[[upstreams]]
|
||||
type = "shadowsocks"
|
||||
url = "{url}"
|
||||
"#,
|
||||
url = TEST_SHADOWSOCKS_URL,
|
||||
);
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_shadowsocks_me_reject_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let err = ProxyConfig::load(&path).unwrap_err().to_string();
|
||||
|
||||
assert!(err.contains("shadowsocks upstreams require general.use_middle_proxy = false"));
|
||||
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_shadowsocks_url_is_rejected() {
|
||||
let toml = r#"
|
||||
[general]
|
||||
use_middle_proxy = false
|
||||
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
|
||||
[[upstreams]]
|
||||
type = "shadowsocks"
|
||||
url = "not-a-valid-ss-url"
|
||||
"#;
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_shadowsocks_invalid_url_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let err = ProxyConfig::load(&path).unwrap_err().to_string();
|
||||
|
||||
assert!(err.contains("invalid shadowsocks url"));
|
||||
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn shadowsocks_plugins_are_rejected() {
|
||||
let toml = format!(
|
||||
r#"
|
||||
[general]
|
||||
use_middle_proxy = false
|
||||
|
||||
[censorship]
|
||||
tls_domain = "example.com"
|
||||
|
||||
[access.users]
|
||||
user = "00000000000000000000000000000000"
|
||||
|
||||
[[upstreams]]
|
||||
type = "shadowsocks"
|
||||
url = "{url}?plugin=obfs-local%3Bobfs%3Dhttp"
|
||||
"#,
|
||||
url = TEST_SHADOWSOCKS_URL,
|
||||
);
|
||||
let dir = std::env::temp_dir();
|
||||
let path = dir.join("telemt_shadowsocks_plugin_reject_test.toml");
|
||||
std::fs::write(&path, toml).unwrap();
|
||||
let err = ProxyConfig::load(&path).unwrap_err().to_string();
|
||||
|
||||
assert!(err.contains("shadowsocks plugins are not supported"));
|
||||
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_user_ad_tag_reports_access_user_ad_tags_key() {
|
||||
let toml = r#"
|
||||
|
||||
@@ -1,9 +1,9 @@
|
||||
//! Configuration.
|
||||
|
||||
pub(crate) mod defaults;
|
||||
mod types;
|
||||
mod load;
|
||||
pub mod hot_reload;
|
||||
mod load;
|
||||
mod types;
|
||||
|
||||
pub use load::ProxyConfig;
|
||||
pub use types::*;
|
||||
|
||||
80
src/config/tests/load_idle_policy_tests.rs
Normal file
80
src/config/tests/load_idle_policy_tests.rs
Normal file
@@ -0,0 +1,80 @@
|
||||
use super::*;
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
fn write_temp_config(contents: &str) -> PathBuf {
|
||||
let nonce = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("system time must be after unix epoch")
|
||||
.as_nanos();
|
||||
let path = std::env::temp_dir().join(format!("telemt-idle-policy-{nonce}.toml"));
|
||||
fs::write(&path, contents).expect("temp config write must succeed");
|
||||
path
|
||||
}
|
||||
|
||||
fn remove_temp_config(path: &PathBuf) {
|
||||
let _ = fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_relay_hard_idle_smaller_than_soft_idle_with_clear_error() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[timeouts]
|
||||
relay_client_idle_soft_secs = 120
|
||||
relay_client_idle_hard_secs = 60
|
||||
"#,
|
||||
);
|
||||
|
||||
let err = ProxyConfig::load(&path).expect_err("config with hard<soft must fail");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains(
|
||||
"timeouts.relay_client_idle_hard_secs must be >= timeouts.relay_client_idle_soft_secs"
|
||||
),
|
||||
"error must explain the violated hard>=soft invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_relay_grace_larger_than_hard_idle_with_clear_error() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[timeouts]
|
||||
relay_client_idle_soft_secs = 60
|
||||
relay_client_idle_hard_secs = 120
|
||||
relay_idle_grace_after_downstream_activity_secs = 121
|
||||
"#,
|
||||
);
|
||||
|
||||
let err = ProxyConfig::load(&path).expect_err("config with grace>hard must fail");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("timeouts.relay_idle_grace_after_downstream_activity_secs must be <= timeouts.relay_client_idle_hard_secs"),
|
||||
"error must explain the violated grace<=hard invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_zero_handshake_timeout_with_clear_error() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[timeouts]
|
||||
client_handshake = 0
|
||||
"#,
|
||||
);
|
||||
|
||||
let err = ProxyConfig::load(&path).expect_err("config with zero handshake timeout must fail");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("timeouts.client_handshake must be > 0"),
|
||||
"error must explain that handshake timeout must be positive, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
238
src/config/tests/load_mask_shape_security_tests.rs
Normal file
238
src/config/tests/load_mask_shape_security_tests.rs
Normal file
@@ -0,0 +1,238 @@
|
||||
use super::*;
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
fn write_temp_config(contents: &str) -> PathBuf {
|
||||
let nonce = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("system time must be after unix epoch")
|
||||
.as_nanos();
|
||||
let path = std::env::temp_dir().join(format!("telemt-load-mask-shape-security-{nonce}.toml"));
|
||||
fs::write(&path, contents).expect("temp config write must succeed");
|
||||
path
|
||||
}
|
||||
|
||||
fn remove_temp_config(path: &PathBuf) {
|
||||
let _ = fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_zero_mask_shape_bucket_floor_bytes() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_shape_bucket_floor_bytes = 0
|
||||
mask_shape_bucket_cap_bytes = 4096
|
||||
"#,
|
||||
);
|
||||
|
||||
let err =
|
||||
ProxyConfig::load(&path).expect_err("zero mask_shape_bucket_floor_bytes must be rejected");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("censorship.mask_shape_bucket_floor_bytes must be > 0"),
|
||||
"error must explain floor>0 invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_mask_shape_bucket_cap_less_than_floor() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_shape_bucket_floor_bytes = 1024
|
||||
mask_shape_bucket_cap_bytes = 512
|
||||
"#,
|
||||
);
|
||||
|
||||
let err =
|
||||
ProxyConfig::load(&path).expect_err("mask_shape_bucket_cap_bytes < floor must be rejected");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains(
|
||||
"censorship.mask_shape_bucket_cap_bytes must be >= censorship.mask_shape_bucket_floor_bytes"
|
||||
),
|
||||
"error must explain cap>=floor invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_accepts_mask_shape_bucket_cap_equal_to_floor() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_shape_hardening = true
|
||||
mask_shape_bucket_floor_bytes = 1024
|
||||
mask_shape_bucket_cap_bytes = 1024
|
||||
"#,
|
||||
);
|
||||
|
||||
let cfg = ProxyConfig::load(&path).expect("equal cap and floor must be accepted");
|
||||
assert!(cfg.censorship.mask_shape_hardening);
|
||||
assert_eq!(cfg.censorship.mask_shape_bucket_floor_bytes, 1024);
|
||||
assert_eq!(cfg.censorship.mask_shape_bucket_cap_bytes, 1024);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_above_cap_blur_when_shape_hardening_disabled() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_shape_hardening = false
|
||||
mask_shape_above_cap_blur = true
|
||||
mask_shape_above_cap_blur_max_bytes = 64
|
||||
"#,
|
||||
);
|
||||
|
||||
let err =
|
||||
ProxyConfig::load(&path).expect_err("above-cap blur must require shape hardening enabled");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains(
|
||||
"censorship.mask_shape_above_cap_blur requires censorship.mask_shape_hardening = true"
|
||||
),
|
||||
"error must explain blur prerequisite, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_above_cap_blur_with_zero_max_bytes() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_shape_hardening = true
|
||||
mask_shape_above_cap_blur = true
|
||||
mask_shape_above_cap_blur_max_bytes = 0
|
||||
"#,
|
||||
);
|
||||
|
||||
let err =
|
||||
ProxyConfig::load(&path).expect_err("above-cap blur max bytes must be > 0 when enabled");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("censorship.mask_shape_above_cap_blur_max_bytes must be > 0 when censorship.mask_shape_above_cap_blur is enabled"),
|
||||
"error must explain blur max bytes invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_timing_normalization_floor_zero_when_enabled() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_timing_normalization_enabled = true
|
||||
mask_timing_normalization_floor_ms = 0
|
||||
mask_timing_normalization_ceiling_ms = 200
|
||||
"#,
|
||||
);
|
||||
|
||||
let err =
|
||||
ProxyConfig::load(&path).expect_err("timing normalization floor must be > 0 when enabled");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("censorship.mask_timing_normalization_floor_ms must be > 0 when censorship.mask_timing_normalization_enabled is true"),
|
||||
"error must explain timing floor invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_timing_normalization_ceiling_below_floor() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_timing_normalization_enabled = true
|
||||
mask_timing_normalization_floor_ms = 220
|
||||
mask_timing_normalization_ceiling_ms = 200
|
||||
"#,
|
||||
);
|
||||
|
||||
let err = ProxyConfig::load(&path).expect_err("timing normalization ceiling must be >= floor");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("censorship.mask_timing_normalization_ceiling_ms must be >= censorship.mask_timing_normalization_floor_ms"),
|
||||
"error must explain timing ceiling/floor invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_accepts_valid_timing_normalization_and_above_cap_blur_config() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_shape_hardening = true
|
||||
mask_shape_above_cap_blur = true
|
||||
mask_shape_above_cap_blur_max_bytes = 128
|
||||
mask_timing_normalization_enabled = true
|
||||
mask_timing_normalization_floor_ms = 150
|
||||
mask_timing_normalization_ceiling_ms = 240
|
||||
"#,
|
||||
);
|
||||
|
||||
let cfg = ProxyConfig::load(&path)
|
||||
.expect("valid blur and timing normalization settings must be accepted");
|
||||
assert!(cfg.censorship.mask_shape_hardening);
|
||||
assert!(cfg.censorship.mask_shape_above_cap_blur);
|
||||
assert_eq!(cfg.censorship.mask_shape_above_cap_blur_max_bytes, 128);
|
||||
assert!(cfg.censorship.mask_timing_normalization_enabled);
|
||||
assert_eq!(cfg.censorship.mask_timing_normalization_floor_ms, 150);
|
||||
assert_eq!(cfg.censorship.mask_timing_normalization_ceiling_ms, 240);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_aggressive_shape_mode_when_shape_hardening_disabled() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_shape_hardening = false
|
||||
mask_shape_hardening_aggressive_mode = true
|
||||
"#,
|
||||
);
|
||||
|
||||
let err = ProxyConfig::load(&path)
|
||||
.expect_err("aggressive shape hardening mode must require shape hardening enabled");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains("censorship.mask_shape_hardening_aggressive_mode requires censorship.mask_shape_hardening = true"),
|
||||
"error must explain aggressive-mode prerequisite, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_accepts_aggressive_shape_mode_when_shape_hardening_enabled() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[censorship]
|
||||
mask_shape_hardening = true
|
||||
mask_shape_hardening_aggressive_mode = true
|
||||
mask_shape_above_cap_blur = true
|
||||
mask_shape_above_cap_blur_max_bytes = 8
|
||||
"#,
|
||||
);
|
||||
|
||||
let cfg = ProxyConfig::load(&path)
|
||||
.expect("aggressive shape hardening mode should be accepted when prerequisites are met");
|
||||
assert!(cfg.censorship.mask_shape_hardening);
|
||||
assert!(cfg.censorship.mask_shape_hardening_aggressive_mode);
|
||||
assert!(cfg.censorship.mask_shape_above_cap_blur);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
88
src/config/tests/load_security_tests.rs
Normal file
88
src/config/tests/load_security_tests.rs
Normal file
@@ -0,0 +1,88 @@
|
||||
use super::*;
|
||||
use std::fs;
|
||||
use std::path::PathBuf;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
fn write_temp_config(contents: &str) -> PathBuf {
|
||||
let nonce = SystemTime::now()
|
||||
.duration_since(UNIX_EPOCH)
|
||||
.expect("system time must be after unix epoch")
|
||||
.as_nanos();
|
||||
let path = std::env::temp_dir().join(format!("telemt-load-security-{nonce}.toml"));
|
||||
fs::write(&path, contents).expect("temp config write must succeed");
|
||||
path
|
||||
}
|
||||
|
||||
fn remove_temp_config(path: &PathBuf) {
|
||||
let _ = fs::remove_file(path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_server_hello_delay_equal_to_handshake_timeout_budget() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[timeouts]
|
||||
client_handshake = 1
|
||||
|
||||
[censorship]
|
||||
server_hello_delay_max_ms = 1000
|
||||
"#,
|
||||
);
|
||||
|
||||
let err =
|
||||
ProxyConfig::load(&path).expect_err("delay equal to handshake timeout must be rejected");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains(
|
||||
"censorship.server_hello_delay_max_ms must be < timeouts.client_handshake * 1000"
|
||||
),
|
||||
"error must explain delay<timeout invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_rejects_server_hello_delay_larger_than_handshake_timeout_budget() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[timeouts]
|
||||
client_handshake = 1
|
||||
|
||||
[censorship]
|
||||
server_hello_delay_max_ms = 1500
|
||||
"#,
|
||||
);
|
||||
|
||||
let err =
|
||||
ProxyConfig::load(&path).expect_err("delay larger than handshake timeout must be rejected");
|
||||
let msg = err.to_string();
|
||||
assert!(
|
||||
msg.contains(
|
||||
"censorship.server_hello_delay_max_ms must be < timeouts.client_handshake * 1000"
|
||||
),
|
||||
"error must explain delay<timeout invariant, got: {msg}"
|
||||
);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn load_accepts_server_hello_delay_strictly_below_handshake_timeout_budget() {
|
||||
let path = write_temp_config(
|
||||
r#"
|
||||
[timeouts]
|
||||
client_handshake = 1
|
||||
|
||||
[censorship]
|
||||
server_hello_delay_max_ms = 999
|
||||
"#,
|
||||
);
|
||||
|
||||
let cfg =
|
||||
ProxyConfig::load(&path).expect("delay below handshake timeout budget must be accepted");
|
||||
assert_eq!(cfg.timeouts.client_handshake, 1);
|
||||
assert_eq!(cfg.censorship.server_hello_delay_max_ms, 999);
|
||||
|
||||
remove_temp_config(&path);
|
||||
}
|
||||
@@ -3,6 +3,7 @@ use ipnetwork::IpNetwork;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::net::IpAddr;
|
||||
use std::path::PathBuf;
|
||||
|
||||
use super::defaults::*;
|
||||
|
||||
@@ -134,8 +135,8 @@ impl MeSocksKdfPolicy {
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
pub enum MeBindStaleMode {
|
||||
Never,
|
||||
#[default]
|
||||
Never,
|
||||
Ttl,
|
||||
Always,
|
||||
}
|
||||
@@ -356,6 +357,9 @@ impl Default for NetworkConfig {
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct GeneralConfig {
|
||||
#[serde(default)]
|
||||
pub data_path: Option<PathBuf>,
|
||||
|
||||
#[serde(default)]
|
||||
pub modes: ProxyModes,
|
||||
|
||||
@@ -458,6 +462,11 @@ pub struct GeneralConfig {
|
||||
#[serde(default = "default_me_c2me_channel_capacity")]
|
||||
pub me_c2me_channel_capacity: usize,
|
||||
|
||||
/// Maximum wait in milliseconds for enqueueing C2ME commands when the queue is full.
|
||||
/// `0` keeps legacy unbounded wait behavior.
|
||||
#[serde(default = "default_me_c2me_send_timeout_ms")]
|
||||
pub me_c2me_send_timeout_ms: u64,
|
||||
|
||||
/// Bounded wait in milliseconds for routing ME DATA to per-connection queue.
|
||||
/// `0` keeps legacy no-wait behavior.
|
||||
#[serde(default = "default_me_reader_route_data_wait_ms")]
|
||||
@@ -712,6 +721,15 @@ pub struct GeneralConfig {
|
||||
#[serde(default = "default_me_route_no_writer_wait_ms")]
|
||||
pub me_route_no_writer_wait_ms: u64,
|
||||
|
||||
/// Maximum cumulative wait in milliseconds for hybrid no-writer mode before failfast.
|
||||
#[serde(default = "default_me_route_hybrid_max_wait_ms")]
|
||||
pub me_route_hybrid_max_wait_ms: u64,
|
||||
|
||||
/// Maximum wait in milliseconds for blocking ME writer channel send fallback.
|
||||
/// `0` keeps legacy unbounded wait behavior.
|
||||
#[serde(default = "default_me_route_blocking_send_timeout_ms")]
|
||||
pub me_route_blocking_send_timeout_ms: u64,
|
||||
|
||||
/// Number of inline recovery attempts in legacy mode.
|
||||
#[serde(default = "default_me_route_inline_recovery_attempts")]
|
||||
pub me_route_inline_recovery_attempts: u32,
|
||||
@@ -794,6 +812,35 @@ pub struct GeneralConfig {
|
||||
#[serde(default = "default_me_pool_drain_ttl_secs")]
|
||||
pub me_pool_drain_ttl_secs: u64,
|
||||
|
||||
/// Force-remove any draining writer on the next cleanup tick, regardless of age/deadline.
|
||||
#[serde(default = "default_me_instadrain")]
|
||||
pub me_instadrain: bool,
|
||||
|
||||
/// Maximum allowed number of draining ME writers before oldest ones are force-closed in batches.
|
||||
/// Set to 0 to disable threshold-based draining cleanup and keep timeout-only behavior.
|
||||
#[serde(default = "default_me_pool_drain_threshold")]
|
||||
pub me_pool_drain_threshold: u64,
|
||||
|
||||
/// Enable staged client eviction for draining ME writers that remain non-empty past TTL.
|
||||
#[serde(default = "default_me_pool_drain_soft_evict_enabled")]
|
||||
pub me_pool_drain_soft_evict_enabled: bool,
|
||||
|
||||
/// Extra grace in seconds after drain TTL before soft-eviction stage starts.
|
||||
#[serde(default = "default_me_pool_drain_soft_evict_grace_secs")]
|
||||
pub me_pool_drain_soft_evict_grace_secs: u64,
|
||||
|
||||
/// Maximum number of client sessions to evict from one draining writer per health tick.
|
||||
#[serde(default = "default_me_pool_drain_soft_evict_per_writer")]
|
||||
pub me_pool_drain_soft_evict_per_writer: u8,
|
||||
|
||||
/// Soft-eviction budget per CPU core for one health tick.
|
||||
#[serde(default = "default_me_pool_drain_soft_evict_budget_per_core")]
|
||||
pub me_pool_drain_soft_evict_budget_per_core: u16,
|
||||
|
||||
/// Cooldown for repetitive soft-eviction on the same writer in milliseconds.
|
||||
#[serde(default = "default_me_pool_drain_soft_evict_cooldown_ms")]
|
||||
pub me_pool_drain_soft_evict_cooldown_ms: u64,
|
||||
|
||||
/// Policy for new binds on stale draining writers.
|
||||
#[serde(default)]
|
||||
pub me_bind_stale_mode: MeBindStaleMode,
|
||||
@@ -808,7 +855,7 @@ pub struct GeneralConfig {
|
||||
pub me_pool_min_fresh_ratio: f32,
|
||||
|
||||
/// Drain timeout in seconds for stale ME writers after endpoint map changes.
|
||||
/// Set to 0 to keep stale writers draining indefinitely (no force-close).
|
||||
/// Set to 0 to use the runtime safety fallback timeout.
|
||||
#[serde(default = "default_me_reinit_drain_timeout_secs")]
|
||||
pub me_reinit_drain_timeout_secs: u64,
|
||||
|
||||
@@ -866,6 +913,7 @@ pub struct GeneralConfig {
|
||||
impl Default for GeneralConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
data_path: None,
|
||||
modes: ProxyModes::default(),
|
||||
prefer_ipv6: false,
|
||||
fast_mode: default_true(),
|
||||
@@ -891,6 +939,7 @@ impl Default for GeneralConfig {
|
||||
me_writer_cmd_channel_capacity: default_me_writer_cmd_channel_capacity(),
|
||||
me_route_channel_capacity: default_me_route_channel_capacity(),
|
||||
me_c2me_channel_capacity: default_me_c2me_channel_capacity(),
|
||||
me_c2me_send_timeout_ms: default_me_c2me_send_timeout_ms(),
|
||||
me_reader_route_data_wait_ms: default_me_reader_route_data_wait_ms(),
|
||||
me_d2c_flush_batch_max_frames: default_me_d2c_flush_batch_max_frames(),
|
||||
me_d2c_flush_batch_max_bytes: default_me_d2c_flush_batch_max_bytes(),
|
||||
@@ -906,24 +955,38 @@ impl Default for GeneralConfig {
|
||||
me_reconnect_backoff_cap_ms: default_reconnect_backoff_cap_ms(),
|
||||
me_reconnect_fast_retry_count: default_me_reconnect_fast_retry_count(),
|
||||
me_single_endpoint_shadow_writers: default_me_single_endpoint_shadow_writers(),
|
||||
me_single_endpoint_outage_mode_enabled: default_me_single_endpoint_outage_mode_enabled(),
|
||||
me_single_endpoint_outage_disable_quarantine: default_me_single_endpoint_outage_disable_quarantine(),
|
||||
me_single_endpoint_outage_backoff_min_ms: default_me_single_endpoint_outage_backoff_min_ms(),
|
||||
me_single_endpoint_outage_backoff_max_ms: default_me_single_endpoint_outage_backoff_max_ms(),
|
||||
me_single_endpoint_shadow_rotate_every_secs: default_me_single_endpoint_shadow_rotate_every_secs(),
|
||||
me_single_endpoint_outage_mode_enabled: default_me_single_endpoint_outage_mode_enabled(
|
||||
),
|
||||
me_single_endpoint_outage_disable_quarantine:
|
||||
default_me_single_endpoint_outage_disable_quarantine(),
|
||||
me_single_endpoint_outage_backoff_min_ms:
|
||||
default_me_single_endpoint_outage_backoff_min_ms(),
|
||||
me_single_endpoint_outage_backoff_max_ms:
|
||||
default_me_single_endpoint_outage_backoff_max_ms(),
|
||||
me_single_endpoint_shadow_rotate_every_secs:
|
||||
default_me_single_endpoint_shadow_rotate_every_secs(),
|
||||
me_floor_mode: MeFloorMode::default(),
|
||||
me_adaptive_floor_idle_secs: default_me_adaptive_floor_idle_secs(),
|
||||
me_adaptive_floor_min_writers_single_endpoint: default_me_adaptive_floor_min_writers_single_endpoint(),
|
||||
me_adaptive_floor_min_writers_multi_endpoint: default_me_adaptive_floor_min_writers_multi_endpoint(),
|
||||
me_adaptive_floor_min_writers_single_endpoint:
|
||||
default_me_adaptive_floor_min_writers_single_endpoint(),
|
||||
me_adaptive_floor_min_writers_multi_endpoint:
|
||||
default_me_adaptive_floor_min_writers_multi_endpoint(),
|
||||
me_adaptive_floor_recover_grace_secs: default_me_adaptive_floor_recover_grace_secs(),
|
||||
me_adaptive_floor_writers_per_core_total: default_me_adaptive_floor_writers_per_core_total(),
|
||||
me_adaptive_floor_writers_per_core_total:
|
||||
default_me_adaptive_floor_writers_per_core_total(),
|
||||
me_adaptive_floor_cpu_cores_override: default_me_adaptive_floor_cpu_cores_override(),
|
||||
me_adaptive_floor_max_extra_writers_single_per_core: default_me_adaptive_floor_max_extra_writers_single_per_core(),
|
||||
me_adaptive_floor_max_extra_writers_multi_per_core: default_me_adaptive_floor_max_extra_writers_multi_per_core(),
|
||||
me_adaptive_floor_max_active_writers_per_core: default_me_adaptive_floor_max_active_writers_per_core(),
|
||||
me_adaptive_floor_max_warm_writers_per_core: default_me_adaptive_floor_max_warm_writers_per_core(),
|
||||
me_adaptive_floor_max_active_writers_global: default_me_adaptive_floor_max_active_writers_global(),
|
||||
me_adaptive_floor_max_warm_writers_global: default_me_adaptive_floor_max_warm_writers_global(),
|
||||
me_adaptive_floor_max_extra_writers_single_per_core:
|
||||
default_me_adaptive_floor_max_extra_writers_single_per_core(),
|
||||
me_adaptive_floor_max_extra_writers_multi_per_core:
|
||||
default_me_adaptive_floor_max_extra_writers_multi_per_core(),
|
||||
me_adaptive_floor_max_active_writers_per_core:
|
||||
default_me_adaptive_floor_max_active_writers_per_core(),
|
||||
me_adaptive_floor_max_warm_writers_per_core:
|
||||
default_me_adaptive_floor_max_warm_writers_per_core(),
|
||||
me_adaptive_floor_max_active_writers_global:
|
||||
default_me_adaptive_floor_max_active_writers_global(),
|
||||
me_adaptive_floor_max_warm_writers_global:
|
||||
default_me_adaptive_floor_max_warm_writers_global(),
|
||||
upstream_connect_retry_attempts: default_upstream_connect_retry_attempts(),
|
||||
upstream_connect_retry_backoff_ms: default_upstream_connect_retry_backoff_ms(),
|
||||
upstream_connect_budget_ms: default_upstream_connect_budget_ms(),
|
||||
@@ -938,13 +1001,16 @@ impl Default for GeneralConfig {
|
||||
me_socks_kdf_policy: MeSocksKdfPolicy::Strict,
|
||||
me_route_backpressure_base_timeout_ms: default_me_route_backpressure_base_timeout_ms(),
|
||||
me_route_backpressure_high_timeout_ms: default_me_route_backpressure_high_timeout_ms(),
|
||||
me_route_backpressure_high_watermark_pct: default_me_route_backpressure_high_watermark_pct(),
|
||||
me_route_backpressure_high_watermark_pct:
|
||||
default_me_route_backpressure_high_watermark_pct(),
|
||||
me_health_interval_ms_unhealthy: default_me_health_interval_ms_unhealthy(),
|
||||
me_health_interval_ms_healthy: default_me_health_interval_ms_healthy(),
|
||||
me_admission_poll_ms: default_me_admission_poll_ms(),
|
||||
me_warn_rate_limit_ms: default_me_warn_rate_limit_ms(),
|
||||
me_route_no_writer_mode: MeRouteNoWriterMode::default(),
|
||||
me_route_no_writer_wait_ms: default_me_route_no_writer_wait_ms(),
|
||||
me_route_hybrid_max_wait_ms: default_me_route_hybrid_max_wait_ms(),
|
||||
me_route_blocking_send_timeout_ms: default_me_route_blocking_send_timeout_ms(),
|
||||
me_route_inline_recovery_attempts: default_me_route_inline_recovery_attempts(),
|
||||
me_route_inline_recovery_wait_ms: default_me_route_inline_recovery_wait_ms(),
|
||||
links: LinksConfig::default(),
|
||||
@@ -962,7 +1028,8 @@ impl Default for GeneralConfig {
|
||||
me_hardswap_warmup_delay_min_ms: default_me_hardswap_warmup_delay_min_ms(),
|
||||
me_hardswap_warmup_delay_max_ms: default_me_hardswap_warmup_delay_max_ms(),
|
||||
me_hardswap_warmup_extra_passes: default_me_hardswap_warmup_extra_passes(),
|
||||
me_hardswap_warmup_pass_backoff_base_ms: default_me_hardswap_warmup_pass_backoff_base_ms(),
|
||||
me_hardswap_warmup_pass_backoff_base_ms:
|
||||
default_me_hardswap_warmup_pass_backoff_base_ms(),
|
||||
me_config_stable_snapshots: default_me_config_stable_snapshots(),
|
||||
me_config_apply_cooldown_secs: default_me_config_apply_cooldown_secs(),
|
||||
me_snapshot_require_http_2xx: default_me_snapshot_require_http_2xx(),
|
||||
@@ -973,6 +1040,14 @@ impl Default for GeneralConfig {
|
||||
me_secret_atomic_snapshot: default_me_secret_atomic_snapshot(),
|
||||
proxy_secret_len_max: default_proxy_secret_len_max(),
|
||||
me_pool_drain_ttl_secs: default_me_pool_drain_ttl_secs(),
|
||||
me_instadrain: default_me_instadrain(),
|
||||
me_pool_drain_threshold: default_me_pool_drain_threshold(),
|
||||
me_pool_drain_soft_evict_enabled: default_me_pool_drain_soft_evict_enabled(),
|
||||
me_pool_drain_soft_evict_grace_secs: default_me_pool_drain_soft_evict_grace_secs(),
|
||||
me_pool_drain_soft_evict_per_writer: default_me_pool_drain_soft_evict_per_writer(),
|
||||
me_pool_drain_soft_evict_budget_per_core:
|
||||
default_me_pool_drain_soft_evict_budget_per_core(),
|
||||
me_pool_drain_soft_evict_cooldown_ms: default_me_pool_drain_soft_evict_cooldown_ms(),
|
||||
me_bind_stale_mode: MeBindStaleMode::default(),
|
||||
me_bind_stale_ttl_secs: default_me_bind_stale_ttl_secs(),
|
||||
me_pool_min_fresh_ratio: default_me_pool_min_fresh_ratio(),
|
||||
@@ -997,8 +1072,10 @@ impl GeneralConfig {
|
||||
/// Resolve the active updater interval for ME infrastructure refresh tasks.
|
||||
/// `update_every` has priority, otherwise legacy proxy_*_auto_reload_secs are used.
|
||||
pub fn effective_update_every_secs(&self) -> u64 {
|
||||
self.update_every
|
||||
.unwrap_or_else(|| self.proxy_secret_auto_reload_secs.min(self.proxy_config_auto_reload_secs))
|
||||
self.update_every.unwrap_or_else(|| {
|
||||
self.proxy_secret_auto_reload_secs
|
||||
.min(self.proxy_config_auto_reload_secs)
|
||||
})
|
||||
}
|
||||
|
||||
/// Resolve periodic zero-downtime reinit interval for ME writers.
|
||||
@@ -1008,8 +1085,13 @@ impl GeneralConfig {
|
||||
|
||||
/// Resolve force-close timeout for stale writers.
|
||||
/// `me_reinit_drain_timeout_secs` remains backward-compatible alias.
|
||||
/// A configured `0` uses the runtime safety fallback (300s).
|
||||
pub fn effective_me_pool_force_close_secs(&self) -> u64 {
|
||||
self.me_reinit_drain_timeout_secs
|
||||
if self.me_reinit_drain_timeout_secs == 0 {
|
||||
300
|
||||
} else {
|
||||
self.me_reinit_drain_timeout_secs
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1145,9 +1227,24 @@ pub struct ServerConfig {
|
||||
#[serde(default = "default_proxy_protocol_header_timeout_ms")]
|
||||
pub proxy_protocol_header_timeout_ms: u64,
|
||||
|
||||
/// Trusted source CIDRs allowed to send incoming PROXY protocol headers.
|
||||
///
|
||||
/// When non-empty, connections from addresses outside this allowlist are
|
||||
/// rejected before `src_addr` is applied.
|
||||
#[serde(default)]
|
||||
pub proxy_protocol_trusted_cidrs: Vec<IpNetwork>,
|
||||
|
||||
/// Port for the Prometheus-compatible metrics endpoint.
|
||||
/// Enables metrics when set; binds on all interfaces (dual-stack) by default.
|
||||
#[serde(default)]
|
||||
pub metrics_port: Option<u16>,
|
||||
|
||||
/// Listen address for metrics in `IP:PORT` format (e.g. `"127.0.0.1:9090"`).
|
||||
/// When set, takes precedence over `metrics_port` and binds on the specified address only.
|
||||
#[serde(default)]
|
||||
pub metrics_listen: Option<String>,
|
||||
|
||||
/// CIDR whitelist for the metrics endpoint.
|
||||
#[serde(default = "default_metrics_whitelist")]
|
||||
pub metrics_whitelist: Vec<IpNetwork>,
|
||||
|
||||
@@ -1156,6 +1253,16 @@ pub struct ServerConfig {
|
||||
|
||||
#[serde(default)]
|
||||
pub listeners: Vec<ListenerConfig>,
|
||||
|
||||
/// Maximum number of concurrent client connections.
|
||||
/// 0 means unlimited.
|
||||
#[serde(default = "default_server_max_connections")]
|
||||
pub max_connections: u32,
|
||||
|
||||
/// Maximum wait in milliseconds while acquiring a connection slot permit.
|
||||
/// `0` keeps legacy unbounded wait behavior.
|
||||
#[serde(default = "default_accept_permit_timeout_ms")]
|
||||
pub accept_permit_timeout_ms: u64,
|
||||
}
|
||||
|
||||
impl Default for ServerConfig {
|
||||
@@ -1169,10 +1276,14 @@ impl Default for ServerConfig {
|
||||
listen_tcp: None,
|
||||
proxy_protocol: false,
|
||||
proxy_protocol_header_timeout_ms: default_proxy_protocol_header_timeout_ms(),
|
||||
proxy_protocol_trusted_cidrs: Vec::new(),
|
||||
metrics_port: None,
|
||||
metrics_listen: None,
|
||||
metrics_whitelist: default_metrics_whitelist(),
|
||||
api: ApiConfig::default(),
|
||||
listeners: Vec::new(),
|
||||
max_connections: default_server_max_connections(),
|
||||
accept_permit_timeout_ms: default_accept_permit_timeout_ms(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1182,6 +1293,24 @@ pub struct TimeoutsConfig {
|
||||
#[serde(default = "default_handshake_timeout")]
|
||||
pub client_handshake: u64,
|
||||
|
||||
/// Enables soft/hard relay client idle policy for middle-relay sessions.
|
||||
#[serde(default = "default_relay_idle_policy_v2_enabled")]
|
||||
pub relay_idle_policy_v2_enabled: bool,
|
||||
|
||||
/// Soft idle threshold for middle-relay client uplink activity in seconds.
|
||||
/// Hitting this threshold marks the session as idle-candidate, but does not close it.
|
||||
#[serde(default = "default_relay_client_idle_soft_secs")]
|
||||
pub relay_client_idle_soft_secs: u64,
|
||||
|
||||
/// Hard idle threshold for middle-relay client uplink activity in seconds.
|
||||
/// Hitting this threshold closes the session.
|
||||
#[serde(default = "default_relay_client_idle_hard_secs")]
|
||||
pub relay_client_idle_hard_secs: u64,
|
||||
|
||||
/// Additional grace in seconds added to hard idle window after recent downstream activity.
|
||||
#[serde(default = "default_relay_idle_grace_after_downstream_activity_secs")]
|
||||
pub relay_idle_grace_after_downstream_activity_secs: u64,
|
||||
|
||||
#[serde(default = "default_connect_timeout")]
|
||||
pub tg_connect: u64,
|
||||
|
||||
@@ -1204,6 +1333,11 @@ impl Default for TimeoutsConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
client_handshake: default_handshake_timeout(),
|
||||
relay_idle_policy_v2_enabled: default_relay_idle_policy_v2_enabled(),
|
||||
relay_client_idle_soft_secs: default_relay_client_idle_soft_secs(),
|
||||
relay_client_idle_hard_secs: default_relay_client_idle_hard_secs(),
|
||||
relay_idle_grace_after_downstream_activity_secs:
|
||||
default_relay_idle_grace_after_downstream_activity_secs(),
|
||||
tg_connect: default_connect_timeout(),
|
||||
client_keepalive: default_keepalive(),
|
||||
client_ack: default_ack_timeout(),
|
||||
@@ -1222,6 +1356,11 @@ pub struct AntiCensorshipConfig {
|
||||
#[serde(default)]
|
||||
pub tls_domains: Vec<String>,
|
||||
|
||||
/// Upstream scope used for TLS front metadata fetches.
|
||||
/// Empty value keeps default upstream routing behavior.
|
||||
#[serde(default = "default_tls_fetch_scope")]
|
||||
pub tls_fetch_scope: String,
|
||||
|
||||
#[serde(default = "default_true")]
|
||||
pub mask: bool,
|
||||
|
||||
@@ -1272,6 +1411,46 @@ pub struct AntiCensorshipConfig {
|
||||
/// Allows the backend to see the real client IP.
|
||||
#[serde(default)]
|
||||
pub mask_proxy_protocol: u8,
|
||||
|
||||
/// Enable shape-channel hardening on mask backend path by padding
|
||||
/// client->mask stream tail to configured buckets on stream end.
|
||||
#[serde(default = "default_mask_shape_hardening")]
|
||||
pub mask_shape_hardening: bool,
|
||||
|
||||
/// Opt-in aggressive shape hardening mode.
|
||||
/// When enabled, masking may shape some backend-silent timeout paths and
|
||||
/// enforces strictly positive above-cap blur when blur is enabled.
|
||||
#[serde(default = "default_mask_shape_hardening_aggressive_mode")]
|
||||
pub mask_shape_hardening_aggressive_mode: bool,
|
||||
|
||||
/// Minimum bucket size for mask shape hardening padding.
|
||||
#[serde(default = "default_mask_shape_bucket_floor_bytes")]
|
||||
pub mask_shape_bucket_floor_bytes: usize,
|
||||
|
||||
/// Maximum bucket size for mask shape hardening padding.
|
||||
#[serde(default = "default_mask_shape_bucket_cap_bytes")]
|
||||
pub mask_shape_bucket_cap_bytes: usize,
|
||||
|
||||
/// Add bounded random tail bytes even when total bytes already exceed
|
||||
/// mask_shape_bucket_cap_bytes.
|
||||
#[serde(default = "default_mask_shape_above_cap_blur")]
|
||||
pub mask_shape_above_cap_blur: bool,
|
||||
|
||||
/// Maximum random bytes appended above cap when above-cap blur is enabled.
|
||||
#[serde(default = "default_mask_shape_above_cap_blur_max_bytes")]
|
||||
pub mask_shape_above_cap_blur_max_bytes: usize,
|
||||
|
||||
/// Enable outcome-time normalization envelope for masking fallback.
|
||||
#[serde(default = "default_mask_timing_normalization_enabled")]
|
||||
pub mask_timing_normalization_enabled: bool,
|
||||
|
||||
/// Lower bound (ms) for masking outcome timing envelope.
|
||||
#[serde(default = "default_mask_timing_normalization_floor_ms")]
|
||||
pub mask_timing_normalization_floor_ms: u64,
|
||||
|
||||
/// Upper bound (ms) for masking outcome timing envelope.
|
||||
#[serde(default = "default_mask_timing_normalization_ceiling_ms")]
|
||||
pub mask_timing_normalization_ceiling_ms: u64,
|
||||
}
|
||||
|
||||
impl Default for AntiCensorshipConfig {
|
||||
@@ -1279,6 +1458,7 @@ impl Default for AntiCensorshipConfig {
|
||||
Self {
|
||||
tls_domain: default_tls_domain(),
|
||||
tls_domains: Vec::new(),
|
||||
tls_fetch_scope: default_tls_fetch_scope(),
|
||||
mask: default_true(),
|
||||
mask_host: None,
|
||||
mask_port: default_mask_port(),
|
||||
@@ -1292,6 +1472,15 @@ impl Default for AntiCensorshipConfig {
|
||||
tls_full_cert_ttl_secs: default_tls_full_cert_ttl_secs(),
|
||||
alpn_enforce: default_alpn_enforce(),
|
||||
mask_proxy_protocol: 0,
|
||||
mask_shape_hardening: default_mask_shape_hardening(),
|
||||
mask_shape_hardening_aggressive_mode: default_mask_shape_hardening_aggressive_mode(),
|
||||
mask_shape_bucket_floor_bytes: default_mask_shape_bucket_floor_bytes(),
|
||||
mask_shape_bucket_cap_bytes: default_mask_shape_bucket_cap_bytes(),
|
||||
mask_shape_above_cap_blur: default_mask_shape_above_cap_blur(),
|
||||
mask_shape_above_cap_blur_max_bytes: default_mask_shape_above_cap_blur_max_bytes(),
|
||||
mask_timing_normalization_enabled: default_mask_timing_normalization_enabled(),
|
||||
mask_timing_normalization_floor_ms: default_mask_timing_normalization_floor_ms(),
|
||||
mask_timing_normalization_ceiling_ms: default_mask_timing_normalization_ceiling_ms(),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1317,6 +1506,11 @@ pub struct AccessConfig {
|
||||
#[serde(default)]
|
||||
pub user_max_unique_ips: HashMap<String, usize>,
|
||||
|
||||
/// Global per-user unique IP limit applied when a user has no individual override.
|
||||
/// `0` disables the inherited limit.
|
||||
#[serde(default = "default_user_max_unique_ips_global_each")]
|
||||
pub user_max_unique_ips_global_each: usize,
|
||||
|
||||
#[serde(default)]
|
||||
pub user_max_unique_ips_mode: UserMaxUniqueIpsMode,
|
||||
|
||||
@@ -1342,6 +1536,7 @@ impl Default for AccessConfig {
|
||||
user_expirations: HashMap::new(),
|
||||
user_data_quota: HashMap::new(),
|
||||
user_max_unique_ips: HashMap::new(),
|
||||
user_max_unique_ips_global_each: default_user_max_unique_ips_global_each(),
|
||||
user_max_unique_ips_mode: UserMaxUniqueIpsMode::default(),
|
||||
user_max_unique_ips_window_secs: default_user_max_unique_ips_window_secs(),
|
||||
replay_check_len: default_replay_check_len(),
|
||||
@@ -1378,6 +1573,11 @@ pub enum UpstreamType {
|
||||
#[serde(default)]
|
||||
password: Option<String>,
|
||||
},
|
||||
Shadowsocks {
|
||||
url: String,
|
||||
#[serde(default)]
|
||||
interface: Option<String>,
|
||||
},
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
@@ -1458,7 +1658,10 @@ impl ShowLink {
|
||||
}
|
||||
|
||||
impl Serialize for ShowLink {
|
||||
fn serialize<S: serde::Serializer>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> {
|
||||
fn serialize<S: serde::Serializer>(
|
||||
&self,
|
||||
serializer: S,
|
||||
) -> std::result::Result<S::Ok, S::Error> {
|
||||
match self {
|
||||
ShowLink::None => Vec::<String>::new().serialize(serializer),
|
||||
ShowLink::All => serializer.serialize_str("*"),
|
||||
@@ -1468,7 +1671,9 @@ impl Serialize for ShowLink {
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for ShowLink {
|
||||
fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> std::result::Result<Self, D::Error> {
|
||||
fn deserialize<D: serde::Deserializer<'de>>(
|
||||
deserializer: D,
|
||||
) -> std::result::Result<Self, D::Error> {
|
||||
use serde::de;
|
||||
|
||||
struct ShowLinkVisitor;
|
||||
@@ -1484,14 +1689,14 @@ impl<'de> Deserialize<'de> for ShowLink {
|
||||
if v == "*" {
|
||||
Ok(ShowLink::All)
|
||||
} else {
|
||||
Err(de::Error::invalid_value(
|
||||
de::Unexpected::Str(v),
|
||||
&r#""*""#,
|
||||
))
|
||||
Err(de::Error::invalid_value(de::Unexpected::Str(v), &r#""*""#))
|
||||
}
|
||||
}
|
||||
|
||||
fn visit_seq<A: de::SeqAccess<'de>>(self, mut seq: A) -> std::result::Result<ShowLink, A::Error> {
|
||||
fn visit_seq<A: de::SeqAccess<'de>>(
|
||||
self,
|
||||
mut seq: A,
|
||||
) -> std::result::Result<ShowLink, A::Error> {
|
||||
let mut names = Vec::new();
|
||||
while let Some(name) = seq.next_element::<String>()? {
|
||||
names.push(name);
|
||||
|
||||
@@ -13,10 +13,13 @@
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use aes::Aes256;
|
||||
use ctr::{Ctr128BE, cipher::{KeyIvInit, StreamCipher}};
|
||||
use zeroize::Zeroize;
|
||||
use crate::error::{ProxyError, Result};
|
||||
use aes::Aes256;
|
||||
use ctr::{
|
||||
Ctr128BE,
|
||||
cipher::{KeyIvInit, StreamCipher},
|
||||
};
|
||||
use zeroize::Zeroize;
|
||||
|
||||
type Aes256Ctr = Ctr128BE<Aes256>;
|
||||
|
||||
@@ -42,33 +45,39 @@ impl AesCtr {
|
||||
cipher: Aes256Ctr::new(key.into(), (&iv_bytes).into()),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Create from key and IV slices
|
||||
pub fn from_key_iv(key: &[u8], iv: &[u8]) -> Result<Self> {
|
||||
if key.len() != 32 {
|
||||
return Err(ProxyError::InvalidKeyLength { expected: 32, got: key.len() });
|
||||
return Err(ProxyError::InvalidKeyLength {
|
||||
expected: 32,
|
||||
got: key.len(),
|
||||
});
|
||||
}
|
||||
if iv.len() != 16 {
|
||||
return Err(ProxyError::InvalidKeyLength { expected: 16, got: iv.len() });
|
||||
return Err(ProxyError::InvalidKeyLength {
|
||||
expected: 16,
|
||||
got: iv.len(),
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
let key: [u8; 32] = key.try_into().unwrap();
|
||||
let iv = u128::from_be_bytes(iv.try_into().unwrap());
|
||||
Ok(Self::new(&key, iv))
|
||||
}
|
||||
|
||||
|
||||
/// Encrypt/decrypt data in-place (CTR mode is symmetric)
|
||||
pub fn apply(&mut self, data: &mut [u8]) {
|
||||
self.cipher.apply_keystream(data);
|
||||
}
|
||||
|
||||
|
||||
/// Encrypt data, returning new buffer
|
||||
pub fn encrypt(&mut self, data: &[u8]) -> Vec<u8> {
|
||||
let mut output = data.to_vec();
|
||||
self.apply(&mut output);
|
||||
output
|
||||
}
|
||||
|
||||
|
||||
/// Decrypt data (for CTR, identical to encrypt)
|
||||
pub fn decrypt(&mut self, data: &[u8]) -> Vec<u8> {
|
||||
self.encrypt(data)
|
||||
@@ -99,27 +108,33 @@ impl Drop for AesCbc {
|
||||
impl AesCbc {
|
||||
/// AES block size
|
||||
const BLOCK_SIZE: usize = 16;
|
||||
|
||||
|
||||
/// Create new AES-CBC cipher with key and IV
|
||||
pub fn new(key: [u8; 32], iv: [u8; 16]) -> Self {
|
||||
Self { key, iv }
|
||||
}
|
||||
|
||||
|
||||
/// Create from slices
|
||||
pub fn from_slices(key: &[u8], iv: &[u8]) -> Result<Self> {
|
||||
if key.len() != 32 {
|
||||
return Err(ProxyError::InvalidKeyLength { expected: 32, got: key.len() });
|
||||
return Err(ProxyError::InvalidKeyLength {
|
||||
expected: 32,
|
||||
got: key.len(),
|
||||
});
|
||||
}
|
||||
if iv.len() != 16 {
|
||||
return Err(ProxyError::InvalidKeyLength { expected: 16, got: iv.len() });
|
||||
return Err(ProxyError::InvalidKeyLength {
|
||||
expected: 16,
|
||||
got: iv.len(),
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
Ok(Self {
|
||||
key: key.try_into().unwrap(),
|
||||
iv: iv.try_into().unwrap(),
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
/// Encrypt a single block using raw AES (no chaining)
|
||||
fn encrypt_block(&self, block: &[u8; 16], key_schedule: &aes::Aes256) -> [u8; 16] {
|
||||
use aes::cipher::BlockEncrypt;
|
||||
@@ -127,7 +142,7 @@ impl AesCbc {
|
||||
key_schedule.encrypt_block((&mut output).into());
|
||||
output
|
||||
}
|
||||
|
||||
|
||||
/// Decrypt a single block using raw AES (no chaining)
|
||||
fn decrypt_block(&self, block: &[u8; 16], key_schedule: &aes::Aes256) -> [u8; 16] {
|
||||
use aes::cipher::BlockDecrypt;
|
||||
@@ -135,7 +150,7 @@ impl AesCbc {
|
||||
key_schedule.decrypt_block((&mut output).into());
|
||||
output
|
||||
}
|
||||
|
||||
|
||||
/// XOR two 16-byte blocks
|
||||
fn xor_blocks(a: &[u8; 16], b: &[u8; 16]) -> [u8; 16] {
|
||||
let mut result = [0u8; 16];
|
||||
@@ -144,27 +159,28 @@ impl AesCbc {
|
||||
}
|
||||
result
|
||||
}
|
||||
|
||||
|
||||
/// Encrypt data using CBC mode with proper chaining
|
||||
///
|
||||
/// CBC Encryption: C[i] = AES_Encrypt(P[i] XOR C[i-1]), where C[-1] = IV
|
||||
pub fn encrypt(&self, data: &[u8]) -> Result<Vec<u8>> {
|
||||
if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
|
||||
return Err(ProxyError::Crypto(
|
||||
format!("CBC data must be aligned to 16 bytes, got {}", data.len())
|
||||
));
|
||||
return Err(ProxyError::Crypto(format!(
|
||||
"CBC data must be aligned to 16 bytes, got {}",
|
||||
data.len()
|
||||
)));
|
||||
}
|
||||
|
||||
|
||||
if data.is_empty() {
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
|
||||
|
||||
use aes::cipher::KeyInit;
|
||||
let key_schedule = aes::Aes256::new((&self.key).into());
|
||||
|
||||
|
||||
let mut result = Vec::with_capacity(data.len());
|
||||
let mut prev_ciphertext = self.iv;
|
||||
|
||||
|
||||
for chunk in data.chunks(Self::BLOCK_SIZE) {
|
||||
let plaintext: [u8; 16] = chunk.try_into().unwrap();
|
||||
let xored = Self::xor_blocks(&plaintext, &prev_ciphertext);
|
||||
@@ -172,30 +188,31 @@ impl AesCbc {
|
||||
prev_ciphertext = ciphertext;
|
||||
result.extend_from_slice(&ciphertext);
|
||||
}
|
||||
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
|
||||
/// Decrypt data using CBC mode with proper chaining
|
||||
///
|
||||
/// CBC Decryption: P[i] = AES_Decrypt(C[i]) XOR C[i-1], where C[-1] = IV
|
||||
pub fn decrypt(&self, data: &[u8]) -> Result<Vec<u8>> {
|
||||
if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
|
||||
return Err(ProxyError::Crypto(
|
||||
format!("CBC data must be aligned to 16 bytes, got {}", data.len())
|
||||
));
|
||||
return Err(ProxyError::Crypto(format!(
|
||||
"CBC data must be aligned to 16 bytes, got {}",
|
||||
data.len()
|
||||
)));
|
||||
}
|
||||
|
||||
|
||||
if data.is_empty() {
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
|
||||
|
||||
use aes::cipher::KeyInit;
|
||||
let key_schedule = aes::Aes256::new((&self.key).into());
|
||||
|
||||
|
||||
let mut result = Vec::with_capacity(data.len());
|
||||
let mut prev_ciphertext = self.iv;
|
||||
|
||||
|
||||
for chunk in data.chunks(Self::BLOCK_SIZE) {
|
||||
let ciphertext: [u8; 16] = chunk.try_into().unwrap();
|
||||
let decrypted = self.decrypt_block(&ciphertext, &key_schedule);
|
||||
@@ -203,75 +220,77 @@ impl AesCbc {
|
||||
prev_ciphertext = ciphertext;
|
||||
result.extend_from_slice(&plaintext);
|
||||
}
|
||||
|
||||
|
||||
Ok(result)
|
||||
}
|
||||
|
||||
|
||||
/// Encrypt data in-place
|
||||
pub fn encrypt_in_place(&self, data: &mut [u8]) -> Result<()> {
|
||||
if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
|
||||
return Err(ProxyError::Crypto(
|
||||
format!("CBC data must be aligned to 16 bytes, got {}", data.len())
|
||||
));
|
||||
return Err(ProxyError::Crypto(format!(
|
||||
"CBC data must be aligned to 16 bytes, got {}",
|
||||
data.len()
|
||||
)));
|
||||
}
|
||||
|
||||
|
||||
if data.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
|
||||
use aes::cipher::KeyInit;
|
||||
let key_schedule = aes::Aes256::new((&self.key).into());
|
||||
|
||||
|
||||
let mut prev_ciphertext = self.iv;
|
||||
|
||||
|
||||
for i in (0..data.len()).step_by(Self::BLOCK_SIZE) {
|
||||
let block = &mut data[i..i + Self::BLOCK_SIZE];
|
||||
|
||||
|
||||
for j in 0..Self::BLOCK_SIZE {
|
||||
block[j] ^= prev_ciphertext[j];
|
||||
}
|
||||
|
||||
|
||||
let block_array: &mut [u8; 16] = block.try_into().unwrap();
|
||||
*block_array = self.encrypt_block(block_array, &key_schedule);
|
||||
|
||||
|
||||
prev_ciphertext = *block_array;
|
||||
}
|
||||
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
/// Decrypt data in-place
|
||||
pub fn decrypt_in_place(&self, data: &mut [u8]) -> Result<()> {
|
||||
if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
|
||||
return Err(ProxyError::Crypto(
|
||||
format!("CBC data must be aligned to 16 bytes, got {}", data.len())
|
||||
));
|
||||
return Err(ProxyError::Crypto(format!(
|
||||
"CBC data must be aligned to 16 bytes, got {}",
|
||||
data.len()
|
||||
)));
|
||||
}
|
||||
|
||||
|
||||
if data.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
|
||||
use aes::cipher::KeyInit;
|
||||
let key_schedule = aes::Aes256::new((&self.key).into());
|
||||
|
||||
|
||||
let mut prev_ciphertext = self.iv;
|
||||
|
||||
|
||||
for i in (0..data.len()).step_by(Self::BLOCK_SIZE) {
|
||||
let block = &mut data[i..i + Self::BLOCK_SIZE];
|
||||
|
||||
|
||||
let current_ciphertext: [u8; 16] = block.try_into().unwrap();
|
||||
|
||||
|
||||
let block_array: &mut [u8; 16] = block.try_into().unwrap();
|
||||
*block_array = self.decrypt_block(block_array, &key_schedule);
|
||||
|
||||
|
||||
for j in 0..Self::BLOCK_SIZE {
|
||||
block[j] ^= prev_ciphertext[j];
|
||||
}
|
||||
|
||||
|
||||
prev_ciphertext = current_ciphertext;
|
||||
}
|
||||
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -318,227 +337,227 @@ impl Decryptor for PassthroughEncryptor {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
|
||||
// ============= AES-CTR Tests =============
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_ctr_roundtrip() {
|
||||
let key = [0u8; 32];
|
||||
let iv = 12345u128;
|
||||
|
||||
|
||||
let original = b"Hello, MTProto!";
|
||||
|
||||
|
||||
let mut enc = AesCtr::new(&key, iv);
|
||||
let encrypted = enc.encrypt(original);
|
||||
|
||||
|
||||
let mut dec = AesCtr::new(&key, iv);
|
||||
let decrypted = dec.decrypt(&encrypted);
|
||||
|
||||
|
||||
assert_eq!(original.as_slice(), decrypted.as_slice());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_ctr_in_place() {
|
||||
let key = [0x42u8; 32];
|
||||
let iv = 999u128;
|
||||
|
||||
|
||||
let original = b"Test data for in-place encryption";
|
||||
let mut data = original.to_vec();
|
||||
|
||||
|
||||
let mut cipher = AesCtr::new(&key, iv);
|
||||
cipher.apply(&mut data);
|
||||
|
||||
|
||||
assert_ne!(&data[..], original);
|
||||
|
||||
|
||||
let mut cipher = AesCtr::new(&key, iv);
|
||||
cipher.apply(&mut data);
|
||||
|
||||
|
||||
assert_eq!(&data[..], original);
|
||||
}
|
||||
|
||||
|
||||
// ============= AES-CBC Tests =============
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_roundtrip() {
|
||||
let key = [0u8; 32];
|
||||
let iv = [0u8; 16];
|
||||
|
||||
|
||||
let original = [0u8; 32];
|
||||
|
||||
|
||||
let cipher = AesCbc::new(key, iv);
|
||||
let encrypted = cipher.encrypt(&original).unwrap();
|
||||
let decrypted = cipher.decrypt(&encrypted).unwrap();
|
||||
|
||||
|
||||
assert_eq!(original.as_slice(), decrypted.as_slice());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_chaining_works() {
|
||||
let key = [0x42u8; 32];
|
||||
let iv = [0x00u8; 16];
|
||||
|
||||
|
||||
let plaintext = [0xAAu8; 32];
|
||||
|
||||
|
||||
let cipher = AesCbc::new(key, iv);
|
||||
let ciphertext = cipher.encrypt(&plaintext).unwrap();
|
||||
|
||||
|
||||
let block1 = &ciphertext[0..16];
|
||||
let block2 = &ciphertext[16..32];
|
||||
|
||||
|
||||
assert_ne!(
|
||||
block1, block2,
|
||||
"CBC chaining broken: identical plaintext blocks produced identical ciphertext"
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_known_vector() {
|
||||
let key = [0u8; 32];
|
||||
let iv = [0u8; 16];
|
||||
let plaintext = [0u8; 16];
|
||||
|
||||
|
||||
let cipher = AesCbc::new(key, iv);
|
||||
let ciphertext = cipher.encrypt(&plaintext).unwrap();
|
||||
|
||||
|
||||
let decrypted = cipher.decrypt(&ciphertext).unwrap();
|
||||
assert_eq!(plaintext.as_slice(), decrypted.as_slice());
|
||||
|
||||
|
||||
assert_ne!(ciphertext.as_slice(), plaintext.as_slice());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_multi_block() {
|
||||
let key = [0x12u8; 32];
|
||||
let iv = [0x34u8; 16];
|
||||
|
||||
|
||||
let plaintext: Vec<u8> = (0..80).collect();
|
||||
|
||||
|
||||
let cipher = AesCbc::new(key, iv);
|
||||
let ciphertext = cipher.encrypt(&plaintext).unwrap();
|
||||
let decrypted = cipher.decrypt(&ciphertext).unwrap();
|
||||
|
||||
|
||||
assert_eq!(plaintext, decrypted);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_in_place() {
|
||||
let key = [0x12u8; 32];
|
||||
let iv = [0x34u8; 16];
|
||||
|
||||
|
||||
let original = [0x56u8; 48];
|
||||
let mut buffer = original;
|
||||
|
||||
|
||||
let cipher = AesCbc::new(key, iv);
|
||||
|
||||
|
||||
cipher.encrypt_in_place(&mut buffer).unwrap();
|
||||
assert_ne!(&buffer[..], &original[..]);
|
||||
|
||||
|
||||
cipher.decrypt_in_place(&mut buffer).unwrap();
|
||||
assert_eq!(&buffer[..], &original[..]);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_empty_data() {
|
||||
let cipher = AesCbc::new([0u8; 32], [0u8; 16]);
|
||||
|
||||
|
||||
let encrypted = cipher.encrypt(&[]).unwrap();
|
||||
assert!(encrypted.is_empty());
|
||||
|
||||
|
||||
let decrypted = cipher.decrypt(&[]).unwrap();
|
||||
assert!(decrypted.is_empty());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_unaligned_error() {
|
||||
let cipher = AesCbc::new([0u8; 32], [0u8; 16]);
|
||||
|
||||
|
||||
let result = cipher.encrypt(&[0u8; 15]);
|
||||
assert!(result.is_err());
|
||||
|
||||
|
||||
let result = cipher.encrypt(&[0u8; 17]);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_avalanche_effect() {
|
||||
let key = [0xAB; 32];
|
||||
let iv = [0xCD; 16];
|
||||
|
||||
|
||||
let plaintext1 = [0u8; 32];
|
||||
let mut plaintext2 = [0u8; 32];
|
||||
plaintext2[0] = 0x01;
|
||||
|
||||
|
||||
let cipher = AesCbc::new(key, iv);
|
||||
|
||||
|
||||
let ciphertext1 = cipher.encrypt(&plaintext1).unwrap();
|
||||
let ciphertext2 = cipher.encrypt(&plaintext2).unwrap();
|
||||
|
||||
|
||||
assert_ne!(&ciphertext1[0..16], &ciphertext2[0..16]);
|
||||
assert_ne!(&ciphertext1[16..32], &ciphertext2[16..32]);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_iv_matters() {
|
||||
let key = [0x55; 32];
|
||||
let plaintext = [0x77u8; 16];
|
||||
|
||||
|
||||
let cipher1 = AesCbc::new(key, [0u8; 16]);
|
||||
let cipher2 = AesCbc::new(key, [1u8; 16]);
|
||||
|
||||
|
||||
let ciphertext1 = cipher1.encrypt(&plaintext).unwrap();
|
||||
let ciphertext2 = cipher2.encrypt(&plaintext).unwrap();
|
||||
|
||||
|
||||
assert_ne!(ciphertext1, ciphertext2);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_deterministic() {
|
||||
let key = [0x99; 32];
|
||||
let iv = [0x88; 16];
|
||||
let plaintext = [0x77u8; 32];
|
||||
|
||||
|
||||
let cipher = AesCbc::new(key, iv);
|
||||
|
||||
|
||||
let ciphertext1 = cipher.encrypt(&plaintext).unwrap();
|
||||
let ciphertext2 = cipher.encrypt(&plaintext).unwrap();
|
||||
|
||||
|
||||
assert_eq!(ciphertext1, ciphertext2);
|
||||
}
|
||||
|
||||
|
||||
// ============= Zeroize Tests =============
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_aes_cbc_zeroize_on_drop() {
|
||||
let key = [0xAA; 32];
|
||||
let iv = [0xBB; 16];
|
||||
|
||||
|
||||
let cipher = AesCbc::new(key, iv);
|
||||
// Verify key/iv are set
|
||||
assert_eq!(cipher.key, [0xAA; 32]);
|
||||
assert_eq!(cipher.iv, [0xBB; 16]);
|
||||
|
||||
|
||||
drop(cipher);
|
||||
// After drop, key/iv are zeroized (can't observe directly,
|
||||
// but the Drop impl runs without panic)
|
||||
}
|
||||
|
||||
|
||||
// ============= Error Handling Tests =============
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_invalid_key_length() {
|
||||
let result = AesCtr::from_key_iv(&[0u8; 16], &[0u8; 16]);
|
||||
assert!(result.is_err());
|
||||
|
||||
|
||||
let result = AesCbc::from_slices(&[0u8; 16], &[0u8; 16]);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_invalid_iv_length() {
|
||||
let result = AesCtr::from_key_iv(&[0u8; 32], &[0u8; 8]);
|
||||
assert!(result.is_err());
|
||||
|
||||
|
||||
let result = AesCbc::from_slices(&[0u8; 32], &[0u8; 8]);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,10 +12,10 @@
|
||||
//! usages are intentional and protocol-mandated.
|
||||
|
||||
use hmac::{Hmac, Mac};
|
||||
use sha2::Sha256;
|
||||
use md5::Md5;
|
||||
use sha1::Sha1;
|
||||
use sha2::Digest;
|
||||
use sha2::Sha256;
|
||||
|
||||
type HmacSha256 = Hmac<Sha256>;
|
||||
|
||||
@@ -28,8 +28,7 @@ pub fn sha256(data: &[u8]) -> [u8; 32] {
|
||||
|
||||
/// SHA-256 HMAC
|
||||
pub fn sha256_hmac(key: &[u8], data: &[u8]) -> [u8; 32] {
|
||||
let mut mac = HmacSha256::new_from_slice(key)
|
||||
.expect("HMAC accepts any key length");
|
||||
let mut mac = HmacSha256::new_from_slice(key).expect("HMAC accepts any key length");
|
||||
mac.update(data);
|
||||
mac.finalize().into_bytes().into()
|
||||
}
|
||||
@@ -124,27 +123,18 @@ pub fn derive_middleproxy_keys(
|
||||
srv_ipv6: Option<&[u8; 16]>,
|
||||
) -> ([u8; 32], [u8; 16]) {
|
||||
let s = build_middleproxy_prekey(
|
||||
nonce_srv,
|
||||
nonce_clt,
|
||||
clt_ts,
|
||||
srv_ip,
|
||||
clt_port,
|
||||
purpose,
|
||||
clt_ip,
|
||||
srv_port,
|
||||
secret,
|
||||
clt_ipv6,
|
||||
srv_ipv6,
|
||||
nonce_srv, nonce_clt, clt_ts, srv_ip, clt_port, purpose, clt_ip, srv_port, secret,
|
||||
clt_ipv6, srv_ipv6,
|
||||
);
|
||||
|
||||
let md5_1 = md5(&s[1..]);
|
||||
let sha1_sum = sha1(&s);
|
||||
let md5_2 = md5(&s[2..]);
|
||||
|
||||
|
||||
let mut key = [0u8; 32];
|
||||
key[..12].copy_from_slice(&md5_1[..12]);
|
||||
key[12..].copy_from_slice(&sha1_sum);
|
||||
|
||||
|
||||
(key, md5_2)
|
||||
}
|
||||
|
||||
@@ -164,17 +154,8 @@ mod tests {
|
||||
let secret = vec![0x55u8; 128];
|
||||
|
||||
let prekey = build_middleproxy_prekey(
|
||||
&nonce_srv,
|
||||
&nonce_clt,
|
||||
&clt_ts,
|
||||
srv_ip,
|
||||
&clt_port,
|
||||
b"CLIENT",
|
||||
clt_ip,
|
||||
&srv_port,
|
||||
&secret,
|
||||
None,
|
||||
None,
|
||||
&nonce_srv, &nonce_clt, &clt_ts, srv_ip, &clt_port, b"CLIENT", clt_ip, &srv_port,
|
||||
&secret, None, None,
|
||||
);
|
||||
let digest = sha256(&prekey);
|
||||
assert_eq!(
|
||||
|
||||
@@ -4,7 +4,7 @@ pub mod aes;
|
||||
pub mod hash;
|
||||
pub mod random;
|
||||
|
||||
pub use aes::{AesCtr, AesCbc};
|
||||
pub use aes::{AesCbc, AesCtr};
|
||||
pub use hash::{
|
||||
build_middleproxy_prekey, crc32, crc32c, derive_middleproxy_keys, sha256, sha256_hmac,
|
||||
};
|
||||
|
||||
@@ -3,11 +3,11 @@
|
||||
#![allow(deprecated)]
|
||||
#![allow(dead_code)]
|
||||
|
||||
use rand::{Rng, RngCore, SeedableRng};
|
||||
use rand::rngs::StdRng;
|
||||
use parking_lot::Mutex;
|
||||
use zeroize::Zeroize;
|
||||
use crate::crypto::AesCtr;
|
||||
use parking_lot::Mutex;
|
||||
use rand::rngs::StdRng;
|
||||
use rand::{Rng, RngExt, SeedableRng};
|
||||
use zeroize::Zeroize;
|
||||
|
||||
/// Cryptographically secure PRNG with AES-CTR
|
||||
pub struct SecureRandom {
|
||||
@@ -34,16 +34,16 @@ impl SecureRandom {
|
||||
pub fn new() -> Self {
|
||||
let mut seed_source = rand::rng();
|
||||
let mut rng = StdRng::from_rng(&mut seed_source);
|
||||
|
||||
|
||||
let mut key = [0u8; 32];
|
||||
rng.fill_bytes(&mut key);
|
||||
let iv: u128 = rng.random();
|
||||
|
||||
|
||||
let cipher = AesCtr::new(&key, iv);
|
||||
|
||||
|
||||
// Zeroize local key copy — cipher already consumed it
|
||||
key.zeroize();
|
||||
|
||||
|
||||
Self {
|
||||
inner: Mutex::new(SecureRandomInner {
|
||||
rng,
|
||||
@@ -53,7 +53,7 @@ impl SecureRandom {
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Fill a caller-provided buffer with random bytes.
|
||||
pub fn fill(&self, out: &mut [u8]) {
|
||||
let mut inner = self.inner.lock();
|
||||
@@ -94,25 +94,25 @@ impl SecureRandom {
|
||||
self.fill(&mut out);
|
||||
out
|
||||
}
|
||||
|
||||
|
||||
/// Generate random number in range [0, max)
|
||||
pub fn range(&self, max: usize) -> usize {
|
||||
if max == 0 {
|
||||
return 0;
|
||||
}
|
||||
let mut inner = self.inner.lock();
|
||||
inner.rng.gen_range(0..max)
|
||||
inner.rng.random_range(0..max)
|
||||
}
|
||||
|
||||
|
||||
/// Generate random bits
|
||||
pub fn bits(&self, k: usize) -> u64 {
|
||||
if k == 0 {
|
||||
return 0;
|
||||
}
|
||||
|
||||
|
||||
let bytes_needed = k.div_ceil(8);
|
||||
let bytes = self.bytes(bytes_needed.min(8));
|
||||
|
||||
|
||||
let mut result = 0u64;
|
||||
for (i, &b) in bytes.iter().enumerate() {
|
||||
if i >= 8 {
|
||||
@@ -120,14 +120,14 @@ impl SecureRandom {
|
||||
}
|
||||
result |= (b as u64) << (i * 8);
|
||||
}
|
||||
|
||||
|
||||
if k < 64 {
|
||||
result &= (1u64 << k) - 1;
|
||||
}
|
||||
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
|
||||
/// Choose random element from slice
|
||||
pub fn choose<'a, T>(&self, slice: &'a [T]) -> Option<&'a T> {
|
||||
if slice.is_empty() {
|
||||
@@ -136,22 +136,22 @@ impl SecureRandom {
|
||||
Some(&slice[self.range(slice.len())])
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Shuffle slice in place
|
||||
pub fn shuffle<T>(&self, slice: &mut [T]) {
|
||||
let mut inner = self.inner.lock();
|
||||
for i in (1..slice.len()).rev() {
|
||||
let j = inner.rng.gen_range(0..=i);
|
||||
let j = inner.rng.random_range(0..=i);
|
||||
slice.swap(i, j);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Generate random u32
|
||||
pub fn u32(&self) -> u32 {
|
||||
let mut inner = self.inner.lock();
|
||||
inner.rng.random()
|
||||
}
|
||||
|
||||
|
||||
/// Generate random u64
|
||||
pub fn u64(&self) -> u64 {
|
||||
let mut inner = self.inner.lock();
|
||||
@@ -169,7 +169,7 @@ impl Default for SecureRandom {
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::collections::HashSet;
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_bytes_uniqueness() {
|
||||
let rng = SecureRandom::new();
|
||||
@@ -177,7 +177,7 @@ mod tests {
|
||||
let b = rng.bytes(32);
|
||||
assert_ne!(a, b);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_bytes_length() {
|
||||
let rng = SecureRandom::new();
|
||||
@@ -186,63 +186,63 @@ mod tests {
|
||||
assert_eq!(rng.bytes(100).len(), 100);
|
||||
assert_eq!(rng.bytes(1000).len(), 1000);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_range() {
|
||||
let rng = SecureRandom::new();
|
||||
|
||||
|
||||
for _ in 0..1000 {
|
||||
let n = rng.range(10);
|
||||
assert!(n < 10);
|
||||
}
|
||||
|
||||
|
||||
assert_eq!(rng.range(1), 0);
|
||||
assert_eq!(rng.range(0), 0);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_bits() {
|
||||
let rng = SecureRandom::new();
|
||||
|
||||
|
||||
for _ in 0..100 {
|
||||
assert!(rng.bits(1) <= 1);
|
||||
}
|
||||
|
||||
|
||||
for _ in 0..100 {
|
||||
assert!(rng.bits(8) <= 255);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_choose() {
|
||||
let rng = SecureRandom::new();
|
||||
let items = vec![1, 2, 3, 4, 5];
|
||||
|
||||
|
||||
let mut seen = HashSet::new();
|
||||
for _ in 0..1000 {
|
||||
if let Some(&item) = rng.choose(&items) {
|
||||
seen.insert(item);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
assert_eq!(seen.len(), 5);
|
||||
|
||||
|
||||
let empty: Vec<i32> = vec![];
|
||||
assert!(rng.choose(&empty).is_none());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_shuffle() {
|
||||
let rng = SecureRandom::new();
|
||||
let original = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
|
||||
|
||||
|
||||
let mut shuffled = original.clone();
|
||||
rng.shuffle(&mut shuffled);
|
||||
|
||||
|
||||
let mut sorted = shuffled.clone();
|
||||
sorted.sort();
|
||||
assert_eq!(sorted, original);
|
||||
|
||||
|
||||
assert_ne!(shuffled, original);
|
||||
}
|
||||
}
|
||||
|
||||
208
src/error.rs
208
src/error.rs
@@ -12,28 +12,15 @@ use thiserror::Error;
|
||||
#[derive(Debug)]
|
||||
pub enum StreamError {
|
||||
/// Partial read: got fewer bytes than expected
|
||||
PartialRead {
|
||||
expected: usize,
|
||||
got: usize,
|
||||
},
|
||||
PartialRead { expected: usize, got: usize },
|
||||
/// Partial write: wrote fewer bytes than expected
|
||||
PartialWrite {
|
||||
expected: usize,
|
||||
written: usize,
|
||||
},
|
||||
PartialWrite { expected: usize, written: usize },
|
||||
/// Stream is in poisoned state and cannot be used
|
||||
Poisoned {
|
||||
reason: String,
|
||||
},
|
||||
Poisoned { reason: String },
|
||||
/// Buffer overflow: attempted to buffer more than allowed
|
||||
BufferOverflow {
|
||||
limit: usize,
|
||||
attempted: usize,
|
||||
},
|
||||
BufferOverflow { limit: usize, attempted: usize },
|
||||
/// Invalid frame format
|
||||
InvalidFrame {
|
||||
details: String,
|
||||
},
|
||||
InvalidFrame { details: String },
|
||||
/// Unexpected end of stream
|
||||
UnexpectedEof,
|
||||
/// Underlying I/O error
|
||||
@@ -47,13 +34,21 @@ impl fmt::Display for StreamError {
|
||||
write!(f, "partial read: expected {} bytes, got {}", expected, got)
|
||||
}
|
||||
Self::PartialWrite { expected, written } => {
|
||||
write!(f, "partial write: expected {} bytes, wrote {}", expected, written)
|
||||
write!(
|
||||
f,
|
||||
"partial write: expected {} bytes, wrote {}",
|
||||
expected, written
|
||||
)
|
||||
}
|
||||
Self::Poisoned { reason } => {
|
||||
write!(f, "stream poisoned: {}", reason)
|
||||
}
|
||||
Self::BufferOverflow { limit, attempted } => {
|
||||
write!(f, "buffer overflow: limit {}, attempted {}", limit, attempted)
|
||||
write!(
|
||||
f,
|
||||
"buffer overflow: limit {}, attempted {}",
|
||||
limit, attempted
|
||||
)
|
||||
}
|
||||
Self::InvalidFrame { details } => {
|
||||
write!(f, "invalid frame: {}", details)
|
||||
@@ -90,9 +85,7 @@ impl From<StreamError> for std::io::Error {
|
||||
StreamError::UnexpectedEof => {
|
||||
std::io::Error::new(std::io::ErrorKind::UnexpectedEof, err)
|
||||
}
|
||||
StreamError::Poisoned { .. } => {
|
||||
std::io::Error::other(err)
|
||||
}
|
||||
StreamError::Poisoned { .. } => std::io::Error::other(err),
|
||||
StreamError::BufferOverflow { .. } => {
|
||||
std::io::Error::new(std::io::ErrorKind::OutOfMemory, err)
|
||||
}
|
||||
@@ -112,7 +105,7 @@ impl From<StreamError> for std::io::Error {
|
||||
pub trait Recoverable {
|
||||
/// Check if error is recoverable (can retry operation)
|
||||
fn is_recoverable(&self) -> bool;
|
||||
|
||||
|
||||
/// Check if connection can continue after this error
|
||||
fn can_continue(&self) -> bool;
|
||||
}
|
||||
@@ -123,19 +116,22 @@ impl Recoverable for StreamError {
|
||||
Self::PartialRead { .. } | Self::PartialWrite { .. } => true,
|
||||
Self::Io(e) => matches!(
|
||||
e.kind(),
|
||||
std::io::ErrorKind::WouldBlock
|
||||
| std::io::ErrorKind::Interrupted
|
||||
| std::io::ErrorKind::TimedOut
|
||||
std::io::ErrorKind::WouldBlock
|
||||
| std::io::ErrorKind::Interrupted
|
||||
| std::io::ErrorKind::TimedOut
|
||||
),
|
||||
Self::Poisoned { .. }
|
||||
Self::Poisoned { .. }
|
||||
| Self::BufferOverflow { .. }
|
||||
| Self::InvalidFrame { .. }
|
||||
| Self::UnexpectedEof => false,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
fn can_continue(&self) -> bool {
|
||||
!matches!(self, Self::Poisoned { .. } | Self::UnexpectedEof | Self::BufferOverflow { .. })
|
||||
!matches!(
|
||||
self,
|
||||
Self::Poisoned { .. } | Self::UnexpectedEof | Self::BufferOverflow { .. }
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -143,19 +139,19 @@ impl Recoverable for std::io::Error {
|
||||
fn is_recoverable(&self) -> bool {
|
||||
matches!(
|
||||
self.kind(),
|
||||
std::io::ErrorKind::WouldBlock
|
||||
| std::io::ErrorKind::Interrupted
|
||||
| std::io::ErrorKind::TimedOut
|
||||
std::io::ErrorKind::WouldBlock
|
||||
| std::io::ErrorKind::Interrupted
|
||||
| std::io::ErrorKind::TimedOut
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
fn can_continue(&self) -> bool {
|
||||
!matches!(
|
||||
self.kind(),
|
||||
std::io::ErrorKind::BrokenPipe
|
||||
| std::io::ErrorKind::ConnectionReset
|
||||
| std::io::ErrorKind::ConnectionAborted
|
||||
| std::io::ErrorKind::NotConnected
|
||||
| std::io::ErrorKind::ConnectionReset
|
||||
| std::io::ErrorKind::ConnectionAborted
|
||||
| std::io::ErrorKind::NotConnected
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -165,96 +161,88 @@ impl Recoverable for std::io::Error {
|
||||
#[derive(Error, Debug)]
|
||||
pub enum ProxyError {
|
||||
// ============= Crypto Errors =============
|
||||
|
||||
#[error("Crypto error: {0}")]
|
||||
Crypto(String),
|
||||
|
||||
|
||||
#[error("Invalid key length: expected {expected}, got {got}")]
|
||||
InvalidKeyLength { expected: usize, got: usize },
|
||||
|
||||
|
||||
// ============= Stream Errors =============
|
||||
|
||||
#[error("Stream error: {0}")]
|
||||
Stream(#[from] StreamError),
|
||||
|
||||
|
||||
// ============= Protocol Errors =============
|
||||
|
||||
#[error("Invalid handshake: {0}")]
|
||||
InvalidHandshake(String),
|
||||
|
||||
|
||||
#[error("Invalid protocol tag: {0:02x?}")]
|
||||
InvalidProtoTag([u8; 4]),
|
||||
|
||||
|
||||
#[error("Invalid TLS record: type={record_type}, version={version:02x?}")]
|
||||
InvalidTlsRecord { record_type: u8, version: [u8; 2] },
|
||||
|
||||
|
||||
#[error("Replay attack detected from {addr}")]
|
||||
ReplayAttack { addr: SocketAddr },
|
||||
|
||||
|
||||
#[error("Time skew detected: client={client_time}, server={server_time}")]
|
||||
TimeSkew { client_time: u32, server_time: u32 },
|
||||
|
||||
|
||||
#[error("Invalid message length: {len} (min={min}, max={max})")]
|
||||
InvalidMessageLength { len: usize, min: usize, max: usize },
|
||||
|
||||
|
||||
#[error("Checksum mismatch: expected={expected:08x}, got={got:08x}")]
|
||||
ChecksumMismatch { expected: u32, got: u32 },
|
||||
|
||||
|
||||
#[error("Sequence number mismatch: expected={expected}, got={got}")]
|
||||
SeqNoMismatch { expected: i32, got: i32 },
|
||||
|
||||
|
||||
#[error("TLS handshake failed: {reason}")]
|
||||
TlsHandshakeFailed { reason: String },
|
||||
|
||||
|
||||
#[error("Telegram handshake timeout")]
|
||||
TgHandshakeTimeout,
|
||||
|
||||
|
||||
// ============= Network Errors =============
|
||||
|
||||
#[error("Connection timeout to {addr}")]
|
||||
ConnectionTimeout { addr: String },
|
||||
|
||||
|
||||
#[error("Connection refused by {addr}")]
|
||||
ConnectionRefused { addr: String },
|
||||
|
||||
|
||||
#[error("IO error: {0}")]
|
||||
Io(#[from] std::io::Error),
|
||||
|
||||
|
||||
// ============= Proxy Protocol Errors =============
|
||||
|
||||
#[error("Invalid proxy protocol header")]
|
||||
InvalidProxyProtocol,
|
||||
|
||||
|
||||
#[error("Proxy error: {0}")]
|
||||
Proxy(String),
|
||||
|
||||
|
||||
// ============= Config Errors =============
|
||||
|
||||
#[error("Config error: {0}")]
|
||||
Config(String),
|
||||
|
||||
|
||||
#[error("Invalid secret for user {user}: {reason}")]
|
||||
InvalidSecret { user: String, reason: String },
|
||||
|
||||
|
||||
// ============= User Errors =============
|
||||
|
||||
#[error("User {user} expired")]
|
||||
UserExpired { user: String },
|
||||
|
||||
|
||||
#[error("User {user} exceeded connection limit")]
|
||||
ConnectionLimitExceeded { user: String },
|
||||
|
||||
|
||||
#[error("User {user} exceeded data quota")]
|
||||
DataQuotaExceeded { user: String },
|
||||
|
||||
|
||||
#[error("Unknown user")]
|
||||
UnknownUser,
|
||||
|
||||
|
||||
#[error("Rate limited")]
|
||||
RateLimited,
|
||||
|
||||
|
||||
// ============= General Errors =============
|
||||
|
||||
#[error("Internal error: {0}")]
|
||||
Internal(String),
|
||||
}
|
||||
@@ -269,7 +257,7 @@ impl Recoverable for ProxyError {
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
fn can_continue(&self) -> bool {
|
||||
match self {
|
||||
Self::Stream(e) => e.can_continue(),
|
||||
@@ -301,17 +289,19 @@ impl<T, R, W> HandshakeResult<T, R, W> {
|
||||
pub fn is_success(&self) -> bool {
|
||||
matches!(self, HandshakeResult::Success(_))
|
||||
}
|
||||
|
||||
|
||||
/// Check if bad client
|
||||
pub fn is_bad_client(&self) -> bool {
|
||||
matches!(self, HandshakeResult::BadClient { .. })
|
||||
}
|
||||
|
||||
|
||||
/// Map the success value
|
||||
pub fn map<U, F: FnOnce(T) -> U>(self, f: F) -> HandshakeResult<U, R, W> {
|
||||
match self {
|
||||
HandshakeResult::Success(v) => HandshakeResult::Success(f(v)),
|
||||
HandshakeResult::BadClient { reader, writer } => HandshakeResult::BadClient { reader, writer },
|
||||
HandshakeResult::BadClient { reader, writer } => {
|
||||
HandshakeResult::BadClient { reader, writer }
|
||||
}
|
||||
HandshakeResult::Error(e) => HandshakeResult::Error(e),
|
||||
}
|
||||
}
|
||||
@@ -338,76 +328,104 @@ impl<T, R, W> From<StreamError> for HandshakeResult<T, R, W> {
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_stream_error_display() {
|
||||
let err = StreamError::PartialRead { expected: 100, got: 50 };
|
||||
let err = StreamError::PartialRead {
|
||||
expected: 100,
|
||||
got: 50,
|
||||
};
|
||||
assert!(err.to_string().contains("100"));
|
||||
assert!(err.to_string().contains("50"));
|
||||
|
||||
let err = StreamError::Poisoned { reason: "test".into() };
|
||||
|
||||
let err = StreamError::Poisoned {
|
||||
reason: "test".into(),
|
||||
};
|
||||
assert!(err.to_string().contains("test"));
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_stream_error_recoverable() {
|
||||
assert!(StreamError::PartialRead { expected: 10, got: 5 }.is_recoverable());
|
||||
assert!(StreamError::PartialWrite { expected: 10, written: 5 }.is_recoverable());
|
||||
assert!(
|
||||
StreamError::PartialRead {
|
||||
expected: 10,
|
||||
got: 5
|
||||
}
|
||||
.is_recoverable()
|
||||
);
|
||||
assert!(
|
||||
StreamError::PartialWrite {
|
||||
expected: 10,
|
||||
written: 5
|
||||
}
|
||||
.is_recoverable()
|
||||
);
|
||||
assert!(!StreamError::Poisoned { reason: "x".into() }.is_recoverable());
|
||||
assert!(!StreamError::UnexpectedEof.is_recoverable());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_stream_error_can_continue() {
|
||||
assert!(!StreamError::Poisoned { reason: "x".into() }.can_continue());
|
||||
assert!(!StreamError::UnexpectedEof.can_continue());
|
||||
assert!(StreamError::PartialRead { expected: 10, got: 5 }.can_continue());
|
||||
assert!(
|
||||
StreamError::PartialRead {
|
||||
expected: 10,
|
||||
got: 5
|
||||
}
|
||||
.can_continue()
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_stream_error_to_io_error() {
|
||||
let stream_err = StreamError::UnexpectedEof;
|
||||
let io_err: std::io::Error = stream_err.into();
|
||||
assert_eq!(io_err.kind(), std::io::ErrorKind::UnexpectedEof);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_handshake_result() {
|
||||
let success: HandshakeResult<i32, (), ()> = HandshakeResult::Success(42);
|
||||
assert!(success.is_success());
|
||||
assert!(!success.is_bad_client());
|
||||
|
||||
let bad: HandshakeResult<i32, (), ()> = HandshakeResult::BadClient { reader: (), writer: () };
|
||||
|
||||
let bad: HandshakeResult<i32, (), ()> = HandshakeResult::BadClient {
|
||||
reader: (),
|
||||
writer: (),
|
||||
};
|
||||
assert!(!bad.is_success());
|
||||
assert!(bad.is_bad_client());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_handshake_result_map() {
|
||||
let success: HandshakeResult<i32, (), ()> = HandshakeResult::Success(42);
|
||||
let mapped = success.map(|x| x * 2);
|
||||
|
||||
|
||||
match mapped {
|
||||
HandshakeResult::Success(v) => assert_eq!(v, 84),
|
||||
_ => panic!("Expected success"),
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_proxy_error_recoverable() {
|
||||
let err = ProxyError::RateLimited;
|
||||
assert!(err.is_recoverable());
|
||||
|
||||
|
||||
let err = ProxyError::InvalidHandshake("bad".into());
|
||||
assert!(!err.is_recoverable());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_error_display() {
|
||||
let err = ProxyError::ConnectionTimeout { addr: "1.2.3.4:443".into() };
|
||||
let err = ProxyError::ConnectionTimeout {
|
||||
addr: "1.2.3.4:443".into(),
|
||||
};
|
||||
assert!(err.to_string().contains("1.2.3.4:443"));
|
||||
|
||||
|
||||
let err = ProxyError::InvalidProxyProtocol;
|
||||
assert!(err.to_string().contains("proxy protocol"));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,10 +5,11 @@
|
||||
use std::collections::HashMap;
|
||||
use std::net::IpAddr;
|
||||
use std::sync::Arc;
|
||||
use std::sync::Mutex;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use tokio::sync::RwLock;
|
||||
use tokio::sync::{Mutex as AsyncMutex, RwLock};
|
||||
|
||||
use crate::config::UserMaxUniqueIpsMode;
|
||||
|
||||
@@ -17,9 +18,12 @@ pub struct UserIpTracker {
|
||||
active_ips: Arc<RwLock<HashMap<String, HashMap<IpAddr, usize>>>>,
|
||||
recent_ips: Arc<RwLock<HashMap<String, HashMap<IpAddr, Instant>>>>,
|
||||
max_ips: Arc<RwLock<HashMap<String, usize>>>,
|
||||
default_max_ips: Arc<RwLock<usize>>,
|
||||
limit_mode: Arc<RwLock<UserMaxUniqueIpsMode>>,
|
||||
limit_window: Arc<RwLock<Duration>>,
|
||||
last_compact_epoch_secs: Arc<AtomicU64>,
|
||||
cleanup_queue: Arc<Mutex<Vec<(String, IpAddr)>>>,
|
||||
cleanup_drain_lock: Arc<AsyncMutex<()>>,
|
||||
}
|
||||
|
||||
impl UserIpTracker {
|
||||
@@ -28,9 +32,83 @@ impl UserIpTracker {
|
||||
active_ips: Arc::new(RwLock::new(HashMap::new())),
|
||||
recent_ips: Arc::new(RwLock::new(HashMap::new())),
|
||||
max_ips: Arc::new(RwLock::new(HashMap::new())),
|
||||
default_max_ips: Arc::new(RwLock::new(0)),
|
||||
limit_mode: Arc::new(RwLock::new(UserMaxUniqueIpsMode::ActiveWindow)),
|
||||
limit_window: Arc::new(RwLock::new(Duration::from_secs(30))),
|
||||
last_compact_epoch_secs: Arc::new(AtomicU64::new(0)),
|
||||
cleanup_queue: Arc::new(Mutex::new(Vec::new())),
|
||||
cleanup_drain_lock: Arc::new(AsyncMutex::new(())),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn enqueue_cleanup(&self, user: String, ip: IpAddr) {
|
||||
match self.cleanup_queue.lock() {
|
||||
Ok(mut queue) => queue.push((user, ip)),
|
||||
Err(poisoned) => {
|
||||
let mut queue = poisoned.into_inner();
|
||||
queue.push((user.clone(), ip));
|
||||
self.cleanup_queue.clear_poison();
|
||||
tracing::warn!(
|
||||
"UserIpTracker cleanup_queue lock poisoned; recovered and enqueued IP cleanup for {} ({})",
|
||||
user,
|
||||
ip
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn cleanup_queue_len_for_tests(&self) -> usize {
|
||||
self.cleanup_queue
|
||||
.lock()
|
||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
||||
.len()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub(crate) fn cleanup_queue_mutex_for_tests(&self) -> Arc<Mutex<Vec<(String, IpAddr)>>> {
|
||||
Arc::clone(&self.cleanup_queue)
|
||||
}
|
||||
|
||||
pub(crate) async fn drain_cleanup_queue(&self) {
|
||||
// Serialize queue draining and active-IP mutation so check-and-add cannot
|
||||
// observe stale active entries that are already queued for removal.
|
||||
let _drain_guard = self.cleanup_drain_lock.lock().await;
|
||||
let to_remove = {
|
||||
match self.cleanup_queue.lock() {
|
||||
Ok(mut queue) => {
|
||||
if queue.is_empty() {
|
||||
return;
|
||||
}
|
||||
std::mem::take(&mut *queue)
|
||||
}
|
||||
Err(poisoned) => {
|
||||
let mut queue = poisoned.into_inner();
|
||||
if queue.is_empty() {
|
||||
self.cleanup_queue.clear_poison();
|
||||
return;
|
||||
}
|
||||
let drained = std::mem::take(&mut *queue);
|
||||
self.cleanup_queue.clear_poison();
|
||||
drained
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
let mut active_ips = self.active_ips.write().await;
|
||||
for (user, ip) in to_remove {
|
||||
if let Some(user_ips) = active_ips.get_mut(&user) {
|
||||
if let Some(count) = user_ips.get_mut(&ip) {
|
||||
if *count > 1 {
|
||||
*count -= 1;
|
||||
} else {
|
||||
user_ips.remove(&ip);
|
||||
}
|
||||
}
|
||||
if user_ips.is_empty() {
|
||||
active_ips.remove(&user);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -63,7 +141,8 @@ impl UserIpTracker {
|
||||
|
||||
let mut active_ips = self.active_ips.write().await;
|
||||
let mut recent_ips = self.recent_ips.write().await;
|
||||
let mut users = Vec::<String>::with_capacity(active_ips.len().saturating_add(recent_ips.len()));
|
||||
let mut users =
|
||||
Vec::<String>::with_capacity(active_ips.len().saturating_add(recent_ips.len()));
|
||||
users.extend(active_ips.keys().cloned());
|
||||
for user in recent_ips.keys() {
|
||||
if !active_ips.contains_key(user) {
|
||||
@@ -72,8 +151,14 @@ impl UserIpTracker {
|
||||
}
|
||||
|
||||
for user in users {
|
||||
let active_empty = active_ips.get(&user).map(|ips| ips.is_empty()).unwrap_or(true);
|
||||
let recent_empty = recent_ips.get(&user).map(|ips| ips.is_empty()).unwrap_or(true);
|
||||
let active_empty = active_ips
|
||||
.get(&user)
|
||||
.map(|ips| ips.is_empty())
|
||||
.unwrap_or(true);
|
||||
let recent_empty = recent_ips
|
||||
.get(&user)
|
||||
.map(|ips| ips.is_empty())
|
||||
.unwrap_or(true);
|
||||
if active_empty && recent_empty {
|
||||
active_ips.remove(&user);
|
||||
recent_ips.remove(&user);
|
||||
@@ -100,7 +185,10 @@ impl UserIpTracker {
|
||||
limits.remove(username);
|
||||
}
|
||||
|
||||
pub async fn load_limits(&self, limits: &HashMap<String, usize>) {
|
||||
pub async fn load_limits(&self, default_limit: usize, limits: &HashMap<String, usize>) {
|
||||
let mut default_max_ips = self.default_max_ips.write().await;
|
||||
*default_max_ips = default_limit;
|
||||
drop(default_max_ips);
|
||||
let mut max_ips = self.max_ips.write().await;
|
||||
max_ips.clone_from(limits);
|
||||
}
|
||||
@@ -113,10 +201,16 @@ impl UserIpTracker {
|
||||
}
|
||||
|
||||
pub async fn check_and_add(&self, username: &str, ip: IpAddr) -> Result<(), String> {
|
||||
self.drain_cleanup_queue().await;
|
||||
self.maybe_compact_empty_users().await;
|
||||
let default_max_ips = *self.default_max_ips.read().await;
|
||||
let limit = {
|
||||
let max_ips = self.max_ips.read().await;
|
||||
max_ips.get(username).copied()
|
||||
max_ips
|
||||
.get(username)
|
||||
.copied()
|
||||
.filter(|limit| *limit > 0)
|
||||
.or((default_max_ips > 0).then_some(default_max_ips))
|
||||
};
|
||||
let mode = *self.limit_mode.read().await;
|
||||
let window = *self.limit_window.read().await;
|
||||
@@ -184,6 +278,7 @@ impl UserIpTracker {
|
||||
}
|
||||
|
||||
pub async fn get_recent_counts_for_users(&self, users: &[String]) -> HashMap<String, usize> {
|
||||
self.drain_cleanup_queue().await;
|
||||
let window = *self.limit_window.read().await;
|
||||
let now = Instant::now();
|
||||
let recent_ips = self.recent_ips.read().await;
|
||||
@@ -204,6 +299,7 @@ impl UserIpTracker {
|
||||
}
|
||||
|
||||
pub async fn get_active_ips_for_users(&self, users: &[String]) -> HashMap<String, Vec<IpAddr>> {
|
||||
self.drain_cleanup_queue().await;
|
||||
let active_ips = self.active_ips.read().await;
|
||||
let mut out = HashMap::with_capacity(users.len());
|
||||
for user in users {
|
||||
@@ -218,6 +314,7 @@ impl UserIpTracker {
|
||||
}
|
||||
|
||||
pub async fn get_recent_ips_for_users(&self, users: &[String]) -> HashMap<String, Vec<IpAddr>> {
|
||||
self.drain_cleanup_queue().await;
|
||||
let window = *self.limit_window.read().await;
|
||||
let now = Instant::now();
|
||||
let recent_ips = self.recent_ips.read().await;
|
||||
@@ -240,11 +337,13 @@ impl UserIpTracker {
|
||||
}
|
||||
|
||||
pub async fn get_active_ip_count(&self, username: &str) -> usize {
|
||||
self.drain_cleanup_queue().await;
|
||||
let active_ips = self.active_ips.read().await;
|
||||
active_ips.get(username).map(|ips| ips.len()).unwrap_or(0)
|
||||
}
|
||||
|
||||
pub async fn get_active_ips(&self, username: &str) -> Vec<IpAddr> {
|
||||
self.drain_cleanup_queue().await;
|
||||
let active_ips = self.active_ips.read().await;
|
||||
active_ips
|
||||
.get(username)
|
||||
@@ -253,12 +352,19 @@ impl UserIpTracker {
|
||||
}
|
||||
|
||||
pub async fn get_stats(&self) -> Vec<(String, usize, usize)> {
|
||||
self.drain_cleanup_queue().await;
|
||||
let active_ips = self.active_ips.read().await;
|
||||
let max_ips = self.max_ips.read().await;
|
||||
let default_max_ips = *self.default_max_ips.read().await;
|
||||
|
||||
let mut stats = Vec::new();
|
||||
for (username, user_ips) in active_ips.iter() {
|
||||
let limit = max_ips.get(username).copied().unwrap_or(0);
|
||||
let limit = max_ips
|
||||
.get(username)
|
||||
.copied()
|
||||
.filter(|limit| *limit > 0)
|
||||
.or((default_max_ips > 0).then_some(default_max_ips))
|
||||
.unwrap_or(0);
|
||||
stats.push((username.clone(), user_ips.len(), limit));
|
||||
}
|
||||
|
||||
@@ -285,6 +391,7 @@ impl UserIpTracker {
|
||||
}
|
||||
|
||||
pub async fn is_ip_active(&self, username: &str, ip: IpAddr) -> bool {
|
||||
self.drain_cleanup_queue().await;
|
||||
let active_ips = self.active_ips.read().await;
|
||||
active_ips
|
||||
.get(username)
|
||||
@@ -293,8 +400,13 @@ impl UserIpTracker {
|
||||
}
|
||||
|
||||
pub async fn get_user_limit(&self, username: &str) -> Option<usize> {
|
||||
let default_max_ips = *self.default_max_ips.read().await;
|
||||
let max_ips = self.max_ips.read().await;
|
||||
max_ips.get(username).copied()
|
||||
max_ips
|
||||
.get(username)
|
||||
.copied()
|
||||
.filter(|limit| *limit > 0)
|
||||
.or((default_max_ips > 0).then_some(default_max_ips))
|
||||
}
|
||||
|
||||
pub async fn format_stats(&self) -> String {
|
||||
@@ -546,7 +658,7 @@ mod tests {
|
||||
config_limits.insert("user1".to_string(), 5);
|
||||
config_limits.insert("user2".to_string(), 3);
|
||||
|
||||
tracker.load_limits(&config_limits).await;
|
||||
tracker.load_limits(0, &config_limits).await;
|
||||
|
||||
assert_eq!(tracker.get_user_limit("user1").await, Some(5));
|
||||
assert_eq!(tracker.get_user_limit("user2").await, Some(3));
|
||||
@@ -560,16 +672,46 @@ mod tests {
|
||||
let mut first = HashMap::new();
|
||||
first.insert("user1".to_string(), 2);
|
||||
first.insert("user2".to_string(), 3);
|
||||
tracker.load_limits(&first).await;
|
||||
tracker.load_limits(0, &first).await;
|
||||
|
||||
let mut second = HashMap::new();
|
||||
second.insert("user2".to_string(), 5);
|
||||
tracker.load_limits(&second).await;
|
||||
tracker.load_limits(0, &second).await;
|
||||
|
||||
assert_eq!(tracker.get_user_limit("user1").await, None);
|
||||
assert_eq!(tracker.get_user_limit("user2").await, Some(5));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_global_each_limit_applies_without_user_override() {
|
||||
let tracker = UserIpTracker::new();
|
||||
tracker.load_limits(2, &HashMap::new()).await;
|
||||
|
||||
let ip1 = test_ipv4(172, 16, 0, 1);
|
||||
let ip2 = test_ipv4(172, 16, 0, 2);
|
||||
let ip3 = test_ipv4(172, 16, 0, 3);
|
||||
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_ok());
|
||||
assert!(tracker.check_and_add("test_user", ip3).await.is_err());
|
||||
assert_eq!(tracker.get_user_limit("test_user").await, Some(2));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_user_override_wins_over_global_each_limit() {
|
||||
let tracker = UserIpTracker::new();
|
||||
let mut limits = HashMap::new();
|
||||
limits.insert("test_user".to_string(), 1);
|
||||
tracker.load_limits(3, &limits).await;
|
||||
|
||||
let ip1 = test_ipv4(172, 17, 0, 1);
|
||||
let ip2 = test_ipv4(172, 17, 0, 2);
|
||||
|
||||
assert!(tracker.check_and_add("test_user", ip1).await.is_ok());
|
||||
assert!(tracker.check_and_add("test_user", ip2).await.is_err());
|
||||
assert_eq!(tracker.get_user_limit("test_user").await, Some(1));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_time_window_mode_blocks_recent_ip_churn() {
|
||||
let tracker = UserIpTracker::new();
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
#![allow(clippy::too_many_arguments)]
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::time::Instant;
|
||||
|
||||
@@ -11,10 +13,10 @@ use crate::startup::{
|
||||
COMPONENT_DC_CONNECTIVITY_PING, COMPONENT_ME_CONNECTIVITY_PING, COMPONENT_RUNTIME_READY,
|
||||
StartupTracker,
|
||||
};
|
||||
use crate::transport::UpstreamManager;
|
||||
use crate::transport::middle_proxy::{
|
||||
MePingFamily, MePingSample, MePool, format_me_route, format_sample_line, run_me_ping,
|
||||
};
|
||||
use crate::transport::UpstreamManager;
|
||||
|
||||
pub(crate) async fn run_startup_connectivity(
|
||||
config: &Arc<ProxyConfig>,
|
||||
@@ -47,11 +49,15 @@ pub(crate) async fn run_startup_connectivity(
|
||||
|
||||
let v4_ok = me_results.iter().any(|r| {
|
||||
matches!(r.family, MePingFamily::V4)
|
||||
&& r.samples.iter().any(|s| s.error.is_none() && s.handshake_ms.is_some())
|
||||
&& r.samples
|
||||
.iter()
|
||||
.any(|s| s.error.is_none() && s.handshake_ms.is_some())
|
||||
});
|
||||
let v6_ok = me_results.iter().any(|r| {
|
||||
matches!(r.family, MePingFamily::V6)
|
||||
&& r.samples.iter().any(|s| s.error.is_none() && s.handshake_ms.is_some())
|
||||
&& r.samples
|
||||
.iter()
|
||||
.any(|s| s.error.is_none() && s.handshake_ms.is_some())
|
||||
});
|
||||
|
||||
info!("================= Telegram ME Connectivity =================");
|
||||
@@ -131,8 +137,14 @@ pub(crate) async fn run_startup_connectivity(
|
||||
.await;
|
||||
|
||||
for upstream_result in &ping_results {
|
||||
let v6_works = upstream_result.v6_results.iter().any(|r| r.rtt_ms.is_some());
|
||||
let v4_works = upstream_result.v4_results.iter().any(|r| r.rtt_ms.is_some());
|
||||
let v6_works = upstream_result
|
||||
.v6_results
|
||||
.iter()
|
||||
.any(|r| r.rtt_ms.is_some());
|
||||
let v4_works = upstream_result
|
||||
.v4_results
|
||||
.iter()
|
||||
.any(|r| r.rtt_ms.is_some());
|
||||
|
||||
if upstream_result.both_available {
|
||||
if prefer_ipv6 {
|
||||
|
||||
@@ -1,3 +1,6 @@
|
||||
#![allow(clippy::items_after_test_module)]
|
||||
|
||||
use std::path::PathBuf;
|
||||
use std::time::Duration;
|
||||
|
||||
use tokio::sync::watch;
|
||||
@@ -9,8 +12,22 @@ use crate::transport::middle_proxy::{
|
||||
ProxyConfigData, fetch_proxy_config_with_raw, load_proxy_config_cache, save_proxy_config_cache,
|
||||
};
|
||||
|
||||
pub(crate) fn parse_cli() -> (String, bool, Option<String>) {
|
||||
pub(crate) fn resolve_runtime_config_path(
|
||||
config_path_cli: &str,
|
||||
startup_cwd: &std::path::Path,
|
||||
) -> PathBuf {
|
||||
let raw = PathBuf::from(config_path_cli);
|
||||
let absolute = if raw.is_absolute() {
|
||||
raw
|
||||
} else {
|
||||
startup_cwd.join(raw)
|
||||
};
|
||||
absolute.canonicalize().unwrap_or(absolute)
|
||||
}
|
||||
|
||||
pub(crate) fn parse_cli() -> (String, Option<PathBuf>, bool, Option<String>) {
|
||||
let mut config_path = "config.toml".to_string();
|
||||
let mut data_path: Option<PathBuf> = None;
|
||||
let mut silent = false;
|
||||
let mut log_level: Option<String> = None;
|
||||
|
||||
@@ -28,6 +45,20 @@ pub(crate) fn parse_cli() -> (String, bool, Option<String>) {
|
||||
let mut i = 0;
|
||||
while i < args.len() {
|
||||
match args[i].as_str() {
|
||||
"--data-path" => {
|
||||
i += 1;
|
||||
if i < args.len() {
|
||||
data_path = Some(PathBuf::from(args[i].clone()));
|
||||
} else {
|
||||
eprintln!("Missing value for --data-path");
|
||||
std::process::exit(0);
|
||||
}
|
||||
}
|
||||
s if s.starts_with("--data-path=") => {
|
||||
data_path = Some(PathBuf::from(
|
||||
s.trim_start_matches("--data-path=").to_string(),
|
||||
));
|
||||
}
|
||||
"--silent" | "-s" => {
|
||||
silent = true;
|
||||
}
|
||||
@@ -44,6 +75,9 @@ pub(crate) fn parse_cli() -> (String, bool, Option<String>) {
|
||||
eprintln!("Usage: telemt [config.toml] [OPTIONS]");
|
||||
eprintln!();
|
||||
eprintln!("Options:");
|
||||
eprintln!(
|
||||
" --data-path <DIR> Set data directory (absolute path; overrides config value)"
|
||||
);
|
||||
eprintln!(" --silent, -s Suppress info logs");
|
||||
eprintln!(" --log-level <LEVEL> debug|verbose|normal|silent");
|
||||
eprintln!(" --help, -h Show this help");
|
||||
@@ -78,12 +112,55 @@ pub(crate) fn parse_cli() -> (String, bool, Option<String>) {
|
||||
i += 1;
|
||||
}
|
||||
|
||||
(config_path, silent, log_level)
|
||||
(config_path, data_path, silent, log_level)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::resolve_runtime_config_path;
|
||||
|
||||
#[test]
|
||||
fn resolve_runtime_config_path_anchors_relative_to_startup_cwd() {
|
||||
let nonce = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_nanos();
|
||||
let startup_cwd = std::env::temp_dir().join(format!("telemt_cfg_path_{nonce}"));
|
||||
std::fs::create_dir_all(&startup_cwd).unwrap();
|
||||
let target = startup_cwd.join("config.toml");
|
||||
std::fs::write(&target, " ").unwrap();
|
||||
|
||||
let resolved = resolve_runtime_config_path("config.toml", &startup_cwd);
|
||||
assert_eq!(resolved, target.canonicalize().unwrap());
|
||||
|
||||
let _ = std::fs::remove_file(&target);
|
||||
let _ = std::fs::remove_dir(&startup_cwd);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn resolve_runtime_config_path_keeps_absolute_for_missing_file() {
|
||||
let nonce = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_nanos();
|
||||
let startup_cwd = std::env::temp_dir().join(format!("telemt_cfg_path_missing_{nonce}"));
|
||||
std::fs::create_dir_all(&startup_cwd).unwrap();
|
||||
|
||||
let resolved = resolve_runtime_config_path("missing.toml", &startup_cwd);
|
||||
assert_eq!(resolved, startup_cwd.join("missing.toml"));
|
||||
|
||||
let _ = std::fs::remove_dir(&startup_cwd);
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn print_proxy_links(host: &str, port: u16, config: &ProxyConfig) {
|
||||
info!(target: "telemt::links", "--- Proxy Links ({}) ---", host);
|
||||
for user_name in config.general.links.show.resolve_users(&config.access.users) {
|
||||
for user_name in config
|
||||
.general
|
||||
.links
|
||||
.show
|
||||
.resolve_users(&config.access.users)
|
||||
{
|
||||
if let Some(secret) = config.access.users.get(user_name) {
|
||||
info!(target: "telemt::links", "User: {}", user_name);
|
||||
if config.general.modes.classic {
|
||||
@@ -190,6 +267,7 @@ pub(crate) fn format_uptime(total_secs: u64) -> String {
|
||||
format!("{} / {} seconds", parts.join(", "), total_secs)
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub(crate) async fn wait_until_admission_open(admission_rx: &mut watch::Receiver<bool>) -> bool {
|
||||
loop {
|
||||
if *admission_rx.borrow() {
|
||||
@@ -223,7 +301,10 @@ pub(crate) async fn load_startup_proxy_config_snapshot(
|
||||
return Some(cfg);
|
||||
}
|
||||
|
||||
warn!(snapshot = label, url, "Startup proxy-config is empty; trying disk cache");
|
||||
warn!(
|
||||
snapshot = label,
|
||||
url, "Startup proxy-config is empty; trying disk cache"
|
||||
);
|
||||
if let Some(path) = cache_path {
|
||||
match load_proxy_config_cache(path).await {
|
||||
Ok(cached) if !cached.map.is_empty() => {
|
||||
@@ -238,8 +319,7 @@ pub(crate) async fn load_startup_proxy_config_snapshot(
|
||||
Ok(_) => {
|
||||
warn!(
|
||||
snapshot = label,
|
||||
path,
|
||||
"Startup proxy-config cache is empty; ignoring cache file"
|
||||
path, "Startup proxy-config cache is empty; ignoring cache file"
|
||||
);
|
||||
}
|
||||
Err(cache_err) => {
|
||||
@@ -283,8 +363,7 @@ pub(crate) async fn load_startup_proxy_config_snapshot(
|
||||
Ok(_) => {
|
||||
warn!(
|
||||
snapshot = label,
|
||||
path,
|
||||
"Startup proxy-config cache is empty; ignoring cache file"
|
||||
path, "Startup proxy-config cache is empty; ignoring cache file"
|
||||
);
|
||||
}
|
||||
Err(cache_err) => {
|
||||
|
||||
@@ -12,19 +12,17 @@ use tracing::{debug, error, info, warn};
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::crypto::SecureRandom;
|
||||
use crate::ip_tracker::UserIpTracker;
|
||||
use crate::proxy::route_mode::{ROUTE_SWITCH_ERROR_MSG, RouteRuntimeController};
|
||||
use crate::proxy::ClientHandler;
|
||||
use crate::proxy::route_mode::{ROUTE_SWITCH_ERROR_MSG, RouteRuntimeController};
|
||||
use crate::startup::{COMPONENT_LISTENERS_BIND, StartupTracker};
|
||||
use crate::stats::beobachten::BeobachtenStore;
|
||||
use crate::stats::{ReplayChecker, Stats};
|
||||
use crate::stream::BufferPool;
|
||||
use crate::tls_front::TlsFrontCache;
|
||||
use crate::transport::middle_proxy::MePool;
|
||||
use crate::transport::{
|
||||
ListenOptions, UpstreamManager, create_listener, find_listener_processes,
|
||||
};
|
||||
use crate::transport::{ListenOptions, UpstreamManager, create_listener, find_listener_processes};
|
||||
|
||||
use super::helpers::{is_expected_handshake_eof, print_proxy_links, wait_until_admission_open};
|
||||
use super::helpers::{is_expected_handshake_eof, print_proxy_links};
|
||||
|
||||
pub(crate) struct BoundListeners {
|
||||
pub(crate) listeners: Vec<(TcpListener, bool)>,
|
||||
@@ -81,8 +79,9 @@ pub(crate) async fn bind_listeners(
|
||||
Ok(socket) => {
|
||||
let listener = TcpListener::from_std(socket.into())?;
|
||||
info!("Listening on {}", addr);
|
||||
let listener_proxy_protocol =
|
||||
listener_conf.proxy_protocol.unwrap_or(config.server.proxy_protocol);
|
||||
let listener_proxy_protocol = listener_conf
|
||||
.proxy_protocol
|
||||
.unwrap_or(config.server.proxy_protocol);
|
||||
|
||||
let public_host = if let Some(ref announce) = listener_conf.announce {
|
||||
announce.clone()
|
||||
@@ -100,8 +99,14 @@ pub(crate) async fn bind_listeners(
|
||||
listener_conf.ip.to_string()
|
||||
};
|
||||
|
||||
if config.general.links.public_host.is_none() && !config.general.links.show.is_empty() {
|
||||
let link_port = config.general.links.public_port.unwrap_or(config.server.port);
|
||||
if config.general.links.public_host.is_none()
|
||||
&& !config.general.links.show.is_empty()
|
||||
{
|
||||
let link_port = config
|
||||
.general
|
||||
.links
|
||||
.public_port
|
||||
.unwrap_or(config.server.port);
|
||||
print_proxy_links(&public_host, link_port, config);
|
||||
}
|
||||
|
||||
@@ -145,12 +150,14 @@ pub(crate) async fn bind_listeners(
|
||||
let (host, port) = if let Some(ref h) = config.general.links.public_host {
|
||||
(
|
||||
h.clone(),
|
||||
config.general.links.public_port.unwrap_or(config.server.port),
|
||||
config
|
||||
.general
|
||||
.links
|
||||
.public_port
|
||||
.unwrap_or(config.server.port),
|
||||
)
|
||||
} else {
|
||||
let ip = detected_ip_v4
|
||||
.or(detected_ip_v6)
|
||||
.map(|ip| ip.to_string());
|
||||
let ip = detected_ip_v4.or(detected_ip_v6).map(|ip| ip.to_string());
|
||||
if ip.is_none() {
|
||||
warn!(
|
||||
"show_link is configured but public IP could not be detected. Set public_host in config."
|
||||
@@ -158,7 +165,11 @@ pub(crate) async fn bind_listeners(
|
||||
}
|
||||
(
|
||||
ip.unwrap_or_else(|| "UNKNOWN".to_string()),
|
||||
config.general.links.public_port.unwrap_or(config.server.port),
|
||||
config
|
||||
.general
|
||||
.links
|
||||
.public_port
|
||||
.unwrap_or(config.server.port),
|
||||
)
|
||||
};
|
||||
|
||||
@@ -178,13 +189,19 @@ pub(crate) async fn bind_listeners(
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
let perms = std::fs::Permissions::from_mode(mode);
|
||||
if let Err(e) = std::fs::set_permissions(unix_path, perms) {
|
||||
error!("Failed to set unix socket permissions to {}: {}", perm_str, e);
|
||||
error!(
|
||||
"Failed to set unix socket permissions to {}: {}",
|
||||
perm_str, e
|
||||
);
|
||||
} else {
|
||||
info!("Listening on unix:{} (mode {})", unix_path, perm_str);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("Invalid listen_unix_sock_perm '{}': {}. Ignoring.", perm_str, e);
|
||||
warn!(
|
||||
"Invalid listen_unix_sock_perm '{}': {}. Ignoring.",
|
||||
perm_str, e
|
||||
);
|
||||
info!("Listening on unix:{}", unix_path);
|
||||
}
|
||||
}
|
||||
@@ -195,7 +212,7 @@ pub(crate) async fn bind_listeners(
|
||||
has_unix_listener = true;
|
||||
|
||||
let mut config_rx_unix: watch::Receiver<Arc<ProxyConfig>> = config_rx.clone();
|
||||
let mut admission_rx_unix = admission_rx.clone();
|
||||
let admission_rx_unix = admission_rx.clone();
|
||||
let stats = stats.clone();
|
||||
let upstream_manager = upstream_manager.clone();
|
||||
let replay_checker = replay_checker.clone();
|
||||
@@ -212,17 +229,42 @@ pub(crate) async fn bind_listeners(
|
||||
let unix_conn_counter = Arc::new(std::sync::atomic::AtomicU64::new(1));
|
||||
|
||||
loop {
|
||||
if !wait_until_admission_open(&mut admission_rx_unix).await {
|
||||
warn!("Conditional-admission gate channel closed for unix listener");
|
||||
break;
|
||||
}
|
||||
match unix_listener.accept().await {
|
||||
Ok((stream, _)) => {
|
||||
let permit = match max_connections_unix.clone().acquire_owned().await {
|
||||
Ok(permit) => permit,
|
||||
Err(_) => {
|
||||
error!("Connection limiter is closed");
|
||||
break;
|
||||
if !*admission_rx_unix.borrow() {
|
||||
drop(stream);
|
||||
continue;
|
||||
}
|
||||
let accept_permit_timeout_ms =
|
||||
config_rx_unix.borrow().server.accept_permit_timeout_ms;
|
||||
let permit = if accept_permit_timeout_ms == 0 {
|
||||
match max_connections_unix.clone().acquire_owned().await {
|
||||
Ok(permit) => permit,
|
||||
Err(_) => {
|
||||
error!("Connection limiter is closed");
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
match tokio::time::timeout(
|
||||
Duration::from_millis(accept_permit_timeout_ms),
|
||||
max_connections_unix.clone().acquire_owned(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(Ok(permit)) => permit,
|
||||
Ok(Err(_)) => {
|
||||
error!("Connection limiter is closed");
|
||||
break;
|
||||
}
|
||||
Err(_) => {
|
||||
debug!(
|
||||
timeout_ms = accept_permit_timeout_ms,
|
||||
"Dropping accepted unix connection: permit wait timeout"
|
||||
);
|
||||
drop(stream);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
};
|
||||
let conn_id =
|
||||
@@ -312,7 +354,7 @@ pub(crate) fn spawn_tcp_accept_loops(
|
||||
) {
|
||||
for (listener, listener_proxy_protocol) in listeners {
|
||||
let mut config_rx: watch::Receiver<Arc<ProxyConfig>> = config_rx.clone();
|
||||
let mut admission_rx_tcp = admission_rx.clone();
|
||||
let admission_rx_tcp = admission_rx.clone();
|
||||
let stats = stats.clone();
|
||||
let upstream_manager = upstream_manager.clone();
|
||||
let replay_checker = replay_checker.clone();
|
||||
@@ -327,17 +369,44 @@ pub(crate) fn spawn_tcp_accept_loops(
|
||||
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
if !wait_until_admission_open(&mut admission_rx_tcp).await {
|
||||
warn!("Conditional-admission gate channel closed for tcp listener");
|
||||
break;
|
||||
}
|
||||
match listener.accept().await {
|
||||
Ok((stream, peer_addr)) => {
|
||||
let permit = match max_connections_tcp.clone().acquire_owned().await {
|
||||
Ok(permit) => permit,
|
||||
Err(_) => {
|
||||
error!("Connection limiter is closed");
|
||||
break;
|
||||
if !*admission_rx_tcp.borrow() {
|
||||
debug!(peer = %peer_addr, "Admission gate closed, dropping connection");
|
||||
drop(stream);
|
||||
continue;
|
||||
}
|
||||
let accept_permit_timeout_ms =
|
||||
config_rx.borrow().server.accept_permit_timeout_ms;
|
||||
let permit = if accept_permit_timeout_ms == 0 {
|
||||
match max_connections_tcp.clone().acquire_owned().await {
|
||||
Ok(permit) => permit,
|
||||
Err(_) => {
|
||||
error!("Connection limiter is closed");
|
||||
break;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
match tokio::time::timeout(
|
||||
Duration::from_millis(accept_permit_timeout_ms),
|
||||
max_connections_tcp.clone().acquire_owned(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(Ok(permit)) => permit,
|
||||
Ok(Err(_)) => {
|
||||
error!("Connection limiter is closed");
|
||||
break;
|
||||
}
|
||||
Err(_) => {
|
||||
debug!(
|
||||
peer = %peer_addr,
|
||||
timeout_ms = accept_permit_timeout_ms,
|
||||
"Dropping accepted connection: permit wait timeout"
|
||||
);
|
||||
drop(stream);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
};
|
||||
let config = config_rx.borrow_and_update().clone();
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
#![allow(clippy::too_many_arguments)]
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
@@ -12,8 +14,8 @@ use crate::startup::{
|
||||
COMPONENT_ME_PROXY_CONFIG_V6, COMPONENT_ME_SECRET_FETCH, StartupMeStatus, StartupTracker,
|
||||
};
|
||||
use crate::stats::Stats;
|
||||
use crate::transport::middle_proxy::MePool;
|
||||
use crate::transport::UpstreamManager;
|
||||
use crate::transport::middle_proxy::MePool;
|
||||
|
||||
use super::helpers::load_startup_proxy_config_snapshot;
|
||||
|
||||
@@ -229,14 +231,25 @@ pub(crate) async fn initialize_me_pool(
|
||||
config.general.me_adaptive_floor_recover_grace_secs,
|
||||
config.general.me_adaptive_floor_writers_per_core_total,
|
||||
config.general.me_adaptive_floor_cpu_cores_override,
|
||||
config.general.me_adaptive_floor_max_extra_writers_single_per_core,
|
||||
config.general.me_adaptive_floor_max_extra_writers_multi_per_core,
|
||||
config
|
||||
.general
|
||||
.me_adaptive_floor_max_extra_writers_single_per_core,
|
||||
config
|
||||
.general
|
||||
.me_adaptive_floor_max_extra_writers_multi_per_core,
|
||||
config.general.me_adaptive_floor_max_active_writers_per_core,
|
||||
config.general.me_adaptive_floor_max_warm_writers_per_core,
|
||||
config.general.me_adaptive_floor_max_active_writers_global,
|
||||
config.general.me_adaptive_floor_max_warm_writers_global,
|
||||
config.general.hardswap,
|
||||
config.general.me_pool_drain_ttl_secs,
|
||||
config.general.me_instadrain,
|
||||
config.general.me_pool_drain_threshold,
|
||||
config.general.me_pool_drain_soft_evict_enabled,
|
||||
config.general.me_pool_drain_soft_evict_grace_secs,
|
||||
config.general.me_pool_drain_soft_evict_per_writer,
|
||||
config.general.me_pool_drain_soft_evict_budget_per_core,
|
||||
config.general.me_pool_drain_soft_evict_cooldown_ms,
|
||||
config.general.effective_me_pool_force_close_secs(),
|
||||
config.general.me_pool_min_fresh_ratio,
|
||||
config.general.me_hardswap_warmup_delay_min_ms,
|
||||
@@ -323,18 +336,76 @@ pub(crate) async fn initialize_me_pool(
|
||||
"Middle-End pool initialized successfully"
|
||||
);
|
||||
|
||||
let pool_health = pool_bg.clone();
|
||||
let rng_health = rng_bg.clone();
|
||||
let min_conns = pool_size;
|
||||
tokio::spawn(async move {
|
||||
crate::transport::middle_proxy::me_health_monitor(
|
||||
pool_health,
|
||||
rng_health,
|
||||
min_conns,
|
||||
)
|
||||
.await;
|
||||
});
|
||||
break;
|
||||
// ── Supervised background tasks ──────────────────
|
||||
// Each task runs inside a nested tokio::spawn so
|
||||
// that a panic is caught via JoinHandle and the
|
||||
// outer loop restarts the task automatically.
|
||||
let pool_health = pool_bg.clone();
|
||||
let rng_health = rng_bg.clone();
|
||||
let min_conns = pool_size;
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
let p = pool_health.clone();
|
||||
let r = rng_health.clone();
|
||||
let res = tokio::spawn(async move {
|
||||
crate::transport::middle_proxy::me_health_monitor(
|
||||
p, r, min_conns,
|
||||
)
|
||||
.await;
|
||||
})
|
||||
.await;
|
||||
match res {
|
||||
Ok(()) => warn!("me_health_monitor exited unexpectedly, restarting"),
|
||||
Err(e) => {
|
||||
error!(error = %e, "me_health_monitor panicked, restarting in 1s");
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
let pool_drain_enforcer = pool_bg.clone();
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
let p = pool_drain_enforcer.clone();
|
||||
let res = tokio::spawn(async move {
|
||||
crate::transport::middle_proxy::me_drain_timeout_enforcer(p).await;
|
||||
})
|
||||
.await;
|
||||
match res {
|
||||
Ok(()) => warn!("me_drain_timeout_enforcer exited unexpectedly, restarting"),
|
||||
Err(e) => {
|
||||
error!(error = %e, "me_drain_timeout_enforcer panicked, restarting in 1s");
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
let pool_watchdog = pool_bg.clone();
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
let p = pool_watchdog.clone();
|
||||
let res = tokio::spawn(async move {
|
||||
crate::transport::middle_proxy::me_zombie_writer_watchdog(p).await;
|
||||
})
|
||||
.await;
|
||||
match res {
|
||||
Ok(()) => warn!("me_zombie_writer_watchdog exited unexpectedly, restarting"),
|
||||
Err(e) => {
|
||||
error!(error = %e, "me_zombie_writer_watchdog panicked, restarting in 1s");
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
// CRITICAL: keep the current-thread runtime
|
||||
// alive. Without this, block_on() returns,
|
||||
// the Runtime is dropped, and ALL spawned
|
||||
// background tasks (health monitor, drain
|
||||
// enforcer, zombie watchdog) are silently
|
||||
// cancelled — causing the draining-writer
|
||||
// leak that brought us here.
|
||||
std::future::pending::<()>().await;
|
||||
unreachable!();
|
||||
}
|
||||
Err(e) => {
|
||||
startup_tracker_bg.set_me_last_error(Some(e.to_string())).await;
|
||||
@@ -392,14 +463,69 @@ pub(crate) async fn initialize_me_pool(
|
||||
"Middle-End pool initialized successfully"
|
||||
);
|
||||
|
||||
// ── Supervised background tasks ──────────────────
|
||||
let pool_clone = pool.clone();
|
||||
let rng_clone = rng.clone();
|
||||
let min_conns = pool_size;
|
||||
tokio::spawn(async move {
|
||||
crate::transport::middle_proxy::me_health_monitor(
|
||||
pool_clone, rng_clone, min_conns,
|
||||
)
|
||||
.await;
|
||||
loop {
|
||||
let p = pool_clone.clone();
|
||||
let r = rng_clone.clone();
|
||||
let res = tokio::spawn(async move {
|
||||
crate::transport::middle_proxy::me_health_monitor(
|
||||
p, r, min_conns,
|
||||
)
|
||||
.await;
|
||||
})
|
||||
.await;
|
||||
match res {
|
||||
Ok(()) => warn!(
|
||||
"me_health_monitor exited unexpectedly, restarting"
|
||||
),
|
||||
Err(e) => {
|
||||
error!(error = %e, "me_health_monitor panicked, restarting in 1s");
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
let pool_drain_enforcer = pool.clone();
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
let p = pool_drain_enforcer.clone();
|
||||
let res = tokio::spawn(async move {
|
||||
crate::transport::middle_proxy::me_drain_timeout_enforcer(p).await;
|
||||
})
|
||||
.await;
|
||||
match res {
|
||||
Ok(()) => warn!(
|
||||
"me_drain_timeout_enforcer exited unexpectedly, restarting"
|
||||
),
|
||||
Err(e) => {
|
||||
error!(error = %e, "me_drain_timeout_enforcer panicked, restarting in 1s");
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
let pool_watchdog = pool.clone();
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
let p = pool_watchdog.clone();
|
||||
let res = tokio::spawn(async move {
|
||||
crate::transport::middle_proxy::me_zombie_writer_watchdog(p).await;
|
||||
})
|
||||
.await;
|
||||
match res {
|
||||
Ok(()) => warn!(
|
||||
"me_zombie_writer_watchdog exited unexpectedly, restarting"
|
||||
),
|
||||
Err(e) => {
|
||||
error!(error = %e, "me_zombie_writer_watchdog panicked, restarting in 1s");
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
break Some(pool);
|
||||
|
||||
@@ -11,9 +11,9 @@
|
||||
// - admission: conditional-cast gate and route mode switching.
|
||||
// - listeners: TCP/Unix listener bind and accept-loop orchestration.
|
||||
// - shutdown: graceful shutdown sequence and uptime logging.
|
||||
mod helpers;
|
||||
mod admission;
|
||||
mod connectivity;
|
||||
mod helpers;
|
||||
mod listeners;
|
||||
mod me_startup;
|
||||
mod runtime_tasks;
|
||||
@@ -33,19 +33,19 @@ use crate::crypto::SecureRandom;
|
||||
use crate::ip_tracker::UserIpTracker;
|
||||
use crate::network::probe::{decide_network_capabilities, log_probe_result, run_probe};
|
||||
use crate::proxy::route_mode::{RelayRouteMode, RouteRuntimeController};
|
||||
use crate::startup::{
|
||||
COMPONENT_API_BOOTSTRAP, COMPONENT_CONFIG_LOAD, COMPONENT_ME_POOL_CONSTRUCT,
|
||||
COMPONENT_ME_POOL_INIT_STAGE1, COMPONENT_ME_PROXY_CONFIG_V4, COMPONENT_ME_PROXY_CONFIG_V6,
|
||||
COMPONENT_ME_SECRET_FETCH, COMPONENT_NETWORK_PROBE, COMPONENT_TRACING_INIT, StartupMeStatus,
|
||||
StartupTracker,
|
||||
};
|
||||
use crate::stats::beobachten::BeobachtenStore;
|
||||
use crate::stats::telemetry::TelemetryPolicy;
|
||||
use crate::stats::{ReplayChecker, Stats};
|
||||
use crate::startup::{
|
||||
COMPONENT_API_BOOTSTRAP, COMPONENT_CONFIG_LOAD,
|
||||
COMPONENT_ME_POOL_CONSTRUCT, COMPONENT_ME_POOL_INIT_STAGE1,
|
||||
COMPONENT_ME_PROXY_CONFIG_V4, COMPONENT_ME_PROXY_CONFIG_V6, COMPONENT_ME_SECRET_FETCH,
|
||||
COMPONENT_NETWORK_PROBE, COMPONENT_TRACING_INIT, StartupMeStatus, StartupTracker,
|
||||
};
|
||||
use crate::stream::BufferPool;
|
||||
use crate::transport::middle_proxy::MePool;
|
||||
use crate::transport::UpstreamManager;
|
||||
use helpers::parse_cli;
|
||||
use crate::transport::middle_proxy::MePool;
|
||||
use helpers::{parse_cli, resolve_runtime_config_path};
|
||||
|
||||
/// Runs the full telemt runtime startup pipeline and blocks until shutdown.
|
||||
pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
@@ -56,20 +56,34 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
.as_secs();
|
||||
let startup_tracker = Arc::new(StartupTracker::new(process_started_at_epoch_secs));
|
||||
startup_tracker
|
||||
.start_component(COMPONENT_CONFIG_LOAD, Some("load and validate config".to_string()))
|
||||
.start_component(
|
||||
COMPONENT_CONFIG_LOAD,
|
||||
Some("load and validate config".to_string()),
|
||||
)
|
||||
.await;
|
||||
let (config_path, cli_silent, cli_log_level) = parse_cli();
|
||||
let (config_path_cli, data_path, cli_silent, cli_log_level) = parse_cli();
|
||||
let startup_cwd = match std::env::current_dir() {
|
||||
Ok(cwd) => cwd,
|
||||
Err(e) => {
|
||||
eprintln!("[telemt] Can't read current_dir: {}", e);
|
||||
std::process::exit(1);
|
||||
}
|
||||
};
|
||||
let config_path = resolve_runtime_config_path(&config_path_cli, &startup_cwd);
|
||||
|
||||
let mut config = match ProxyConfig::load(&config_path) {
|
||||
Ok(c) => c,
|
||||
Err(e) => {
|
||||
if std::path::Path::new(&config_path).exists() {
|
||||
if config_path.exists() {
|
||||
eprintln!("[telemt] Error: {}", e);
|
||||
std::process::exit(1);
|
||||
} else {
|
||||
let default = ProxyConfig::default();
|
||||
std::fs::write(&config_path, toml::to_string_pretty(&default).unwrap()).unwrap();
|
||||
eprintln!("[telemt] Created default config at {}", config_path);
|
||||
eprintln!(
|
||||
"[telemt] Created default config at {}",
|
||||
config_path.display()
|
||||
);
|
||||
default
|
||||
}
|
||||
}
|
||||
@@ -80,6 +94,48 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
std::process::exit(1);
|
||||
}
|
||||
|
||||
if let Some(p) = data_path {
|
||||
config.general.data_path = Some(p);
|
||||
}
|
||||
|
||||
if let Some(ref data_path) = config.general.data_path {
|
||||
if !data_path.is_absolute() {
|
||||
eprintln!(
|
||||
"[telemt] data_path must be absolute: {}",
|
||||
data_path.display()
|
||||
);
|
||||
std::process::exit(1);
|
||||
}
|
||||
|
||||
if data_path.exists() {
|
||||
if !data_path.is_dir() {
|
||||
eprintln!(
|
||||
"[telemt] data_path exists but is not a directory: {}",
|
||||
data_path.display()
|
||||
);
|
||||
std::process::exit(1);
|
||||
}
|
||||
} else {
|
||||
if let Err(e) = std::fs::create_dir_all(data_path) {
|
||||
eprintln!(
|
||||
"[telemt] Can't create data_path {}: {}",
|
||||
data_path.display(),
|
||||
e
|
||||
);
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
if let Err(e) = std::env::set_current_dir(data_path) {
|
||||
eprintln!(
|
||||
"[telemt] Can't use data_path {}: {}",
|
||||
data_path.display(),
|
||||
e
|
||||
);
|
||||
std::process::exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
if let Err(e) = crate::network::dns_overrides::install_entries(&config.network.dns_overrides) {
|
||||
eprintln!("[telemt] Invalid network.dns_overrides: {}", e);
|
||||
std::process::exit(1);
|
||||
@@ -99,7 +155,10 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
|
||||
let (filter_layer, filter_handle) = reload::Layer::new(EnvFilter::new("info"));
|
||||
startup_tracker
|
||||
.start_component(COMPONENT_TRACING_INIT, Some("initialize tracing subscriber".to_string()))
|
||||
.start_component(
|
||||
COMPONENT_TRACING_INIT,
|
||||
Some("initialize tracing subscriber".to_string()),
|
||||
)
|
||||
.await;
|
||||
|
||||
// Configure color output based on config
|
||||
@@ -114,7 +173,10 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
.with(fmt_layer)
|
||||
.init();
|
||||
startup_tracker
|
||||
.complete_component(COMPONENT_TRACING_INIT, Some("tracing initialized".to_string()))
|
||||
.complete_component(
|
||||
COMPONENT_TRACING_INIT,
|
||||
Some("tracing initialized".to_string()),
|
||||
)
|
||||
.await;
|
||||
|
||||
info!("Telemt MTProxy v{}", env!("CARGO_PKG_VERSION"));
|
||||
@@ -168,17 +230,25 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
stats.clone(),
|
||||
));
|
||||
let ip_tracker = Arc::new(UserIpTracker::new());
|
||||
ip_tracker.load_limits(&config.access.user_max_unique_ips).await;
|
||||
ip_tracker
|
||||
.load_limits(
|
||||
config.access.user_max_unique_ips_global_each,
|
||||
&config.access.user_max_unique_ips,
|
||||
)
|
||||
.await;
|
||||
ip_tracker
|
||||
.set_limit_policy(
|
||||
config.access.user_max_unique_ips_mode,
|
||||
config.access.user_max_unique_ips_window_secs,
|
||||
)
|
||||
.await;
|
||||
if !config.access.user_max_unique_ips.is_empty() {
|
||||
if config.access.user_max_unique_ips_global_each > 0
|
||||
|| !config.access.user_max_unique_ips.is_empty()
|
||||
{
|
||||
info!(
|
||||
"IP limits configured for {} users",
|
||||
config.access.user_max_unique_ips.len()
|
||||
global_each_limit = config.access.user_max_unique_ips_global_each,
|
||||
explicit_user_limits = config.access.user_max_unique_ips.len(),
|
||||
"User unique IP limits configured"
|
||||
);
|
||||
}
|
||||
if !config.network.dns_overrides.is_empty() {
|
||||
@@ -200,7 +270,10 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
let route_runtime = Arc::new(RouteRuntimeController::new(initial_route_mode));
|
||||
let api_me_pool = Arc::new(RwLock::new(None::<Arc<MePool>>));
|
||||
startup_tracker
|
||||
.start_component(COMPONENT_API_BOOTSTRAP, Some("spawn API listener task".to_string()))
|
||||
.start_component(
|
||||
COMPONENT_API_BOOTSTRAP,
|
||||
Some("spawn API listener task".to_string()),
|
||||
)
|
||||
.await;
|
||||
|
||||
if config.server.api.enabled {
|
||||
@@ -223,7 +296,7 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
let route_runtime_api = route_runtime.clone();
|
||||
let config_rx_api = api_config_rx.clone();
|
||||
let admission_rx_api = admission_rx.clone();
|
||||
let config_path_api = std::path::PathBuf::from(&config_path);
|
||||
let config_path_api = config_path.clone();
|
||||
let startup_tracker_api = startup_tracker.clone();
|
||||
let detected_ips_rx_api = detected_ips_rx.clone();
|
||||
tokio::spawn(async move {
|
||||
@@ -283,7 +356,10 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
.await;
|
||||
|
||||
startup_tracker
|
||||
.start_component(COMPONENT_NETWORK_PROBE, Some("probe network capabilities".to_string()))
|
||||
.start_component(
|
||||
COMPONENT_NETWORK_PROBE,
|
||||
Some("probe network capabilities".to_string()),
|
||||
)
|
||||
.await;
|
||||
let probe = run_probe(
|
||||
&config.network,
|
||||
@@ -296,11 +372,8 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
probe.detected_ipv4.map(IpAddr::V4),
|
||||
probe.detected_ipv6.map(IpAddr::V6),
|
||||
));
|
||||
let decision = decide_network_capabilities(
|
||||
&config.network,
|
||||
&probe,
|
||||
config.general.middle_proxy_nat_ip,
|
||||
);
|
||||
let decision =
|
||||
decide_network_capabilities(&config.network, &probe, config.general.middle_proxy_nat_ip);
|
||||
log_probe_result(&probe, &decision);
|
||||
startup_tracker
|
||||
.complete_component(
|
||||
@@ -314,8 +387,13 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
let beobachten = Arc::new(BeobachtenStore::new());
|
||||
let rng = Arc::new(SecureRandom::new());
|
||||
|
||||
// Connection concurrency limit
|
||||
let max_connections = Arc::new(Semaphore::new(10_000));
|
||||
// Connection concurrency limit (0 = unlimited)
|
||||
let max_connections_limit = if config.server.max_connections == 0 {
|
||||
Semaphore::MAX_PERMITS
|
||||
} else {
|
||||
config.server.max_connections as usize
|
||||
};
|
||||
let max_connections = Arc::new(Semaphore::new(max_connections_limit));
|
||||
|
||||
let me2dc_fallback = config.general.me2dc_fallback;
|
||||
let me_init_retry_attempts = config.general.me_init_retry_attempts;
|
||||
@@ -398,24 +476,16 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
|
||||
// If ME failed to initialize, force direct-only mode.
|
||||
if me_pool.is_some() {
|
||||
startup_tracker
|
||||
.set_transport_mode("middle_proxy")
|
||||
.await;
|
||||
startup_tracker
|
||||
.set_degraded(false)
|
||||
.await;
|
||||
startup_tracker.set_transport_mode("middle_proxy").await;
|
||||
startup_tracker.set_degraded(false).await;
|
||||
info!("Transport: Middle-End Proxy - all DC-over-RPC");
|
||||
} else {
|
||||
let _ = use_middle_proxy;
|
||||
use_middle_proxy = false;
|
||||
// Make runtime config reflect direct-only mode for handlers.
|
||||
config.general.use_middle_proxy = false;
|
||||
startup_tracker
|
||||
.set_transport_mode("direct")
|
||||
.await;
|
||||
startup_tracker
|
||||
.set_degraded(true)
|
||||
.await;
|
||||
startup_tracker.set_transport_mode("direct").await;
|
||||
startup_tracker.set_degraded(true).await;
|
||||
if me2dc_fallback {
|
||||
startup_tracker
|
||||
.set_me_status(StartupMeStatus::Failed, "fallback_to_direct")
|
||||
@@ -436,7 +506,7 @@ pub async fn run() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
||||
Duration::from_secs(config.access.replay_window_secs),
|
||||
));
|
||||
|
||||
let buffer_pool = Arc::new(BufferPool::with_config(16 * 1024, 4096));
|
||||
let buffer_pool = Arc::new(BufferPool::with_config(64 * 1024, 4096));
|
||||
|
||||
connectivity::run_startup_connectivity(
|
||||
&config,
|
||||
|
||||
@@ -1,24 +1,27 @@
|
||||
use std::net::IpAddr;
|
||||
use std::path::PathBuf;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use tokio::sync::{mpsc, watch};
|
||||
use tracing::{debug, warn};
|
||||
use tracing_subscriber::reload;
|
||||
use tracing_subscriber::EnvFilter;
|
||||
use tracing_subscriber::reload;
|
||||
|
||||
use crate::config::{LogLevel, ProxyConfig};
|
||||
use crate::config::hot_reload::spawn_config_watcher;
|
||||
use crate::config::{LogLevel, ProxyConfig};
|
||||
use crate::crypto::SecureRandom;
|
||||
use crate::ip_tracker::UserIpTracker;
|
||||
use crate::metrics;
|
||||
use crate::network::probe::NetworkProbe;
|
||||
use crate::startup::{COMPONENT_CONFIG_WATCHER_START, COMPONENT_METRICS_START, COMPONENT_RUNTIME_READY, StartupTracker};
|
||||
use crate::startup::{
|
||||
COMPONENT_CONFIG_WATCHER_START, COMPONENT_METRICS_START, COMPONENT_RUNTIME_READY,
|
||||
StartupTracker,
|
||||
};
|
||||
use crate::stats::beobachten::BeobachtenStore;
|
||||
use crate::stats::telemetry::TelemetryPolicy;
|
||||
use crate::stats::{ReplayChecker, Stats};
|
||||
use crate::transport::middle_proxy::{MePool, MeReinitTrigger};
|
||||
use crate::transport::UpstreamManager;
|
||||
use crate::transport::middle_proxy::{MePool, MeReinitTrigger};
|
||||
|
||||
use super::helpers::write_beobachten_snapshot;
|
||||
|
||||
@@ -32,7 +35,7 @@ pub(crate) struct RuntimeWatches {
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub(crate) async fn spawn_runtime_tasks(
|
||||
config: &Arc<ProxyConfig>,
|
||||
config_path: &str,
|
||||
config_path: &Path,
|
||||
probe: &NetworkProbe,
|
||||
prefer_ipv6: bool,
|
||||
decision_ipv4_dc: bool,
|
||||
@@ -79,15 +82,13 @@ pub(crate) async fn spawn_runtime_tasks(
|
||||
Some("spawn config hot-reload watcher".to_string()),
|
||||
)
|
||||
.await;
|
||||
let (config_rx, log_level_rx): (
|
||||
watch::Receiver<Arc<ProxyConfig>>,
|
||||
watch::Receiver<LogLevel>,
|
||||
) = spawn_config_watcher(
|
||||
PathBuf::from(config_path),
|
||||
config.clone(),
|
||||
detected_ip_v4,
|
||||
detected_ip_v6,
|
||||
);
|
||||
let (config_rx, log_level_rx): (watch::Receiver<Arc<ProxyConfig>>, watch::Receiver<LogLevel>) =
|
||||
spawn_config_watcher(
|
||||
config_path.to_path_buf(),
|
||||
config.clone(),
|
||||
detected_ip_v4,
|
||||
detected_ip_v6,
|
||||
);
|
||||
startup_tracker
|
||||
.complete_component(
|
||||
COMPONENT_CONFIG_WATCHER_START,
|
||||
@@ -114,7 +115,8 @@ pub(crate) async fn spawn_runtime_tasks(
|
||||
break;
|
||||
}
|
||||
let cfg = config_rx_policy.borrow_and_update().clone();
|
||||
stats_policy.apply_telemetry_policy(TelemetryPolicy::from_config(&cfg.general.telemetry));
|
||||
stats_policy
|
||||
.apply_telemetry_policy(TelemetryPolicy::from_config(&cfg.general.telemetry));
|
||||
if let Some(pool) = &me_pool_for_policy {
|
||||
pool.update_runtime_transport_policy(
|
||||
cfg.general.me_socks_kdf_policy,
|
||||
@@ -130,7 +132,15 @@ pub(crate) async fn spawn_runtime_tasks(
|
||||
let ip_tracker_policy = ip_tracker.clone();
|
||||
let mut config_rx_ip_limits = config_rx.clone();
|
||||
tokio::spawn(async move {
|
||||
let mut prev_limits = config_rx_ip_limits.borrow().access.user_max_unique_ips.clone();
|
||||
let mut prev_limits = config_rx_ip_limits
|
||||
.borrow()
|
||||
.access
|
||||
.user_max_unique_ips
|
||||
.clone();
|
||||
let mut prev_global_each = config_rx_ip_limits
|
||||
.borrow()
|
||||
.access
|
||||
.user_max_unique_ips_global_each;
|
||||
let mut prev_mode = config_rx_ip_limits.borrow().access.user_max_unique_ips_mode;
|
||||
let mut prev_window = config_rx_ip_limits
|
||||
.borrow()
|
||||
@@ -143,9 +153,17 @@ pub(crate) async fn spawn_runtime_tasks(
|
||||
}
|
||||
let cfg = config_rx_ip_limits.borrow_and_update().clone();
|
||||
|
||||
if prev_limits != cfg.access.user_max_unique_ips {
|
||||
ip_tracker_policy.load_limits(&cfg.access.user_max_unique_ips).await;
|
||||
if prev_limits != cfg.access.user_max_unique_ips
|
||||
|| prev_global_each != cfg.access.user_max_unique_ips_global_each
|
||||
{
|
||||
ip_tracker_policy
|
||||
.load_limits(
|
||||
cfg.access.user_max_unique_ips_global_each,
|
||||
&cfg.access.user_max_unique_ips,
|
||||
)
|
||||
.await;
|
||||
prev_limits = cfg.access.user_max_unique_ips.clone();
|
||||
prev_global_each = cfg.access.user_max_unique_ips_global_each;
|
||||
}
|
||||
|
||||
if prev_mode != cfg.access.user_max_unique_ips_mode
|
||||
@@ -171,7 +189,9 @@ pub(crate) async fn spawn_runtime_tasks(
|
||||
let sleep_secs = cfg.general.beobachten_flush_secs.max(1);
|
||||
|
||||
if cfg.general.beobachten {
|
||||
let ttl = std::time::Duration::from_secs(cfg.general.beobachten_minutes.saturating_mul(60));
|
||||
let ttl = std::time::Duration::from_secs(
|
||||
cfg.general.beobachten_minutes.saturating_mul(60),
|
||||
);
|
||||
let path = cfg.general.beobachten_file.clone();
|
||||
let snapshot = beobachten_writer.snapshot_text(ttl);
|
||||
if let Err(e) = write_beobachten_snapshot(&path, &snapshot).await {
|
||||
@@ -215,8 +235,11 @@ pub(crate) async fn spawn_runtime_tasks(
|
||||
let config_rx_clone_rot = config_rx.clone();
|
||||
let reinit_tx_rotation = reinit_tx.clone();
|
||||
tokio::spawn(async move {
|
||||
crate::transport::middle_proxy::me_rotation_task(config_rx_clone_rot, reinit_tx_rotation)
|
||||
.await;
|
||||
crate::transport::middle_proxy::me_rotation_task(
|
||||
config_rx_clone_rot,
|
||||
reinit_tx_rotation,
|
||||
)
|
||||
.await;
|
||||
});
|
||||
}
|
||||
|
||||
@@ -267,11 +290,32 @@ pub(crate) async fn spawn_metrics_if_configured(
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
config_rx: watch::Receiver<Arc<ProxyConfig>>,
|
||||
) {
|
||||
if let Some(port) = config.server.metrics_port {
|
||||
// metrics_listen takes precedence; fall back to metrics_port for backward compat.
|
||||
let metrics_target: Option<(u16, Option<String>)> =
|
||||
if let Some(ref listen) = config.server.metrics_listen {
|
||||
match listen.parse::<std::net::SocketAddr>() {
|
||||
Ok(addr) => Some((addr.port(), Some(listen.clone()))),
|
||||
Err(e) => {
|
||||
startup_tracker
|
||||
.skip_component(
|
||||
COMPONENT_METRICS_START,
|
||||
Some(format!("invalid metrics_listen \"{}\": {}", listen, e)),
|
||||
)
|
||||
.await;
|
||||
None
|
||||
}
|
||||
}
|
||||
} else {
|
||||
config.server.metrics_port.map(|p| (p, None))
|
||||
};
|
||||
|
||||
if let Some((port, listen)) = metrics_target {
|
||||
let fallback_label = format!("port {}", port);
|
||||
let label = listen.as_deref().unwrap_or(&fallback_label);
|
||||
startup_tracker
|
||||
.start_component(
|
||||
COMPONENT_METRICS_START,
|
||||
Some(format!("spawn metrics endpoint on {}", port)),
|
||||
Some(format!("spawn metrics endpoint on {}", label)),
|
||||
)
|
||||
.await;
|
||||
let stats = stats.clone();
|
||||
@@ -282,6 +326,7 @@ pub(crate) async fn spawn_metrics_if_configured(
|
||||
tokio::spawn(async move {
|
||||
metrics::serve(
|
||||
port,
|
||||
listen,
|
||||
stats,
|
||||
beobachten,
|
||||
ip_tracker_metrics,
|
||||
@@ -296,7 +341,7 @@ pub(crate) async fn spawn_metrics_if_configured(
|
||||
Some("metrics task spawned".to_string()),
|
||||
)
|
||||
.await;
|
||||
} else {
|
||||
} else if config.server.metrics_listen.is_none() {
|
||||
startup_tracker
|
||||
.skip_component(
|
||||
COMPONENT_METRICS_START,
|
||||
|
||||
@@ -16,8 +16,11 @@ pub(crate) async fn wait_for_shutdown(process_started_at: Instant, me_pool: Opti
|
||||
let uptime_secs = process_started_at.elapsed().as_secs();
|
||||
info!("Uptime: {}", format_uptime(uptime_secs));
|
||||
if let Some(pool) = &me_pool {
|
||||
match tokio::time::timeout(Duration::from_secs(2), pool.shutdown_send_close_conn_all())
|
||||
.await
|
||||
match tokio::time::timeout(
|
||||
Duration::from_secs(2),
|
||||
pool.shutdown_send_close_conn_all(),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(total) => {
|
||||
info!(
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use rand::Rng;
|
||||
use rand::RngExt;
|
||||
use tracing::warn;
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
@@ -38,12 +38,15 @@ pub(crate) async fn bootstrap_tls_front(
|
||||
.clone()
|
||||
.unwrap_or_else(|| config.censorship.tls_domain.clone());
|
||||
let mask_unix_sock = config.censorship.mask_unix_sock.clone();
|
||||
let tls_fetch_scope = (!config.censorship.tls_fetch_scope.is_empty())
|
||||
.then(|| config.censorship.tls_fetch_scope.clone());
|
||||
let fetch_timeout = Duration::from_secs(5);
|
||||
|
||||
let cache_initial = cache.clone();
|
||||
let domains_initial = tls_domains.to_vec();
|
||||
let host_initial = mask_host.clone();
|
||||
let unix_sock_initial = mask_unix_sock.clone();
|
||||
let scope_initial = tls_fetch_scope.clone();
|
||||
let upstream_initial = upstream_manager.clone();
|
||||
tokio::spawn(async move {
|
||||
let mut join = tokio::task::JoinSet::new();
|
||||
@@ -51,6 +54,7 @@ pub(crate) async fn bootstrap_tls_front(
|
||||
let cache_domain = cache_initial.clone();
|
||||
let host_domain = host_initial.clone();
|
||||
let unix_sock_domain = unix_sock_initial.clone();
|
||||
let scope_domain = scope_initial.clone();
|
||||
let upstream_domain = upstream_initial.clone();
|
||||
join.spawn(async move {
|
||||
match crate::tls_front::fetcher::fetch_real_tls(
|
||||
@@ -59,6 +63,7 @@ pub(crate) async fn bootstrap_tls_front(
|
||||
&domain,
|
||||
fetch_timeout,
|
||||
Some(upstream_domain),
|
||||
scope_domain.as_deref(),
|
||||
proxy_protocol,
|
||||
unix_sock_domain.as_deref(),
|
||||
)
|
||||
@@ -100,6 +105,7 @@ pub(crate) async fn bootstrap_tls_front(
|
||||
let domains_refresh = tls_domains.to_vec();
|
||||
let host_refresh = mask_host.clone();
|
||||
let unix_sock_refresh = mask_unix_sock.clone();
|
||||
let scope_refresh = tls_fetch_scope.clone();
|
||||
let upstream_refresh = upstream_manager.clone();
|
||||
tokio::spawn(async move {
|
||||
loop {
|
||||
@@ -112,6 +118,7 @@ pub(crate) async fn bootstrap_tls_front(
|
||||
let cache_domain = cache_refresh.clone();
|
||||
let host_domain = host_refresh.clone();
|
||||
let unix_sock_domain = unix_sock_refresh.clone();
|
||||
let scope_domain = scope_refresh.clone();
|
||||
let upstream_domain = upstream_refresh.clone();
|
||||
join.spawn(async move {
|
||||
match crate::tls_front::fetcher::fetch_real_tls(
|
||||
@@ -120,6 +127,7 @@ pub(crate) async fn bootstrap_tls_front(
|
||||
&domain,
|
||||
fetch_timeout,
|
||||
Some(upstream_domain),
|
||||
scope_domain.as_deref(),
|
||||
proxy_protocol,
|
||||
unix_sock_domain.as_deref(),
|
||||
)
|
||||
|
||||
@@ -6,6 +6,15 @@ mod config;
|
||||
mod crypto;
|
||||
mod error;
|
||||
mod ip_tracker;
|
||||
#[cfg(test)]
|
||||
#[path = "tests/ip_tracker_hotpath_adversarial_tests.rs"]
|
||||
mod ip_tracker_hotpath_adversarial_tests;
|
||||
#[cfg(test)]
|
||||
#[path = "tests/ip_tracker_encapsulation_adversarial_tests.rs"]
|
||||
mod ip_tracker_encapsulation_adversarial_tests;
|
||||
#[cfg(test)]
|
||||
#[path = "tests/ip_tracker_regression_tests.rs"]
|
||||
mod ip_tracker_regression_tests;
|
||||
mod maestro;
|
||||
mod metrics;
|
||||
mod network;
|
||||
|
||||
536
src/metrics.rs
536
src/metrics.rs
@@ -1,5 +1,5 @@
|
||||
use std::convert::Infallible;
|
||||
use std::collections::{BTreeSet, HashMap};
|
||||
use std::convert::Infallible;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
@@ -11,16 +11,17 @@ use hyper::service::service_fn;
|
||||
use hyper::{Request, Response, StatusCode};
|
||||
use ipnetwork::IpNetwork;
|
||||
use tokio::net::TcpListener;
|
||||
use tracing::{info, warn, debug};
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::ip_tracker::UserIpTracker;
|
||||
use crate::stats::beobachten::BeobachtenStore;
|
||||
use crate::stats::Stats;
|
||||
use crate::stats::beobachten::BeobachtenStore;
|
||||
use crate::transport::{ListenOptions, create_listener};
|
||||
|
||||
pub async fn serve(
|
||||
port: u16,
|
||||
listen: Option<String>,
|
||||
stats: Arc<Stats>,
|
||||
beobachten: Arc<BeobachtenStore>,
|
||||
ip_tracker: Arc<UserIpTracker>,
|
||||
@@ -28,13 +29,43 @@ pub async fn serve(
|
||||
whitelist: Vec<IpNetwork>,
|
||||
) {
|
||||
let whitelist = Arc::new(whitelist);
|
||||
|
||||
// If `metrics_listen` is set, bind on that single address only.
|
||||
if let Some(ref listen_addr) = listen {
|
||||
let addr: SocketAddr = match listen_addr.parse() {
|
||||
Ok(a) => a,
|
||||
Err(e) => {
|
||||
warn!(error = %e, "Invalid metrics_listen address: {}", listen_addr);
|
||||
return;
|
||||
}
|
||||
};
|
||||
let is_ipv6 = addr.is_ipv6();
|
||||
match bind_metrics_listener(addr, is_ipv6) {
|
||||
Ok(listener) => {
|
||||
info!("Metrics endpoint: http://{}/metrics and /beobachten", addr);
|
||||
serve_listener(
|
||||
listener, stats, beobachten, ip_tracker, config_rx, whitelist,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
Err(e) => {
|
||||
warn!(error = %e, "Failed to bind metrics on {}", addr);
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Fallback: bind on 0.0.0.0 and [::] using metrics_port.
|
||||
let mut listener_v4 = None;
|
||||
let mut listener_v6 = None;
|
||||
|
||||
let addr_v4 = SocketAddr::from(([0, 0, 0, 0], port));
|
||||
match bind_metrics_listener(addr_v4, false) {
|
||||
Ok(listener) => {
|
||||
info!("Metrics endpoint: http://{}/metrics and /beobachten", addr_v4);
|
||||
info!(
|
||||
"Metrics endpoint: http://{}/metrics and /beobachten",
|
||||
addr_v4
|
||||
);
|
||||
listener_v4 = Some(listener);
|
||||
}
|
||||
Err(e) => {
|
||||
@@ -45,7 +76,10 @@ pub async fn serve(
|
||||
let addr_v6 = SocketAddr::from(([0, 0, 0, 0, 0, 0, 0, 0], port));
|
||||
match bind_metrics_listener(addr_v6, true) {
|
||||
Ok(listener) => {
|
||||
info!("Metrics endpoint: http://[::]:{}/metrics and /beobachten", port);
|
||||
info!(
|
||||
"Metrics endpoint: http://[::]:{}/metrics and /beobachten",
|
||||
port
|
||||
);
|
||||
listener_v6 = Some(listener);
|
||||
}
|
||||
Err(e) => {
|
||||
@@ -81,12 +115,7 @@ pub async fn serve(
|
||||
.await;
|
||||
});
|
||||
serve_listener(
|
||||
listener4,
|
||||
stats,
|
||||
beobachten,
|
||||
ip_tracker,
|
||||
config_rx,
|
||||
whitelist,
|
||||
listener4, stats, beobachten, ip_tracker, config_rx, whitelist,
|
||||
)
|
||||
.await;
|
||||
}
|
||||
@@ -203,7 +232,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
let _ = writeln!(out, "# TYPE telemt_uptime_seconds gauge");
|
||||
let _ = writeln!(out, "telemt_uptime_seconds {:.1}", stats.uptime_secs());
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_telemetry_core_enabled Runtime core telemetry switch");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_telemetry_core_enabled Runtime core telemetry switch"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_telemetry_core_enabled gauge");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -211,7 +243,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
if core_enabled { 1 } else { 0 }
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_telemetry_user_enabled Runtime per-user telemetry switch");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_telemetry_user_enabled Runtime per-user telemetry switch"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_telemetry_user_enabled gauge");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -219,7 +254,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
if user_enabled { 1 } else { 0 }
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_telemetry_me_level Runtime ME telemetry level flag");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_telemetry_me_level Runtime ME telemetry level flag"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_telemetry_me_level gauge");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -249,23 +287,40 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_connections_total Total accepted connections");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_connections_total Total accepted connections"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_connections_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_connections_total {}",
|
||||
if core_enabled { stats.get_connects_all() } else { 0 }
|
||||
if core_enabled {
|
||||
stats.get_connects_all()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_connections_bad_total Bad/rejected connections");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_connections_bad_total Bad/rejected connections"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_connections_bad_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_connections_bad_total {}",
|
||||
if core_enabled { stats.get_connects_bad() } else { 0 }
|
||||
if core_enabled {
|
||||
stats.get_connects_bad()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_handshake_timeouts_total Handshake timeouts");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_handshake_timeouts_total Handshake timeouts"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_handshake_timeouts_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -344,7 +399,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
out,
|
||||
"# HELP telemt_upstream_connect_attempts_per_request Histogram-like buckets for attempts per upstream connect request cycle"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_upstream_connect_attempts_per_request counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# TYPE telemt_upstream_connect_attempts_per_request counter"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_upstream_connect_attempts_per_request{{bucket=\"1\"}} {}",
|
||||
@@ -386,7 +444,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
out,
|
||||
"# HELP telemt_upstream_connect_duration_success_total Histogram-like buckets of successful upstream connect cycle duration"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_upstream_connect_duration_success_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# TYPE telemt_upstream_connect_duration_success_total counter"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_upstream_connect_duration_success_total{{bucket=\"le_100ms\"}} {}",
|
||||
@@ -428,7 +489,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
out,
|
||||
"# HELP telemt_upstream_connect_duration_fail_total Histogram-like buckets of failed upstream connect cycle duration"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_upstream_connect_duration_fail_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# TYPE telemt_upstream_connect_duration_fail_total counter"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_upstream_connect_duration_fail_total{{bucket=\"le_100ms\"}} {}",
|
||||
@@ -466,7 +530,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_me_keepalive_sent_total ME keepalive frames sent");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_keepalive_sent_total ME keepalive frames sent"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_keepalive_sent_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -478,7 +545,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_me_keepalive_failed_total ME keepalive send failures");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_keepalive_failed_total ME keepalive send failures"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_keepalive_failed_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -490,7 +560,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_me_keepalive_pong_total ME keepalive pong replies");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_keepalive_pong_total ME keepalive pong replies"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_keepalive_pong_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -502,7 +575,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_me_keepalive_timeout_total ME keepalive ping timeouts");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_keepalive_timeout_total ME keepalive ping timeouts"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_keepalive_timeout_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -518,7 +594,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
out,
|
||||
"# HELP telemt_me_rpc_proxy_req_signal_sent_total Service RPC_PROXY_REQ activity signals sent"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_rpc_proxy_req_signal_sent_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# TYPE telemt_me_rpc_proxy_req_signal_sent_total counter"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_rpc_proxy_req_signal_sent_total {}",
|
||||
@@ -601,7 +680,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_me_reconnect_attempts_total ME reconnect attempts");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_reconnect_attempts_total ME reconnect attempts"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_reconnect_attempts_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -613,7 +695,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_me_reconnect_success_total ME reconnect successes");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_reconnect_success_total ME reconnect successes"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_reconnect_success_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -625,7 +710,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_me_handshake_reject_total ME handshake rejects from upstream");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_handshake_reject_total ME handshake rejects from upstream"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_handshake_reject_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -637,20 +725,25 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_me_handshake_error_code_total ME handshake reject errors by code");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_handshake_error_code_total ME handshake reject errors by code"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_handshake_error_code_total counter");
|
||||
if me_allows_normal {
|
||||
for (error_code, count) in stats.get_me_handshake_error_code_counts() {
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_handshake_error_code_total{{error_code=\"{}\"}} {}",
|
||||
error_code,
|
||||
count
|
||||
error_code, count
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_me_reader_eof_total ME reader EOF terminations");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_reader_eof_total ME reader EOF terminations"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_reader_eof_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -677,6 +770,69 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_relay_idle_soft_mark_total Middle-relay sessions marked as soft-idle candidates"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_relay_idle_soft_mark_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_relay_idle_soft_mark_total {}",
|
||||
if me_allows_normal {
|
||||
stats.get_relay_idle_soft_mark_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_relay_idle_hard_close_total Middle-relay sessions closed by hard-idle policy"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_relay_idle_hard_close_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_relay_idle_hard_close_total {}",
|
||||
if me_allows_normal {
|
||||
stats.get_relay_idle_hard_close_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_relay_pressure_evict_total Middle-relay sessions evicted under resource pressure"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_relay_pressure_evict_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_relay_pressure_evict_total {}",
|
||||
if me_allows_normal {
|
||||
stats.get_relay_pressure_evict_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_relay_protocol_desync_close_total Middle-relay sessions closed due to protocol desync"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# TYPE telemt_relay_protocol_desync_close_total counter"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_relay_protocol_desync_close_total {}",
|
||||
if me_allows_normal {
|
||||
stats.get_relay_protocol_desync_close_total()
|
||||
} else {
|
||||
0
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_me_crc_mismatch_total ME CRC mismatches");
|
||||
let _ = writeln!(out, "# TYPE telemt_me_crc_mismatch_total counter");
|
||||
let _ = writeln!(
|
||||
@@ -689,7 +845,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_me_seq_mismatch_total ME sequence mismatches");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_seq_mismatch_total ME sequence mismatches"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_seq_mismatch_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -701,7 +860,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_me_route_drop_no_conn_total ME route drops: no conn");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_route_drop_no_conn_total ME route drops: no conn"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_route_drop_no_conn_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -713,8 +875,14 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_me_route_drop_channel_closed_total ME route drops: channel closed");
|
||||
let _ = writeln!(out, "# TYPE telemt_me_route_drop_channel_closed_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_route_drop_channel_closed_total ME route drops: channel closed"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# TYPE telemt_me_route_drop_channel_closed_total counter"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_route_drop_channel_closed_total {}",
|
||||
@@ -725,7 +893,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_me_route_drop_queue_full_total ME route drops: queue full");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_route_drop_queue_full_total ME route drops: queue full"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_route_drop_queue_full_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -882,7 +1053,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
out,
|
||||
"# HELP telemt_me_writer_pick_mode_switch_total Writer-pick mode switches via runtime updates"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_writer_pick_mode_switch_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# TYPE telemt_me_writer_pick_mode_switch_total counter"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_writer_pick_mode_switch_total {}",
|
||||
@@ -932,7 +1106,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_me_kdf_drift_total ME KDF input drift detections");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_kdf_drift_total ME KDF input drift detections"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_kdf_drift_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -978,7 +1155,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
out,
|
||||
"# HELP telemt_me_hardswap_pending_ttl_expired_total Pending hardswap generations reset by TTL expiration"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_hardswap_pending_ttl_expired_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# TYPE telemt_me_hardswap_pending_ttl_expired_total counter"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_hardswap_pending_ttl_expired_total {}",
|
||||
@@ -1210,10 +1390,7 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
out,
|
||||
"# HELP telemt_me_adaptive_floor_global_cap_raw Runtime raw global adaptive floor cap"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# TYPE telemt_me_adaptive_floor_global_cap_raw gauge"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_adaptive_floor_global_cap_raw gauge");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_adaptive_floor_global_cap_raw {}",
|
||||
@@ -1396,7 +1573,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_secure_padding_invalid_total Invalid secure frame lengths");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_secure_padding_invalid_total Invalid secure frame lengths"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_secure_padding_invalid_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -1408,7 +1588,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_desync_total Total crypto-desync detections");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_desync_total Total crypto-desync detections"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_desync_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -1420,7 +1603,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_desync_full_logged_total Full forensic desync logs emitted");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_desync_full_logged_total Full forensic desync logs emitted"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_desync_full_logged_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -1432,7 +1618,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_desync_suppressed_total Suppressed desync forensic events");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_desync_suppressed_total Suppressed desync forensic events"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_desync_suppressed_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -1444,7 +1633,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_desync_frames_bucket_total Desync count by frames_ok bucket");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_desync_frames_bucket_total Desync count by frames_ok bucket"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_desync_frames_bucket_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -1483,7 +1675,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_pool_swap_total Successful ME pool swaps");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_pool_swap_total Successful ME pool swaps"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_pool_swap_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -1495,7 +1690,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_pool_drain_active Active draining ME writers");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_pool_drain_active Active draining ME writers"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_pool_drain_active gauge");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -1507,7 +1705,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_pool_force_close_total Forced close events for draining writers");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_pool_force_close_total Forced close events for draining writers"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_pool_force_close_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -1519,7 +1720,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_pool_stale_pick_total Stale writer fallback picks for new binds");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_pool_stale_pick_total Stale writer fallback picks for new binds"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_pool_stale_pick_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -1531,7 +1735,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_me_writer_removed_total Total ME writer removals");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_writer_removed_total Total ME writer removals"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_writer_removed_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -1547,7 +1754,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
out,
|
||||
"# HELP telemt_me_writer_removed_unexpected_total Unexpected ME writer removals that triggered refill"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_writer_removed_unexpected_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# TYPE telemt_me_writer_removed_unexpected_total counter"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_writer_removed_unexpected_total {}",
|
||||
@@ -1558,7 +1768,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_me_refill_triggered_total Immediate ME refill runs started");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_refill_triggered_total Immediate ME refill runs started"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_refill_triggered_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -1574,7 +1787,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
out,
|
||||
"# HELP telemt_me_refill_skipped_inflight_total Immediate ME refill skips due to inflight dedup"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_refill_skipped_inflight_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# TYPE telemt_me_refill_skipped_inflight_total counter"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_refill_skipped_inflight_total {}",
|
||||
@@ -1585,7 +1801,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
}
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_me_refill_failed_total Immediate ME refill failures");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_me_refill_failed_total Immediate ME refill failures"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_refill_failed_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -1601,7 +1820,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
out,
|
||||
"# HELP telemt_me_writer_restored_same_endpoint_total Refilled ME writer restored on the same endpoint"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_writer_restored_same_endpoint_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# TYPE telemt_me_writer_restored_same_endpoint_total counter"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_writer_restored_same_endpoint_total {}",
|
||||
@@ -1616,7 +1838,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
out,
|
||||
"# HELP telemt_me_writer_restored_fallback_total Refilled ME writer restored via fallback endpoint"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_me_writer_restored_fallback_total counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# TYPE telemt_me_writer_restored_fallback_total counter"
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_me_writer_restored_fallback_total {}",
|
||||
@@ -1694,17 +1919,35 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
unresolved_writer_losses
|
||||
);
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_user_connections_total Per-user total connections");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_user_connections_total Per-user total connections"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_user_connections_total counter");
|
||||
let _ = writeln!(out, "# HELP telemt_user_connections_current Per-user active connections");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_user_connections_current Per-user active connections"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_user_connections_current gauge");
|
||||
let _ = writeln!(out, "# HELP telemt_user_octets_from_client Per-user bytes received");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_user_octets_from_client Per-user bytes received"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_user_octets_from_client counter");
|
||||
let _ = writeln!(out, "# HELP telemt_user_octets_to_client Per-user bytes sent");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_user_octets_to_client Per-user bytes sent"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_user_octets_to_client counter");
|
||||
let _ = writeln!(out, "# HELP telemt_user_msgs_from_client Per-user messages received");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_user_msgs_from_client Per-user messages received"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_user_msgs_from_client counter");
|
||||
let _ = writeln!(out, "# HELP telemt_user_msgs_to_client Per-user messages sent");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_user_msgs_to_client Per-user messages sent"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_user_msgs_to_client counter");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
@@ -1744,12 +1987,45 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
for entry in stats.iter_user_stats() {
|
||||
let user = entry.key();
|
||||
let s = entry.value();
|
||||
let _ = writeln!(out, "telemt_user_connections_total{{user=\"{}\"}} {}", user, s.connects.load(std::sync::atomic::Ordering::Relaxed));
|
||||
let _ = writeln!(out, "telemt_user_connections_current{{user=\"{}\"}} {}", user, s.curr_connects.load(std::sync::atomic::Ordering::Relaxed));
|
||||
let _ = writeln!(out, "telemt_user_octets_from_client{{user=\"{}\"}} {}", user, s.octets_from_client.load(std::sync::atomic::Ordering::Relaxed));
|
||||
let _ = writeln!(out, "telemt_user_octets_to_client{{user=\"{}\"}} {}", user, s.octets_to_client.load(std::sync::atomic::Ordering::Relaxed));
|
||||
let _ = writeln!(out, "telemt_user_msgs_from_client{{user=\"{}\"}} {}", user, s.msgs_from_client.load(std::sync::atomic::Ordering::Relaxed));
|
||||
let _ = writeln!(out, "telemt_user_msgs_to_client{{user=\"{}\"}} {}", user, s.msgs_to_client.load(std::sync::atomic::Ordering::Relaxed));
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_user_connections_total{{user=\"{}\"}} {}",
|
||||
user,
|
||||
s.connects.load(std::sync::atomic::Ordering::Relaxed)
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_user_connections_current{{user=\"{}\"}} {}",
|
||||
user,
|
||||
s.curr_connects.load(std::sync::atomic::Ordering::Relaxed)
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_user_octets_from_client{{user=\"{}\"}} {}",
|
||||
user,
|
||||
s.octets_from_client
|
||||
.load(std::sync::atomic::Ordering::Relaxed)
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_user_octets_to_client{{user=\"{}\"}} {}",
|
||||
user,
|
||||
s.octets_to_client
|
||||
.load(std::sync::atomic::Ordering::Relaxed)
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_user_msgs_from_client{{user=\"{}\"}} {}",
|
||||
user,
|
||||
s.msgs_from_client
|
||||
.load(std::sync::atomic::Ordering::Relaxed)
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_user_msgs_to_client{{user=\"{}\"}} {}",
|
||||
user,
|
||||
s.msgs_to_client.load(std::sync::atomic::Ordering::Relaxed)
|
||||
);
|
||||
}
|
||||
|
||||
let ip_stats = ip_tracker.get_stats().await;
|
||||
@@ -1767,39 +2043,63 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
.get_recent_counts_for_users(&unique_users_vec)
|
||||
.await;
|
||||
|
||||
let _ = writeln!(out, "# HELP telemt_user_unique_ips_current Per-user current number of unique active IPs");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_user_unique_ips_current Per-user current number of unique active IPs"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_user_unique_ips_current gauge");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_user_unique_ips_recent_window Per-user unique IPs seen in configured observation window"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_user_unique_ips_recent_window gauge");
|
||||
let _ = writeln!(out, "# HELP telemt_user_unique_ips_limit Per-user configured unique IP limit (0 means unlimited)");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_user_unique_ips_limit Effective per-user unique IP limit (0 means unlimited)"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_user_unique_ips_limit gauge");
|
||||
let _ = writeln!(out, "# HELP telemt_user_unique_ips_utilization Per-user unique IP usage ratio (0 for unlimited)");
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"# HELP telemt_user_unique_ips_utilization Per-user unique IP usage ratio (0 for unlimited)"
|
||||
);
|
||||
let _ = writeln!(out, "# TYPE telemt_user_unique_ips_utilization gauge");
|
||||
|
||||
for user in unique_users {
|
||||
let current = ip_counts.get(&user).copied().unwrap_or(0);
|
||||
let limit = config.access.user_max_unique_ips.get(&user).copied().unwrap_or(0);
|
||||
let limit = config
|
||||
.access
|
||||
.user_max_unique_ips
|
||||
.get(&user)
|
||||
.copied()
|
||||
.filter(|limit| *limit > 0)
|
||||
.or((config.access.user_max_unique_ips_global_each > 0)
|
||||
.then_some(config.access.user_max_unique_ips_global_each))
|
||||
.unwrap_or(0);
|
||||
let utilization = if limit > 0 {
|
||||
current as f64 / limit as f64
|
||||
} else {
|
||||
0.0
|
||||
};
|
||||
let _ = writeln!(out, "telemt_user_unique_ips_current{{user=\"{}\"}} {}", user, current);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_user_unique_ips_current{{user=\"{}\"}} {}",
|
||||
user, current
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_user_unique_ips_recent_window{{user=\"{}\"}} {}",
|
||||
user,
|
||||
recent_counts.get(&user).copied().unwrap_or(0)
|
||||
);
|
||||
let _ = writeln!(out, "telemt_user_unique_ips_limit{{user=\"{}\"}} {}", user, limit);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_user_unique_ips_limit{{user=\"{}\"}} {}",
|
||||
user, limit
|
||||
);
|
||||
let _ = writeln!(
|
||||
out,
|
||||
"telemt_user_unique_ips_utilization{{user=\"{}\"}} {:.6}",
|
||||
user,
|
||||
utilization
|
||||
user, utilization
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1810,8 +2110,8 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::net::IpAddr;
|
||||
use http_body_util::BodyExt;
|
||||
use std::net::IpAddr;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_render_metrics_format() {
|
||||
@@ -1841,6 +2141,10 @@ mod tests {
|
||||
stats.increment_me_rpc_proxy_req_signal_response_total();
|
||||
stats.increment_me_rpc_proxy_req_signal_close_sent_total();
|
||||
stats.increment_me_idle_close_by_peer_total();
|
||||
stats.increment_relay_idle_soft_mark_total();
|
||||
stats.increment_relay_idle_hard_close_total();
|
||||
stats.increment_relay_pressure_evict_total();
|
||||
stats.increment_relay_protocol_desync_close_total();
|
||||
stats.increment_user_connects("alice");
|
||||
stats.increment_user_curr_connects("alice");
|
||||
stats.add_user_octets_from("alice", 1024);
|
||||
@@ -1862,13 +2166,10 @@ mod tests {
|
||||
assert!(output.contains("telemt_upstream_connect_success_total 1"));
|
||||
assert!(output.contains("telemt_upstream_connect_fail_total 1"));
|
||||
assert!(output.contains("telemt_upstream_connect_failfast_hard_error_total 1"));
|
||||
assert!(output.contains("telemt_upstream_connect_attempts_per_request{bucket=\"2\"} 1"));
|
||||
assert!(
|
||||
output.contains("telemt_upstream_connect_attempts_per_request{bucket=\"2\"} 1")
|
||||
);
|
||||
assert!(
|
||||
output.contains(
|
||||
"telemt_upstream_connect_duration_success_total{bucket=\"101_500ms\"} 1"
|
||||
)
|
||||
output
|
||||
.contains("telemt_upstream_connect_duration_success_total{bucket=\"101_500ms\"} 1")
|
||||
);
|
||||
assert!(
|
||||
output.contains("telemt_upstream_connect_duration_fail_total{bucket=\"gt_1000ms\"} 1")
|
||||
@@ -1879,6 +2180,10 @@ mod tests {
|
||||
assert!(output.contains("telemt_me_rpc_proxy_req_signal_response_total 1"));
|
||||
assert!(output.contains("telemt_me_rpc_proxy_req_signal_close_sent_total 1"));
|
||||
assert!(output.contains("telemt_me_idle_close_by_peer_total 1"));
|
||||
assert!(output.contains("telemt_relay_idle_soft_mark_total 1"));
|
||||
assert!(output.contains("telemt_relay_idle_hard_close_total 1"));
|
||||
assert!(output.contains("telemt_relay_pressure_evict_total 1"));
|
||||
assert!(output.contains("telemt_relay_protocol_desync_close_total 1"));
|
||||
assert!(output.contains("telemt_user_connections_total{user=\"alice\"} 1"));
|
||||
assert!(output.contains("telemt_user_connections_current{user=\"alice\"} 1"));
|
||||
assert!(output.contains("telemt_user_octets_from_client{user=\"alice\"} 1024"));
|
||||
@@ -1904,6 +2209,25 @@ mod tests {
|
||||
assert!(output.contains("telemt_user_unique_ips_recent_window{user="));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_render_uses_global_each_unique_ip_limit() {
|
||||
let stats = Stats::new();
|
||||
stats.increment_user_connects("alice");
|
||||
stats.increment_user_curr_connects("alice");
|
||||
let tracker = UserIpTracker::new();
|
||||
tracker
|
||||
.check_and_add("alice", "203.0.113.10".parse().unwrap())
|
||||
.await
|
||||
.unwrap();
|
||||
let mut config = ProxyConfig::default();
|
||||
config.access.user_max_unique_ips_global_each = 2;
|
||||
|
||||
let output = render_metrics(&stats, &config, &tracker).await;
|
||||
|
||||
assert!(output.contains("telemt_user_unique_ips_limit{user=\"alice\"} 2"));
|
||||
assert!(output.contains("telemt_user_unique_ips_utilization{user=\"alice\"} 0.500000"));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_render_has_type_annotations() {
|
||||
let stats = Stats::new();
|
||||
@@ -1917,10 +2241,15 @@ mod tests {
|
||||
assert!(output.contains("# TYPE telemt_upstream_connect_attempt_total counter"));
|
||||
assert!(output.contains("# TYPE telemt_me_rpc_proxy_req_signal_sent_total counter"));
|
||||
assert!(output.contains("# TYPE telemt_me_idle_close_by_peer_total counter"));
|
||||
assert!(output.contains("# TYPE telemt_relay_idle_soft_mark_total counter"));
|
||||
assert!(output.contains("# TYPE telemt_relay_idle_hard_close_total counter"));
|
||||
assert!(output.contains("# TYPE telemt_relay_pressure_evict_total counter"));
|
||||
assert!(output.contains("# TYPE telemt_relay_protocol_desync_close_total counter"));
|
||||
assert!(output.contains("# TYPE telemt_me_writer_removed_total counter"));
|
||||
assert!(output.contains(
|
||||
"# TYPE telemt_me_writer_removed_unexpected_minus_restored_total gauge"
|
||||
));
|
||||
assert!(
|
||||
output
|
||||
.contains("# TYPE telemt_me_writer_removed_unexpected_minus_restored_total gauge")
|
||||
);
|
||||
assert!(output.contains("# TYPE telemt_user_unique_ips_current gauge"));
|
||||
assert!(output.contains("# TYPE telemt_user_unique_ips_recent_window gauge"));
|
||||
assert!(output.contains("# TYPE telemt_user_unique_ips_limit gauge"));
|
||||
@@ -1937,14 +2266,17 @@ mod tests {
|
||||
stats.increment_connects_all();
|
||||
stats.increment_connects_all();
|
||||
|
||||
let req = Request::builder()
|
||||
.uri("/metrics")
|
||||
.body(())
|
||||
let req = Request::builder().uri("/metrics").body(()).unwrap();
|
||||
let resp = handle(req, &stats, &beobachten, &tracker, &config)
|
||||
.await
|
||||
.unwrap();
|
||||
let resp = handle(req, &stats, &beobachten, &tracker, &config).await.unwrap();
|
||||
assert_eq!(resp.status(), StatusCode::OK);
|
||||
let body = resp.into_body().collect().await.unwrap().to_bytes();
|
||||
assert!(std::str::from_utf8(body.as_ref()).unwrap().contains("telemt_connections_total 3"));
|
||||
assert!(
|
||||
std::str::from_utf8(body.as_ref())
|
||||
.unwrap()
|
||||
.contains("telemt_connections_total 3")
|
||||
);
|
||||
|
||||
config.general.beobachten = true;
|
||||
config.general.beobachten_minutes = 10;
|
||||
@@ -1953,10 +2285,7 @@ mod tests {
|
||||
"203.0.113.10".parse::<IpAddr>().unwrap(),
|
||||
Duration::from_secs(600),
|
||||
);
|
||||
let req_beob = Request::builder()
|
||||
.uri("/beobachten")
|
||||
.body(())
|
||||
.unwrap();
|
||||
let req_beob = Request::builder().uri("/beobachten").body(()).unwrap();
|
||||
let resp_beob = handle(req_beob, &stats, &beobachten, &tracker, &config)
|
||||
.await
|
||||
.unwrap();
|
||||
@@ -1966,10 +2295,7 @@ mod tests {
|
||||
assert!(beob_text.contains("[TLS-scanner]"));
|
||||
assert!(beob_text.contains("203.0.113.10-1"));
|
||||
|
||||
let req404 = Request::builder()
|
||||
.uri("/other")
|
||||
.body(())
|
||||
.unwrap();
|
||||
let req404 = Request::builder().uri("/other").body(()).unwrap();
|
||||
let resp404 = handle(req404, &stats, &beobachten, &tracker, &config)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
@@ -26,9 +26,7 @@ fn parse_ip_spec(ip_spec: &str) -> Result<IpAddr> {
|
||||
}
|
||||
|
||||
let ip = ip_spec.parse::<IpAddr>().map_err(|_| {
|
||||
ProxyError::Config(format!(
|
||||
"network.dns_overrides IP is invalid: '{ip_spec}'"
|
||||
))
|
||||
ProxyError::Config(format!("network.dns_overrides IP is invalid: '{ip_spec}'"))
|
||||
})?;
|
||||
if matches!(ip, IpAddr::V6(_)) {
|
||||
return Err(ProxyError::Config(format!(
|
||||
@@ -103,9 +101,9 @@ pub fn validate_entries(entries: &[String]) -> Result<()> {
|
||||
/// Replace runtime DNS overrides with a new validated snapshot.
|
||||
pub fn install_entries(entries: &[String]) -> Result<()> {
|
||||
let parsed = parse_entries(entries)?;
|
||||
let mut guard = overrides_store()
|
||||
.write()
|
||||
.map_err(|_| ProxyError::Config("network.dns_overrides runtime lock is poisoned".to_string()))?;
|
||||
let mut guard = overrides_store().write().map_err(|_| {
|
||||
ProxyError::Config("network.dns_overrides runtime lock is poisoned".to_string())
|
||||
})?;
|
||||
*guard = parsed;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
#![allow(dead_code)]
|
||||
#![allow(clippy::items_after_test_module)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
|
||||
@@ -10,7 +11,9 @@ use tracing::{debug, info, warn};
|
||||
|
||||
use crate::config::{NetworkConfig, UpstreamConfig, UpstreamType};
|
||||
use crate::error::Result;
|
||||
use crate::network::stun::{stun_probe_family_with_bind, DualStunResult, IpFamily, StunProbeResult};
|
||||
use crate::network::stun::{
|
||||
DualStunResult, IpFamily, StunProbeResult, stun_probe_family_with_bind,
|
||||
};
|
||||
use crate::transport::UpstreamManager;
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
@@ -78,13 +81,8 @@ pub async fn run_probe(
|
||||
warn!("STUN probe is enabled but network.stun_servers is empty");
|
||||
DualStunResult::default()
|
||||
} else {
|
||||
probe_stun_servers_parallel(
|
||||
&servers,
|
||||
stun_nat_probe_concurrency.max(1),
|
||||
None,
|
||||
None,
|
||||
)
|
||||
.await
|
||||
probe_stun_servers_parallel(&servers, stun_nat_probe_concurrency.max(1), None, None)
|
||||
.await
|
||||
}
|
||||
} else if nat_probe {
|
||||
info!("STUN probe is disabled by network.stun_use=false");
|
||||
@@ -99,7 +97,8 @@ pub async fn run_probe(
|
||||
let UpstreamType::Direct {
|
||||
interface,
|
||||
bind_addresses,
|
||||
} = &upstream.upstream_type else {
|
||||
} = &upstream.upstream_type
|
||||
else {
|
||||
continue;
|
||||
};
|
||||
if let Some(addrs) = bind_addresses.as_ref().filter(|v| !v.is_empty()) {
|
||||
@@ -199,11 +198,10 @@ pub async fn run_probe(
|
||||
if nat_probe
|
||||
&& probe.reflected_ipv4.is_none()
|
||||
&& probe.detected_ipv4.map(is_bogon_v4).unwrap_or(false)
|
||||
&& let Some(public_ip) = detect_public_ipv4_http(&config.http_ip_detect_urls).await
|
||||
{
|
||||
if let Some(public_ip) = detect_public_ipv4_http(&config.http_ip_detect_urls).await {
|
||||
probe.reflected_ipv4 = Some(SocketAddr::new(IpAddr::V4(public_ip), 0));
|
||||
info!(public_ip = %public_ip, "STUN unavailable, using HTTP public IPv4 fallback");
|
||||
}
|
||||
probe.reflected_ipv4 = Some(SocketAddr::new(IpAddr::V4(public_ip), 0));
|
||||
info!(public_ip = %public_ip, "STUN unavailable, using HTTP public IPv4 fallback");
|
||||
}
|
||||
|
||||
probe.ipv4_nat_detected = match (probe.detected_ipv4, probe.reflected_ipv4) {
|
||||
@@ -217,12 +215,20 @@ pub async fn run_probe(
|
||||
|
||||
probe.ipv4_usable = config.ipv4
|
||||
&& probe.detected_ipv4.is_some()
|
||||
&& (!probe.ipv4_is_bogon || probe.reflected_ipv4.map(|r| !is_bogon(r.ip())).unwrap_or(false));
|
||||
&& (!probe.ipv4_is_bogon
|
||||
|| probe
|
||||
.reflected_ipv4
|
||||
.map(|r| !is_bogon(r.ip()))
|
||||
.unwrap_or(false));
|
||||
|
||||
let ipv6_enabled = config.ipv6.unwrap_or(probe.detected_ipv6.is_some());
|
||||
probe.ipv6_usable = ipv6_enabled
|
||||
&& probe.detected_ipv6.is_some()
|
||||
&& (!probe.ipv6_is_bogon || probe.reflected_ipv6.map(|r| !is_bogon(r.ip())).unwrap_or(false));
|
||||
&& (!probe.ipv6_is_bogon
|
||||
|| probe
|
||||
.reflected_ipv6
|
||||
.map(|r| !is_bogon(r.ip()))
|
||||
.unwrap_or(false));
|
||||
|
||||
Ok(probe)
|
||||
}
|
||||
@@ -280,8 +286,6 @@ async fn probe_stun_servers_parallel(
|
||||
while next_idx < servers.len() && join_set.len() < concurrency {
|
||||
let stun_addr = servers[next_idx].clone();
|
||||
next_idx += 1;
|
||||
let bind_v4 = bind_v4;
|
||||
let bind_v6 = bind_v6;
|
||||
join_set.spawn(async move {
|
||||
let res = timeout(STUN_BATCH_TIMEOUT, async {
|
||||
let v4 = stun_probe_family_with_bind(&stun_addr, IpFamily::V4, bind_v4).await?;
|
||||
@@ -300,11 +304,15 @@ async fn probe_stun_servers_parallel(
|
||||
match task {
|
||||
Ok((stun_addr, Ok(Ok(result)))) => {
|
||||
if let Some(v4) = result.v4 {
|
||||
let entry = best_v4_by_ip.entry(v4.reflected_addr.ip()).or_insert((0, v4));
|
||||
let entry = best_v4_by_ip
|
||||
.entry(v4.reflected_addr.ip())
|
||||
.or_insert((0, v4));
|
||||
entry.0 += 1;
|
||||
}
|
||||
if let Some(v6) = result.v6 {
|
||||
let entry = best_v6_by_ip.entry(v6.reflected_addr.ip()).or_insert((0, v6));
|
||||
let entry = best_v6_by_ip
|
||||
.entry(v6.reflected_addr.ip())
|
||||
.or_insert((0, v6));
|
||||
entry.0 += 1;
|
||||
}
|
||||
if result.v4.is_some() || result.v6.is_some() {
|
||||
@@ -324,17 +332,11 @@ async fn probe_stun_servers_parallel(
|
||||
}
|
||||
|
||||
let mut out = DualStunResult::default();
|
||||
if let Some((_, best)) = best_v4_by_ip
|
||||
.into_values()
|
||||
.max_by_key(|(count, _)| *count)
|
||||
{
|
||||
if let Some((_, best)) = best_v4_by_ip.into_values().max_by_key(|(count, _)| *count) {
|
||||
info!("STUN-Quorum reached, IP: {}", best.reflected_addr.ip());
|
||||
out.v4 = Some(best);
|
||||
}
|
||||
if let Some((_, best)) = best_v6_by_ip
|
||||
.into_values()
|
||||
.max_by_key(|(count, _)| *count)
|
||||
{
|
||||
if let Some((_, best)) = best_v6_by_ip.into_values().max_by_key(|(count, _)| *count) {
|
||||
info!("STUN-Quorum reached, IP: {}", best.reflected_addr.ip());
|
||||
out.v6 = Some(best);
|
||||
}
|
||||
@@ -347,7 +349,8 @@ pub fn decide_network_capabilities(
|
||||
middle_proxy_nat_ip: Option<IpAddr>,
|
||||
) -> NetworkDecision {
|
||||
let ipv4_dc = config.ipv4 && probe.detected_ipv4.is_some();
|
||||
let ipv6_dc = config.ipv6.unwrap_or(probe.detected_ipv6.is_some()) && probe.detected_ipv6.is_some();
|
||||
let ipv6_dc =
|
||||
config.ipv6.unwrap_or(probe.detected_ipv6.is_some()) && probe.detected_ipv6.is_some();
|
||||
let nat_ip_v4 = matches!(middle_proxy_nat_ip, Some(IpAddr::V4(_)));
|
||||
let nat_ip_v6 = matches!(middle_proxy_nat_ip, Some(IpAddr::V6(_)));
|
||||
|
||||
@@ -534,10 +537,26 @@ pub fn is_bogon_v6(ip: Ipv6Addr) -> bool {
|
||||
|
||||
pub fn log_probe_result(probe: &NetworkProbe, decision: &NetworkDecision) {
|
||||
info!(
|
||||
ipv4 = probe.detected_ipv4.as_ref().map(|v| v.to_string()).unwrap_or_else(|| "-".into()),
|
||||
ipv6 = probe.detected_ipv6.as_ref().map(|v| v.to_string()).unwrap_or_else(|| "-".into()),
|
||||
reflected_v4 = probe.reflected_ipv4.as_ref().map(|v| v.ip().to_string()).unwrap_or_else(|| "-".into()),
|
||||
reflected_v6 = probe.reflected_ipv6.as_ref().map(|v| v.ip().to_string()).unwrap_or_else(|| "-".into()),
|
||||
ipv4 = probe
|
||||
.detected_ipv4
|
||||
.as_ref()
|
||||
.map(|v| v.to_string())
|
||||
.unwrap_or_else(|| "-".into()),
|
||||
ipv6 = probe
|
||||
.detected_ipv6
|
||||
.as_ref()
|
||||
.map(|v| v.to_string())
|
||||
.unwrap_or_else(|| "-".into()),
|
||||
reflected_v4 = probe
|
||||
.reflected_ipv4
|
||||
.as_ref()
|
||||
.map(|v| v.ip().to_string())
|
||||
.unwrap_or_else(|| "-".into()),
|
||||
reflected_v6 = probe
|
||||
.reflected_ipv6
|
||||
.as_ref()
|
||||
.map(|v| v.ip().to_string())
|
||||
.unwrap_or_else(|| "-".into()),
|
||||
ipv4_bogon = probe.ipv4_is_bogon,
|
||||
ipv6_bogon = probe.ipv6_is_bogon,
|
||||
ipv4_me = decision.ipv4_me,
|
||||
|
||||
@@ -2,13 +2,20 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
|
||||
use std::sync::OnceLock;
|
||||
|
||||
use tokio::net::{lookup_host, UdpSocket};
|
||||
use tokio::time::{timeout, Duration, sleep};
|
||||
use tokio::net::{UdpSocket, lookup_host};
|
||||
use tokio::time::{Duration, sleep, timeout};
|
||||
|
||||
use crate::crypto::SecureRandom;
|
||||
use crate::error::{ProxyError, Result};
|
||||
use crate::network::dns_overrides::{resolve, split_host_port};
|
||||
|
||||
fn stun_rng() -> &'static SecureRandom {
|
||||
static STUN_RNG: OnceLock<SecureRandom> = OnceLock::new();
|
||||
STUN_RNG.get_or_init(SecureRandom::new)
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
|
||||
pub enum IpFamily {
|
||||
V4,
|
||||
@@ -34,13 +41,13 @@ pub async fn stun_probe_dual(stun_addr: &str) -> Result<DualStunResult> {
|
||||
stun_probe_family(stun_addr, IpFamily::V6),
|
||||
);
|
||||
|
||||
Ok(DualStunResult {
|
||||
v4: v4?,
|
||||
v6: v6?,
|
||||
})
|
||||
Ok(DualStunResult { v4: v4?, v6: v6? })
|
||||
}
|
||||
|
||||
pub async fn stun_probe_family(stun_addr: &str, family: IpFamily) -> Result<Option<StunProbeResult>> {
|
||||
pub async fn stun_probe_family(
|
||||
stun_addr: &str,
|
||||
family: IpFamily,
|
||||
) -> Result<Option<StunProbeResult>> {
|
||||
stun_probe_family_with_bind(stun_addr, family, None).await
|
||||
}
|
||||
|
||||
@@ -49,8 +56,6 @@ pub async fn stun_probe_family_with_bind(
|
||||
family: IpFamily,
|
||||
bind_ip: Option<IpAddr>,
|
||||
) -> Result<Option<StunProbeResult>> {
|
||||
use rand::RngCore;
|
||||
|
||||
let bind_addr = match (family, bind_ip) {
|
||||
(IpFamily::V4, Some(IpAddr::V4(ip))) => SocketAddr::new(IpAddr::V4(ip), 0),
|
||||
(IpFamily::V6, Some(IpAddr::V6(ip))) => SocketAddr::new(IpAddr::V6(ip), 0),
|
||||
@@ -71,13 +76,18 @@ pub async fn stun_probe_family_with_bind(
|
||||
if let Some(addr) = target_addr {
|
||||
match socket.connect(addr).await {
|
||||
Ok(()) => {}
|
||||
Err(e) if family == IpFamily::V6 && matches!(
|
||||
e.kind(),
|
||||
std::io::ErrorKind::NetworkUnreachable
|
||||
| std::io::ErrorKind::HostUnreachable
|
||||
| std::io::ErrorKind::Unsupported
|
||||
| std::io::ErrorKind::NetworkDown
|
||||
) => return Ok(None),
|
||||
Err(e)
|
||||
if family == IpFamily::V6
|
||||
&& matches!(
|
||||
e.kind(),
|
||||
std::io::ErrorKind::NetworkUnreachable
|
||||
| std::io::ErrorKind::HostUnreachable
|
||||
| std::io::ErrorKind::Unsupported
|
||||
| std::io::ErrorKind::NetworkDown
|
||||
) =>
|
||||
{
|
||||
return Ok(None);
|
||||
}
|
||||
Err(e) => return Err(ProxyError::Proxy(format!("STUN connect failed: {e}"))),
|
||||
}
|
||||
} else {
|
||||
@@ -88,7 +98,7 @@ pub async fn stun_probe_family_with_bind(
|
||||
req[0..2].copy_from_slice(&0x0001u16.to_be_bytes()); // Binding Request
|
||||
req[2..4].copy_from_slice(&0u16.to_be_bytes()); // length
|
||||
req[4..8].copy_from_slice(&0x2112A442u32.to_be_bytes()); // magic cookie
|
||||
rand::rng().fill_bytes(&mut req[8..20]); // transaction ID
|
||||
stun_rng().fill(&mut req[8..20]); // transaction ID
|
||||
|
||||
let mut buf = [0u8; 256];
|
||||
let mut attempt = 0;
|
||||
@@ -120,16 +130,16 @@ pub async fn stun_probe_family_with_bind(
|
||||
|
||||
let magic = 0x2112A442u32.to_be_bytes();
|
||||
let txid = &req[8..20];
|
||||
let mut idx = 20;
|
||||
while idx + 4 <= n {
|
||||
let atype = u16::from_be_bytes(buf[idx..idx + 2].try_into().unwrap());
|
||||
let alen = u16::from_be_bytes(buf[idx + 2..idx + 4].try_into().unwrap()) as usize;
|
||||
idx += 4;
|
||||
if idx + alen > n {
|
||||
break;
|
||||
}
|
||||
let mut idx = 20;
|
||||
while idx + 4 <= n {
|
||||
let atype = u16::from_be_bytes(buf[idx..idx + 2].try_into().unwrap());
|
||||
let alen = u16::from_be_bytes(buf[idx + 2..idx + 4].try_into().unwrap()) as usize;
|
||||
idx += 4;
|
||||
if idx + alen > n {
|
||||
break;
|
||||
}
|
||||
|
||||
match atype {
|
||||
match atype {
|
||||
0x0020 /* XOR-MAPPED-ADDRESS */ | 0x0001 /* MAPPED-ADDRESS */ => {
|
||||
if alen < 8 {
|
||||
break;
|
||||
@@ -198,9 +208,8 @@ pub async fn stun_probe_family_with_bind(
|
||||
_ => {}
|
||||
}
|
||||
|
||||
idx += (alen + 3) & !3;
|
||||
}
|
||||
|
||||
idx += (alen + 3) & !3;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
@@ -228,7 +237,11 @@ async fn resolve_stun_addr(stun_addr: &str, family: IpFamily) -> Result<Option<S
|
||||
.await
|
||||
.map_err(|e| ProxyError::Proxy(format!("STUN resolve failed: {e}")))?;
|
||||
|
||||
let target = addrs
|
||||
.find(|a| matches!((a.is_ipv4(), family), (true, IpFamily::V4) | (false, IpFamily::V6)));
|
||||
let target = addrs.find(|a| {
|
||||
matches!(
|
||||
(a.is_ipv4(), family),
|
||||
(true, IpFamily::V4) | (false, IpFamily::V6)
|
||||
)
|
||||
});
|
||||
Ok(target)
|
||||
}
|
||||
|
||||
@@ -33,35 +33,89 @@ pub static TG_DATACENTERS_V6: LazyLock<Vec<IpAddr>> = LazyLock::new(|| {
|
||||
|
||||
// ============= Middle Proxies (for advertising) =============
|
||||
|
||||
pub static TG_MIDDLE_PROXIES_V4: LazyLock<std::collections::HashMap<i32, Vec<(IpAddr, u16)>>> =
|
||||
pub static TG_MIDDLE_PROXIES_V4: LazyLock<std::collections::HashMap<i32, Vec<(IpAddr, u16)>>> =
|
||||
LazyLock::new(|| {
|
||||
let mut m = std::collections::HashMap::new();
|
||||
m.insert(1, vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 50)), 8888)]);
|
||||
m.insert(-1, vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 50)), 8888)]);
|
||||
m.insert(2, vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 161, 144)), 8888)]);
|
||||
m.insert(-2, vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 161, 144)), 8888)]);
|
||||
m.insert(3, vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 100)), 8888)]);
|
||||
m.insert(-3, vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 100)), 8888)]);
|
||||
m.insert(
|
||||
1,
|
||||
vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 50)), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
-1,
|
||||
vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 50)), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
2,
|
||||
vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 161, 144)), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
-2,
|
||||
vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 161, 144)), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
3,
|
||||
vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 100)), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
-3,
|
||||
vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 100)), 8888)],
|
||||
);
|
||||
m.insert(4, vec![(IpAddr::V4(Ipv4Addr::new(91, 108, 4, 136)), 8888)]);
|
||||
m.insert(-4, vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 165, 109)), 8888)]);
|
||||
m.insert(
|
||||
-4,
|
||||
vec![(IpAddr::V4(Ipv4Addr::new(149, 154, 165, 109)), 8888)],
|
||||
);
|
||||
m.insert(5, vec![(IpAddr::V4(Ipv4Addr::new(91, 108, 56, 183)), 8888)]);
|
||||
m.insert(-5, vec![(IpAddr::V4(Ipv4Addr::new(91, 108, 56, 183)), 8888)]);
|
||||
m.insert(
|
||||
-5,
|
||||
vec![(IpAddr::V4(Ipv4Addr::new(91, 108, 56, 183)), 8888)],
|
||||
);
|
||||
m
|
||||
});
|
||||
|
||||
pub static TG_MIDDLE_PROXIES_V6: LazyLock<std::collections::HashMap<i32, Vec<(IpAddr, u16)>>> =
|
||||
pub static TG_MIDDLE_PROXIES_V6: LazyLock<std::collections::HashMap<i32, Vec<(IpAddr, u16)>>> =
|
||||
LazyLock::new(|| {
|
||||
let mut m = std::collections::HashMap::new();
|
||||
m.insert(1, vec![(IpAddr::V6("2001:b28:f23d:f001::d".parse().unwrap()), 8888)]);
|
||||
m.insert(-1, vec![(IpAddr::V6("2001:b28:f23d:f001::d".parse().unwrap()), 8888)]);
|
||||
m.insert(2, vec![(IpAddr::V6("2001:67c:04e8:f002::d".parse().unwrap()), 80)]);
|
||||
m.insert(-2, vec![(IpAddr::V6("2001:67c:04e8:f002::d".parse().unwrap()), 80)]);
|
||||
m.insert(3, vec![(IpAddr::V6("2001:b28:f23d:f003::d".parse().unwrap()), 8888)]);
|
||||
m.insert(-3, vec![(IpAddr::V6("2001:b28:f23d:f003::d".parse().unwrap()), 8888)]);
|
||||
m.insert(4, vec![(IpAddr::V6("2001:67c:04e8:f004::d".parse().unwrap()), 8888)]);
|
||||
m.insert(-4, vec![(IpAddr::V6("2001:67c:04e8:f004::d".parse().unwrap()), 8888)]);
|
||||
m.insert(5, vec![(IpAddr::V6("2001:b28:f23f:f005::d".parse().unwrap()), 8888)]);
|
||||
m.insert(-5, vec![(IpAddr::V6("2001:b28:f23f:f005::d".parse().unwrap()), 8888)]);
|
||||
m.insert(
|
||||
1,
|
||||
vec![(IpAddr::V6("2001:b28:f23d:f001::d".parse().unwrap()), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
-1,
|
||||
vec![(IpAddr::V6("2001:b28:f23d:f001::d".parse().unwrap()), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
2,
|
||||
vec![(IpAddr::V6("2001:67c:04e8:f002::d".parse().unwrap()), 80)],
|
||||
);
|
||||
m.insert(
|
||||
-2,
|
||||
vec![(IpAddr::V6("2001:67c:04e8:f002::d".parse().unwrap()), 80)],
|
||||
);
|
||||
m.insert(
|
||||
3,
|
||||
vec![(IpAddr::V6("2001:b28:f23d:f003::d".parse().unwrap()), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
-3,
|
||||
vec![(IpAddr::V6("2001:b28:f23d:f003::d".parse().unwrap()), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
4,
|
||||
vec![(IpAddr::V6("2001:67c:04e8:f004::d".parse().unwrap()), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
-4,
|
||||
vec![(IpAddr::V6("2001:67c:04e8:f004::d".parse().unwrap()), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
5,
|
||||
vec![(IpAddr::V6("2001:b28:f23f:f005::d".parse().unwrap()), 8888)],
|
||||
);
|
||||
m.insert(
|
||||
-5,
|
||||
vec![(IpAddr::V6("2001:b28:f23f:f005::d".parse().unwrap()), 8888)],
|
||||
);
|
||||
m
|
||||
});
|
||||
|
||||
@@ -89,12 +143,12 @@ impl ProtoTag {
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Convert to 4 bytes (little-endian)
|
||||
pub fn to_bytes(self) -> [u8; 4] {
|
||||
(self as u32).to_le_bytes()
|
||||
}
|
||||
|
||||
|
||||
/// Get protocol tag as bytes slice
|
||||
pub fn as_bytes(&self) -> &'static [u8; 4] {
|
||||
match self {
|
||||
@@ -152,11 +206,29 @@ pub const TLS_RECORD_CHANGE_CIPHER: u8 = 0x14;
|
||||
pub const TLS_RECORD_APPLICATION: u8 = 0x17;
|
||||
/// TLS record type: Alert
|
||||
pub const TLS_RECORD_ALERT: u8 = 0x15;
|
||||
/// Maximum TLS record size
|
||||
pub const MAX_TLS_RECORD_SIZE: usize = 16384;
|
||||
/// Maximum TLS chunk size (with overhead)
|
||||
/// RFC 8446 §5.2 allows up to 16384 + 256 bytes of ciphertext
|
||||
pub const MAX_TLS_CHUNK_SIZE: usize = 16384 + 256;
|
||||
/// Maximum TLS plaintext record payload size.
|
||||
/// RFC 8446 §5.1: "The length MUST NOT exceed 2^14 bytes."
|
||||
/// Use this for validating incoming unencrypted records
|
||||
/// (ClientHello, ChangeCipherSpec, unprotected Handshake messages).
|
||||
pub const MAX_TLS_PLAINTEXT_SIZE: usize = 16_384;
|
||||
|
||||
/// Structural minimum for a valid TLS 1.3 ClientHello with SNI.
|
||||
/// Derived from RFC 8446 §4.1.2 field layout + Appendix D.4 compat mode.
|
||||
/// Deliberately conservative (below any real client) to avoid false
|
||||
/// positives on legitimate connections with compact extension sets.
|
||||
pub const MIN_TLS_CLIENT_HELLO_SIZE: usize = 100;
|
||||
|
||||
/// Maximum TLS ciphertext record payload size.
|
||||
/// RFC 8446 §5.2: "The length MUST NOT exceed 2^14 + 256 bytes."
|
||||
/// The +256 accounts for maximum AEAD expansion overhead.
|
||||
/// Use this for validating or sizing buffers for encrypted records.
|
||||
pub const MAX_TLS_CIPHERTEXT_SIZE: usize = 16_384 + 256;
|
||||
|
||||
#[deprecated(note = "use MAX_TLS_PLAINTEXT_SIZE")]
|
||||
pub const MAX_TLS_RECORD_SIZE: usize = MAX_TLS_PLAINTEXT_SIZE;
|
||||
|
||||
#[deprecated(note = "use MAX_TLS_CIPHERTEXT_SIZE")]
|
||||
pub const MAX_TLS_CHUNK_SIZE: usize = MAX_TLS_CIPHERTEXT_SIZE;
|
||||
|
||||
/// Secure Intermediate payload is expected to be 4-byte aligned.
|
||||
pub fn is_valid_secure_payload_len(data_len: usize) -> bool {
|
||||
@@ -204,9 +276,7 @@ pub const SMALL_BUFFER_SIZE: usize = 8192;
|
||||
// ============= Statistics =============
|
||||
|
||||
/// Duration buckets for histogram metrics
|
||||
pub static DURATION_BUCKETS: &[f64] = &[
|
||||
0.1, 0.5, 1.0, 2.0, 5.0, 15.0, 60.0, 300.0, 600.0, 1800.0,
|
||||
];
|
||||
pub static DURATION_BUCKETS: &[f64] = &[0.1, 0.5, 1.0, 2.0, 5.0, 15.0, 60.0, 300.0, 600.0, 1800.0];
|
||||
|
||||
// ============= Reserved Nonce Patterns =============
|
||||
|
||||
@@ -217,29 +287,27 @@ pub static RESERVED_NONCE_FIRST_BYTES: &[u8] = &[0xef];
|
||||
pub static RESERVED_NONCE_BEGINNINGS: &[[u8; 4]] = &[
|
||||
[0x48, 0x45, 0x41, 0x44], // HEAD
|
||||
[0x50, 0x4F, 0x53, 0x54], // POST
|
||||
[0x47, 0x45, 0x54, 0x20], // GET
|
||||
[0x47, 0x45, 0x54, 0x20], // GET
|
||||
[0xee, 0xee, 0xee, 0xee], // Intermediate
|
||||
[0xdd, 0xdd, 0xdd, 0xdd], // Secure
|
||||
[0x16, 0x03, 0x01, 0x02], // TLS
|
||||
];
|
||||
|
||||
/// Reserved continuation bytes (bytes 4-7)
|
||||
pub static RESERVED_NONCE_CONTINUES: &[[u8; 4]] = &[
|
||||
[0x00, 0x00, 0x00, 0x00],
|
||||
];
|
||||
pub static RESERVED_NONCE_CONTINUES: &[[u8; 4]] = &[[0x00, 0x00, 0x00, 0x00]];
|
||||
|
||||
// ============= RPC Constants (for Middle Proxy) =============
|
||||
|
||||
/// RPC Proxy Request
|
||||
/// RPC Flags (from Erlang mtp_rpc.erl)
|
||||
pub const RPC_FLAG_NOT_ENCRYPTED: u32 = 0x2;
|
||||
pub const RPC_FLAG_HAS_AD_TAG: u32 = 0x8;
|
||||
pub const RPC_FLAG_MAGIC: u32 = 0x1000;
|
||||
pub const RPC_FLAG_EXTMODE2: u32 = 0x20000;
|
||||
pub const RPC_FLAG_PAD: u32 = 0x8000000;
|
||||
pub const RPC_FLAG_INTERMEDIATE: u32 = 0x20000000;
|
||||
pub const RPC_FLAG_ABRIDGED: u32 = 0x40000000;
|
||||
pub const RPC_FLAG_QUICKACK: u32 = 0x80000000;
|
||||
pub const RPC_FLAG_HAS_AD_TAG: u32 = 0x8;
|
||||
pub const RPC_FLAG_MAGIC: u32 = 0x1000;
|
||||
pub const RPC_FLAG_EXTMODE2: u32 = 0x20000;
|
||||
pub const RPC_FLAG_PAD: u32 = 0x8000000;
|
||||
pub const RPC_FLAG_INTERMEDIATE: u32 = 0x20000000;
|
||||
pub const RPC_FLAG_ABRIDGED: u32 = 0x40000000;
|
||||
pub const RPC_FLAG_QUICKACK: u32 = 0x80000000;
|
||||
|
||||
pub const RPC_PROXY_REQ: [u8; 4] = [0xee, 0xf1, 0xce, 0x36];
|
||||
/// RPC Proxy Answer
|
||||
@@ -267,63 +335,66 @@ pub mod rpc_flags {
|
||||
pub const FLAG_QUICKACK: u32 = 0x80000000;
|
||||
}
|
||||
|
||||
// ============= Middle-End Proxy Servers =============
|
||||
pub const ME_PROXY_PORT: u16 = 8888;
|
||||
|
||||
// ============= Middle-End Proxy Servers =============
|
||||
pub const ME_PROXY_PORT: u16 = 8888;
|
||||
|
||||
pub static TG_MIDDLE_PROXIES_FLAT_V4: LazyLock<Vec<(IpAddr, u16)>> = LazyLock::new(|| {
|
||||
vec![
|
||||
(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 50)), 8888),
|
||||
(IpAddr::V4(Ipv4Addr::new(149, 154, 161, 144)), 8888),
|
||||
(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 100)), 8888),
|
||||
(IpAddr::V4(Ipv4Addr::new(91, 108, 4, 136)), 8888),
|
||||
(IpAddr::V4(Ipv4Addr::new(91, 108, 56, 183)), 8888),
|
||||
]
|
||||
});
|
||||
|
||||
// ============= RPC Constants (u32 native endian) =============
|
||||
// From mtproto-common.h + net-tcp-rpc-common.h + mtproto-proxy.c
|
||||
|
||||
pub const RPC_NONCE_U32: u32 = 0x7acb87aa;
|
||||
pub const RPC_HANDSHAKE_U32: u32 = 0x7682eef5;
|
||||
pub const RPC_HANDSHAKE_ERROR_U32: u32 = 0x6a27beda;
|
||||
pub const TL_PROXY_TAG_U32: u32 = 0xdb1e26ae; // mtproto-proxy.c:121
|
||||
|
||||
// mtproto-common.h
|
||||
pub const RPC_PROXY_REQ_U32: u32 = 0x36cef1ee;
|
||||
pub const RPC_PROXY_ANS_U32: u32 = 0x4403da0d;
|
||||
pub const RPC_CLOSE_CONN_U32: u32 = 0x1fcf425d;
|
||||
pub const RPC_CLOSE_EXT_U32: u32 = 0x5eb634a2;
|
||||
pub const RPC_SIMPLE_ACK_U32: u32 = 0x3bac409b;
|
||||
pub const RPC_PING_U32: u32 = 0x5730a2df;
|
||||
pub const RPC_PONG_U32: u32 = 0x8430eaa7;
|
||||
|
||||
pub const RPC_CRYPTO_NONE_U32: u32 = 0;
|
||||
pub const RPC_CRYPTO_AES_U32: u32 = 1;
|
||||
|
||||
pub mod proxy_flags {
|
||||
pub const FLAG_HAS_AD_TAG: u32 = 1;
|
||||
pub const FLAG_NOT_ENCRYPTED: u32 = 0x2;
|
||||
pub const FLAG_HAS_AD_TAG2: u32 = 0x8;
|
||||
pub const FLAG_MAGIC: u32 = 0x1000;
|
||||
pub const FLAG_EXTMODE2: u32 = 0x20000;
|
||||
pub const FLAG_PAD: u32 = 0x8000000;
|
||||
pub const FLAG_INTERMEDIATE: u32 = 0x20000000;
|
||||
pub const FLAG_ABRIDGED: u32 = 0x40000000;
|
||||
pub const FLAG_QUICKACK: u32 = 0x80000000;
|
||||
}
|
||||
pub static TG_MIDDLE_PROXIES_FLAT_V4: LazyLock<Vec<(IpAddr, u16)>> = LazyLock::new(|| {
|
||||
vec![
|
||||
(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 50)), 8888),
|
||||
(IpAddr::V4(Ipv4Addr::new(149, 154, 161, 144)), 8888),
|
||||
(IpAddr::V4(Ipv4Addr::new(149, 154, 175, 100)), 8888),
|
||||
(IpAddr::V4(Ipv4Addr::new(91, 108, 4, 136)), 8888),
|
||||
(IpAddr::V4(Ipv4Addr::new(91, 108, 56, 183)), 8888),
|
||||
]
|
||||
});
|
||||
|
||||
pub mod rpc_crypto_flags {
|
||||
pub const USE_CRC32C: u32 = 0x800;
|
||||
}
|
||||
|
||||
pub const ME_CONNECT_TIMEOUT_SECS: u64 = 5;
|
||||
pub const ME_HANDSHAKE_TIMEOUT_SECS: u64 = 10;
|
||||
|
||||
#[cfg(test)]
|
||||
// ============= RPC Constants (u32 native endian) =============
|
||||
// From mtproto-common.h + net-tcp-rpc-common.h + mtproto-proxy.c
|
||||
|
||||
pub const RPC_NONCE_U32: u32 = 0x7acb87aa;
|
||||
pub const RPC_HANDSHAKE_U32: u32 = 0x7682eef5;
|
||||
pub const RPC_HANDSHAKE_ERROR_U32: u32 = 0x6a27beda;
|
||||
pub const TL_PROXY_TAG_U32: u32 = 0xdb1e26ae; // mtproto-proxy.c:121
|
||||
|
||||
// mtproto-common.h
|
||||
pub const RPC_PROXY_REQ_U32: u32 = 0x36cef1ee;
|
||||
pub const RPC_PROXY_ANS_U32: u32 = 0x4403da0d;
|
||||
pub const RPC_CLOSE_CONN_U32: u32 = 0x1fcf425d;
|
||||
pub const RPC_CLOSE_EXT_U32: u32 = 0x5eb634a2;
|
||||
pub const RPC_SIMPLE_ACK_U32: u32 = 0x3bac409b;
|
||||
pub const RPC_PING_U32: u32 = 0x5730a2df;
|
||||
pub const RPC_PONG_U32: u32 = 0x8430eaa7;
|
||||
|
||||
pub const RPC_CRYPTO_NONE_U32: u32 = 0;
|
||||
pub const RPC_CRYPTO_AES_U32: u32 = 1;
|
||||
|
||||
pub mod proxy_flags {
|
||||
pub const FLAG_HAS_AD_TAG: u32 = 1;
|
||||
pub const FLAG_NOT_ENCRYPTED: u32 = 0x2;
|
||||
pub const FLAG_HAS_AD_TAG2: u32 = 0x8;
|
||||
pub const FLAG_MAGIC: u32 = 0x1000;
|
||||
pub const FLAG_EXTMODE2: u32 = 0x20000;
|
||||
pub const FLAG_PAD: u32 = 0x8000000;
|
||||
pub const FLAG_INTERMEDIATE: u32 = 0x20000000;
|
||||
pub const FLAG_ABRIDGED: u32 = 0x40000000;
|
||||
pub const FLAG_QUICKACK: u32 = 0x80000000;
|
||||
}
|
||||
|
||||
pub mod rpc_crypto_flags {
|
||||
pub const USE_CRC32C: u32 = 0x800;
|
||||
}
|
||||
|
||||
pub const ME_CONNECT_TIMEOUT_SECS: u64 = 5;
|
||||
pub const ME_HANDSHAKE_TIMEOUT_SECS: u64 = 10;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/tls_size_constants_security_tests.rs"]
|
||||
mod tls_size_constants_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_proto_tag_roundtrip() {
|
||||
for tag in [ProtoTag::Abridged, ProtoTag::Intermediate, ProtoTag::Secure] {
|
||||
@@ -332,20 +403,20 @@ mod tests {
|
||||
assert_eq!(tag, parsed);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_proto_tag_values() {
|
||||
assert_eq!(ProtoTag::Abridged.to_bytes(), PROTO_TAG_ABRIDGED);
|
||||
assert_eq!(ProtoTag::Intermediate.to_bytes(), PROTO_TAG_INTERMEDIATE);
|
||||
assert_eq!(ProtoTag::Secure.to_bytes(), PROTO_TAG_SECURE);
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_invalid_proto_tag() {
|
||||
assert!(ProtoTag::from_bytes([0, 0, 0, 0]).is_none());
|
||||
assert!(ProtoTag::from_bytes([0xff, 0xff, 0xff, 0xff]).is_none());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_datacenters_count() {
|
||||
assert_eq!(TG_DATACENTERS_V4.len(), 5);
|
||||
|
||||
@@ -22,7 +22,7 @@ impl FrameExtra {
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
|
||||
/// Create with quickack flag set
|
||||
pub fn with_quickack() -> Self {
|
||||
Self {
|
||||
@@ -30,7 +30,7 @@ impl FrameExtra {
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Create with simple_ack flag set
|
||||
pub fn with_simple_ack() -> Self {
|
||||
Self {
|
||||
@@ -38,7 +38,7 @@ impl FrameExtra {
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/// Check if any flags are set
|
||||
pub fn has_flags(&self) -> bool {
|
||||
self.quickack || self.simple_ack || self.skip_send
|
||||
@@ -76,22 +76,22 @@ impl FrameMode {
|
||||
FrameMode::Abridged => 4,
|
||||
FrameMode::Intermediate => 4,
|
||||
FrameMode::SecureIntermediate => 4 + 3, // length + padding
|
||||
FrameMode::Full => 12 + 16, // header + max CBC padding
|
||||
FrameMode::Full => 12 + 16, // header + max CBC padding
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Validate message length for MTProto
|
||||
pub fn validate_message_length(len: usize) -> bool {
|
||||
use super::constants::{MIN_MSG_LEN, MAX_MSG_LEN, PADDING_FILLER};
|
||||
|
||||
use super::constants::{MAX_MSG_LEN, MIN_MSG_LEN, PADDING_FILLER};
|
||||
|
||||
(MIN_MSG_LEN..=MAX_MSG_LEN).contains(&len) && len.is_multiple_of(PADDING_FILLER.len())
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_frame_extra_default() {
|
||||
let extra = FrameExtra::default();
|
||||
@@ -100,18 +100,18 @@ mod tests {
|
||||
assert!(!extra.skip_send);
|
||||
assert!(!extra.has_flags());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_frame_extra_flags() {
|
||||
let extra = FrameExtra::with_quickack();
|
||||
assert!(extra.quickack);
|
||||
assert!(extra.has_flags());
|
||||
|
||||
|
||||
let extra = FrameExtra::with_simple_ack();
|
||||
assert!(extra.simple_ack);
|
||||
assert!(extra.has_flags());
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_validate_message_length() {
|
||||
assert!(validate_message_length(12)); // MIN_MSG_LEN
|
||||
@@ -119,4 +119,4 @@ mod tests {
|
||||
assert!(!validate_message_length(8)); // Too small
|
||||
assert!(!validate_message_length(13)); // Not aligned to 4
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,4 +12,4 @@ pub use frame::*;
|
||||
#[allow(unused_imports)]
|
||||
pub use obfuscation::*;
|
||||
#[allow(unused_imports)]
|
||||
pub use tls::*;
|
||||
pub use tls::*;
|
||||
|
||||
@@ -2,9 +2,9 @@
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use zeroize::Zeroize;
|
||||
use crate::crypto::{sha256, AesCtr};
|
||||
use super::constants::*;
|
||||
use crate::crypto::{AesCtr, sha256};
|
||||
use zeroize::Zeroize;
|
||||
|
||||
/// Obfuscation parameters from handshake
|
||||
///
|
||||
@@ -44,41 +44,40 @@ impl ObfuscationParams {
|
||||
let dec_prekey_iv = &handshake[SKIP_LEN..SKIP_LEN + PREKEY_LEN + IV_LEN];
|
||||
let dec_prekey = &dec_prekey_iv[..PREKEY_LEN];
|
||||
let dec_iv_bytes = &dec_prekey_iv[PREKEY_LEN..];
|
||||
|
||||
|
||||
let enc_prekey_iv: Vec<u8> = dec_prekey_iv.iter().rev().copied().collect();
|
||||
let enc_prekey = &enc_prekey_iv[..PREKEY_LEN];
|
||||
let enc_iv_bytes = &enc_prekey_iv[PREKEY_LEN..];
|
||||
|
||||
|
||||
for (username, secret) in secrets {
|
||||
let mut dec_key_input = Vec::with_capacity(PREKEY_LEN + secret.len());
|
||||
dec_key_input.extend_from_slice(dec_prekey);
|
||||
dec_key_input.extend_from_slice(secret);
|
||||
let decrypt_key = sha256(&dec_key_input);
|
||||
|
||||
|
||||
let decrypt_iv = u128::from_be_bytes(dec_iv_bytes.try_into().unwrap());
|
||||
|
||||
|
||||
let mut decryptor = AesCtr::new(&decrypt_key, decrypt_iv);
|
||||
let decrypted = decryptor.decrypt(handshake);
|
||||
|
||||
|
||||
let tag_bytes: [u8; 4] = decrypted[PROTO_TAG_POS..PROTO_TAG_POS + 4]
|
||||
.try_into()
|
||||
.unwrap();
|
||||
|
||||
|
||||
let proto_tag = match ProtoTag::from_bytes(tag_bytes) {
|
||||
Some(tag) => tag,
|
||||
None => continue,
|
||||
};
|
||||
|
||||
let dc_idx = i16::from_le_bytes(
|
||||
decrypted[DC_IDX_POS..DC_IDX_POS + 2].try_into().unwrap()
|
||||
);
|
||||
|
||||
|
||||
let dc_idx =
|
||||
i16::from_le_bytes(decrypted[DC_IDX_POS..DC_IDX_POS + 2].try_into().unwrap());
|
||||
|
||||
let mut enc_key_input = Vec::with_capacity(PREKEY_LEN + secret.len());
|
||||
enc_key_input.extend_from_slice(enc_prekey);
|
||||
enc_key_input.extend_from_slice(secret);
|
||||
let encrypt_key = sha256(&enc_key_input);
|
||||
let encrypt_iv = u128::from_be_bytes(enc_iv_bytes.try_into().unwrap());
|
||||
|
||||
|
||||
return Some((
|
||||
ObfuscationParams {
|
||||
decrypt_key,
|
||||
@@ -91,20 +90,20 @@ impl ObfuscationParams {
|
||||
username.clone(),
|
||||
));
|
||||
}
|
||||
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
|
||||
/// Create AES-CTR decryptor for client -> proxy direction
|
||||
pub fn create_decryptor(&self) -> AesCtr {
|
||||
AesCtr::new(&self.decrypt_key, self.decrypt_iv)
|
||||
}
|
||||
|
||||
|
||||
/// Create AES-CTR encryptor for proxy -> client direction
|
||||
pub fn create_encryptor(&self) -> AesCtr {
|
||||
AesCtr::new(&self.encrypt_key, self.encrypt_iv)
|
||||
}
|
||||
|
||||
|
||||
/// Get the combined encrypt key and IV for fast mode
|
||||
pub fn enc_key_iv(&self) -> Vec<u8> {
|
||||
let mut result = Vec::with_capacity(KEY_LEN + IV_LEN);
|
||||
@@ -120,7 +119,7 @@ pub fn generate_nonce<R: FnMut(usize) -> Vec<u8>>(mut random_bytes: R) -> [u8; H
|
||||
let nonce_vec = random_bytes(HANDSHAKE_LEN);
|
||||
let mut nonce = [0u8; HANDSHAKE_LEN];
|
||||
nonce.copy_from_slice(&nonce_vec);
|
||||
|
||||
|
||||
if is_valid_nonce(&nonce) {
|
||||
return nonce;
|
||||
}
|
||||
@@ -132,17 +131,17 @@ pub fn is_valid_nonce(nonce: &[u8; HANDSHAKE_LEN]) -> bool {
|
||||
if RESERVED_NONCE_FIRST_BYTES.contains(&nonce[0]) {
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
let first_four: [u8; 4] = nonce[..4].try_into().unwrap();
|
||||
if RESERVED_NONCE_BEGINNINGS.contains(&first_four) {
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
let continue_four: [u8; 4] = nonce[4..8].try_into().unwrap();
|
||||
if RESERVED_NONCE_CONTINUES.contains(&continue_four) {
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
true
|
||||
}
|
||||
|
||||
@@ -153,7 +152,7 @@ pub fn prepare_tg_nonce(
|
||||
enc_key_iv: Option<&[u8]>,
|
||||
) {
|
||||
nonce[PROTO_TAG_POS..PROTO_TAG_POS + 4].copy_from_slice(&proto_tag.to_bytes());
|
||||
|
||||
|
||||
if let Some(key_iv) = enc_key_iv {
|
||||
let reversed: Vec<u8> = key_iv.iter().rev().copied().collect();
|
||||
nonce[SKIP_LEN..SKIP_LEN + KEY_LEN + IV_LEN].copy_from_slice(&reversed);
|
||||
@@ -171,39 +170,39 @@ pub fn encrypt_nonce(nonce: &[u8; HANDSHAKE_LEN]) -> Vec<u8> {
|
||||
let key_iv = &nonce[SKIP_LEN..SKIP_LEN + KEY_LEN + IV_LEN];
|
||||
let enc_key = sha256(key_iv);
|
||||
let enc_iv = u128::from_be_bytes(key_iv[..IV_LEN].try_into().unwrap());
|
||||
|
||||
|
||||
let mut encryptor = AesCtr::new(&enc_key, enc_iv);
|
||||
|
||||
|
||||
let mut result = nonce.to_vec();
|
||||
let encrypted_part = encryptor.encrypt(&nonce[PROTO_TAG_POS..]);
|
||||
result[PROTO_TAG_POS..].copy_from_slice(&encrypted_part);
|
||||
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_is_valid_nonce() {
|
||||
let mut valid = [0x42u8; HANDSHAKE_LEN];
|
||||
valid[4..8].copy_from_slice(&[1, 2, 3, 4]);
|
||||
assert!(is_valid_nonce(&valid));
|
||||
|
||||
|
||||
let mut invalid = [0x00u8; HANDSHAKE_LEN];
|
||||
invalid[0] = 0xef;
|
||||
assert!(!is_valid_nonce(&invalid));
|
||||
|
||||
|
||||
let mut invalid = [0x00u8; HANDSHAKE_LEN];
|
||||
invalid[..4].copy_from_slice(b"HEAD");
|
||||
assert!(!is_valid_nonce(&invalid));
|
||||
|
||||
|
||||
let mut invalid = [0x42u8; HANDSHAKE_LEN];
|
||||
invalid[4..8].copy_from_slice(&[0, 0, 0, 0]);
|
||||
assert!(!is_valid_nonce(&invalid));
|
||||
}
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_generate_nonce() {
|
||||
let mut counter = 0u8;
|
||||
@@ -211,7 +210,7 @@ mod tests {
|
||||
counter = counter.wrapping_add(1);
|
||||
vec![counter; n]
|
||||
});
|
||||
|
||||
|
||||
assert!(is_valid_nonce(&nonce));
|
||||
assert_eq!(nonce.len(), HANDSHAKE_LEN);
|
||||
}
|
||||
|
||||
358
src/protocol/tests/tls_adversarial_tests.rs
Normal file
358
src/protocol/tests/tls_adversarial_tests.rs
Normal file
@@ -0,0 +1,358 @@
|
||||
use super::*;
|
||||
use crate::crypto::sha256_hmac;
|
||||
use std::time::Instant;
|
||||
|
||||
/// Helper to create a byte vector of specific length.
|
||||
fn make_garbage(len: usize) -> Vec<u8> {
|
||||
vec![0x42u8; len]
|
||||
}
|
||||
|
||||
/// Helper to create a valid-looking HMAC digest for test.
|
||||
fn make_digest(secret: &[u8], msg: &[u8], ts: u32) -> [u8; 32] {
|
||||
let mut hmac = sha256_hmac(secret, msg);
|
||||
let ts_bytes = ts.to_le_bytes();
|
||||
for i in 0..4 {
|
||||
hmac[28 + i] ^= ts_bytes[i];
|
||||
}
|
||||
hmac
|
||||
}
|
||||
|
||||
fn make_valid_tls_handshake_with_session_id(
|
||||
secret: &[u8],
|
||||
timestamp: u32,
|
||||
session_id: &[u8],
|
||||
) -> Vec<u8> {
|
||||
let session_id_len = session_id.len();
|
||||
let len = TLS_DIGEST_POS + TLS_DIGEST_LEN + 1 + session_id_len;
|
||||
let mut handshake = vec![0x42u8; len];
|
||||
|
||||
handshake[TLS_DIGEST_POS + TLS_DIGEST_LEN] = session_id_len as u8;
|
||||
let sid_start = TLS_DIGEST_POS + TLS_DIGEST_LEN + 1;
|
||||
handshake[sid_start..sid_start + session_id_len].copy_from_slice(session_id);
|
||||
handshake[TLS_DIGEST_POS..TLS_DIGEST_POS + TLS_DIGEST_LEN].fill(0);
|
||||
|
||||
let digest = make_digest(secret, &handshake, timestamp);
|
||||
|
||||
handshake[TLS_DIGEST_POS..TLS_DIGEST_POS + TLS_DIGEST_LEN].copy_from_slice(&digest);
|
||||
handshake
|
||||
}
|
||||
|
||||
fn make_valid_tls_handshake(secret: &[u8], timestamp: u32) -> Vec<u8> {
|
||||
make_valid_tls_handshake_with_session_id(secret, timestamp, &[0x42; 32])
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// Truncated Packet Tests (OWASP ASVS 5.1.4, 5.1.5)
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
#[test]
|
||||
fn validate_tls_handshake_truncated_10_bytes_rejected() {
|
||||
let secrets = vec![("user".to_string(), b"secret".to_vec())];
|
||||
let truncated = make_garbage(10);
|
||||
assert!(validate_tls_handshake(&truncated, &secrets, true).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn validate_tls_handshake_truncated_at_digest_start_rejected() {
|
||||
let secrets = vec![("user".to_string(), b"secret".to_vec())];
|
||||
// TLS_DIGEST_POS = 11. 11 bytes should be rejected.
|
||||
let truncated = make_garbage(TLS_DIGEST_POS);
|
||||
assert!(validate_tls_handshake(&truncated, &secrets, true).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn validate_tls_handshake_truncated_inside_digest_rejected() {
|
||||
let secrets = vec![("user".to_string(), b"secret".to_vec())];
|
||||
// TLS_DIGEST_POS + 16 (half digest)
|
||||
let truncated = make_garbage(TLS_DIGEST_POS + 16);
|
||||
assert!(validate_tls_handshake(&truncated, &secrets, true).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extract_sni_truncated_at_record_header_rejected() {
|
||||
let truncated = make_garbage(3);
|
||||
assert!(extract_sni_from_client_hello(&truncated).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extract_sni_truncated_at_handshake_header_rejected() {
|
||||
let mut truncated = vec![TLS_RECORD_HANDSHAKE, 0x03, 0x03, 0x00, 0x05];
|
||||
truncated.extend_from_slice(&[0x01, 0x00]); // ClientHello type but truncated length
|
||||
assert!(extract_sni_from_client_hello(&truncated).is_none());
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// Malformed Extension Parsing Tests
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
#[test]
|
||||
fn extract_sni_with_overlapping_extension_lengths_rejected() {
|
||||
let mut h = vec![0x16, 0x03, 0x03, 0x00, 0x60]; // Record header
|
||||
h.push(0x01); // Handshake type: ClientHello
|
||||
h.extend_from_slice(&[0x00, 0x00, 0x5C]); // Length: 92
|
||||
h.extend_from_slice(&[0x03, 0x03]); // Version
|
||||
h.extend_from_slice(&[0u8; 32]); // Random
|
||||
h.push(0); // Session ID length: 0
|
||||
h.extend_from_slice(&[0x00, 0x02, 0x13, 0x01]); // Cipher suites
|
||||
h.extend_from_slice(&[0x01, 0x00]); // Compression
|
||||
|
||||
// Extensions start
|
||||
h.extend_from_slice(&[0x00, 0x20]); // Total Extensions length: 32
|
||||
|
||||
// Extension 1: SNI (type 0)
|
||||
h.extend_from_slice(&[0x00, 0x00]);
|
||||
h.extend_from_slice(&[0x00, 0x40]); // Claimed len: 64 (OVERFLOWS total extensions len 32)
|
||||
h.extend_from_slice(&[0u8; 64]);
|
||||
|
||||
assert!(extract_sni_from_client_hello(&h).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extract_sni_with_infinite_loop_potential_extension_rejected() {
|
||||
let mut h = vec![0x16, 0x03, 0x03, 0x00, 0x60]; // Record header
|
||||
h.push(0x01); // Handshake type: ClientHello
|
||||
h.extend_from_slice(&[0x00, 0x00, 0x5C]); // Length: 92
|
||||
h.extend_from_slice(&[0x03, 0x03]); // Version
|
||||
h.extend_from_slice(&[0u8; 32]); // Random
|
||||
h.push(0); // Session ID length: 0
|
||||
h.extend_from_slice(&[0x00, 0x02, 0x13, 0x01]); // Cipher suites
|
||||
h.extend_from_slice(&[0x01, 0x00]); // Compression
|
||||
|
||||
// Extensions start
|
||||
h.extend_from_slice(&[0x00, 0x10]); // Total Extensions length: 16
|
||||
|
||||
// Extension: zero length but claims more?
|
||||
// If our parser didn't advance, it might loop.
|
||||
// Telemt uses `pos += 4 + elen;` so it always advances.
|
||||
h.extend_from_slice(&[0x12, 0x34]); // Unknown type
|
||||
h.extend_from_slice(&[0x00, 0x00]); // Length 0
|
||||
|
||||
// Fill the rest with garbage
|
||||
h.extend_from_slice(&[0x42; 12]);
|
||||
|
||||
// We expect it to finish without SNI found
|
||||
assert!(extract_sni_from_client_hello(&h).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extract_sni_with_invalid_hostname_rejected() {
|
||||
let host = b"invalid_host!%^";
|
||||
let mut sni = Vec::new();
|
||||
sni.extend_from_slice(&((host.len() + 3) as u16).to_be_bytes());
|
||||
sni.push(0);
|
||||
sni.extend_from_slice(&(host.len() as u16).to_be_bytes());
|
||||
sni.extend_from_slice(host);
|
||||
|
||||
let mut h = vec![0x16, 0x03, 0x03, 0x00, 0x60]; // Record header
|
||||
h.push(0x01); // ClientHello
|
||||
h.extend_from_slice(&[0x00, 0x00, 0x5C]);
|
||||
h.extend_from_slice(&[0x03, 0x03]);
|
||||
h.extend_from_slice(&[0u8; 32]);
|
||||
h.push(0);
|
||||
h.extend_from_slice(&[0x00, 0x02, 0x13, 0x01]);
|
||||
h.extend_from_slice(&[0x01, 0x00]);
|
||||
|
||||
let mut ext = Vec::new();
|
||||
ext.extend_from_slice(&0x0000u16.to_be_bytes());
|
||||
ext.extend_from_slice(&(sni.len() as u16).to_be_bytes());
|
||||
ext.extend_from_slice(&sni);
|
||||
|
||||
h.extend_from_slice(&(ext.len() as u16).to_be_bytes());
|
||||
h.extend_from_slice(&ext);
|
||||
|
||||
assert!(
|
||||
extract_sni_from_client_hello(&h).is_none(),
|
||||
"Invalid SNI hostname must be rejected"
|
||||
);
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// Timing Neutrality Tests (OWASP ASVS 5.1.7)
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
#[test]
|
||||
fn validate_tls_handshake_timing_neutrality() {
|
||||
let secret = b"timing_test_secret_32_bytes_long_";
|
||||
let secrets = vec![("u".to_string(), secret.to_vec())];
|
||||
|
||||
let mut base = vec![0x42u8; 100];
|
||||
base[TLS_DIGEST_POS + TLS_DIGEST_LEN] = 32;
|
||||
|
||||
const ITER: usize = 600;
|
||||
const ROUNDS: usize = 7;
|
||||
|
||||
let mut per_round_avg_diff_ns = Vec::with_capacity(ROUNDS);
|
||||
|
||||
for round in 0..ROUNDS {
|
||||
let mut success_h = base.clone();
|
||||
let mut fail_h = base.clone();
|
||||
|
||||
let start_success = Instant::now();
|
||||
for _ in 0..ITER {
|
||||
let digest = make_digest(secret, &success_h, 0);
|
||||
success_h[TLS_DIGEST_POS..TLS_DIGEST_POS + TLS_DIGEST_LEN].copy_from_slice(&digest);
|
||||
let _ = validate_tls_handshake_at_time(&success_h, &secrets, true, 0);
|
||||
}
|
||||
let success_elapsed = start_success.elapsed();
|
||||
|
||||
let start_fail = Instant::now();
|
||||
for i in 0..ITER {
|
||||
let mut digest = make_digest(secret, &fail_h, 0);
|
||||
let flip_idx = (i + round) % (TLS_DIGEST_LEN - 4);
|
||||
digest[flip_idx] ^= 0xFF;
|
||||
fail_h[TLS_DIGEST_POS..TLS_DIGEST_POS + TLS_DIGEST_LEN].copy_from_slice(&digest);
|
||||
let _ = validate_tls_handshake_at_time(&fail_h, &secrets, true, 0);
|
||||
}
|
||||
let fail_elapsed = start_fail.elapsed();
|
||||
|
||||
let diff = if success_elapsed > fail_elapsed {
|
||||
success_elapsed - fail_elapsed
|
||||
} else {
|
||||
fail_elapsed - success_elapsed
|
||||
};
|
||||
per_round_avg_diff_ns.push(diff.as_nanos() as f64 / ITER as f64);
|
||||
}
|
||||
|
||||
per_round_avg_diff_ns.sort_by(|a, b| a.partial_cmp(b).unwrap());
|
||||
let median_avg_diff_ns = per_round_avg_diff_ns[ROUNDS / 2];
|
||||
|
||||
// Keep this as a coarse side-channel guard only; noisy shared CI hosts can
|
||||
// introduce microsecond-level jitter that should not fail deterministic suites.
|
||||
assert!(
|
||||
median_avg_diff_ns < 50_000.0,
|
||||
"Median timing delta too large: {} ns/iter",
|
||||
median_avg_diff_ns
|
||||
);
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// Adversarial Fingerprinting / Active Probing Tests
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
#[test]
|
||||
fn is_tls_handshake_robustness_against_probing() {
|
||||
// Valid TLS 1.0 ClientHello
|
||||
assert!(is_tls_handshake(&[0x16, 0x03, 0x01]));
|
||||
// Valid TLS 1.2/1.3 ClientHello (Legacy Record Layer)
|
||||
assert!(is_tls_handshake(&[0x16, 0x03, 0x03]));
|
||||
|
||||
// Invalid record type but matching version
|
||||
assert!(!is_tls_handshake(&[0x17, 0x03, 0x03]));
|
||||
// Plaintext HTTP request
|
||||
assert!(!is_tls_handshake(b"GET / HTTP/1.1"));
|
||||
// Short garbage
|
||||
assert!(!is_tls_handshake(&[0x16, 0x03]));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn validate_tls_handshake_at_time_strict_boundary() {
|
||||
let secret = b"strict_boundary_secret_32_bytes_";
|
||||
let secrets = vec![("u".to_string(), secret.to_vec())];
|
||||
let now: i64 = 1_000_000_000;
|
||||
|
||||
// Boundary: exactly TIME_SKEW_MAX (120s past)
|
||||
let ts_past = (now - TIME_SKEW_MAX) as u32;
|
||||
let h = make_valid_tls_handshake_with_session_id(secret, ts_past, &[0x42; 32]);
|
||||
assert!(validate_tls_handshake_at_time(&h, &secrets, false, now).is_some());
|
||||
|
||||
// Boundary + 1s: should be rejected
|
||||
let ts_too_past = (now - TIME_SKEW_MAX - 1) as u32;
|
||||
let h2 = make_valid_tls_handshake_with_session_id(secret, ts_too_past, &[0x42; 32]);
|
||||
assert!(validate_tls_handshake_at_time(&h2, &secrets, false, now).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extract_sni_with_duplicate_extensions_rejected() {
|
||||
// Construct a ClientHello with TWO SNI extensions
|
||||
let host1 = b"first.com";
|
||||
let mut sni1 = Vec::new();
|
||||
sni1.extend_from_slice(&((host1.len() + 3) as u16).to_be_bytes());
|
||||
sni1.push(0);
|
||||
sni1.extend_from_slice(&(host1.len() as u16).to_be_bytes());
|
||||
sni1.extend_from_slice(host1);
|
||||
|
||||
let host2 = b"second.com";
|
||||
let mut sni2 = Vec::new();
|
||||
sni2.extend_from_slice(&((host2.len() + 3) as u16).to_be_bytes());
|
||||
sni2.push(0);
|
||||
sni2.extend_from_slice(&(host2.len() as u16).to_be_bytes());
|
||||
sni2.extend_from_slice(host2);
|
||||
|
||||
let mut ext = Vec::new();
|
||||
// Ext 1: SNI
|
||||
ext.extend_from_slice(&0x0000u16.to_be_bytes());
|
||||
ext.extend_from_slice(&(sni1.len() as u16).to_be_bytes());
|
||||
ext.extend_from_slice(&sni1);
|
||||
// Ext 2: SNI again
|
||||
ext.extend_from_slice(&0x0000u16.to_be_bytes());
|
||||
ext.extend_from_slice(&(sni2.len() as u16).to_be_bytes());
|
||||
ext.extend_from_slice(&sni2);
|
||||
|
||||
let mut body = Vec::new();
|
||||
body.extend_from_slice(&[0x03, 0x03]);
|
||||
body.extend_from_slice(&[0u8; 32]);
|
||||
body.push(0);
|
||||
body.extend_from_slice(&[0x00, 0x02, 0x13, 0x01]);
|
||||
body.extend_from_slice(&[0x01, 0x00]);
|
||||
body.extend_from_slice(&(ext.len() as u16).to_be_bytes());
|
||||
body.extend_from_slice(&ext);
|
||||
|
||||
let mut handshake = Vec::new();
|
||||
handshake.push(0x01);
|
||||
let body_len = (body.len() as u32).to_be_bytes();
|
||||
handshake.extend_from_slice(&body_len[1..4]);
|
||||
handshake.extend_from_slice(&body);
|
||||
|
||||
let mut h = Vec::new();
|
||||
h.push(0x16);
|
||||
h.extend_from_slice(&[0x03, 0x03]);
|
||||
h.extend_from_slice(&(handshake.len() as u16).to_be_bytes());
|
||||
h.extend_from_slice(&handshake);
|
||||
|
||||
// Duplicate SNI extensions are ambiguous and must fail closed.
|
||||
assert!(extract_sni_from_client_hello(&h).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extract_alpn_with_malformed_list_rejected() {
|
||||
let mut alpn_payload = Vec::new();
|
||||
alpn_payload.extend_from_slice(&0x0005u16.to_be_bytes()); // Total len 5
|
||||
alpn_payload.push(10); // Labeled len 10 (OVERFLOWS total 5)
|
||||
alpn_payload.extend_from_slice(b"h2");
|
||||
|
||||
let mut ext = Vec::new();
|
||||
ext.extend_from_slice(&0x0010u16.to_be_bytes()); // Type: ALPN (16)
|
||||
ext.extend_from_slice(&(alpn_payload.len() as u16).to_be_bytes());
|
||||
ext.extend_from_slice(&alpn_payload);
|
||||
|
||||
let mut h = vec![
|
||||
0x16, 0x03, 0x03, 0x00, 0x40, 0x01, 0x00, 0x00, 0x3C, 0x03, 0x03,
|
||||
];
|
||||
h.extend_from_slice(&[0u8; 32]);
|
||||
h.push(0);
|
||||
h.extend_from_slice(&[0x00, 0x02, 0x13, 0x01, 0x01, 0x00]);
|
||||
h.extend_from_slice(&(ext.len() as u16).to_be_bytes());
|
||||
h.extend_from_slice(&ext);
|
||||
|
||||
let res = extract_alpn_from_client_hello(&h);
|
||||
assert!(
|
||||
res.is_empty(),
|
||||
"Malformed ALPN list must return empty or fail"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extract_sni_with_huge_extension_header_rejected() {
|
||||
let mut h = vec![0x16, 0x03, 0x03, 0x00, 0x00]; // Record header
|
||||
h.push(0x01); // ClientHello
|
||||
h.extend_from_slice(&[0x00, 0xFF, 0xFF]); // Huge length (65535) - overflows record
|
||||
h.extend_from_slice(&[0x03, 0x03]);
|
||||
h.extend_from_slice(&[0u8; 32]);
|
||||
h.push(0);
|
||||
h.extend_from_slice(&[0x00, 0x02, 0x13, 0x01, 0x01, 0x00]);
|
||||
|
||||
// Extensions start
|
||||
h.extend_from_slice(&[0xFF, 0xFF]); // Total extensions: 65535 (OVERFLOWS everything)
|
||||
|
||||
assert!(extract_sni_from_client_hello(&h).is_none());
|
||||
}
|
||||
210
src/protocol/tests/tls_fuzz_security_tests.rs
Normal file
210
src/protocol/tests/tls_fuzz_security_tests.rs
Normal file
@@ -0,0 +1,210 @@
|
||||
use super::*;
|
||||
use crate::crypto::sha256_hmac;
|
||||
use std::panic::catch_unwind;
|
||||
|
||||
fn make_valid_tls_handshake_with_session_id(
|
||||
secret: &[u8],
|
||||
timestamp: u32,
|
||||
session_id: &[u8],
|
||||
) -> Vec<u8> {
|
||||
let session_id_len = session_id.len();
|
||||
assert!(session_id_len <= u8::MAX as usize);
|
||||
|
||||
let len = TLS_DIGEST_POS + TLS_DIGEST_LEN + 1 + session_id_len;
|
||||
let mut handshake = vec![0x42u8; len];
|
||||
handshake[TLS_DIGEST_POS + TLS_DIGEST_LEN] = session_id_len as u8;
|
||||
let sid_start = TLS_DIGEST_POS + TLS_DIGEST_LEN + 1;
|
||||
handshake[sid_start..sid_start + session_id_len].copy_from_slice(session_id);
|
||||
handshake[TLS_DIGEST_POS..TLS_DIGEST_POS + TLS_DIGEST_LEN].fill(0);
|
||||
|
||||
let mut digest = sha256_hmac(secret, &handshake);
|
||||
let ts = timestamp.to_le_bytes();
|
||||
for idx in 0..4 {
|
||||
digest[28 + idx] ^= ts[idx];
|
||||
}
|
||||
|
||||
handshake[TLS_DIGEST_POS..TLS_DIGEST_POS + TLS_DIGEST_LEN].copy_from_slice(&digest);
|
||||
handshake
|
||||
}
|
||||
|
||||
fn make_valid_client_hello_record(host: &str, alpn_protocols: &[&[u8]]) -> Vec<u8> {
|
||||
let mut body = Vec::new();
|
||||
body.extend_from_slice(&TLS_VERSION);
|
||||
body.extend_from_slice(&[0u8; 32]);
|
||||
body.push(0);
|
||||
body.extend_from_slice(&2u16.to_be_bytes());
|
||||
body.extend_from_slice(&[0x13, 0x01]);
|
||||
body.push(1);
|
||||
body.push(0);
|
||||
|
||||
let mut ext_blob = Vec::new();
|
||||
|
||||
let host_bytes = host.as_bytes();
|
||||
let mut sni_payload = Vec::new();
|
||||
sni_payload.extend_from_slice(&((host_bytes.len() + 3) as u16).to_be_bytes());
|
||||
sni_payload.push(0);
|
||||
sni_payload.extend_from_slice(&(host_bytes.len() as u16).to_be_bytes());
|
||||
sni_payload.extend_from_slice(host_bytes);
|
||||
ext_blob.extend_from_slice(&0x0000u16.to_be_bytes());
|
||||
ext_blob.extend_from_slice(&(sni_payload.len() as u16).to_be_bytes());
|
||||
ext_blob.extend_from_slice(&sni_payload);
|
||||
|
||||
if !alpn_protocols.is_empty() {
|
||||
let mut alpn_list = Vec::new();
|
||||
for proto in alpn_protocols {
|
||||
alpn_list.push(proto.len() as u8);
|
||||
alpn_list.extend_from_slice(proto);
|
||||
}
|
||||
let mut alpn_data = Vec::new();
|
||||
alpn_data.extend_from_slice(&(alpn_list.len() as u16).to_be_bytes());
|
||||
alpn_data.extend_from_slice(&alpn_list);
|
||||
|
||||
ext_blob.extend_from_slice(&0x0010u16.to_be_bytes());
|
||||
ext_blob.extend_from_slice(&(alpn_data.len() as u16).to_be_bytes());
|
||||
ext_blob.extend_from_slice(&alpn_data);
|
||||
}
|
||||
|
||||
body.extend_from_slice(&(ext_blob.len() as u16).to_be_bytes());
|
||||
body.extend_from_slice(&ext_blob);
|
||||
|
||||
let mut handshake = Vec::new();
|
||||
handshake.push(0x01);
|
||||
let body_len = (body.len() as u32).to_be_bytes();
|
||||
handshake.extend_from_slice(&body_len[1..4]);
|
||||
handshake.extend_from_slice(&body);
|
||||
|
||||
let mut record = Vec::new();
|
||||
record.push(TLS_RECORD_HANDSHAKE);
|
||||
record.extend_from_slice(&[0x03, 0x01]);
|
||||
record.extend_from_slice(&(handshake.len() as u16).to_be_bytes());
|
||||
record.extend_from_slice(&handshake);
|
||||
record
|
||||
}
|
||||
|
||||
#[test]
fn client_hello_fuzz_corpus_never_panics_or_accepts_corruption() {
    // Baseline: a well-formed hello must yield the expected SNI and ALPN.
    let valid = make_valid_client_hello_record("example.com", &[b"h2", b"http/1.1"]);
    assert_eq!(
        extract_sni_from_client_hello(&valid).as_deref(),
        Some("example.com")
    );
    assert_eq!(
        extract_alpn_from_client_hello(&valid),
        vec![b"h2".to_vec(), b"http/1.1".to_vec()]
    );
    assert!(
        extract_sni_from_client_hello(&make_valid_client_hello_record("127.0.0.1", &[])).is_none(),
        "literal IP hostnames must be rejected"
    );

    // Malformed corpus: empty input, bare record header, truncated prefixes.
    let mut corpus = vec![
        Vec::new(),
        vec![0x16, 0x03, 0x03],
        valid[..9].to_vec(),
        valid[..valid.len() - 1].to_vec(),
    ];

    // Wrong record content type (0x15 = alert instead of handshake).
    let mut wrong_type = valid.clone();
    wrong_type[0] = 0x15;
    corpus.push(wrong_type);

    // Wrong handshake message type (0x02 = ServerHello instead of ClientHello).
    let mut wrong_handshake = valid.clone();
    wrong_handshake[5] = 0x02;
    corpus.push(wrong_handshake);

    // Corrupted record-length byte.
    let mut wrong_length = valid.clone();
    wrong_length[3] ^= 0x7f;
    corpus.push(wrong_length);

    for (idx, input) in corpus.iter().enumerate() {
        // Parsers must never panic on any corpus entry.
        assert!(catch_unwind(|| extract_sni_from_client_hello(input)).is_ok());
        assert!(catch_unwind(|| extract_alpn_from_client_hello(input)).is_ok());

        // Item 0 (the empty input) is exempted from the fail-closed
        // assertions below; only the no-panic checks apply to it.
        if idx == 0 {
            continue;
        }

        assert!(
            extract_sni_from_client_hello(input).is_none(),
            "corpus item {idx} must fail closed for SNI"
        );
        assert!(
            extract_alpn_from_client_hello(input).is_empty(),
            "corpus item {idx} must fail closed for ALPN"
        );
    }
}
|
||||
|
||||
#[test]
fn tls_handshake_fuzz_corpus_never_panics_and_rejects_digest_mutations() {
    let secret = b"tls_fuzz_security_secret";
    let now: i64 = 1_700_000_000;
    let base = make_valid_tls_handshake_with_session_id(secret, now as u32, &[0x42; 32]);
    let secrets = vec![("fuzz-user".to_string(), secret.to_vec())];

    // Baseline: the untouched handshake must validate at `now`.
    assert!(validate_tls_handshake_at_time(&base, &secrets, false, now).is_some());

    let mut corpus = Vec::new();

    // Truncated in the middle of the digest region.
    let mut truncated = base.clone();
    truncated.truncate(TLS_DIGEST_POS + 16);
    corpus.push(truncated);

    // Single-bit digest corruption.
    let mut digest_flip = base.clone();
    digest_flip[TLS_DIGEST_POS + 7] ^= 0x80;
    corpus.push(digest_flip);

    // Session-id length byte claiming more bytes (33) than are present (32).
    let mut session_id_len_overflow = base.clone();
    session_id_len_overflow[TLS_DIGEST_POS + TLS_DIGEST_LEN] = 33;
    corpus.push(session_id_len_overflow);

    // Timestamp just past the allowed past skew. The helper XORs the
    // little-endian timestamp into digest bytes 28..32, so overwriting them
    // shifts the decoded timestamp.
    let mut timestamp_far_past = base.clone();
    timestamp_far_past[TLS_DIGEST_POS + 28..TLS_DIGEST_POS + 32]
        .copy_from_slice(&((now - i64::from(TIME_SKEW_MAX) - 1) as u32).to_le_bytes());
    corpus.push(timestamp_far_past);

    // Timestamp just past the allowed future skew (the subtraction suggests
    // TIME_SKEW_MIN is the negative/future bound — confirm against constants).
    let mut timestamp_far_future = base.clone();
    timestamp_far_future[TLS_DIGEST_POS + 28..TLS_DIGEST_POS + 32]
        .copy_from_slice(&((now - TIME_SKEW_MIN + 1) as u32).to_le_bytes());
    corpus.push(timestamp_far_future);

    // 32 pseudo-random double-byte mutations of the digest region, driven by
    // a fixed-seed 64-bit LCG so the corpus is deterministic across runs.
    let mut seed = 0xA5A5_5A5A_F00D_BAAD_u64;
    for _ in 0..32 {
        let mut mutated = base.clone();
        for _ in 0..2 {
            seed = seed
                .wrapping_mul(2862933555777941757)
                .wrapping_add(3037000493);
            let idx = TLS_DIGEST_POS + (seed as usize % TLS_DIGEST_LEN);
            // The +1 guarantees a non-zero XOR mask, i.e. a real mutation.
            mutated[idx] ^= ((seed >> 17) as u8).wrapping_add(1);
        }
        corpus.push(mutated);
    }

    for (idx, handshake) in corpus.iter().enumerate() {
        let result =
            catch_unwind(|| validate_tls_handshake_at_time(handshake, &secrets, false, now));
        assert!(result.is_ok(), "corpus item {idx} must not panic");
        assert!(
            result.unwrap().is_none(),
            "corpus item {idx} must fail closed"
        );
    }
}
|
||||
|
||||
/// A near-epoch ("boot time") timestamp is only acceptable while the replay
/// window explicitly permits the bypass.
#[test]
fn tls_boot_time_acceptance_is_capped_by_replay_window() {
    let secret = b"tls_boot_time_cap_secret";
    let secrets = vec![("boot-user".to_string(), secret.to_vec())];
    let handshake = make_valid_tls_handshake_with_session_id(secret, 1u32, &[0x42; 32]);

    let accepted = validate_tls_handshake_with_replay_window(&handshake, &secrets, false, 300);
    assert!(
        accepted.is_some(),
        "boot-time timestamp should be accepted while replay window permits it"
    );

    let rejected = validate_tls_handshake_with_replay_window(&handshake, &secrets, false, 0);
    assert!(
        rejected.is_none(),
        "boot-time timestamp must be rejected when replay window disables the bypass"
    );
}
|
||||
@@ -0,0 +1,37 @@
|
||||
use super::*;
|
||||
|
||||
/// A blob one byte past `u16::MAX` cannot be length-prefixed; the builder
/// must emit nothing rather than a truncated length field.
#[test]
fn extension_builder_fails_closed_on_u16_length_overflow() {
    let oversized = (u16::MAX as usize) + 1;
    let builder = TlsExtensionBuilder {
        extensions: vec![0u8; oversized],
    };
    assert!(
        builder.build().is_empty(),
        "oversized extension blob must fail closed instead of truncating length field"
    );
}
|
||||
|
||||
/// A session id longer than 255 bytes cannot be encoded behind its one-byte
/// length prefix; both builder outputs must be empty instead of corrupt.
#[test]
fn server_hello_builder_fails_closed_on_session_id_len_overflow() {
    let builder = ServerHelloBuilder {
        random: [0u8; 32],
        session_id: vec![0xAB; (u8::MAX as usize) + 1],
        cipher_suite: cipher_suite::TLS_AES_128_GCM_SHA256,
        compression: 0,
        extensions: TlsExtensionBuilder::new(),
    };

    assert!(
        builder.build_message().is_empty(),
        "session_id length overflow must fail closed in message builder"
    );
    assert!(
        builder.build_record().is_empty(),
        "session_id length overflow must fail closed in record builder"
    );
}
|
||||
2429
src/protocol/tests/tls_security_tests.rs
Normal file
2429
src/protocol/tests/tls_security_tests.rs
Normal file
File diff suppressed because it is too large
Load Diff
11
src/protocol/tests/tls_size_constants_security_tests.rs
Normal file
11
src/protocol/tests/tls_size_constants_security_tests.rs
Normal file
@@ -0,0 +1,11 @@
|
||||
use super::{MAX_TLS_CIPHERTEXT_SIZE, MAX_TLS_PLAINTEXT_SIZE, MIN_TLS_CLIENT_HELLO_SIZE};
|
||||
|
||||
/// Pin the record-size constants to RFC 8446: 2^14 bytes of plaintext, plus
/// up to 256 bytes of ciphertext expansion (16_640 total).
#[test]
fn tls_size_constants_match_rfc_8446() {
    assert_eq!(MAX_TLS_PLAINTEXT_SIZE, 16_384);
    assert_eq!(MAX_TLS_CIPHERTEXT_SIZE, 16_640);

    // The minimum ClientHello bound must stay in a sane band, below the
    // common 512-byte padded hello but above a trivially small frame.
    assert!(MIN_TLS_CLIENT_HELLO_SIZE < 512);
    assert!(MIN_TLS_CLIENT_HELLO_SIZE > 64);
    assert!(MAX_TLS_CIPHERTEXT_SIZE > MAX_TLS_PLAINTEXT_SIZE);
}
|
||||
File diff suppressed because it is too large
Load Diff
380
src/proxy/adaptive_buffers.rs
Normal file
380
src/proxy/adaptive_buffers.rs
Normal file
@@ -0,0 +1,380 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
// Adaptive buffer policy is staged and retained for deterministic rollout.
|
||||
// Keep definitions compiled for compatibility and security test scaffolding.
|
||||
|
||||
use dashmap::DashMap;
|
||||
use std::cmp::max;
|
||||
use std::sync::OnceLock;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
const EMA_ALPHA: f64 = 0.2;
|
||||
const PROFILE_TTL: Duration = Duration::from_secs(300);
|
||||
const THROUGHPUT_UP_BPS: f64 = 8_000_000.0;
|
||||
const THROUGHPUT_DOWN_BPS: f64 = 2_000_000.0;
|
||||
const RATIO_CONFIRM_THRESHOLD: f64 = 1.12;
|
||||
const TIER1_HOLD_TICKS: u32 = 8;
|
||||
const TIER2_HOLD_TICKS: u32 = 4;
|
||||
const QUIET_DEMOTE_TICKS: u32 = 480;
|
||||
const HARD_COOLDOWN_TICKS: u32 = 20;
|
||||
const HARD_PENDING_THRESHOLD: u32 = 3;
|
||||
const HARD_PARTIAL_RATIO_THRESHOLD: f64 = 0.25;
|
||||
const DIRECT_C2S_CAP_BYTES: usize = 128 * 1024;
|
||||
const DIRECT_S2C_CAP_BYTES: usize = 512 * 1024;
|
||||
const ME_FRAMES_CAP: usize = 96;
|
||||
const ME_BYTES_CAP: usize = 384 * 1024;
|
||||
const ME_DELAY_MIN_US: u64 = 150;
|
||||
|
||||
/// Buffer-sizing tier for a relay session; higher tiers trade memory for
/// throughput. Ordered so `max` selects the more aggressive tier.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum AdaptiveTier {
    Base = 0,
    Tier1 = 1,
    Tier2 = 2,
    Tier3 = 3,
}

impl AdaptiveTier {
    /// One step up; `Tier3` saturates at itself.
    pub fn promote(self) -> Self {
        match self {
            Self::Base => Self::Tier1,
            Self::Tier1 => Self::Tier2,
            Self::Tier2 | Self::Tier3 => Self::Tier3,
        }
    }

    /// One step down; `Base` saturates at itself.
    pub fn demote(self) -> Self {
        match self {
            Self::Base | Self::Tier1 => Self::Base,
            Self::Tier2 => Self::Tier1,
            Self::Tier3 => Self::Tier2,
        }
    }

    /// Buffer scaling factor as a (numerator, denominator) pair.
    fn ratio(self) -> (usize, usize) {
        match self {
            Self::Base => (1, 1),
            Self::Tier1 => (5, 4),
            Self::Tier2 => (3, 2),
            Self::Tier3 => (2, 1),
        }
    }

    /// Numeric discriminant, e.g. for metrics labels.
    pub fn as_u8(self) -> u8 {
        self as u8
    }
}
|
||||
|
||||
/// Why the adaptive controller changed a session's tier.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TierTransitionReason {
    /// Throughput and backlog-ratio signals both held for their full
    /// confirmation windows.
    SoftConfirmed,
    /// Immediate promotion due to write back-pressure (pending or partial
    /// writes).
    HardPressure,
    /// Demotion after a sustained stretch of low activity.
    QuietDemotion,
}
|
||||
|
||||
/// A single tier change: where the session was, where it moved, and why.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct TierTransition {
    pub from: AdaptiveTier,
    pub to: AdaptiveTier,
    pub reason: TierTransitionReason,
}
|
||||
|
||||
/// One tick's worth of relay measurements fed into the controller. Field
/// prefixes follow the relay direction convention (c2s = client to server,
/// s2c = server to client).
#[derive(Debug, Clone, Copy, Default)]
pub struct RelaySignalSample {
    /// Bytes moved client -> server during the tick.
    pub c2s_bytes: u64,
    /// Bytes queued for writing server -> client.
    pub s2c_requested_bytes: u64,
    /// Bytes actually written server -> client.
    pub s2c_written_bytes: u64,
    /// Number of server -> client write operations attempted.
    pub s2c_write_ops: u64,
    /// How many of those writes completed only partially.
    pub s2c_partial_writes: u64,
    /// Current run of consecutive writes that did not complete immediately.
    pub s2c_consecutive_pending_writes: u32,
}
|
||||
|
||||
/// Per-session controller that watches relay throughput/back-pressure
/// signals and decides when to move the session between buffer tiers.
#[derive(Debug, Clone, Copy)]
pub struct SessionAdaptiveController {
    // Current tier.
    tier: AdaptiveTier,
    // Highest tier observed over the session lifetime.
    max_tier_seen: AdaptiveTier,
    // EMA of max(client->server rate, server->client written rate), bits/s.
    throughput_ema_bps: f64,
    // EMA of server->client requested-write rate, bits/s.
    incoming_ema_bps: f64,
    // EMA of server->client actually-written rate, bits/s.
    outgoing_ema_bps: f64,
    // Consecutive ticks the throughput promotion threshold held.
    tier1_hold_ticks: u32,
    // Consecutive ticks the incoming/outgoing ratio threshold held.
    tier2_hold_ticks: u32,
    // Consecutive low-activity ticks counted toward demotion.
    quiet_ticks: u32,
    // Remaining ticks during which hard promotions are suppressed.
    hard_cooldown_ticks: u32,
}
|
||||
|
||||
impl SessionAdaptiveController {
    /// Controller starting at `initial_tier`, with empty signal history.
    pub fn new(initial_tier: AdaptiveTier) -> Self {
        Self {
            tier: initial_tier,
            max_tier_seen: initial_tier,
            throughput_ema_bps: 0.0,
            incoming_ema_bps: 0.0,
            outgoing_ema_bps: 0.0,
            tier1_hold_ticks: 0,
            tier2_hold_ticks: 0,
            quiet_ticks: 0,
            hard_cooldown_ticks: 0,
        }
    }

    /// Highest tier reached during this session's lifetime.
    pub fn max_tier_seen(&self) -> AdaptiveTier {
        self.max_tier_seen
    }

    /// Feed one measurement tick; returns a transition when the tier changes.
    ///
    /// Decision order: hard back-pressure promotion (rate-limited by a
    /// cooldown) first, then soft promotion once both confirmation windows
    /// are full, then a slow quiet-time demotion. A `tick_secs` at or below
    /// epsilon is ignored to avoid division blow-ups.
    pub fn observe(&mut self, sample: RelaySignalSample, tick_secs: f64) -> Option<TierTransition> {
        if tick_secs <= f64::EPSILON {
            return None;
        }

        if self.hard_cooldown_ticks > 0 {
            self.hard_cooldown_ticks -= 1;
        }

        // Convert per-tick byte counts into bits/second.
        let c2s_bps = (sample.c2s_bytes as f64 * 8.0) / tick_secs;
        let incoming_bps = (sample.s2c_requested_bytes as f64 * 8.0) / tick_secs;
        let outgoing_bps = (sample.s2c_written_bytes as f64 * 8.0) / tick_secs;
        let throughput = c2s_bps.max(outgoing_bps);

        self.throughput_ema_bps = ema(self.throughput_ema_bps, throughput);
        self.incoming_ema_bps = ema(self.incoming_ema_bps, incoming_bps);
        self.outgoing_ema_bps = ema(self.outgoing_ema_bps, outgoing_bps);

        // Signal 1: sustained throughput above the promotion threshold.
        let tier1_now = self.throughput_ema_bps >= THROUGHPUT_UP_BPS;
        if tier1_now {
            self.tier1_hold_ticks = self.tier1_hold_ticks.saturating_add(1);
        } else {
            self.tier1_hold_ticks = 0;
        }

        // Signal 2: requested output outpacing actual writes (backlog ratio).
        let ratio = if self.outgoing_ema_bps <= f64::EPSILON {
            0.0
        } else {
            self.incoming_ema_bps / self.outgoing_ema_bps
        };
        let tier2_now = ratio >= RATIO_CONFIRM_THRESHOLD;
        if tier2_now {
            self.tier2_hold_ticks = self.tier2_hold_ticks.saturating_add(1);
        } else {
            self.tier2_hold_ticks = 0;
        }

        // Hard signal: repeated pending writes or a high partial-write rate.
        let partial_ratio = if sample.s2c_write_ops == 0 {
            0.0
        } else {
            sample.s2c_partial_writes as f64 / sample.s2c_write_ops as f64
        };
        let hard_now = sample.s2c_consecutive_pending_writes >= HARD_PENDING_THRESHOLD
            || partial_ratio >= HARD_PARTIAL_RATIO_THRESHOLD;

        if hard_now && self.hard_cooldown_ticks == 0 {
            return self.promote(TierTransitionReason::HardPressure, HARD_COOLDOWN_TICKS);
        }

        if self.tier1_hold_ticks >= TIER1_HOLD_TICKS && self.tier2_hold_ticks >= TIER2_HOLD_TICKS {
            return self.promote(TierTransitionReason::SoftConfirmed, 0);
        }

        // Demotion only counts ticks that are quiet on every signal.
        let demote_candidate =
            self.throughput_ema_bps < THROUGHPUT_DOWN_BPS && !tier2_now && !hard_now;
        if demote_candidate {
            self.quiet_ticks = self.quiet_ticks.saturating_add(1);
            if self.quiet_ticks >= QUIET_DEMOTE_TICKS {
                self.quiet_ticks = 0;
                return self.demote(TierTransitionReason::QuietDemotion);
            }
        } else {
            self.quiet_ticks = 0;
        }

        None
    }

    /// Move one tier up, resetting confirmation counters. Returns `None`
    /// when already at the top tier. `hard_cooldown_ticks` throttles
    /// subsequent hard promotions (soft promotions pass 0).
    fn promote(
        &mut self,
        reason: TierTransitionReason,
        hard_cooldown_ticks: u32,
    ) -> Option<TierTransition> {
        let from = self.tier;
        let to = from.promote();
        if from == to {
            return None;
        }
        self.tier = to;
        self.max_tier_seen = max(self.max_tier_seen, to);
        self.hard_cooldown_ticks = hard_cooldown_ticks;
        self.tier1_hold_ticks = 0;
        self.tier2_hold_ticks = 0;
        self.quiet_ticks = 0;
        Some(TierTransition { from, to, reason })
    }

    /// Move one tier down, resetting confirmation counters. Returns `None`
    /// when already at `Base`. `max_tier_seen` is intentionally left intact.
    fn demote(&mut self, reason: TierTransitionReason) -> Option<TierTransition> {
        let from = self.tier;
        let to = from.demote();
        if from == to {
            return None;
        }
        self.tier = to;
        self.tier1_hold_ticks = 0;
        self.tier2_hold_ticks = 0;
        Some(TierTransition { from, to, reason })
    }
}
|
||||
|
||||
/// Cached per-user tier memory used to seed new sessions.
#[derive(Debug, Clone, Copy)]
struct UserAdaptiveProfile {
    // Best tier recorded for the user.
    tier: AdaptiveTier,
    // When the profile was last refreshed; entries older than PROFILE_TTL
    // are treated as stale.
    seen_at: Instant,
}
|
||||
|
||||
fn profiles() -> &'static DashMap<String, UserAdaptiveProfile> {
|
||||
static USER_PROFILES: OnceLock<DashMap<String, UserAdaptiveProfile>> = OnceLock::new();
|
||||
USER_PROFILES.get_or_init(DashMap::new)
|
||||
}
|
||||
|
||||
pub fn seed_tier_for_user(user: &str) -> AdaptiveTier {
|
||||
let now = Instant::now();
|
||||
if let Some(entry) = profiles().get(user) {
|
||||
let value = entry.value();
|
||||
if now.duration_since(value.seen_at) <= PROFILE_TTL {
|
||||
return value.tier;
|
||||
}
|
||||
}
|
||||
AdaptiveTier::Base
|
||||
}
|
||||
|
||||
pub fn record_user_tier(user: &str, tier: AdaptiveTier) {
|
||||
let now = Instant::now();
|
||||
if let Some(mut entry) = profiles().get_mut(user) {
|
||||
let existing = *entry;
|
||||
let effective = if now.duration_since(existing.seen_at) > PROFILE_TTL {
|
||||
tier
|
||||
} else {
|
||||
max(existing.tier, tier)
|
||||
};
|
||||
*entry = UserAdaptiveProfile {
|
||||
tier: effective,
|
||||
seen_at: now,
|
||||
};
|
||||
return;
|
||||
}
|
||||
profiles().insert(user.to_string(), UserAdaptiveProfile { tier, seen_at: now });
|
||||
}
|
||||
|
||||
pub fn direct_copy_buffers_for_tier(
|
||||
tier: AdaptiveTier,
|
||||
base_c2s: usize,
|
||||
base_s2c: usize,
|
||||
) -> (usize, usize) {
|
||||
let (num, den) = tier.ratio();
|
||||
(
|
||||
scale(base_c2s, num, den, DIRECT_C2S_CAP_BYTES),
|
||||
scale(base_s2c, num, den, DIRECT_S2C_CAP_BYTES),
|
||||
)
|
||||
}
|
||||
|
||||
/// Derive ME flush thresholds for `tier` from the configured base policy:
/// frame/byte thresholds scale up with the tier ratio (capped), and the
/// flush delay shrinks as the tier rises, but never below `ME_DELAY_MIN_US`
/// and never above the original base delay.
pub fn me_flush_policy_for_tier(
    tier: AdaptiveTier,
    base_frames: usize,
    base_bytes: usize,
    base_delay: Duration,
) -> (usize, usize, Duration) {
    let (num, den) = tier.ratio();
    let frames = scale(base_frames, num, den, ME_FRAMES_CAP).max(1);
    let bytes = scale(base_bytes, num, den, ME_BYTES_CAP).max(4096);
    let delay_us = base_delay.as_micros() as u64;
    // Higher tiers flush sooner: 100% / 70% / 50% / 30% of the base delay.
    let adjusted_delay_us = match tier {
        AdaptiveTier::Base => delay_us,
        AdaptiveTier::Tier1 => (delay_us.saturating_mul(7)).saturating_div(10),
        AdaptiveTier::Tier2 => delay_us.saturating_div(2),
        AdaptiveTier::Tier3 => (delay_us.saturating_mul(3)).saturating_div(10),
    }
    // Clamp: at least the floor, at most the base delay (or the floor when
    // the base itself sits below the floor).
    .max(ME_DELAY_MIN_US)
    .min(delay_us.max(ME_DELAY_MIN_US));
    (frames, bytes, Duration::from_micros(adjusted_delay_us))
}
|
||||
|
||||
fn ema(prev: f64, value: f64) -> f64 {
|
||||
if prev <= f64::EPSILON {
|
||||
value
|
||||
} else {
|
||||
(prev * (1.0 - EMA_ALPHA)) + (value * EMA_ALPHA)
|
||||
}
|
||||
}
|
||||
|
||||
/// Scale `base` by `numerator / denominator` with saturating arithmetic,
/// clamping the result into `1..=cap`; a zero denominator is treated as 1.
fn scale(base: usize, numerator: usize, denominator: usize, cap: usize) -> usize {
    let grown = base.saturating_mul(numerator) / denominator.max(1);
    grown.min(cap).max(1)
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Shorthand constructor so each test reads as a row of signal values.
    fn sample(
        c2s_bytes: u64,
        s2c_requested_bytes: u64,
        s2c_written_bytes: u64,
        s2c_write_ops: u64,
        s2c_partial_writes: u64,
        s2c_consecutive_pending_writes: u32,
    ) -> RelaySignalSample {
        RelaySignalSample {
            c2s_bytes,
            s2c_requested_bytes,
            s2c_written_bytes,
            s2c_write_ops,
            s2c_partial_writes,
            s2c_consecutive_pending_writes,
        }
    }

    // Both the throughput (tier1) and backlog-ratio (tier2) signals must
    // hold for their full confirmation windows before a soft promotion.
    #[test]
    fn test_soft_promotion_requires_tier1_and_tier2() {
        let mut ctrl = SessionAdaptiveController::new(AdaptiveTier::Base);
        let tick_secs = 0.25;
        let mut promoted = None;
        for _ in 0..8 {
            promoted = ctrl.observe(
                sample(
                    300_000, // ~9.6 Mbps
                    320_000, // incoming > outgoing to confirm tier2
                    250_000, 10, 0, 0,
                ),
                tick_secs,
            );
        }

        let transition = promoted.expect("expected soft promotion");
        assert_eq!(transition.from, AdaptiveTier::Base);
        assert_eq!(transition.to, AdaptiveTier::Tier1);
        assert_eq!(transition.reason, TierTransitionReason::SoftConfirmed);
    }

    // Three consecutive pending writes trip the hard-pressure path on the
    // very first tick, bypassing the soft confirmation windows.
    #[test]
    fn test_hard_promotion_on_pending_pressure() {
        let mut ctrl = SessionAdaptiveController::new(AdaptiveTier::Base);
        let transition = ctrl
            .observe(sample(10_000, 20_000, 10_000, 4, 1, 3), 0.25)
            .expect("expected hard promotion");
        assert_eq!(transition.reason, TierTransitionReason::HardPressure);
        assert_eq!(transition.to, AdaptiveTier::Tier1);
    }

    // Near-idle traffic for the full quiet window demotes exactly one step
    // (Tier2 -> Tier1), never straight to Base.
    #[test]
    fn test_quiet_demotion_is_slow_and_stepwise() {
        let mut ctrl = SessionAdaptiveController::new(AdaptiveTier::Tier2);
        let mut demotion = None;
        for _ in 0..QUIET_DEMOTE_TICKS {
            demotion = ctrl.observe(sample(1, 1, 1, 1, 0, 0), 0.25);
        }

        let transition = demotion.expect("expected quiet demotion");
        assert_eq!(transition.from, AdaptiveTier::Tier2);
        assert_eq!(transition.to, AdaptiveTier::Tier1);
        assert_eq!(transition.reason, TierTransitionReason::QuietDemotion);
    }
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,10 +1,13 @@
|
||||
use std::collections::HashSet;
|
||||
use std::ffi::OsString;
|
||||
use std::fs::OpenOptions;
|
||||
use std::io::Write;
|
||||
use std::net::SocketAddr;
|
||||
use std::path::{Component, Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::sync::{Mutex, OnceLock};
|
||||
|
||||
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
|
||||
use tokio::net::TcpStream;
|
||||
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadHalf, WriteHalf, split};
|
||||
use tokio::sync::watch;
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
@@ -15,12 +18,212 @@ use crate::protocol::constants::*;
|
||||
use crate::proxy::handshake::{HandshakeSuccess, encrypt_tg_nonce_with_ciphers, generate_tg_nonce};
|
||||
use crate::proxy::relay::relay_bidirectional;
|
||||
use crate::proxy::route_mode::{
|
||||
RelayRouteMode, RouteCutoverState, ROUTE_SWITCH_ERROR_MSG, affected_cutover_state,
|
||||
ROUTE_SWITCH_ERROR_MSG, RelayRouteMode, RouteCutoverState, affected_cutover_state,
|
||||
cutover_stagger_delay,
|
||||
};
|
||||
use crate::stats::Stats;
|
||||
use crate::stream::{BufferPool, CryptoReader, CryptoWriter};
|
||||
use crate::transport::UpstreamManager;
|
||||
#[cfg(unix)]
|
||||
use nix::fcntl::{Flock, FlockArg, OFlag, openat};
|
||||
#[cfg(unix)]
|
||||
use nix::sys::stat::Mode;
|
||||
|
||||
#[cfg(unix)]
|
||||
use std::os::unix::fs::OpenOptionsExt;
|
||||
|
||||
const UNKNOWN_DC_LOG_DISTINCT_LIMIT: usize = 1024;
|
||||
static LOGGED_UNKNOWN_DCS: OnceLock<Mutex<HashSet<i16>>> = OnceLock::new();
|
||||
const MAX_SCOPE_HINT_LEN: usize = 64;

/// Extract and validate the scope label from a `scope_<label>` user name.
///
/// Returns the label only when it is non-empty, at most
/// `MAX_SCOPE_HINT_LEN` bytes, and composed purely of ASCII alphanumerics
/// or `-`; anything else yields `None` so callers fall back to defaults.
fn validated_scope_hint(user: &str) -> Option<&str> {
    let scope = user.strip_prefix("scope_")?;
    let length_ok = !scope.is_empty() && scope.len() <= MAX_SCOPE_HINT_LEN;
    let charset_ok = scope
        .bytes()
        .all(|b| b.is_ascii_alphanumeric() || b == b'-');
    (length_ok && charset_ok).then_some(scope)
}
|
||||
|
||||
/// A log path that passed `sanitize_unknown_dc_log_path`: the canonicalized
/// directory it must stay inside plus the bare file name, kept separately so
/// the append path can re-verify and open relative to the parent.
#[derive(Clone)]
struct SanitizedUnknownDcLogPath {
    // Canonical parent joined with the file name (the write target).
    resolved_path: PathBuf,
    // Canonicalized directory the log file must remain in.
    allowed_parent: PathBuf,
    // File name component only; used with openat relative to the parent.
    file_name: OsString,
}
|
||||
|
||||
// In tests, this function shares global mutable state. Callers that also use
|
||||
// cache-reset helpers must hold `unknown_dc_test_lock()` to keep assertions
|
||||
// deterministic under parallel execution.
|
||||
fn should_log_unknown_dc(dc_idx: i16) -> bool {
|
||||
let set = LOGGED_UNKNOWN_DCS.get_or_init(|| Mutex::new(HashSet::new()));
|
||||
should_log_unknown_dc_with_set(set, dc_idx)
|
||||
}
|
||||
|
||||
fn should_log_unknown_dc_with_set(set: &Mutex<HashSet<i16>>, dc_idx: i16) -> bool {
|
||||
match set.lock() {
|
||||
Ok(mut guard) => {
|
||||
if guard.contains(&dc_idx) {
|
||||
return false;
|
||||
}
|
||||
if guard.len() >= UNKNOWN_DC_LOG_DISTINCT_LIMIT {
|
||||
return false;
|
||||
}
|
||||
guard.insert(dc_idx)
|
||||
}
|
||||
// Fail closed on poisoned state to avoid unbounded blocking log writes.
|
||||
Err(_) => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Validate and resolve a user-configured unknown-DC log path.
///
/// Rejects empty paths and any path containing a `..` component, resolves
/// the parent directory via `canonicalize` (pinning symlinked parents to
/// their real location), and requires the parent to be an existing
/// directory. Returns the anchored representation used by the append path,
/// or `None` when the path is unsafe.
fn sanitize_unknown_dc_log_path(path: &str) -> Option<SanitizedUnknownDcLogPath> {
    let candidate = Path::new(path);
    if candidate.as_os_str().is_empty() {
        return None;
    }
    // Refuse traversal outright instead of trying to normalize it away.
    if candidate
        .components()
        .any(|component| matches!(component, Component::ParentDir))
    {
        return None;
    }

    let cwd = std::env::current_dir().ok()?;
    let file_name = candidate.file_name()?;
    let parent = candidate.parent().unwrap_or_else(|| Path::new("."));
    let parent_path = if parent.is_absolute() {
        parent.to_path_buf()
    } else {
        cwd.join(parent)
    };
    // Canonicalization fails for nonexistent parents, which also fails closed.
    let canonical_parent = parent_path.canonicalize().ok()?;
    if !canonical_parent.is_dir() {
        return None;
    }

    Some(SanitizedUnknownDcLogPath {
        resolved_path: canonical_parent.join(file_name),
        allowed_parent: canonical_parent,
        file_name: file_name.to_os_string(),
    })
}
|
||||
|
||||
/// Re-check, at write time, that a previously sanitized log path still points
/// where it did at sanitize time (TOCTOU guard for the gap before the
/// blocking write runs).
///
/// Fails closed when the parent no longer canonicalizes to the recorded
/// directory, or when the target exists but canonicalizes to a different
/// parent or name (e.g. it was replaced by a symlink). A nonexistent target
/// is allowed — creation is handled by the O_NOFOLLOW open.
fn unknown_dc_log_path_is_still_safe(path: &SanitizedUnknownDcLogPath) -> bool {
    let Some(parent) = path.resolved_path.parent() else {
        return false;
    };
    let Ok(current_parent) = parent.canonicalize() else {
        return false;
    };
    if current_parent != path.allowed_parent {
        return false;
    }

    // If the target resolves, it must still be the same file in the same dir.
    if let Ok(canonical_target) = path.resolved_path.canonicalize() {
        let Some(target_parent) = canonical_target.parent() else {
            return false;
        };
        let Some(target_name) = canonical_target.file_name() else {
            return false;
        };
        if target_parent != path.allowed_parent || target_name != path.file_name {
            return false;
        }
    }

    true
}
|
||||
|
||||
/// Test-only append opener that refuses to traverse a symlink at the final
/// path component (`O_NOFOLLOW`). Non-unix builds fail closed with
/// `PermissionDenied` because the no-follow guarantee cannot be made there.
#[cfg(test)]
fn open_unknown_dc_log_append(path: &Path) -> std::io::Result<std::fs::File> {
    #[cfg(unix)]
    {
        OpenOptions::new()
            .create(true)
            .append(true)
            .custom_flags(libc::O_NOFOLLOW)
            .open(path)
    }
    #[cfg(not(unix))]
    {
        let _ = path;
        Err(std::io::Error::new(
            std::io::ErrorKind::PermissionDenied,
            "unknown_dc_file_log_enabled requires unix O_NOFOLLOW support",
        ))
    }
}
|
||||
|
||||
/// Open the unknown-DC log for append, anchored to the sanitized parent:
/// the directory is opened first with `O_DIRECTORY | O_NOFOLLOW`, then the
/// file is created/opened via `openat` relative to that handle, again with
/// `O_NOFOLLOW`, so neither component can be swapped for a symlink between
/// sanitization and the write. Mode 0o600 keeps the log private.
/// Non-unix builds fail closed with `PermissionDenied`.
fn open_unknown_dc_log_append_anchored(
    path: &SanitizedUnknownDcLogPath,
) -> std::io::Result<std::fs::File> {
    #[cfg(unix)]
    {
        let parent = OpenOptions::new()
            .read(true)
            .custom_flags(libc::O_DIRECTORY | libc::O_NOFOLLOW | libc::O_CLOEXEC)
            .open(&path.allowed_parent)?;

        let oflags = OFlag::O_CREAT
            | OFlag::O_APPEND
            | OFlag::O_WRONLY
            | OFlag::O_NOFOLLOW
            | OFlag::O_CLOEXEC;
        let mode = Mode::from_bits_truncate(0o600);
        let path_component = Path::new(path.file_name.as_os_str());
        let fd = openat(&parent, path_component, oflags, mode)
            .map_err(|err| std::io::Error::from_raw_os_error(err as i32))?;
        let file = std::fs::File::from(fd);
        Ok(file)
    }
    #[cfg(not(unix))]
    {
        let _ = path;
        Err(std::io::Error::new(
            std::io::ErrorKind::PermissionDenied,
            "unknown_dc_file_log_enabled requires unix O_NOFOLLOW support",
        ))
    }
}
|
||||
|
||||
/// Append one `dc_idx=N` line to the log. On unix the write happens under an
/// exclusive `flock` taken on a cloned descriptor so concurrent writers
/// sharing the file do not interleave lines; elsewhere it is a plain write.
fn append_unknown_dc_line(file: &mut std::fs::File, dc_idx: i16) -> std::io::Result<()> {
    #[cfg(unix)]
    {
        let cloned = file.try_clone()?;
        let mut locked = Flock::lock(cloned, FlockArg::LockExclusive)
            .map_err(|(_, err)| std::io::Error::from_raw_os_error(err as i32))?;
        let write_result = writeln!(&mut *locked, "dc_idx={dc_idx}");
        // Unlock errors surface via `?`; the returned file handle is dropped.
        let _ = locked
            .unlock()
            .map_err(|(_, err)| std::io::Error::from_raw_os_error(err as i32))?;
        write_result
    }
    #[cfg(not(unix))]
    {
        writeln!(file, "dc_idx={dc_idx}")
    }
}
|
||||
|
||||
/// Test helper: empty the process-wide set of already-logged DC indices so
/// each test starts from a clean slate. A poisoned lock is silently skipped.
#[cfg(test)]
fn clear_unknown_dc_log_cache_for_testing() {
    if let Some(set) = LOGGED_UNKNOWN_DCS.get()
        && let Ok(mut guard) = set.lock()
    {
        guard.clear();
    }
}
|
||||
|
||||
/// Process-wide mutex serializing tests that touch the unknown-DC log cache.
#[cfg(test)]
fn unknown_dc_test_lock() -> &'static Mutex<()> {
    static TEST_LOCK: OnceLock<Mutex<()>> = OnceLock::new();
    TEST_LOCK.get_or_init(Mutex::default)
}
|
||||
|
||||
pub(crate) async fn handle_via_direct<R, W>(
|
||||
client_reader: CryptoReader<R>,
|
||||
@@ -52,8 +255,15 @@ where
|
||||
"Connecting to Telegram DC"
|
||||
);
|
||||
|
||||
let scope_hint = validated_scope_hint(user);
|
||||
if user.starts_with("scope_") && scope_hint.is_none() {
|
||||
warn!(
|
||||
user = %user,
|
||||
"Ignoring invalid scope hint and falling back to default upstream selection"
|
||||
);
|
||||
}
|
||||
let tg_stream = upstream_manager
|
||||
.connect(dc_addr, Some(success.dc_idx), user.strip_prefix("scope_").filter(|s| !s.is_empty()))
|
||||
.connect(dc_addr, Some(success.dc_idx), scope_hint)
|
||||
.await?;
|
||||
|
||||
debug!(peer = %success.peer, dc_addr = %dc_addr, "Connected, performing TG handshake");
|
||||
@@ -64,8 +274,7 @@ where
|
||||
debug!(peer = %success.peer, "TG handshake complete, starting relay");
|
||||
|
||||
stats.increment_user_connects(user);
|
||||
stats.increment_user_curr_connects(user);
|
||||
stats.increment_current_connections_direct();
|
||||
let _direct_connection_lease = stats.acquire_direct_connection_lease();
|
||||
|
||||
let relay_result = relay_bidirectional(
|
||||
client_reader,
|
||||
@@ -76,15 +285,14 @@ where
|
||||
config.general.direct_relay_copy_buf_s2c_bytes,
|
||||
user,
|
||||
Arc::clone(&stats),
|
||||
config.access.user_data_quota.get(user).copied(),
|
||||
buffer_pool,
|
||||
);
|
||||
tokio::pin!(relay_result);
|
||||
let relay_result = loop {
|
||||
if let Some(cutover) = affected_cutover_state(
|
||||
&route_rx,
|
||||
RelayRouteMode::Direct,
|
||||
route_snapshot.generation,
|
||||
) {
|
||||
if let Some(cutover) =
|
||||
affected_cutover_state(&route_rx, RelayRouteMode::Direct, route_snapshot.generation)
|
||||
{
|
||||
let delay = cutover_stagger_delay(session_id, cutover.generation);
|
||||
warn!(
|
||||
user = %user,
|
||||
@@ -108,9 +316,6 @@ where
|
||||
}
|
||||
};
|
||||
|
||||
stats.decrement_current_connections_direct();
|
||||
stats.decrement_user_curr_connects(user);
|
||||
|
||||
match &relay_result {
|
||||
Ok(()) => debug!(user = %user, "Direct relay completed"),
|
||||
Err(e) => debug!(user = %user, error = %e, "Direct relay ended with error"),
|
||||
@@ -135,7 +340,9 @@ fn get_dc_addr_static(dc_idx: i16, config: &ProxyConfig) -> Result<SocketAddr> {
|
||||
for addr_str in addrs {
|
||||
match addr_str.parse::<SocketAddr>() {
|
||||
Ok(addr) => parsed.push(addr),
|
||||
Err(_) => warn!(dc_idx = dc_idx, addr_str = %addr_str, "Invalid DC override address in config, ignoring"),
|
||||
Err(_) => {
|
||||
warn!(dc_idx = dc_idx, addr_str = %addr_str, "Invalid DC override address in config, ignoring")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -157,17 +364,27 @@ fn get_dc_addr_static(dc_idx: i16, config: &ProxyConfig) -> Result<SocketAddr> {
|
||||
|
||||
// Unknown DC requested by client without override: log and fall back.
|
||||
if !config.dc_overrides.contains_key(&dc_key) {
|
||||
warn!(dc_idx = dc_idx, "Requested non-standard DC with no override; falling back to default cluster");
|
||||
warn!(
|
||||
dc_idx = dc_idx,
|
||||
"Requested non-standard DC with no override; falling back to default cluster"
|
||||
);
|
||||
if config.general.unknown_dc_file_log_enabled
|
||||
&& let Some(path) = &config.general.unknown_dc_log_path
|
||||
&& let Ok(handle) = tokio::runtime::Handle::try_current()
|
||||
{
|
||||
let path = path.clone();
|
||||
handle.spawn_blocking(move || {
|
||||
if let Ok(mut file) = OpenOptions::new().create(true).append(true).open(path) {
|
||||
let _ = writeln!(file, "dc_idx={dc_idx}");
|
||||
if let Some(path) = sanitize_unknown_dc_log_path(path) {
|
||||
if should_log_unknown_dc(dc_idx) {
|
||||
handle.spawn_blocking(move || {
|
||||
if unknown_dc_log_path_is_still_safe(&path)
|
||||
&& let Ok(mut file) = open_unknown_dc_log_append_anchored(&path)
|
||||
{
|
||||
let _ = append_unknown_dc_line(&mut file, dc_idx);
|
||||
}
|
||||
});
|
||||
}
|
||||
});
|
||||
} else {
|
||||
warn!(dc_idx = dc_idx, raw_path = %path, "Rejected unsafe unknown DC log path");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -175,7 +392,7 @@ fn get_dc_addr_static(dc_idx: i16, config: &ProxyConfig) -> Result<SocketAddr> {
|
||||
let fallback_idx = if default_dc >= 1 && default_dc <= num_dcs {
|
||||
default_dc - 1
|
||||
} else {
|
||||
1
|
||||
0
|
||||
};
|
||||
|
||||
info!(
|
||||
@@ -191,20 +408,18 @@ fn get_dc_addr_static(dc_idx: i16, config: &ProxyConfig) -> Result<SocketAddr> {
|
||||
))
|
||||
}
|
||||
|
||||
async fn do_tg_handshake_static(
|
||||
mut stream: TcpStream,
|
||||
async fn do_tg_handshake_static<S>(
|
||||
mut stream: S,
|
||||
success: &HandshakeSuccess,
|
||||
config: &ProxyConfig,
|
||||
rng: &SecureRandom,
|
||||
) -> Result<(
|
||||
CryptoReader<tokio::net::tcp::OwnedReadHalf>,
|
||||
CryptoWriter<tokio::net::tcp::OwnedWriteHalf>,
|
||||
)> {
|
||||
) -> Result<(CryptoReader<ReadHalf<S>>, CryptoWriter<WriteHalf<S>>)>
|
||||
where
|
||||
S: AsyncRead + AsyncWrite + Unpin,
|
||||
{
|
||||
let (nonce, _tg_enc_key, _tg_enc_iv, _tg_dec_key, _tg_dec_iv) = generate_tg_nonce(
|
||||
success.proto_tag,
|
||||
success.dc_idx,
|
||||
&success.dec_key,
|
||||
success.dec_iv,
|
||||
&success.enc_key,
|
||||
success.enc_iv,
|
||||
rng,
|
||||
@@ -222,7 +437,7 @@ async fn do_tg_handshake_static(
|
||||
stream.write_all(&encrypted_nonce).await?;
|
||||
stream.flush().await?;
|
||||
|
||||
let (read_half, write_half) = stream.into_split();
|
||||
let (read_half, write_half) = split(stream);
|
||||
|
||||
let max_pending = config.general.crypto_pending_buffer;
|
||||
Ok((
|
||||
@@ -230,3 +445,19 @@ async fn do_tg_handshake_static(
|
||||
CryptoWriter::new(write_half, tg_encryptor, max_pending),
|
||||
))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/direct_relay_security_tests.rs"]
|
||||
mod security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/direct_relay_business_logic_tests.rs"]
|
||||
mod business_logic_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/direct_relay_common_mistakes_tests.rs"]
|
||||
mod common_mistakes_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/direct_relay_subtle_adversarial_tests.rs"]
|
||||
mod subtle_adversarial_tests;
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,32 +1,231 @@
|
||||
//! Masking - forward unrecognized traffic to mask host
|
||||
|
||||
use std::str;
|
||||
use std::net::SocketAddr;
|
||||
use std::time::Duration;
|
||||
use tokio::net::TcpStream;
|
||||
#[cfg(unix)]
|
||||
use tokio::net::UnixStream;
|
||||
use tokio::io::{AsyncRead, AsyncWrite, AsyncReadExt, AsyncWriteExt};
|
||||
use tokio::time::timeout;
|
||||
use tracing::debug;
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::network::dns_overrides::resolve_socket_addr;
|
||||
use crate::stats::beobachten::BeobachtenStore;
|
||||
use crate::transport::proxy_protocol::{ProxyProtocolV1Builder, ProxyProtocolV2Builder};
|
||||
use rand::{Rng, RngExt};
|
||||
use std::net::SocketAddr;
|
||||
use std::str;
|
||||
use std::time::Duration;
|
||||
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
|
||||
use tokio::net::TcpStream;
|
||||
#[cfg(unix)]
|
||||
use tokio::net::UnixStream;
|
||||
use tokio::time::{Instant, timeout};
|
||||
use tracing::debug;
|
||||
|
||||
#[cfg(not(test))]
|
||||
const MASK_TIMEOUT: Duration = Duration::from_secs(5);
|
||||
#[cfg(test)]
|
||||
const MASK_TIMEOUT: Duration = Duration::from_millis(50);
|
||||
/// Maximum duration for the entire masking relay.
|
||||
/// Limits resource consumption from slow-loris attacks and port scanners.
|
||||
#[cfg(not(test))]
|
||||
const MASK_RELAY_TIMEOUT: Duration = Duration::from_secs(60);
|
||||
#[cfg(test)]
|
||||
const MASK_RELAY_TIMEOUT: Duration = Duration::from_millis(200);
|
||||
#[cfg(not(test))]
|
||||
const MASK_RELAY_IDLE_TIMEOUT: Duration = Duration::from_secs(5);
|
||||
#[cfg(test)]
|
||||
const MASK_RELAY_IDLE_TIMEOUT: Duration = Duration::from_millis(100);
|
||||
const MASK_BUFFER_SIZE: usize = 8192;
|
||||
|
||||
struct CopyOutcome {
|
||||
total: usize,
|
||||
ended_by_eof: bool,
|
||||
}
|
||||
|
||||
async fn copy_with_idle_timeout<R, W>(reader: &mut R, writer: &mut W) -> CopyOutcome
|
||||
where
|
||||
R: AsyncRead + Unpin,
|
||||
W: AsyncWrite + Unpin,
|
||||
{
|
||||
let mut buf = [0u8; MASK_BUFFER_SIZE];
|
||||
let mut total = 0usize;
|
||||
let mut ended_by_eof = false;
|
||||
loop {
|
||||
let read_res = timeout(MASK_RELAY_IDLE_TIMEOUT, reader.read(&mut buf)).await;
|
||||
let n = match read_res {
|
||||
Ok(Ok(n)) => n,
|
||||
Ok(Err(_)) | Err(_) => break,
|
||||
};
|
||||
if n == 0 {
|
||||
ended_by_eof = true;
|
||||
break;
|
||||
}
|
||||
total = total.saturating_add(n);
|
||||
|
||||
let write_res = timeout(MASK_RELAY_IDLE_TIMEOUT, writer.write_all(&buf[..n])).await;
|
||||
match write_res {
|
||||
Ok(Ok(())) => {}
|
||||
Ok(Err(_)) | Err(_) => break,
|
||||
}
|
||||
}
|
||||
CopyOutcome {
|
||||
total,
|
||||
ended_by_eof,
|
||||
}
|
||||
}
|
||||
|
||||
fn next_mask_shape_bucket(total: usize, floor: usize, cap: usize) -> usize {
|
||||
if total == 0 || floor == 0 || cap < floor {
|
||||
return total;
|
||||
}
|
||||
|
||||
if total >= cap {
|
||||
return total;
|
||||
}
|
||||
|
||||
let mut bucket = floor;
|
||||
while bucket < total {
|
||||
match bucket.checked_mul(2) {
|
||||
Some(next) => bucket = next,
|
||||
None => return total,
|
||||
}
|
||||
if bucket > cap {
|
||||
return cap;
|
||||
}
|
||||
}
|
||||
bucket
|
||||
}
|
||||
|
||||
async fn maybe_write_shape_padding<W>(
|
||||
mask_write: &mut W,
|
||||
total_sent: usize,
|
||||
enabled: bool,
|
||||
floor: usize,
|
||||
cap: usize,
|
||||
above_cap_blur: bool,
|
||||
above_cap_blur_max_bytes: usize,
|
||||
aggressive_mode: bool,
|
||||
) where
|
||||
W: AsyncWrite + Unpin,
|
||||
{
|
||||
if !enabled {
|
||||
return;
|
||||
}
|
||||
|
||||
let target_total = if total_sent >= cap && above_cap_blur && above_cap_blur_max_bytes > 0 {
|
||||
let mut rng = rand::rng();
|
||||
let extra = if aggressive_mode {
|
||||
rng.random_range(1..=above_cap_blur_max_bytes)
|
||||
} else {
|
||||
rng.random_range(0..=above_cap_blur_max_bytes)
|
||||
};
|
||||
total_sent.saturating_add(extra)
|
||||
} else {
|
||||
next_mask_shape_bucket(total_sent, floor, cap)
|
||||
};
|
||||
|
||||
if target_total <= total_sent {
|
||||
return;
|
||||
}
|
||||
|
||||
let mut remaining = target_total - total_sent;
|
||||
let mut pad_chunk = [0u8; 1024];
|
||||
let deadline = Instant::now() + MASK_TIMEOUT;
|
||||
|
||||
while remaining > 0 {
|
||||
let now = Instant::now();
|
||||
if now >= deadline {
|
||||
return;
|
||||
}
|
||||
|
||||
let write_len = remaining.min(pad_chunk.len());
|
||||
{
|
||||
let mut rng = rand::rng();
|
||||
rng.fill_bytes(&mut pad_chunk[..write_len]);
|
||||
}
|
||||
let write_budget = deadline.saturating_duration_since(now);
|
||||
match timeout(write_budget, mask_write.write_all(&pad_chunk[..write_len])).await {
|
||||
Ok(Ok(())) => {}
|
||||
Ok(Err(_)) | Err(_) => return,
|
||||
}
|
||||
remaining -= write_len;
|
||||
}
|
||||
|
||||
let now = Instant::now();
|
||||
if now >= deadline {
|
||||
return;
|
||||
}
|
||||
let flush_budget = deadline.saturating_duration_since(now);
|
||||
let _ = timeout(flush_budget, mask_write.flush()).await;
|
||||
}
|
||||
|
||||
async fn write_proxy_header_with_timeout<W>(mask_write: &mut W, header: &[u8]) -> bool
|
||||
where
|
||||
W: AsyncWrite + Unpin,
|
||||
{
|
||||
match timeout(MASK_TIMEOUT, mask_write.write_all(header)).await {
|
||||
Ok(Ok(())) => true,
|
||||
Ok(Err(_)) => false,
|
||||
Err(_) => {
|
||||
debug!("Timeout writing proxy protocol header to mask backend");
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn consume_client_data_with_timeout<R>(reader: R)
|
||||
where
|
||||
R: AsyncRead + Unpin,
|
||||
{
|
||||
if timeout(MASK_RELAY_TIMEOUT, consume_client_data(reader))
|
||||
.await
|
||||
.is_err()
|
||||
{
|
||||
debug!("Timed out while consuming client data on masking fallback path");
|
||||
}
|
||||
}
|
||||
|
||||
async fn wait_mask_connect_budget(started: Instant) {
|
||||
let elapsed = started.elapsed();
|
||||
if elapsed < MASK_TIMEOUT {
|
||||
tokio::time::sleep(MASK_TIMEOUT - elapsed).await;
|
||||
}
|
||||
}
|
||||
|
||||
fn mask_outcome_target_budget(config: &ProxyConfig) -> Duration {
|
||||
if config.censorship.mask_timing_normalization_enabled {
|
||||
let floor = config.censorship.mask_timing_normalization_floor_ms;
|
||||
let ceiling = config.censorship.mask_timing_normalization_ceiling_ms;
|
||||
if ceiling > floor {
|
||||
let mut rng = rand::rng();
|
||||
return Duration::from_millis(rng.random_range(floor..=ceiling));
|
||||
}
|
||||
return Duration::from_millis(floor);
|
||||
}
|
||||
|
||||
MASK_TIMEOUT
|
||||
}
|
||||
|
||||
async fn wait_mask_connect_budget_if_needed(started: Instant, config: &ProxyConfig) {
|
||||
if config.censorship.mask_timing_normalization_enabled {
|
||||
return;
|
||||
}
|
||||
|
||||
wait_mask_connect_budget(started).await;
|
||||
}
|
||||
|
||||
async fn wait_mask_outcome_budget(started: Instant, config: &ProxyConfig) {
|
||||
let target = mask_outcome_target_budget(config);
|
||||
let elapsed = started.elapsed();
|
||||
if elapsed < target {
|
||||
tokio::time::sleep(target - elapsed).await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Detect client type based on initial data
|
||||
fn detect_client_type(data: &[u8]) -> &'static str {
|
||||
// Check for HTTP request
|
||||
if data.len() > 4
|
||||
&& (data.starts_with(b"GET ") || data.starts_with(b"POST") ||
|
||||
data.starts_with(b"HEAD") || data.starts_with(b"PUT ") ||
|
||||
data.starts_with(b"DELETE") || data.starts_with(b"OPTIONS"))
|
||||
&& (data.starts_with(b"GET ")
|
||||
|| data.starts_with(b"POST")
|
||||
|| data.starts_with(b"HEAD")
|
||||
|| data.starts_with(b"PUT ")
|
||||
|| data.starts_with(b"DELETE")
|
||||
|| data.starts_with(b"OPTIONS"))
|
||||
{
|
||||
return "HTTP";
|
||||
}
|
||||
@@ -49,6 +248,33 @@ fn detect_client_type(data: &[u8]) -> &'static str {
|
||||
"unknown"
|
||||
}
|
||||
|
||||
fn build_mask_proxy_header(
|
||||
version: u8,
|
||||
peer: SocketAddr,
|
||||
local_addr: SocketAddr,
|
||||
) -> Option<Vec<u8>> {
|
||||
match version {
|
||||
0 => None,
|
||||
2 => Some(
|
||||
ProxyProtocolV2Builder::new()
|
||||
.with_addrs(peer, local_addr)
|
||||
.build(),
|
||||
),
|
||||
_ => {
|
||||
let header = match (peer, local_addr) {
|
||||
(SocketAddr::V4(src), SocketAddr::V4(dst)) => ProxyProtocolV1Builder::new()
|
||||
.tcp4(src.into(), dst.into())
|
||||
.build(),
|
||||
(SocketAddr::V6(src), SocketAddr::V6(dst)) => ProxyProtocolV1Builder::new()
|
||||
.tcp6(src.into(), dst.into())
|
||||
.build(),
|
||||
_ => ProxyProtocolV1Builder::new().build(),
|
||||
};
|
||||
Some(header)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle a bad client by forwarding to mask host
|
||||
pub async fn handle_bad_client<R, W>(
|
||||
reader: R,
|
||||
@@ -58,8 +284,7 @@ pub async fn handle_bad_client<R, W>(
|
||||
local_addr: SocketAddr,
|
||||
config: &ProxyConfig,
|
||||
beobachten: &BeobachtenStore,
|
||||
)
|
||||
where
|
||||
) where
|
||||
R: AsyncRead + Unpin + Send + 'static,
|
||||
W: AsyncWrite + Unpin + Send + 'static,
|
||||
{
|
||||
@@ -71,13 +296,15 @@ where
|
||||
|
||||
if !config.censorship.mask {
|
||||
// Masking disabled, just consume data
|
||||
consume_client_data(reader).await;
|
||||
consume_client_data_with_timeout(reader).await;
|
||||
return;
|
||||
}
|
||||
|
||||
// Connect via Unix socket or TCP
|
||||
#[cfg(unix)]
|
||||
if let Some(ref sock_path) = config.censorship.mask_unix_sock {
|
||||
let outcome_started = Instant::now();
|
||||
let connect_started = Instant::now();
|
||||
debug!(
|
||||
client_type = client_type,
|
||||
sock = %sock_path,
|
||||
@@ -89,45 +316,59 @@ where
|
||||
match connect_result {
|
||||
Ok(Ok(stream)) => {
|
||||
let (mask_read, mut mask_write) = stream.into_split();
|
||||
let proxy_header: Option<Vec<u8>> = match config.censorship.mask_proxy_protocol {
|
||||
0 => None,
|
||||
version => {
|
||||
let header = match version {
|
||||
2 => ProxyProtocolV2Builder::new().with_addrs(peer, local_addr).build(),
|
||||
_ => match (peer, local_addr) {
|
||||
(SocketAddr::V4(src), SocketAddr::V4(dst)) =>
|
||||
ProxyProtocolV1Builder::new().tcp4(src.into(), dst.into()).build(),
|
||||
(SocketAddr::V6(src), SocketAddr::V6(dst)) =>
|
||||
ProxyProtocolV1Builder::new().tcp6(src.into(), dst.into()).build(),
|
||||
_ =>
|
||||
ProxyProtocolV1Builder::new().build(),
|
||||
},
|
||||
};
|
||||
Some(header)
|
||||
}
|
||||
};
|
||||
if let Some(header) = proxy_header {
|
||||
if mask_write.write_all(&header).await.is_err() {
|
||||
return;
|
||||
}
|
||||
let proxy_header = build_mask_proxy_header(
|
||||
config.censorship.mask_proxy_protocol,
|
||||
peer,
|
||||
local_addr,
|
||||
);
|
||||
if let Some(header) = proxy_header
|
||||
&& !write_proxy_header_with_timeout(&mut mask_write, &header).await
|
||||
{
|
||||
wait_mask_outcome_budget(outcome_started, config).await;
|
||||
return;
|
||||
}
|
||||
if timeout(MASK_RELAY_TIMEOUT, relay_to_mask(reader, writer, mask_read, mask_write, initial_data)).await.is_err() {
|
||||
if timeout(
|
||||
MASK_RELAY_TIMEOUT,
|
||||
relay_to_mask(
|
||||
reader,
|
||||
writer,
|
||||
mask_read,
|
||||
mask_write,
|
||||
initial_data,
|
||||
config.censorship.mask_shape_hardening,
|
||||
config.censorship.mask_shape_bucket_floor_bytes,
|
||||
config.censorship.mask_shape_bucket_cap_bytes,
|
||||
config.censorship.mask_shape_above_cap_blur,
|
||||
config.censorship.mask_shape_above_cap_blur_max_bytes,
|
||||
config.censorship.mask_shape_hardening_aggressive_mode,
|
||||
),
|
||||
)
|
||||
.await
|
||||
.is_err()
|
||||
{
|
||||
debug!("Mask relay timed out (unix socket)");
|
||||
}
|
||||
wait_mask_outcome_budget(outcome_started, config).await;
|
||||
}
|
||||
Ok(Err(e)) => {
|
||||
wait_mask_connect_budget_if_needed(connect_started, config).await;
|
||||
debug!(error = %e, "Failed to connect to mask unix socket");
|
||||
consume_client_data(reader).await;
|
||||
consume_client_data_with_timeout(reader).await;
|
||||
wait_mask_outcome_budget(outcome_started, config).await;
|
||||
}
|
||||
Err(_) => {
|
||||
debug!("Timeout connecting to mask unix socket");
|
||||
consume_client_data(reader).await;
|
||||
consume_client_data_with_timeout(reader).await;
|
||||
wait_mask_outcome_budget(outcome_started, config).await;
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
let mask_host = config.censorship.mask_host.as_deref()
|
||||
let mask_host = config
|
||||
.censorship
|
||||
.mask_host
|
||||
.as_deref()
|
||||
.unwrap_or(&config.censorship.tls_domain);
|
||||
let mask_port = config.censorship.mask_port;
|
||||
|
||||
@@ -143,44 +384,54 @@ where
|
||||
let mask_addr = resolve_socket_addr(mask_host, mask_port)
|
||||
.map(|addr| addr.to_string())
|
||||
.unwrap_or_else(|| format!("{}:{}", mask_host, mask_port));
|
||||
let outcome_started = Instant::now();
|
||||
let connect_started = Instant::now();
|
||||
let connect_result = timeout(MASK_TIMEOUT, TcpStream::connect(&mask_addr)).await;
|
||||
match connect_result {
|
||||
Ok(Ok(stream)) => {
|
||||
let proxy_header: Option<Vec<u8>> = match config.censorship.mask_proxy_protocol {
|
||||
0 => None,
|
||||
version => {
|
||||
let header = match version {
|
||||
2 => ProxyProtocolV2Builder::new().with_addrs(peer, local_addr).build(),
|
||||
_ => match (peer, local_addr) {
|
||||
(SocketAddr::V4(src), SocketAddr::V4(dst)) =>
|
||||
ProxyProtocolV1Builder::new().tcp4(src.into(), dst.into()).build(),
|
||||
(SocketAddr::V6(src), SocketAddr::V6(dst)) =>
|
||||
ProxyProtocolV1Builder::new().tcp6(src.into(), dst.into()).build(),
|
||||
_ =>
|
||||
ProxyProtocolV1Builder::new().build(),
|
||||
},
|
||||
};
|
||||
Some(header)
|
||||
}
|
||||
};
|
||||
let proxy_header =
|
||||
build_mask_proxy_header(config.censorship.mask_proxy_protocol, peer, local_addr);
|
||||
|
||||
let (mask_read, mut mask_write) = stream.into_split();
|
||||
if let Some(header) = proxy_header {
|
||||
if mask_write.write_all(&header).await.is_err() {
|
||||
return;
|
||||
}
|
||||
if let Some(header) = proxy_header
|
||||
&& !write_proxy_header_with_timeout(&mut mask_write, &header).await
|
||||
{
|
||||
wait_mask_outcome_budget(outcome_started, config).await;
|
||||
return;
|
||||
}
|
||||
if timeout(MASK_RELAY_TIMEOUT, relay_to_mask(reader, writer, mask_read, mask_write, initial_data)).await.is_err() {
|
||||
if timeout(
|
||||
MASK_RELAY_TIMEOUT,
|
||||
relay_to_mask(
|
||||
reader,
|
||||
writer,
|
||||
mask_read,
|
||||
mask_write,
|
||||
initial_data,
|
||||
config.censorship.mask_shape_hardening,
|
||||
config.censorship.mask_shape_bucket_floor_bytes,
|
||||
config.censorship.mask_shape_bucket_cap_bytes,
|
||||
config.censorship.mask_shape_above_cap_blur,
|
||||
config.censorship.mask_shape_above_cap_blur_max_bytes,
|
||||
config.censorship.mask_shape_hardening_aggressive_mode,
|
||||
),
|
||||
)
|
||||
.await
|
||||
.is_err()
|
||||
{
|
||||
debug!("Mask relay timed out");
|
||||
}
|
||||
wait_mask_outcome_budget(outcome_started, config).await;
|
||||
}
|
||||
Ok(Err(e)) => {
|
||||
wait_mask_connect_budget_if_needed(connect_started, config).await;
|
||||
debug!(error = %e, "Failed to connect to mask host");
|
||||
consume_client_data(reader).await;
|
||||
consume_client_data_with_timeout(reader).await;
|
||||
wait_mask_outcome_budget(outcome_started, config).await;
|
||||
}
|
||||
Err(_) => {
|
||||
debug!("Timeout connecting to mask host");
|
||||
consume_client_data(reader).await;
|
||||
consume_client_data_with_timeout(reader).await;
|
||||
wait_mask_outcome_budget(outcome_started, config).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -192,8 +443,13 @@ async fn relay_to_mask<R, W, MR, MW>(
|
||||
mut mask_read: MR,
|
||||
mut mask_write: MW,
|
||||
initial_data: &[u8],
|
||||
)
|
||||
where
|
||||
shape_hardening_enabled: bool,
|
||||
shape_bucket_floor_bytes: usize,
|
||||
shape_bucket_cap_bytes: usize,
|
||||
shape_above_cap_blur: bool,
|
||||
shape_above_cap_blur_max_bytes: usize,
|
||||
shape_hardening_aggressive_mode: bool,
|
||||
) where
|
||||
R: AsyncRead + Unpin + Send + 'static,
|
||||
W: AsyncWrite + Unpin + Send + 'static,
|
||||
MR: AsyncRead + Unpin + Send + 'static,
|
||||
@@ -203,47 +459,36 @@ where
|
||||
if mask_write.write_all(initial_data).await.is_err() {
|
||||
return;
|
||||
}
|
||||
|
||||
// Relay traffic
|
||||
let c2m = tokio::spawn(async move {
|
||||
let mut buf = vec![0u8; MASK_BUFFER_SIZE];
|
||||
loop {
|
||||
match reader.read(&mut buf).await {
|
||||
Ok(0) | Err(_) => {
|
||||
let _ = mask_write.shutdown().await;
|
||||
break;
|
||||
}
|
||||
Ok(n) => {
|
||||
if mask_write.write_all(&buf[..n]).await.is_err() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
let m2c = tokio::spawn(async move {
|
||||
let mut buf = vec![0u8; MASK_BUFFER_SIZE];
|
||||
loop {
|
||||
match mask_read.read(&mut buf).await {
|
||||
Ok(0) | Err(_) => {
|
||||
let _ = writer.shutdown().await;
|
||||
break;
|
||||
}
|
||||
Ok(n) => {
|
||||
if writer.write_all(&buf[..n]).await.is_err() {
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
// Wait for either to complete
|
||||
tokio::select! {
|
||||
_ = c2m => {}
|
||||
_ = m2c => {}
|
||||
if mask_write.flush().await.is_err() {
|
||||
return;
|
||||
}
|
||||
|
||||
let (upstream_copy, downstream_copy) = tokio::join!(
|
||||
async { copy_with_idle_timeout(&mut reader, &mut mask_write).await },
|
||||
async { copy_with_idle_timeout(&mut mask_read, &mut writer).await }
|
||||
);
|
||||
|
||||
let total_sent = initial_data.len().saturating_add(upstream_copy.total);
|
||||
|
||||
let should_shape = shape_hardening_enabled
|
||||
&& !initial_data.is_empty()
|
||||
&& (upstream_copy.ended_by_eof
|
||||
|| (shape_hardening_aggressive_mode && downstream_copy.total == 0));
|
||||
|
||||
maybe_write_shape_padding(
|
||||
&mut mask_write,
|
||||
total_sent,
|
||||
should_shape,
|
||||
shape_bucket_floor_bytes,
|
||||
shape_bucket_cap_bytes,
|
||||
shape_above_cap_blur,
|
||||
shape_above_cap_blur_max_bytes,
|
||||
shape_hardening_aggressive_mode,
|
||||
)
|
||||
.await;
|
||||
|
||||
let _ = mask_write.shutdown().await;
|
||||
let _ = writer.shutdown().await;
|
||||
}
|
||||
|
||||
/// Just consume all data from client without responding
|
||||
@@ -255,3 +500,51 @@ async fn consume_client_data<R: AsyncRead + Unpin>(mut reader: R) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_security_tests.rs"]
|
||||
mod security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_adversarial_tests.rs"]
|
||||
mod adversarial_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_shape_hardening_adversarial_tests.rs"]
|
||||
mod masking_shape_hardening_adversarial_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_shape_above_cap_blur_security_tests.rs"]
|
||||
mod masking_shape_above_cap_blur_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_timing_normalization_security_tests.rs"]
|
||||
mod masking_timing_normalization_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_ab_envelope_blur_integration_security_tests.rs"]
|
||||
mod masking_ab_envelope_blur_integration_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_shape_guard_security_tests.rs"]
|
||||
mod masking_shape_guard_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_shape_guard_adversarial_tests.rs"]
|
||||
mod masking_shape_guard_adversarial_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_shape_classifier_resistance_adversarial_tests.rs"]
|
||||
mod masking_shape_classifier_resistance_adversarial_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_shape_bypass_blackhat_tests.rs"]
|
||||
mod masking_shape_bypass_blackhat_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_aggressive_mode_security_tests.rs"]
|
||||
mod masking_aggressive_mode_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/masking_timing_sidechannel_redteam_expected_fail_tests.rs"]
|
||||
mod masking_timing_sidechannel_redteam_expected_fail_tests;
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,12 +1,72 @@
|
||||
//! Proxy Defs
|
||||
|
||||
// Apply strict linting to proxy production code while keeping test builds noise-tolerant.
|
||||
#![cfg_attr(test, allow(warnings))]
|
||||
#![cfg_attr(not(test), forbid(clippy::undocumented_unsafe_blocks))]
|
||||
#![cfg_attr(
|
||||
not(test),
|
||||
deny(
|
||||
clippy::unwrap_used,
|
||||
clippy::expect_used,
|
||||
clippy::panic,
|
||||
clippy::todo,
|
||||
clippy::unimplemented,
|
||||
clippy::correctness,
|
||||
clippy::option_if_let_else,
|
||||
clippy::or_fun_call,
|
||||
clippy::branches_sharing_code,
|
||||
clippy::single_option_map,
|
||||
clippy::useless_let_if_seq,
|
||||
clippy::redundant_locals,
|
||||
clippy::cloned_ref_to_slice_refs,
|
||||
unsafe_code,
|
||||
clippy::await_holding_lock,
|
||||
clippy::await_holding_refcell_ref,
|
||||
clippy::debug_assert_with_mut_call,
|
||||
clippy::macro_use_imports,
|
||||
clippy::cast_ptr_alignment,
|
||||
clippy::cast_lossless,
|
||||
clippy::ptr_as_ptr,
|
||||
clippy::large_stack_arrays,
|
||||
clippy::same_functions_in_if_condition,
|
||||
trivial_casts,
|
||||
trivial_numeric_casts,
|
||||
unused_extern_crates,
|
||||
unused_import_braces,
|
||||
rust_2018_idioms
|
||||
)
|
||||
)]
|
||||
#![cfg_attr(
|
||||
not(test),
|
||||
allow(
|
||||
clippy::use_self,
|
||||
clippy::redundant_closure,
|
||||
clippy::too_many_arguments,
|
||||
clippy::doc_markdown,
|
||||
clippy::missing_const_for_fn,
|
||||
clippy::unnecessary_operation,
|
||||
clippy::redundant_pub_crate,
|
||||
clippy::derive_partial_eq_without_eq,
|
||||
clippy::type_complexity,
|
||||
clippy::new_ret_no_self,
|
||||
clippy::cast_possible_truncation,
|
||||
clippy::cast_possible_wrap,
|
||||
clippy::significant_drop_tightening,
|
||||
clippy::significant_drop_in_scrutinee,
|
||||
clippy::float_cmp,
|
||||
clippy::nursery
|
||||
)
|
||||
)]
|
||||
|
||||
pub mod adaptive_buffers;
|
||||
pub mod client;
|
||||
pub mod direct_relay;
|
||||
pub mod handshake;
|
||||
pub mod masking;
|
||||
pub mod middle_relay;
|
||||
pub mod route_mode;
|
||||
pub mod relay;
|
||||
pub mod route_mode;
|
||||
pub mod session_eviction;
|
||||
|
||||
pub use client::ClientHandler;
|
||||
#[allow(unused_imports)]
|
||||
|
||||
@@ -51,20 +51,19 @@
|
||||
//! - `poll_write` on client = S→C (to client) → `octets_to`, `msgs_to`
|
||||
//! - `SharedCounters` (atomics) let the watchdog read stats without locking
|
||||
|
||||
use std::io;
|
||||
use std::pin::Pin;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::task::{Context, Poll};
|
||||
use std::time::Duration;
|
||||
use tokio::io::{
|
||||
AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf, copy_bidirectional_with_sizes,
|
||||
};
|
||||
use tokio::time::Instant;
|
||||
use tracing::{debug, trace, warn};
|
||||
use crate::error::Result;
|
||||
use crate::error::{ProxyError, Result};
|
||||
use crate::stats::Stats;
|
||||
use crate::stream::BufferPool;
|
||||
use dashmap::DashMap;
|
||||
use std::io;
|
||||
use std::pin::Pin;
|
||||
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
|
||||
use std::sync::{Arc, Mutex, OnceLock};
|
||||
use std::task::{Context, Poll};
|
||||
use std::time::Duration;
|
||||
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadBuf, copy_bidirectional_with_sizes};
|
||||
use tokio::time::Instant;
|
||||
use tracing::{debug, trace, warn};
|
||||
|
||||
// ============= Constants =============
|
||||
|
||||
@@ -80,6 +79,11 @@ const ACTIVITY_TIMEOUT: Duration = Duration::from_secs(1800);
|
||||
/// without measurable overhead from atomic reads.
|
||||
const WATCHDOG_INTERVAL: Duration = Duration::from_secs(10);
|
||||
|
||||
#[inline]
|
||||
fn watchdog_delta(current: u64, previous: u64) -> u64 {
|
||||
current.saturating_sub(previous)
|
||||
}
|
||||
|
||||
// ============= CombinedStream =============
|
||||
|
||||
/// Combines separate read and write halves into a single bidirectional stream.
|
||||
@@ -205,6 +209,12 @@ struct StatsIo<S> {
|
||||
counters: Arc<SharedCounters>,
|
||||
stats: Arc<Stats>,
|
||||
user: String,
|
||||
quota_limit: Option<u64>,
|
||||
quota_exceeded: Arc<AtomicBool>,
|
||||
quota_read_wake_scheduled: bool,
|
||||
quota_write_wake_scheduled: bool,
|
||||
quota_read_retry_active: Arc<AtomicBool>,
|
||||
quota_write_retry_active: Arc<AtomicBool>,
|
||||
epoch: Instant,
|
||||
}
|
||||
|
||||
@@ -214,11 +224,136 @@ impl<S> StatsIo<S> {
|
||||
counters: Arc<SharedCounters>,
|
||||
stats: Arc<Stats>,
|
||||
user: String,
|
||||
quota_limit: Option<u64>,
|
||||
quota_exceeded: Arc<AtomicBool>,
|
||||
epoch: Instant,
|
||||
) -> Self {
|
||||
// Mark initial activity so the watchdog doesn't fire before data flows
|
||||
counters.touch(Instant::now(), epoch);
|
||||
Self { inner, counters, stats, user, epoch }
|
||||
Self {
|
||||
inner,
|
||||
counters,
|
||||
stats,
|
||||
user,
|
||||
quota_limit,
|
||||
quota_exceeded,
|
||||
quota_read_wake_scheduled: false,
|
||||
quota_write_wake_scheduled: false,
|
||||
quota_read_retry_active: Arc::new(AtomicBool::new(false)),
|
||||
quota_write_retry_active: Arc::new(AtomicBool::new(false)),
|
||||
epoch,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> Drop for StatsIo<S> {
|
||||
fn drop(&mut self) {
|
||||
self.quota_read_retry_active.store(false, Ordering::Relaxed);
|
||||
self.quota_write_retry_active
|
||||
.store(false, Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct QuotaIoSentinel;
|
||||
|
||||
impl std::fmt::Display for QuotaIoSentinel {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.write_str("user data quota exceeded")
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for QuotaIoSentinel {}
|
||||
|
||||
fn quota_io_error() -> io::Error {
|
||||
io::Error::new(io::ErrorKind::PermissionDenied, QuotaIoSentinel)
|
||||
}
|
||||
|
||||
fn is_quota_io_error(err: &io::Error) -> bool {
|
||||
err.kind() == io::ErrorKind::PermissionDenied
|
||||
&& err
|
||||
.get_ref()
|
||||
.and_then(|source| source.downcast_ref::<QuotaIoSentinel>())
|
||||
.is_some()
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
const QUOTA_CONTENTION_RETRY_INTERVAL: Duration = Duration::from_millis(1);
|
||||
#[cfg(not(test))]
|
||||
const QUOTA_CONTENTION_RETRY_INTERVAL: Duration = Duration::from_millis(2);
|
||||
|
||||
fn spawn_quota_retry_waker(retry_active: Arc<AtomicBool>, waker: std::task::Waker) {
|
||||
tokio::task::spawn(async move {
|
||||
loop {
|
||||
if !retry_active.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
tokio::time::sleep(QUOTA_CONTENTION_RETRY_INTERVAL).await;
|
||||
if !retry_active.load(Ordering::Relaxed) {
|
||||
break;
|
||||
}
|
||||
waker.wake_by_ref();
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
static QUOTA_USER_LOCKS: OnceLock<DashMap<String, Arc<Mutex<()>>>> = OnceLock::new();
|
||||
static QUOTA_USER_OVERFLOW_LOCKS: OnceLock<Vec<Arc<Mutex<()>>>> = OnceLock::new();
|
||||
|
||||
#[cfg(test)]
|
||||
const QUOTA_USER_LOCKS_MAX: usize = 64;
|
||||
#[cfg(not(test))]
|
||||
const QUOTA_USER_LOCKS_MAX: usize = 4_096;
|
||||
#[cfg(test)]
|
||||
const QUOTA_OVERFLOW_LOCK_STRIPES: usize = 16;
|
||||
#[cfg(not(test))]
|
||||
const QUOTA_OVERFLOW_LOCK_STRIPES: usize = 256;
|
||||
|
||||
#[cfg(test)]
|
||||
fn quota_user_lock_test_guard() -> &'static Mutex<()> {
|
||||
static TEST_LOCK: OnceLock<Mutex<()>> = OnceLock::new();
|
||||
TEST_LOCK.get_or_init(|| Mutex::new(()))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn quota_user_lock_test_scope() -> std::sync::MutexGuard<'static, ()> {
|
||||
quota_user_lock_test_guard()
|
||||
.lock()
|
||||
.unwrap_or_else(|poisoned| poisoned.into_inner())
|
||||
}
|
||||
|
||||
fn quota_overflow_user_lock(user: &str) -> Arc<Mutex<()>> {
|
||||
let stripes = QUOTA_USER_OVERFLOW_LOCKS.get_or_init(|| {
|
||||
(0..QUOTA_OVERFLOW_LOCK_STRIPES)
|
||||
.map(|_| Arc::new(Mutex::new(())))
|
||||
.collect()
|
||||
});
|
||||
|
||||
let hash = crc32fast::hash(user.as_bytes()) as usize;
|
||||
Arc::clone(&stripes[hash % stripes.len()])
|
||||
}
|
||||
|
||||
fn quota_user_lock(user: &str) -> Arc<Mutex<()>> {
|
||||
let locks = QUOTA_USER_LOCKS.get_or_init(DashMap::new);
|
||||
if let Some(existing) = locks.get(user) {
|
||||
return Arc::clone(existing.value());
|
||||
}
|
||||
|
||||
if locks.len() >= QUOTA_USER_LOCKS_MAX {
|
||||
locks.retain(|_, value| Arc::strong_count(value) > 1);
|
||||
}
|
||||
|
||||
if locks.len() >= QUOTA_USER_LOCKS_MAX {
|
||||
return quota_overflow_user_lock(user);
|
||||
}
|
||||
|
||||
let created = Arc::new(Mutex::new(()));
|
||||
match locks.entry(user.to_string()) {
|
||||
dashmap::mapref::entry::Entry::Occupied(entry) => Arc::clone(entry.get()),
|
||||
dashmap::mapref::entry::Entry::Vacant(entry) => {
|
||||
entry.insert(Arc::clone(&created));
|
||||
created
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -229,20 +364,82 @@ impl<S: AsyncRead + Unpin> AsyncRead for StatsIo<S> {
|
||||
buf: &mut ReadBuf<'_>,
|
||||
) -> Poll<io::Result<()>> {
|
||||
let this = self.get_mut();
|
||||
if this.quota_exceeded.load(Ordering::Relaxed) {
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
}
|
||||
|
||||
let quota_lock = this
|
||||
.quota_limit
|
||||
.is_some()
|
||||
.then(|| quota_user_lock(&this.user));
|
||||
let _quota_guard = if let Some(lock) = quota_lock.as_ref() {
|
||||
match lock.try_lock() {
|
||||
Ok(guard) => {
|
||||
this.quota_read_wake_scheduled = false;
|
||||
this.quota_read_retry_active.store(false, Ordering::Relaxed);
|
||||
Some(guard)
|
||||
}
|
||||
Err(_) => {
|
||||
if !this.quota_read_wake_scheduled {
|
||||
this.quota_read_wake_scheduled = true;
|
||||
this.quota_read_retry_active.store(true, Ordering::Relaxed);
|
||||
spawn_quota_retry_waker(
|
||||
Arc::clone(&this.quota_read_retry_active),
|
||||
cx.waker().clone(),
|
||||
);
|
||||
}
|
||||
return Poll::Pending;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
if let Some(limit) = this.quota_limit
|
||||
&& this.stats.get_user_total_octets(&this.user) >= limit
|
||||
{
|
||||
this.quota_exceeded.store(true, Ordering::Relaxed);
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
}
|
||||
let before = buf.filled().len();
|
||||
|
||||
match Pin::new(&mut this.inner).poll_read(cx, buf) {
|
||||
Poll::Ready(Ok(())) => {
|
||||
let n = buf.filled().len() - before;
|
||||
if n > 0 {
|
||||
let mut reached_quota_boundary = false;
|
||||
if let Some(limit) = this.quota_limit {
|
||||
let used = this.stats.get_user_total_octets(&this.user);
|
||||
if used >= limit {
|
||||
this.quota_exceeded.store(true, Ordering::Relaxed);
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
}
|
||||
|
||||
let remaining = limit - used;
|
||||
if (n as u64) > remaining {
|
||||
// Fail closed: when a single read chunk would cross quota,
|
||||
// stop relay immediately without accounting beyond the cap.
|
||||
this.quota_exceeded.store(true, Ordering::Relaxed);
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
}
|
||||
|
||||
reached_quota_boundary = (n as u64) == remaining;
|
||||
}
|
||||
|
||||
// C→S: client sent data
|
||||
this.counters.c2s_bytes.fetch_add(n as u64, Ordering::Relaxed);
|
||||
this.counters
|
||||
.c2s_bytes
|
||||
.fetch_add(n as u64, Ordering::Relaxed);
|
||||
this.counters.c2s_ops.fetch_add(1, Ordering::Relaxed);
|
||||
this.counters.touch(Instant::now(), this.epoch);
|
||||
|
||||
this.stats.add_user_octets_from(&this.user, n as u64);
|
||||
this.stats.increment_user_msgs_from(&this.user);
|
||||
|
||||
if reached_quota_boundary {
|
||||
this.quota_exceeded.store(true, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
trace!(user = %this.user, bytes = n, "C->S");
|
||||
}
|
||||
Poll::Ready(Ok(()))
|
||||
@@ -259,18 +456,77 @@ impl<S: AsyncWrite + Unpin> AsyncWrite for StatsIo<S> {
|
||||
buf: &[u8],
|
||||
) -> Poll<io::Result<usize>> {
|
||||
let this = self.get_mut();
|
||||
if this.quota_exceeded.load(Ordering::Relaxed) {
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
}
|
||||
|
||||
match Pin::new(&mut this.inner).poll_write(cx, buf) {
|
||||
let quota_lock = this
|
||||
.quota_limit
|
||||
.is_some()
|
||||
.then(|| quota_user_lock(&this.user));
|
||||
let _quota_guard = if let Some(lock) = quota_lock.as_ref() {
|
||||
match lock.try_lock() {
|
||||
Ok(guard) => {
|
||||
this.quota_write_wake_scheduled = false;
|
||||
this.quota_write_retry_active
|
||||
.store(false, Ordering::Relaxed);
|
||||
Some(guard)
|
||||
}
|
||||
Err(_) => {
|
||||
if !this.quota_write_wake_scheduled {
|
||||
this.quota_write_wake_scheduled = true;
|
||||
this.quota_write_retry_active.store(true, Ordering::Relaxed);
|
||||
spawn_quota_retry_waker(
|
||||
Arc::clone(&this.quota_write_retry_active),
|
||||
cx.waker().clone(),
|
||||
);
|
||||
}
|
||||
return Poll::Pending;
|
||||
}
|
||||
}
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let write_buf = if let Some(limit) = this.quota_limit {
|
||||
let used = this.stats.get_user_total_octets(&this.user);
|
||||
if used >= limit {
|
||||
this.quota_exceeded.store(true, Ordering::Relaxed);
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
}
|
||||
|
||||
let remaining = (limit - used) as usize;
|
||||
if buf.len() > remaining {
|
||||
// Fail closed: do not emit partial S->C payload when remaining
|
||||
// quota cannot accommodate the pending write request.
|
||||
this.quota_exceeded.store(true, Ordering::Relaxed);
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
}
|
||||
buf
|
||||
} else {
|
||||
buf
|
||||
};
|
||||
|
||||
match Pin::new(&mut this.inner).poll_write(cx, write_buf) {
|
||||
Poll::Ready(Ok(n)) => {
|
||||
if n > 0 {
|
||||
// S→C: data written to client
|
||||
this.counters.s2c_bytes.fetch_add(n as u64, Ordering::Relaxed);
|
||||
this.counters
|
||||
.s2c_bytes
|
||||
.fetch_add(n as u64, Ordering::Relaxed);
|
||||
this.counters.s2c_ops.fetch_add(1, Ordering::Relaxed);
|
||||
this.counters.touch(Instant::now(), this.epoch);
|
||||
|
||||
this.stats.add_user_octets_to(&this.user, n as u64);
|
||||
this.stats.increment_user_msgs_to(&this.user);
|
||||
|
||||
if let Some(limit) = this.quota_limit
|
||||
&& this.stats.get_user_total_octets(&this.user) >= limit
|
||||
{
|
||||
this.quota_exceeded.store(true, Ordering::Relaxed);
|
||||
return Poll::Ready(Err(quota_io_error()));
|
||||
}
|
||||
|
||||
trace!(user = %this.user, bytes = n, "S->C");
|
||||
}
|
||||
Poll::Ready(Ok(n))
|
||||
@@ -307,7 +563,8 @@ impl<S: AsyncWrite + Unpin> AsyncWrite for StatsIo<S> {
|
||||
/// - Per-user stats: bytes and ops counted per direction
|
||||
/// - Periodic rate logging: every 10 seconds when active
|
||||
/// - Clean shutdown: both write sides are shut down on exit
|
||||
/// - Error propagation: I/O errors are returned as `ProxyError::Io`
|
||||
/// - Error propagation: quota exits return `ProxyError::DataQuotaExceeded`,
|
||||
/// other I/O failures are returned as `ProxyError::Io`
|
||||
pub async fn relay_bidirectional<CR, CW, SR, SW>(
|
||||
client_reader: CR,
|
||||
client_writer: CW,
|
||||
@@ -317,6 +574,7 @@ pub async fn relay_bidirectional<CR, CW, SR, SW>(
|
||||
s2c_buf_size: usize,
|
||||
user: &str,
|
||||
stats: Arc<Stats>,
|
||||
quota_limit: Option<u64>,
|
||||
_buffer_pool: Arc<BufferPool>,
|
||||
) -> Result<()>
|
||||
where
|
||||
@@ -327,6 +585,7 @@ where
|
||||
{
|
||||
let epoch = Instant::now();
|
||||
let counters = Arc::new(SharedCounters::new());
|
||||
let quota_exceeded = Arc::new(AtomicBool::new(false));
|
||||
let user_owned = user.to_string();
|
||||
|
||||
// ── Combine split halves into bidirectional streams ──────────────
|
||||
@@ -339,12 +598,15 @@ where
|
||||
Arc::clone(&counters),
|
||||
Arc::clone(&stats),
|
||||
user_owned.clone(),
|
||||
quota_limit,
|
||||
Arc::clone("a_exceeded),
|
||||
epoch,
|
||||
);
|
||||
|
||||
// ── Watchdog: activity timeout + periodic rate logging ──────────
|
||||
let wd_counters = Arc::clone(&counters);
|
||||
let wd_user = user_owned.clone();
|
||||
let wd_quota_exceeded = Arc::clone("a_exceeded);
|
||||
|
||||
let watchdog = async {
|
||||
let mut prev_c2s: u64 = 0;
|
||||
@@ -356,6 +618,11 @@ where
|
||||
let now = Instant::now();
|
||||
let idle = wd_counters.idle_duration(now, epoch);
|
||||
|
||||
if wd_quota_exceeded.load(Ordering::Relaxed) {
|
||||
warn!(user = %wd_user, "User data quota reached, closing relay");
|
||||
return;
|
||||
}
|
||||
|
||||
// ── Activity timeout ────────────────────────────────────
|
||||
if idle >= ACTIVITY_TIMEOUT {
|
||||
let c2s = wd_counters.c2s_bytes.load(Ordering::Relaxed);
|
||||
@@ -373,8 +640,8 @@ where
|
||||
// ── Periodic rate logging ───────────────────────────────
|
||||
let c2s = wd_counters.c2s_bytes.load(Ordering::Relaxed);
|
||||
let s2c = wd_counters.s2c_bytes.load(Ordering::Relaxed);
|
||||
let c2s_delta = c2s - prev_c2s;
|
||||
let s2c_delta = s2c - prev_s2c;
|
||||
let c2s_delta = watchdog_delta(c2s, prev_c2s);
|
||||
let s2c_delta = watchdog_delta(s2c, prev_s2c);
|
||||
|
||||
if c2s_delta > 0 || s2c_delta > 0 {
|
||||
let secs = WATCHDOG_INTERVAL.as_secs_f64();
|
||||
@@ -439,6 +706,22 @@ where
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
Some(Err(e)) if is_quota_io_error(&e) => {
|
||||
let c2s = counters.c2s_bytes.load(Ordering::Relaxed);
|
||||
let s2c = counters.s2c_bytes.load(Ordering::Relaxed);
|
||||
warn!(
|
||||
user = %user_owned,
|
||||
c2s_bytes = c2s,
|
||||
s2c_bytes = s2c,
|
||||
c2s_msgs = c2s_ops,
|
||||
s2c_msgs = s2c_ops,
|
||||
duration_secs = duration.as_secs(),
|
||||
"Data quota reached, closing relay"
|
||||
);
|
||||
Err(ProxyError::DataQuotaExceeded {
|
||||
user: user_owned.clone(),
|
||||
})
|
||||
}
|
||||
Some(Err(e)) => {
|
||||
// I/O error in one of the directions
|
||||
let c2s = counters.c2s_bytes.load(Ordering::Relaxed);
|
||||
@@ -472,3 +755,39 @@ where
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_security_tests.rs"]
|
||||
mod security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_adversarial_tests.rs"]
|
||||
mod adversarial_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_quota_lock_pressure_adversarial_tests.rs"]
|
||||
mod relay_quota_lock_pressure_adversarial_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_quota_boundary_blackhat_tests.rs"]
|
||||
mod relay_quota_boundary_blackhat_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_quota_model_adversarial_tests.rs"]
|
||||
mod relay_quota_model_adversarial_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_quota_overflow_regression_tests.rs"]
|
||||
mod relay_quota_overflow_regression_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_watchdog_delta_security_tests.rs"]
|
||||
mod relay_watchdog_delta_security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_quota_waker_storm_adversarial_tests.rs"]
|
||||
mod relay_quota_waker_storm_adversarial_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/relay_quota_wake_liveness_regression_tests.rs"]
|
||||
mod relay_quota_wake_liveness_regression_tests;
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicU8, AtomicU64, Ordering};
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
||||
|
||||
use tokio::sync::watch;
|
||||
|
||||
pub(crate) const ROUTE_SWITCH_ERROR_MSG: &str = "Route mode switched by cutover";
|
||||
pub(crate) const ROUTE_SWITCH_ERROR_MSG: &str = "Session terminated";
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
|
||||
#[repr(u8)]
|
||||
@@ -14,17 +14,6 @@ pub(crate) enum RelayRouteMode {
|
||||
}
|
||||
|
||||
impl RelayRouteMode {
|
||||
pub(crate) fn as_u8(self) -> u8 {
|
||||
self as u8
|
||||
}
|
||||
|
||||
pub(crate) fn from_u8(value: u8) -> Self {
|
||||
match value {
|
||||
1 => Self::Middle,
|
||||
_ => Self::Direct,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn as_str(self) -> &'static str {
|
||||
match self {
|
||||
Self::Direct => "direct",
|
||||
@@ -41,8 +30,6 @@ pub(crate) struct RouteCutoverState {
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct RouteRuntimeController {
|
||||
mode: Arc<AtomicU8>,
|
||||
generation: Arc<AtomicU64>,
|
||||
direct_since_epoch_secs: Arc<AtomicU64>,
|
||||
tx: watch::Sender<RouteCutoverState>,
|
||||
}
|
||||
@@ -60,18 +47,13 @@ impl RouteRuntimeController {
|
||||
0
|
||||
};
|
||||
Self {
|
||||
mode: Arc::new(AtomicU8::new(initial_mode.as_u8())),
|
||||
generation: Arc::new(AtomicU64::new(0)),
|
||||
direct_since_epoch_secs: Arc::new(AtomicU64::new(direct_since_epoch_secs)),
|
||||
tx,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn snapshot(&self) -> RouteCutoverState {
|
||||
RouteCutoverState {
|
||||
mode: RelayRouteMode::from_u8(self.mode.load(Ordering::Relaxed)),
|
||||
generation: self.generation.load(Ordering::Relaxed),
|
||||
}
|
||||
*self.tx.borrow()
|
||||
}
|
||||
|
||||
pub(crate) fn subscribe(&self) -> watch::Receiver<RouteCutoverState> {
|
||||
@@ -84,20 +66,28 @@ impl RouteRuntimeController {
|
||||
}
|
||||
|
||||
pub(crate) fn set_mode(&self, mode: RelayRouteMode) -> Option<RouteCutoverState> {
|
||||
let previous = self.mode.swap(mode.as_u8(), Ordering::Relaxed);
|
||||
if previous == mode.as_u8() {
|
||||
let mut next = None;
|
||||
let changed = self.tx.send_if_modified(|state| {
|
||||
if state.mode == mode {
|
||||
return false;
|
||||
}
|
||||
if matches!(mode, RelayRouteMode::Direct) {
|
||||
self.direct_since_epoch_secs
|
||||
.store(now_epoch_secs(), Ordering::Relaxed);
|
||||
} else {
|
||||
self.direct_since_epoch_secs.store(0, Ordering::Relaxed);
|
||||
}
|
||||
state.mode = mode;
|
||||
state.generation = state.generation.saturating_add(1);
|
||||
next = Some(*state);
|
||||
true
|
||||
});
|
||||
|
||||
if !changed {
|
||||
return None;
|
||||
}
|
||||
if matches!(mode, RelayRouteMode::Direct) {
|
||||
self.direct_since_epoch_secs
|
||||
.store(now_epoch_secs(), Ordering::Relaxed);
|
||||
} else {
|
||||
self.direct_since_epoch_secs.store(0, Ordering::Relaxed);
|
||||
}
|
||||
let generation = self.generation.fetch_add(1, Ordering::Relaxed) + 1;
|
||||
let next = RouteCutoverState { mode, generation };
|
||||
self.tx.send_replace(next);
|
||||
Some(next)
|
||||
|
||||
next
|
||||
}
|
||||
}
|
||||
|
||||
@@ -110,10 +100,10 @@ fn now_epoch_secs() -> u64 {
|
||||
|
||||
pub(crate) fn is_session_affected_by_cutover(
|
||||
current: RouteCutoverState,
|
||||
_session_mode: RelayRouteMode,
|
||||
session_mode: RelayRouteMode,
|
||||
session_generation: u64,
|
||||
) -> bool {
|
||||
current.generation > session_generation
|
||||
current.generation > session_generation && current.mode != session_mode
|
||||
}
|
||||
|
||||
pub(crate) fn affected_cutover_state(
|
||||
@@ -129,9 +119,7 @@ pub(crate) fn affected_cutover_state(
|
||||
}
|
||||
|
||||
pub(crate) fn cutover_stagger_delay(session_id: u64, generation: u64) -> Duration {
|
||||
let mut value = session_id
|
||||
^ generation.rotate_left(17)
|
||||
^ 0x9e37_79b9_7f4a_7c15;
|
||||
let mut value = session_id ^ generation.rotate_left(17) ^ 0x9e37_79b9_7f4a_7c15;
|
||||
value ^= value >> 30;
|
||||
value = value.wrapping_mul(0xbf58_476d_1ce4_e5b9);
|
||||
value ^= value >> 27;
|
||||
@@ -140,3 +128,11 @@ pub(crate) fn cutover_stagger_delay(session_id: u64, generation: u64) -> Duratio
|
||||
let ms = 1000 + (value % 1000);
|
||||
Duration::from_millis(ms)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/route_mode_security_tests.rs"]
|
||||
mod security_tests;
|
||||
|
||||
#[cfg(test)]
|
||||
#[path = "tests/route_mode_coherence_adversarial_tests.rs"]
|
||||
mod coherence_adversarial_tests;
|
||||
|
||||
48
src/proxy/session_eviction.rs
Normal file
48
src/proxy/session_eviction.rs
Normal file
@@ -0,0 +1,48 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
/// Session eviction is intentionally disabled in runtime.
|
||||
///
|
||||
/// The initial `user+dc` single-lease model caused valid parallel client
|
||||
/// connections to evict each other. Keep the API shape for compatibility,
|
||||
/// but make it a no-op until a safer policy is introduced.
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct SessionLease;
|
||||
|
||||
impl SessionLease {
|
||||
pub fn is_stale(&self) -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn release(&self) {}
|
||||
}
|
||||
|
||||
pub struct RegistrationResult {
|
||||
pub lease: SessionLease,
|
||||
pub replaced_existing: bool,
|
||||
}
|
||||
|
||||
pub fn register_session(_user: &str, _dc_idx: i16) -> RegistrationResult {
|
||||
RegistrationResult {
|
||||
lease: SessionLease,
|
||||
replaced_existing: false,
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_session_eviction_disabled_behavior() {
|
||||
let first = register_session("alice", 2);
|
||||
let second = register_session("alice", 2);
|
||||
assert!(!first.replaced_existing);
|
||||
assert!(!second.replaced_existing);
|
||||
assert!(!first.lease.is_stale());
|
||||
assert!(!second.lease.is_stale());
|
||||
first.lease.release();
|
||||
second.lease.release();
|
||||
}
|
||||
}
|
||||
714
src/proxy/tests/client_adversarial_tests.rs
Normal file
714
src/proxy/tests/client_adversarial_tests.rs
Normal file
@@ -0,0 +1,714 @@
|
||||
use super::*;
|
||||
use crate::config::ProxyConfig;
|
||||
use crate::error::ProxyError;
|
||||
use crate::ip_tracker::UserIpTracker;
|
||||
use crate::stats::Stats;
|
||||
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// Priority 3: Massive Concurrency Stress (OWASP ASVS 5.1.6)
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
#[tokio::test]
|
||||
async fn client_stress_10k_connections_limit_strict() {
|
||||
let user = "stress-user";
|
||||
let limit = 512;
|
||||
|
||||
let stats = Arc::new(Stats::new());
|
||||
let ip_tracker = Arc::new(UserIpTracker::new());
|
||||
|
||||
let mut config = ProxyConfig::default();
|
||||
config
|
||||
.access
|
||||
.user_max_tcp_conns
|
||||
.insert(user.to_string(), limit);
|
||||
|
||||
let iterations = 1000;
|
||||
let mut tasks = Vec::new();
|
||||
|
||||
for i in 0..iterations {
|
||||
let stats = Arc::clone(&stats);
|
||||
let ip_tracker = Arc::clone(&ip_tracker);
|
||||
let config = config.clone();
|
||||
let user_str = user.to_string();
|
||||
|
||||
tasks.push(tokio::spawn(async move {
|
||||
let peer = SocketAddr::new(
|
||||
IpAddr::V4(Ipv4Addr::new(127, 0, 0, (i % 254 + 1) as u8)),
|
||||
10000 + (i % 1000) as u16,
|
||||
);
|
||||
|
||||
match RunningClientHandler::acquire_user_connection_reservation_static(
|
||||
&user_str, &config, stats, peer, ip_tracker,
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(res) => Ok(res),
|
||||
Err(ProxyError::ConnectionLimitExceeded { .. }) => Err(()),
|
||||
Err(e) => panic!("Unexpected error: {:?}", e),
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
let results = futures::future::join_all(tasks).await;
|
||||
let mut successes = 0;
|
||||
let mut failures = 0;
|
||||
let mut reservations = Vec::new();
|
||||
|
||||
for res in results {
|
||||
match res.unwrap() {
|
||||
Ok(r) => {
|
||||
successes += 1;
|
||||
reservations.push(r);
|
||||
}
|
||||
Err(_) => failures += 1,
|
||||
}
|
||||
}
|
||||
|
||||
assert_eq!(successes, limit, "Should allow exactly 'limit' connections");
|
||||
assert_eq!(
|
||||
failures,
|
||||
iterations - limit,
|
||||
"Should fail the rest with LimitExceeded"
|
||||
);
|
||||
assert_eq!(stats.get_user_curr_connects(user), limit as u64);
|
||||
|
||||
drop(reservations);
|
||||
|
||||
ip_tracker.drain_cleanup_queue().await;
|
||||
|
||||
assert_eq!(
|
||||
stats.get_user_curr_connects(user),
|
||||
0,
|
||||
"Stats must converge to 0 after all drops"
|
||||
);
|
||||
assert_eq!(
|
||||
ip_tracker.get_active_ip_count(user).await,
|
||||
0,
|
||||
"IP tracker must converge to 0"
|
||||
);
|
||||
}
|
||||
|
||||
// ------------------------------------------------------------------
|
||||
// Priority 3: IP Tracker Race Stress
|
||||
// ------------------------------------------------------------------
|
||||
|
||||
#[tokio::test]
|
||||
async fn client_ip_tracker_race_condition_stress() {
|
||||
let user = "race-user";
|
||||
let ip_tracker = Arc::new(UserIpTracker::new());
|
||||
ip_tracker.set_user_limit(user, 100).await;
|
||||
|
||||
let iterations = 1000;
|
||||
let mut tasks = Vec::new();
|
||||
|
||||
for i in 0..iterations {
|
||||
let ip_tracker = Arc::clone(&ip_tracker);
|
||||
let ip = IpAddr::V4(Ipv4Addr::new(10, 0, 0, (i % 254 + 1) as u8));
|
||||
|
||||
tasks.push(tokio::spawn(async move {
|
||||
for _ in 0..10 {
|
||||
if let Ok(()) = ip_tracker.check_and_add("race-user", ip).await {
|
||||
ip_tracker.remove_ip("race-user", ip).await;
|
||||
}
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
futures::future::join_all(tasks).await;
|
||||
|
||||
assert_eq!(
|
||||
ip_tracker.get_active_ip_count(user).await,
|
||||
0,
|
||||
"IP count must be zero after balanced add/remove burst"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn client_limit_burst_peak_never_exceeds_cap() {
|
||||
let user = "peak-cap-user";
|
||||
let limit = 32;
|
||||
let attempts = 256;
|
||||
|
||||
let stats = Arc::new(Stats::new());
|
||||
let ip_tracker = Arc::new(UserIpTracker::new());
|
||||
|
||||
let mut config = ProxyConfig::default();
|
||||
config
|
||||
.access
|
||||
.user_max_tcp_conns
|
||||
.insert(user.to_string(), limit);
|
||||
|
||||
let peak = Arc::new(AtomicU64::new(0));
|
||||
let mut tasks = Vec::with_capacity(attempts);
|
||||
|
||||
for i in 0..attempts {
|
||||
let stats = Arc::clone(&stats);
|
||||
let ip_tracker = Arc::clone(&ip_tracker);
|
||||
let config = config.clone();
|
||||
let peak = Arc::clone(&peak);
|
||||
|
||||
tasks.push(tokio::spawn(async move {
|
||||
let peer = SocketAddr::new(
|
||||
IpAddr::V4(Ipv4Addr::new(203, 0, 113, (i % 250 + 1) as u8)),
|
||||
20000 + i as u16,
|
||||
);
|
||||
|
||||
let acquired = RunningClientHandler::acquire_user_connection_reservation_static(
|
||||
user,
|
||||
&config,
|
||||
stats.clone(),
|
||||
peer,
|
||||
ip_tracker,
|
||||
)
|
||||
.await;
|
||||
|
||||
if let Ok(reservation) = acquired {
|
||||
let now = stats.get_user_curr_connects(user);
|
||||
loop {
|
||||
let prev = peak.load(Ordering::Relaxed);
|
||||
if now <= prev {
|
||||
break;
|
||||
}
|
||||
if peak
|
||||
.compare_exchange(prev, now, Ordering::Relaxed, Ordering::Relaxed)
|
||||
.is_ok()
|
||||
{
|
||||
break;
|
||||
}
|
||||
}
|
||||
tokio::time::sleep(Duration::from_millis(2)).await;
|
||||
drop(reservation);
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
futures::future::join_all(tasks).await;
|
||||
ip_tracker.drain_cleanup_queue().await;
|
||||
|
||||
assert!(
|
||||
peak.load(Ordering::Relaxed) <= limit as u64,
|
||||
"peak concurrent reservations must not exceed configured cap"
|
||||
);
|
||||
assert_eq!(stats.get_user_curr_connects(user), 0);
|
||||
assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn client_quota_rejection_never_mutates_live_counters() {
|
||||
let user = "quota-reject-user";
|
||||
let stats = Arc::new(Stats::new());
|
||||
let ip_tracker = Arc::new(UserIpTracker::new());
|
||||
|
||||
let mut config = ProxyConfig::default();
|
||||
config.access.user_data_quota.insert(user.to_string(), 0);
|
||||
|
||||
let peer: SocketAddr = "198.51.100.201:31111".parse().unwrap();
|
||||
let res = RunningClientHandler::acquire_user_connection_reservation_static(
|
||||
user,
|
||||
&config,
|
||||
stats.clone(),
|
||||
peer,
|
||||
ip_tracker.clone(),
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(matches!(res, Err(ProxyError::DataQuotaExceeded { .. })));
|
||||
assert_eq!(stats.get_user_curr_connects(user), 0);
|
||||
assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn client_expiration_rejection_never_mutates_live_counters() {
|
||||
let user = "expired-user";
|
||||
let stats = Arc::new(Stats::new());
|
||||
let ip_tracker = Arc::new(UserIpTracker::new());
|
||||
|
||||
let mut config = ProxyConfig::default();
|
||||
config.access.user_expirations.insert(
|
||||
user.to_string(),
|
||||
chrono::Utc::now() - chrono::Duration::seconds(1),
|
||||
);
|
||||
|
||||
let peer: SocketAddr = "198.51.100.202:31112".parse().unwrap();
|
||||
let res = RunningClientHandler::acquire_user_connection_reservation_static(
|
||||
user,
|
||||
&config,
|
||||
stats.clone(),
|
||||
peer,
|
||||
ip_tracker.clone(),
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(matches!(res, Err(ProxyError::UserExpired { .. })));
|
||||
assert_eq!(stats.get_user_curr_connects(user), 0);
|
||||
assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn client_ip_limit_failure_rolls_back_counter_exactly() {
|
||||
let user = "ip-limit-rollback-user";
|
||||
let stats = Arc::new(Stats::new());
|
||||
let ip_tracker = Arc::new(UserIpTracker::new());
|
||||
ip_tracker.set_user_limit(user, 1).await;
|
||||
|
||||
let mut config = ProxyConfig::default();
|
||||
config
|
||||
.access
|
||||
.user_max_tcp_conns
|
||||
.insert(user.to_string(), 16);
|
||||
|
||||
let first_peer: SocketAddr = "198.51.100.203:31113".parse().unwrap();
|
||||
let first = RunningClientHandler::acquire_user_connection_reservation_static(
|
||||
user,
|
||||
&config,
|
||||
stats.clone(),
|
||||
first_peer,
|
||||
ip_tracker.clone(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let second_peer: SocketAddr = "198.51.100.204:31114".parse().unwrap();
|
||||
let second = RunningClientHandler::acquire_user_connection_reservation_static(
|
||||
user,
|
||||
&config,
|
||||
stats.clone(),
|
||||
second_peer,
|
||||
ip_tracker.clone(),
|
||||
)
|
||||
.await;
|
||||
|
||||
assert!(matches!(
|
||||
second,
|
||||
Err(ProxyError::ConnectionLimitExceeded { .. })
|
||||
));
|
||||
assert_eq!(stats.get_user_curr_connects(user), 1);
|
||||
|
||||
drop(first);
|
||||
ip_tracker.drain_cleanup_queue().await;
|
||||
|
||||
assert_eq!(stats.get_user_curr_connects(user), 0);
|
||||
assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn client_parallel_limit_checks_success_path_leaves_no_residue() {
|
||||
let user = "parallel-check-success-user";
|
||||
let stats = Arc::new(Stats::new());
|
||||
let ip_tracker = Arc::new(UserIpTracker::new());
|
||||
ip_tracker.set_user_limit(user, 128).await;
|
||||
|
||||
let mut config = ProxyConfig::default();
|
||||
config
|
||||
.access
|
||||
.user_max_tcp_conns
|
||||
.insert(user.to_string(), 128);
|
||||
|
||||
let mut tasks = Vec::new();
|
||||
for i in 0..128u16 {
|
||||
let stats = Arc::clone(&stats);
|
||||
let ip_tracker = Arc::clone(&ip_tracker);
|
||||
let config = config.clone();
|
||||
|
||||
tasks.push(tokio::spawn(async move {
|
||||
let peer = SocketAddr::new(
|
||||
IpAddr::V4(Ipv4Addr::new(10, 10, (i / 255) as u8, (i % 255 + 1) as u8)),
|
||||
32000 + i,
|
||||
);
|
||||
RunningClientHandler::check_user_limits_static(user, &config, &stats, peer, &ip_tracker)
|
||||
.await
|
||||
}));
|
||||
}
|
||||
|
||||
for result in futures::future::join_all(tasks).await {
|
||||
assert!(result.unwrap().is_ok());
|
||||
}
|
||||
|
||||
assert_eq!(stats.get_user_curr_connects(user), 0);
|
||||
assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn client_parallel_limit_checks_failure_path_leaves_no_residue() {
|
||||
let user = "parallel-check-failure-user";
|
||||
let stats = Arc::new(Stats::new());
|
||||
let ip_tracker = Arc::new(UserIpTracker::new());
|
||||
ip_tracker.set_user_limit(user, 0).await;
|
||||
|
||||
let mut config = ProxyConfig::default();
|
||||
config
|
||||
.access
|
||||
.user_max_tcp_conns
|
||||
.insert(user.to_string(), 512);
|
||||
|
||||
let mut tasks = Vec::new();
|
||||
for i in 0..64u16 {
|
||||
let stats = Arc::clone(&stats);
|
||||
let ip_tracker = Arc::clone(&ip_tracker);
|
||||
let config = config.clone();
|
||||
|
||||
tasks.push(tokio::spawn(async move {
|
||||
let peer = SocketAddr::new(
|
||||
IpAddr::V4(Ipv4Addr::new(172, 16, 0, (i % 250 + 1) as u8)),
|
||||
33000 + i,
|
||||
);
|
||||
RunningClientHandler::check_user_limits_static(user, &config, &stats, peer, &ip_tracker)
|
||||
.await
|
||||
}));
|
||||
}
|
||||
|
||||
let mut _denied = 0usize;
|
||||
for result in futures::future::join_all(tasks).await {
|
||||
match result.unwrap() {
|
||||
Ok(()) => {}
|
||||
Err(ProxyError::ConnectionLimitExceeded { .. }) => _denied += 1,
|
||||
Err(other) => panic!("unexpected error: {other}"),
|
||||
}
|
||||
}
|
||||
|
||||
assert_eq!(stats.get_user_curr_connects(user), 0);
|
||||
assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn client_churn_mixed_success_failure_converges_to_zero_state() {
|
||||
let user = "mixed-churn-user";
|
||||
let stats = Arc::new(Stats::new());
|
||||
let ip_tracker = Arc::new(UserIpTracker::new());
|
||||
ip_tracker.set_user_limit(user, 4).await;
|
||||
|
||||
let mut config = ProxyConfig::default();
|
||||
config.access.user_max_tcp_conns.insert(user.to_string(), 8);
|
||||
|
||||
let mut tasks = Vec::new();
|
||||
for i in 0..200u16 {
|
||||
let stats = Arc::clone(&stats);
|
||||
let ip_tracker = Arc::clone(&ip_tracker);
|
||||
let config = config.clone();
|
||||
|
||||
tasks.push(tokio::spawn(async move {
|
||||
let peer = SocketAddr::new(
|
||||
IpAddr::V4(Ipv4Addr::new(192, 0, 2, (i % 16 + 1) as u8)),
|
||||
34000 + (i % 32),
|
||||
);
|
||||
let maybe_res = RunningClientHandler::acquire_user_connection_reservation_static(
|
||||
user, &config, stats, peer, ip_tracker,
|
||||
)
|
||||
.await;
|
||||
|
||||
if let Ok(reservation) = maybe_res {
|
||||
tokio::time::sleep(Duration::from_millis((i % 3) as u64)).await;
|
||||
drop(reservation);
|
||||
}
|
||||
}));
|
||||
}
|
||||
|
||||
futures::future::join_all(tasks).await;
|
||||
ip_tracker.drain_cleanup_queue().await;
|
||||
|
||||
assert_eq!(stats.get_user_curr_connects(user), 0);
|
||||
assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn client_same_ip_parallel_attempts_allow_at_most_one_when_limit_is_one() {
|
||||
let user = "same-ip-user";
|
||||
let stats = Arc::new(Stats::new());
|
||||
let ip_tracker = Arc::new(UserIpTracker::new());
|
||||
ip_tracker.set_user_limit(user, 1).await;
|
||||
|
||||
let mut config = ProxyConfig::default();
|
||||
config.access.user_max_tcp_conns.insert(user.to_string(), 1);
|
||||
|
||||
let peer: SocketAddr = "203.0.113.44:35555".parse().unwrap();
|
||||
let mut tasks = Vec::new();
|
||||
|
||||
for _ in 0..64 {
|
||||
let stats = Arc::clone(&stats);
|
||||
let ip_tracker = Arc::clone(&ip_tracker);
|
||||
let config = config.clone();
|
||||
tasks.push(tokio::spawn(async move {
|
||||
RunningClientHandler::acquire_user_connection_reservation_static(
|
||||
user, &config, stats, peer, ip_tracker,
|
||||
)
|
||||
.await
|
||||
}));
|
||||
}
|
||||
|
||||
let mut granted = 0usize;
|
||||
let mut reservations = Vec::new();
|
||||
for result in futures::future::join_all(tasks).await {
|
||||
match result.unwrap() {
|
||||
Ok(reservation) => {
|
||||
granted += 1;
|
||||
reservations.push(reservation);
|
||||
}
|
||||
Err(ProxyError::ConnectionLimitExceeded { .. }) => {}
|
||||
Err(other) => panic!("unexpected error: {other}"),
|
||||
}
|
||||
}
|
||||
|
||||
assert_eq!(
|
||||
granted, 1,
|
||||
"only one reservation may be granted for same IP with limit=1"
|
||||
);
|
||||
drop(reservations);
|
||||
ip_tracker.drain_cleanup_queue().await;
|
||||
assert_eq!(stats.get_user_curr_connects(user), 0);
|
||||
assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn client_repeat_acquire_release_cycles_never_accumulate_state() {
|
||||
let user = "repeat-cycle-user";
|
||||
let stats = Arc::new(Stats::new());
|
||||
let ip_tracker = Arc::new(UserIpTracker::new());
|
||||
ip_tracker.set_user_limit(user, 32).await;
|
||||
|
||||
let mut config = ProxyConfig::default();
|
||||
config
|
||||
.access
|
||||
.user_max_tcp_conns
|
||||
.insert(user.to_string(), 32);
|
||||
|
||||
for i in 0..500u16 {
|
||||
let peer = SocketAddr::new(
|
||||
IpAddr::V4(Ipv4Addr::new(198, 18, (i / 250) as u8, (i % 250 + 1) as u8)),
|
||||
36000 + (i % 128),
|
||||
);
|
||||
let reservation = RunningClientHandler::acquire_user_connection_reservation_static(
|
||||
user,
|
||||
&config,
|
||||
stats.clone(),
|
||||
peer,
|
||||
ip_tracker.clone(),
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
drop(reservation);
|
||||
}
|
||||
|
||||
ip_tracker.drain_cleanup_queue().await;
|
||||
assert_eq!(stats.get_user_curr_connects(user), 0);
|
||||
assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
|
||||
}
|
||||
|
||||
/// Spawn 128 concurrent acquisition attempts alternating between two users,
/// each capped at 8 TCP connections, and verify that each user is granted
/// exactly its own cap — i.e. one user's exhaustion never bleeds into the
/// other's quota — and that all state drains to zero after release.
#[tokio::test]
async fn client_multi_user_isolation_under_parallel_limit_exhaustion() {
    let stats = Arc::new(Stats::new());
    let ip_tracker = Arc::new(UserIpTracker::new());

    let mut config = ProxyConfig::default();
    config.access.user_max_tcp_conns.insert("u1".to_string(), 8);
    config.access.user_max_tcp_conns.insert("u2".to_string(), 8);

    let mut tasks = Vec::new();
    for i in 0..128u16 {
        let stats = Arc::clone(&stats);
        let ip_tracker = Arc::clone(&ip_tracker);
        let config = config.clone();
        tasks.push(tokio::spawn(async move {
            // Even task indices go to u1, odd to u2 — 64 attempts per user.
            let user = if i % 2 == 0 { "u1" } else { "u2" };
            let peer = SocketAddr::new(
                IpAddr::V4(Ipv4Addr::new(100, 64, (i / 64) as u8, (i % 64 + 1) as u8)),
                37000 + i,
            );
            RunningClientHandler::acquire_user_connection_reservation_static(
                user, &config, stats, peer, ip_tracker,
            )
            .await
        }));
    }

    let mut u1_success = 0usize;
    let mut u2_success = 0usize;
    let mut reservations = Vec::new();
    for (idx, result) in futures::future::join_all(tasks)
        .await
        .into_iter()
        .enumerate()
    {
        // join_all preserves spawn order, so idx parity recovers the user.
        let user = if idx % 2 == 0 { "u1" } else { "u2" };
        match result.unwrap() {
            Ok(reservation) => {
                if user == "u1" {
                    u1_success += 1;
                } else {
                    u2_success += 1;
                }
                // Hold guards so slots stay occupied until the assertions.
                reservations.push(reservation);
            }
            // Limit rejections are the expected failure mode here.
            Err(ProxyError::ConnectionLimitExceeded { .. }) => {}
            Err(other) => panic!("unexpected error: {other}"),
        }
    }

    assert_eq!(u1_success, 8, "u1 must get exactly its own configured cap");
    assert_eq!(u2_success, 8, "u2 must get exactly its own configured cap");

    drop(reservations);
    ip_tracker.drain_cleanup_queue().await;
    assert_eq!(stats.get_user_curr_connects("u1"), 0);
    assert_eq!(stats.get_user_curr_connects("u2"), 0);
}
|
||||
|
||||
/// With a limit of 1, hold the single slot, fire 64 sequential acquisition
/// attempts that must all be rejected, then drop the holder and verify a new
/// peer can immediately acquire — i.e. a rejection wave leaves no stuck state
/// that would block recovery.
#[tokio::test]
async fn client_limit_recovery_after_full_rejection_wave() {
    let user = "recover-user";
    let stats = Arc::new(Stats::new());
    let ip_tracker = Arc::new(UserIpTracker::new());
    ip_tracker.set_user_limit(user, 1).await;

    let mut config = ProxyConfig::default();
    config.access.user_max_tcp_conns.insert(user.to_string(), 1);

    // Occupy the single available slot.
    let first_peer: SocketAddr = "198.51.100.50:38001".parse().unwrap();
    let reservation = RunningClientHandler::acquire_user_connection_reservation_static(
        user,
        &config,
        stats.clone(),
        first_peer,
        ip_tracker.clone(),
    )
    .await
    .unwrap();

    // Every further attempt — from 60 rotating IPs and distinct ports —
    // must fail with the limit error, never anything else.
    for i in 0..64u16 {
        let peer = SocketAddr::new(
            IpAddr::V4(Ipv4Addr::new(198, 51, 100, (i % 60 + 1) as u8)),
            38002 + i,
        );
        let denied = RunningClientHandler::acquire_user_connection_reservation_static(
            user,
            &config,
            stats.clone(),
            peer,
            ip_tracker.clone(),
        )
        .await;
        assert!(matches!(
            denied,
            Err(ProxyError::ConnectionLimitExceeded { .. })
        ));
    }

    // Release the holder and flush the async cleanup path.
    drop(reservation);
    ip_tracker.drain_cleanup_queue().await;
    assert_eq!(stats.get_user_curr_connects(user), 0);

    // A brand-new peer must now succeed.
    let recovery_peer: SocketAddr = "198.51.100.200:38999".parse().unwrap();
    let recovered = RunningClientHandler::acquire_user_connection_reservation_static(
        user,
        &config,
        stats.clone(),
        recovery_peer,
        ip_tracker.clone(),
    )
    .await;
    assert!(
        recovered.is_ok(),
        "capacity must recover after prior holder drops"
    );
}
|
||||
|
||||
/// Both the per-user connection limit and the IP-tracker limit are set to 2.
/// After filling both slots, 32 rejected attempts must not perturb the live
/// counter (stays at 2), and dropping both holders must drain everything to
/// zero — rejections must not half-commit against either limit.
#[tokio::test]
async fn client_dual_limit_cross_product_never_leaks_on_reject() {
    let user = "dual-limit-user";
    let stats = Arc::new(Stats::new());
    let ip_tracker = Arc::new(UserIpTracker::new());
    ip_tracker.set_user_limit(user, 2).await;

    let mut config = ProxyConfig::default();
    config.access.user_max_tcp_conns.insert(user.to_string(), 2);

    // Fill both slots from two distinct peers.
    let p1: SocketAddr = "203.0.113.10:39001".parse().unwrap();
    let p2: SocketAddr = "203.0.113.11:39002".parse().unwrap();
    let r1 = RunningClientHandler::acquire_user_connection_reservation_static(
        user,
        &config,
        stats.clone(),
        p1,
        ip_tracker.clone(),
    )
    .await
    .unwrap();
    let r2 = RunningClientHandler::acquire_user_connection_reservation_static(
        user,
        &config,
        stats.clone(),
        p2,
        ip_tracker.clone(),
    )
    .await
    .unwrap();

    // 32 further peers must all be rejected with the limit error.
    for i in 0..32u16 {
        let peer = SocketAddr::new(
            IpAddr::V4(Ipv4Addr::new(203, 0, 113, (50 + i) as u8)),
            39010 + i,
        );
        let denied = RunningClientHandler::acquire_user_connection_reservation_static(
            user,
            &config,
            stats.clone(),
            peer,
            ip_tracker.clone(),
        )
        .await;
        assert!(matches!(
            denied,
            Err(ProxyError::ConnectionLimitExceeded { .. })
        ));
    }

    // Rejections must not have nudged the live counter in either direction.
    assert_eq!(stats.get_user_curr_connects(user), 2);
    drop((r1, r2));
    ip_tracker.drain_cleanup_queue().await;
    assert_eq!(stats.get_user_curr_connects(user), 0);
    assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
}
|
||||
|
||||
/// Hammer the check-only path (`check_user_limits_static`) from 512 parallel
/// tasks. A pure limit *check* must not reserve anything, so after all tasks
/// finish both the connection counter and the active-IP count must be zero —
/// any non-zero value means the check path mutates state it shouldn't.
#[tokio::test]
async fn client_check_user_limits_concurrent_churn_no_counter_drift() {
    let user = "check-drift-user";
    let stats = Arc::new(Stats::new());
    let ip_tracker = Arc::new(UserIpTracker::new());
    ip_tracker.set_user_limit(user, 64).await;

    let mut config = ProxyConfig::default();
    config
        .access
        .user_max_tcp_conns
        .insert(user.to_string(), 64);

    let mut tasks = Vec::new();
    for i in 0..512u16 {
        let stats = Arc::clone(&stats);
        let ip_tracker = Arc::clone(&ip_tracker);
        let config = config.clone();
        tasks.push(tokio::spawn(async move {
            // Spread checks over many distinct peer IPs/ports.
            let peer = SocketAddr::new(
                IpAddr::V4(Ipv4Addr::new(172, 20, (i / 255) as u8, (i % 255 + 1) as u8)),
                40000 + (i % 500),
            );
            // Result is intentionally discarded: only side effects matter here.
            let _ = RunningClientHandler::check_user_limits_static(
                user,
                &config,
                &stats,
                peer,
                &ip_tracker,
            )
            .await;
        }));
    }

    for task in futures::future::join_all(tasks).await {
        task.unwrap();
    }

    // No drain_cleanup_queue() needed: a check must never enqueue cleanup.
    assert_eq!(stats.get_user_curr_connects(user), 0);
    assert_eq!(ip_tracker.get_active_ip_count(user).await, 0);
}
|
||||
126
src/proxy/tests/client_beobachten_ttl_bounds_security_tests.rs
Normal file
126
src/proxy/tests/client_beobachten_ttl_bounds_security_tests.rs
Normal file
@@ -0,0 +1,126 @@
|
||||
use super::*;
|
||||
|
||||
/// Security cap on the beobachten retention TTL: 24 hours, expressed in
/// minutes. Tests below assert `beobachten_ttl` clamps to this value.
const BEOBACHTEN_TTL_MAX_MINUTES: u64 = 24 * 60;
|
||||
|
||||
/// A TTL configured at exactly the cap must pass through unclamped.
#[test]
fn beobachten_ttl_exact_upper_bound_is_preserved() {
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = true;
    cfg.general.beobachten_minutes = BEOBACHTEN_TTL_MAX_MINUTES;

    let expected = Duration::from_secs(BEOBACHTEN_TTL_MAX_MINUTES * 60);
    assert_eq!(
        beobachten_ttl(&cfg),
        expected,
        "upper-bound TTL should remain unchanged"
    );
}
|
||||
|
||||
/// One minute past the cap must be clamped back down to the cap.
#[test]
fn beobachten_ttl_above_upper_bound_is_clamped() {
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = true;
    cfg.general.beobachten_minutes = BEOBACHTEN_TTL_MAX_MINUTES + 1;

    let capped = Duration::from_secs(BEOBACHTEN_TTL_MAX_MINUTES * 60);
    assert_eq!(
        beobachten_ttl(&cfg),
        capped,
        "TTL above security cap must be clamped"
    );
}
|
||||
|
||||
/// Even `u64::MAX` configured minutes must fail safe to the 24h cap rather
/// than overflow into an effectively unbounded retention window.
#[test]
fn beobachten_ttl_u64_max_is_clamped_fail_safe() {
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = true;
    cfg.general.beobachten_minutes = u64::MAX;

    let capped = Duration::from_secs(BEOBACHTEN_TTL_MAX_MINUTES * 60);
    assert_eq!(
        beobachten_ttl(&cfg),
        capped,
        "extreme configured TTL must not become multi-century retention"
    );
}
|
||||
|
||||
/// One configured minute translates to exactly 60 seconds of TTL.
#[test]
fn positive_one_minute_maps_to_exact_60_seconds() {
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = true;
    cfg.general.beobachten_minutes = 1;

    let ttl = beobachten_ttl(&cfg);
    assert_eq!(ttl, Duration::from_secs(60));
}
|
||||
|
||||
/// Probe the cap boundary from below, exactly at, and above: the values just
/// below and at the cap pass through, one above is clamped to the cap.
#[test]
fn adversarial_boundary_triplet_behaves_deterministically() {
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = true;

    // (configured minutes, effective minutes after clamping)
    let boundary_cases = [
        (BEOBACHTEN_TTL_MAX_MINUTES - 1, BEOBACHTEN_TTL_MAX_MINUTES - 1),
        (BEOBACHTEN_TTL_MAX_MINUTES, BEOBACHTEN_TTL_MAX_MINUTES),
        (BEOBACHTEN_TTL_MAX_MINUTES + 1, BEOBACHTEN_TTL_MAX_MINUTES),
    ];

    for (configured, effective) in boundary_cases {
        cfg.general.beobachten_minutes = configured;
        assert_eq!(
            beobachten_ttl(&cfg),
            Duration::from_secs(effective * 60)
        );
    }
}
|
||||
|
||||
/// Fuzz `beobachten_ttl` with 8192 xorshift-generated minute values and check
/// each against a reference model: 0 maps to 60s, everything else to
/// `min(minutes, cap) * 60` seconds, never exceeding the cap.
#[test]
fn light_fuzz_random_minutes_match_fail_safe_model() {
    let mut config = ProxyConfig::default();
    config.general.beobachten = true;

    // Deterministic xorshift PRNG — no external crates, reproducible runs.
    let mut seed = 0xD15E_A5E5_F00D_BAADu64;
    for _ in 0..8192 {
        seed ^= seed << 7;
        seed ^= seed >> 9;
        seed ^= seed << 8;

        config.general.beobachten_minutes = seed;
        let ttl = beobachten_ttl(&config);
        // NOTE(review): xorshift never yields 0 from a nonzero seed, so the
        // `seed == 0` arm is defensively dead; it documents the model's
        // zero-minutes → 60s floor nonetheless.
        let expected = if seed == 0 {
            Duration::from_secs(60)
        } else {
            Duration::from_secs(seed.min(BEOBACHTEN_TTL_MAX_MINUTES) * 60)
        };

        assert_eq!(ttl, expected, "ttl mismatch for minutes={seed}");
        assert!(ttl <= Duration::from_secs(BEOBACHTEN_TTL_MAX_MINUTES * 60));
    }
}
|
||||
|
||||
/// Sweep every minute value from 0 to cap+4096 and assert the TTL curve is
/// non-decreasing, never exceeds the cap, and is exactly flat at the cap once
/// the configured value passes it.
#[test]
fn stress_monotonic_minutes_remain_monotonic_until_cap_then_flat() {
    let mut config = ProxyConfig::default();
    config.general.beobachten = true;

    let mut prev = Duration::from_secs(0);
    for minutes in 0..=(BEOBACHTEN_TTL_MAX_MINUTES + 4096) {
        config.general.beobachten_minutes = minutes;
        let ttl = beobachten_ttl(&config);

        assert!(ttl >= prev, "ttl must be non-decreasing as minutes grow");
        assert!(ttl <= Duration::from_secs(BEOBACHTEN_TTL_MAX_MINUTES * 60));

        if minutes > BEOBACHTEN_TTL_MAX_MINUTES {
            assert_eq!(
                ttl,
                Duration::from_secs(BEOBACHTEN_TTL_MAX_MINUTES * 60),
                "ttl must stay clamped once cap is exceeded"
            );
        }
        // Carry the previous value forward for the monotonicity check.
        prev = ttl;
    }
}
|
||||
904
src/proxy/tests/client_masking_blackhat_campaign_tests.rs
Normal file
904
src/proxy/tests/client_masking_blackhat_campaign_tests.rs
Normal file
@@ -0,0 +1,904 @@
|
||||
use super::*;
|
||||
use crate::config::{UpstreamConfig, UpstreamType};
|
||||
use crate::crypto::sha256_hmac;
|
||||
use crate::protocol::constants::{
|
||||
HANDSHAKE_LEN, MAX_TLS_PLAINTEXT_SIZE, MIN_TLS_CLIENT_HELLO_SIZE, TLS_RECORD_APPLICATION,
|
||||
TLS_VERSION,
|
||||
};
|
||||
use crate::protocol::tls;
|
||||
use std::collections::HashSet;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::Arc;
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt, duplex};
|
||||
use tokio::net::TcpListener;
|
||||
use tokio::time::{Duration, Instant};
|
||||
|
||||
/// Bundle of every shared component `handle_client_stream` needs, so masking
/// tests can construct one harness and hand its pieces to spawned handlers.
struct CampaignHarness {
    config: Arc<ProxyConfig>,
    stats: Arc<Stats>,
    upstream_manager: Arc<UpstreamManager>,
    replay_checker: Arc<ReplayChecker>,
    buffer_pool: Arc<BufferPool>,
    rng: Arc<SecureRandom>,
    route_runtime: Arc<RouteRuntimeController>,
    ip_tracker: Arc<UserIpTracker>,
    beobachten: Arc<BeobachtenStore>,
}
|
||||
|
||||
/// Build a minimal `UpstreamManager` with a single enabled direct upstream
/// (weight 1, no interface/address binding) for masking tests.
fn new_upstream_manager(stats: Arc<Stats>) -> Arc<UpstreamManager> {
    Arc::new(UpstreamManager::new(
        vec![UpstreamConfig {
            upstream_type: UpstreamType::Direct {
                interface: None,
                bind_addresses: None,
            },
            weight: 1,
            enabled: true,
            scopes: String::new(),
            selected_scope: String::new(),
        }],
        // NOTE(review): the four `1` positional arguments are presumably
        // pool/retry sizing knobs of UpstreamManager::new — confirm against
        // its signature before changing.
        1,
        1,
        1,
        1,
        false,
        stats,
    ))
}
|
||||
|
||||
/// Construct a `CampaignHarness` with masking enabled toward
/// `127.0.0.1:mask_port`, time-skew checks disabled, and a single user
/// "user" whose secret is `secret_hex`.
fn build_mask_harness(secret_hex: &str, mask_port: u16) -> CampaignHarness {
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    // TCP masking only: no unix socket, explicit loopback host/port.
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = mask_port;
    cfg.censorship.mask_proxy_protocol = 0;
    // Tests use fixed handshake timestamps, so skew validation is off.
    cfg.access.ignore_time_skew = true;
    cfg.access
        .users
        .insert("user".to_string(), secret_hex.to_string());

    let config = Arc::new(cfg);
    let stats = Arc::new(Stats::new());

    CampaignHarness {
        config,
        stats: stats.clone(),
        upstream_manager: new_upstream_manager(stats),
        replay_checker: Arc::new(ReplayChecker::new(1024, Duration::from_secs(60))),
        buffer_pool: Arc::new(BufferPool::new()),
        rng: Arc::new(SecureRandom::new()),
        route_runtime: Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        ip_tracker: Arc::new(UserIpTracker::new()),
        beobachten: Arc::new(BeobachtenStore::new()),
    }
}
|
||||
|
||||
/// Forge a FakeTLS ClientHello record (`0x16 0x03 0x01` + 2-byte length) of
/// `tls_len` body bytes filled with `fill`, carrying a valid HMAC digest for
/// `secret` with `timestamp` XOR-folded into its last four bytes.
fn make_valid_tls_client_hello(secret: &[u8], timestamp: u32, tls_len: usize, fill: u8) -> Vec<u8> {
    assert!(
        tls_len <= u16::MAX as usize,
        "TLS length must fit into record header"
    );

    // 5-byte record header + body.
    let total_len = 5 + tls_len;
    let mut handshake = vec![fill; total_len];

    handshake[0] = 0x16; // handshake record type
    handshake[1] = 0x03; // TLS 1.0 version bytes in the record header
    handshake[2] = 0x01;
    handshake[3..5].copy_from_slice(&(tls_len as u16).to_be_bytes());

    // Byte immediately after the digest region is the session-id length.
    let session_id_len: usize = 32;
    handshake[tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN] = session_id_len as u8;

    // HMAC is computed over the whole message with the digest region zeroed,
    // then written back with the little-endian timestamp XORed into bytes
    // 28..32 — mirroring the scheme the proxy validates.
    handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN].fill(0);
    let computed = sha256_hmac(secret, &handshake);
    let mut digest = computed;
    let ts = timestamp.to_le_bytes();
    for i in 0..4 {
        // assumes TLS_DIGEST_LEN is 32 so 28..32 is the digest tail — TODO confirm
        digest[28 + i] ^= ts[i];
    }

    handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN]
        .copy_from_slice(&digest);
    handshake
}
|
||||
|
||||
fn wrap_tls_record(record_type: u8, payload: &[u8]) -> Vec<u8> {
|
||||
let mut record = Vec::with_capacity(5 + payload.len());
|
||||
record.push(record_type);
|
||||
record.extend_from_slice(&TLS_VERSION);
|
||||
record.extend_from_slice(&(payload.len() as u16).to_be_bytes());
|
||||
record.extend_from_slice(payload);
|
||||
record
|
||||
}
|
||||
|
||||
/// Convenience wrapper: frame `payload` as a TLS application-data record.
fn wrap_tls_application_data(payload: &[u8]) -> Vec<u8> {
    wrap_tls_record(TLS_RECORD_APPLICATION, payload)
}
|
||||
|
||||
async fn read_and_discard_tls_record_body<T>(stream: &mut T, header: [u8; 5])
|
||||
where
|
||||
T: tokio::io::AsyncRead + Unpin,
|
||||
{
|
||||
let len = u16::from_be_bytes([header[3], header[4]]) as usize;
|
||||
let mut body = vec![0u8; len];
|
||||
stream.read_exact(&mut body).await.unwrap();
|
||||
}
|
||||
|
||||
/// Drive `handle_client_stream` through the "TLS handshake succeeds, MTProto
/// layer then fails" path: send a valid ClientHello, consume the ServerHello,
/// send `bad_mtproto_record` plus `trailing_records`, and assert the mask
/// backend receives exactly `expected_forward`.
async fn run_tls_success_mtproto_fail_capture(
    harness: CampaignHarness,
    peer: SocketAddr,
    client_hello: Vec<u8>,
    bad_mtproto_record: Vec<u8>,
    trailing_records: Vec<Vec<u8>>,
    expected_forward: Vec<u8>,
) {
    // Stand in for the mask backend on an ephemeral port.
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();

    // Point the harness config's mask target at the listener just bound.
    let mut cfg = (*harness.config).clone();
    cfg.censorship.mask_port = backend_addr.port();
    let cfg = Arc::new(cfg);

    let expected = expected_forward.clone();
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = vec![0u8; expected.len()];
        stream.read_exact(&mut got).await.unwrap();
        got
    });

    // In-memory duplex pipe stands in for the client TCP connection.
    let (server_side, mut client_side) = duplex(262144);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        peer,
        cfg,
        harness.stats,
        harness.upstream_manager,
        harness.replay_checker,
        harness.buffer_pool,
        harness.rng,
        None,
        harness.route_runtime,
        None,
        harness.ip_tracker,
        harness.beobachten,
        false,
    ));

    client_side.write_all(&client_hello).await.unwrap();

    // The proxy must answer a valid hello with a TLS handshake record (0x16);
    // its body content is irrelevant here, so discard it.
    let mut tls_response_head = [0u8; 5];
    client_side
        .read_exact(&mut tls_response_head)
        .await
        .unwrap();
    assert_eq!(tls_response_head[0], 0x16);
    read_and_discard_tls_record_body(&mut client_side, tls_response_head).await;

    // Trigger the MTProto failure, then stream the follow-up records that
    // should be forwarded verbatim to the mask backend.
    client_side.write_all(&bad_mtproto_record).await.unwrap();
    for record in trailing_records {
        client_side.write_all(&record).await.unwrap();
    }

    let got = tokio::time::timeout(Duration::from_secs(4), accept_task)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(got, expected_forward);

    client_side.shutdown().await.unwrap();
    // Handler must terminate cleanly (no error) within the timeout.
    let result = tokio::time::timeout(Duration::from_secs(4), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());
}
|
||||
|
||||
/// Send `payload` (a non-TLS or malformed probe) to `handle_client_stream`
/// with masking forced on, and assert the mask backend receives exactly
/// `expected` — i.e. invalid probes are transparently proxied, not answered.
async fn run_invalid_tls_capture(config: Arc<ProxyConfig>, payload: Vec<u8>, expected: Vec<u8>) {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();

    // Force mask settings onto whatever config the caller supplied.
    let mut cfg = (*config).clone();
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = backend_addr.port();
    let cfg = Arc::new(cfg);

    let expected_probe = expected.clone();
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = vec![0u8; expected_probe.len()];
        stream.read_exact(&mut got).await.unwrap();
        got
    });

    let stats = Arc::new(Stats::new());
    let (server_side, mut client_side) = duplex(65536);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.77:45001".parse().unwrap(),
        cfg,
        stats,
        new_upstream_manager(Arc::new(Stats::new())),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));

    client_side.write_all(&payload).await.unwrap();
    client_side.shutdown().await.unwrap();

    let got = tokio::time::timeout(Duration::from_secs(4), accept_task)
        .await
        .unwrap()
        .unwrap();
    assert_eq!(got, expected);

    let result = tokio::time::timeout(Duration::from_secs(4), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());
}
|
||||
|
||||
/// After a valid TLS hello but a failed MTProto record, a single trailing
/// application-data record must be forwarded to the mask backend unchanged.
#[tokio::test]
async fn blackhat_campaign_01_tail_only_record_is_forwarded_after_tls_success_mtproto_fail() {
    // Secret bytes mirror the hex secret given to the harness.
    let secret = [0xA1u8; 16];
    let harness = build_mask_harness("a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1a1", 1);
    let client_hello = make_valid_tls_client_hello(&secret, 11, 600, 0x41);
    // All-zero HANDSHAKE_LEN payload fails MTProto validation.
    let bad_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
    let tail = wrap_tls_application_data(b"blackhat-tail-01");

    run_tls_success_mtproto_fail_capture(
        harness,
        "198.51.100.1:55001".parse().unwrap(),
        client_hello,
        bad_record,
        vec![tail.clone()],
        tail,
    )
    .await;
}
|
||||
|
||||
/// Two trailing records after the MTProto failure must reach the mask
/// backend concatenated in their original wire order.
#[tokio::test]
async fn blackhat_campaign_02_two_ordered_records_preserved_after_fallback() {
    let secret = [0xA2u8; 16];
    let harness = build_mask_harness("a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2a2", 1);
    let client_hello = make_valid_tls_client_hello(&secret, 12, 600, 0x42);
    let bad_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
    let r1 = wrap_tls_application_data(b"first");
    let r2 = wrap_tls_application_data(b"second");
    // Backend must observe r1 then r2 back-to-back.
    let expected = [r1.clone(), r2.clone()].concat();

    run_tls_success_mtproto_fail_capture(
        harness,
        "198.51.100.2:55002".parse().unwrap(),
        client_hello,
        bad_record,
        vec![r1, r2],
        expected,
    )
    .await;
}
|
||||
|
||||
/// A maximum-size TLS application record (MAX_TLS_PLAINTEXT_SIZE payload)
/// must survive the fallback path intact — no truncation or re-framing.
#[tokio::test]
async fn blackhat_campaign_03_large_tls_application_record_survives_fallback() {
    let secret = [0xA3u8; 16];
    let harness = build_mask_harness("a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3a3", 1);
    let client_hello = make_valid_tls_client_hello(&secret, 13, 600, 0x43);
    let bad_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
    let big_payload = vec![0x5Au8; MAX_TLS_PLAINTEXT_SIZE];
    let big_record = wrap_tls_application_data(&big_payload);

    run_tls_success_mtproto_fail_capture(
        harness,
        "198.51.100.3:55003".parse().unwrap(),
        client_hello,
        bad_record,
        vec![big_record.clone()],
        big_record,
    )
    .await;
}
|
||||
|
||||
/// When the failing record coalesces extra tail bytes beyond HANDSHAKE_LEN in
/// one TLS record, the proxy must re-frame just that tail as its own
/// application-data record toward the mask backend.
#[tokio::test]
async fn blackhat_campaign_04_coalesced_tail_in_failed_record_is_reframed_and_forwarded() {
    let secret = [0xA4u8; 16];
    let harness = build_mask_harness("a4a4a4a4a4a4a4a4a4a4a4a4a4a4a4a4", 1);
    let client_hello = make_valid_tls_client_hello(&secret, 14, 600, 0x44);

    // One record = invalid MTProto handshake + surplus tail bytes.
    let coalesced_tail = b"coalesced-tail-blackhat".to_vec();
    let mut bad_payload = vec![0u8; HANDSHAKE_LEN];
    bad_payload.extend_from_slice(&coalesced_tail);
    let bad_record = wrap_tls_application_data(&bad_payload);
    // Backend should see only the tail, re-wrapped in a fresh record.
    let expected = wrap_tls_application_data(&coalesced_tail);

    run_tls_success_mtproto_fail_capture(
        harness,
        "198.51.100.4:55004".parse().unwrap(),
        client_hello,
        bad_record,
        Vec::new(),
        expected,
    )
    .await;
}
|
||||
|
||||
/// Combination of campaigns 02 and 04: a coalesced tail inside the failing
/// record followed by a separate next record — wire order (re-framed tail,
/// then next record) must be preserved.
#[tokio::test]
async fn blackhat_campaign_05_coalesced_tail_plus_next_record_keep_wire_order() {
    let secret = [0xA5u8; 16];
    let harness = build_mask_harness("a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5a5", 1);
    let client_hello = make_valid_tls_client_hello(&secret, 15, 600, 0x45);

    let coalesced_tail = b"inline-tail".to_vec();
    let mut bad_payload = vec![0u8; HANDSHAKE_LEN];
    bad_payload.extend_from_slice(&coalesced_tail);
    let bad_record = wrap_tls_application_data(&bad_payload);
    let next_record = wrap_tls_application_data(b"next-record");

    // Expected backend bytes: re-framed tail, then the untouched next record.
    let expected = [
        wrap_tls_application_data(&coalesced_tail),
        next_record.clone(),
    ]
    .concat();

    run_tls_success_mtproto_fail_capture(
        harness,
        "198.51.100.5:55005".parse().unwrap(),
        client_hello,
        bad_record,
        vec![next_record],
        expected,
    )
    .await;
}
|
||||
|
||||
/// Replay defense: run the same valid ClientHello twice through handlers that
/// share one ReplayChecker. The first run completes the fallback flow
/// (backend sees the seed tail); the replayed second run must get no
/// ServerHello at all, and the backend must instead receive the replayed
/// hello bytes verbatim (straight masking).
#[tokio::test]
async fn blackhat_campaign_06_replayed_tls_hello_is_masked_without_serverhello() {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();

    let harness = build_mask_harness("a6a6a6a6a6a6a6a6a6a6a6a6a6a6a6a6", backend_addr.port());
    // Shared checker is what links the two runs together.
    let replay_checker = harness.replay_checker.clone();
    let client_hello = make_valid_tls_client_hello(&[0xA6; 16], 16, 600, 0x46);
    let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
    let first_tail = wrap_tls_application_data(b"seed-tail");

    let expected_hello = client_hello.clone();
    let expected_tail = first_tail.clone();

    // Backend accepts two connections: first yields the tail, second must
    // yield the raw replayed hello.
    let accept_task = tokio::spawn(async move {
        let (mut s1, _) = listener.accept().await.unwrap();
        let mut got_tail = vec![0u8; expected_tail.len()];
        s1.read_exact(&mut got_tail).await.unwrap();
        assert_eq!(got_tail, expected_tail);
        drop(s1);

        let (mut s2, _) = listener.accept().await.unwrap();
        let mut got_hello = vec![0u8; expected_hello.len()];
        s2.read_exact(&mut got_hello).await.unwrap();
        got_hello
    });

    // One handler run; `send_mtproto` selects first-run vs replay behavior.
    let run_one = |checker: Arc<ReplayChecker>, send_mtproto: bool| {
        let mut cfg = (*harness.config).clone();
        cfg.censorship.mask_port = backend_addr.port();
        let cfg = Arc::new(cfg);
        let hello = client_hello.clone();
        let invalid_mtproto_record = invalid_mtproto_record.clone();
        let first_tail = first_tail.clone();
        let stats = harness.stats.clone();
        let upstream = harness.upstream_manager.clone();
        let pool = harness.buffer_pool.clone();
        let rng = harness.rng.clone();
        let route = harness.route_runtime.clone();
        let ipt = harness.ip_tracker.clone();
        let beob = harness.beobachten.clone();

        async move {
            let (server_side, mut client_side) = duplex(131072);
            let handler = tokio::spawn(handle_client_stream(
                server_side,
                "198.51.100.6:55006".parse().unwrap(),
                cfg,
                stats,
                upstream,
                checker,
                pool,
                rng,
                None,
                route,
                None,
                ipt,
                beob,
                false,
            ));

            client_side.write_all(&hello).await.unwrap();
            if send_mtproto {
                // First run: proxy answers with a handshake record; skip it,
                // then fail MTProto and send the seed tail for the backend.
                let mut head = [0u8; 5];
                client_side.read_exact(&mut head).await.unwrap();
                assert_eq!(head[0], 0x16);
                read_and_discard_tls_record_body(&mut client_side, head).await;
                client_side
                    .write_all(&invalid_mtproto_record)
                    .await
                    .unwrap();
                client_side.write_all(&first_tail).await.unwrap();
            } else {
                // Replay run: within 300ms no ServerHello byte may arrive —
                // either timeout or EOF is acceptable proof.
                let mut one = [0u8; 1];
                let no_server_hello = tokio::time::timeout(
                    Duration::from_millis(300),
                    client_side.read_exact(&mut one),
                )
                .await;
                assert!(no_server_hello.is_err() || no_server_hello.unwrap().is_err());
            }
            client_side.shutdown().await.unwrap();
            let result = tokio::time::timeout(Duration::from_secs(4), handler)
                .await
                .unwrap()
                .unwrap();
            assert!(result.is_ok());
        }
    };

    run_one(replay_checker.clone(), true).await;
    run_one(replay_checker, false).await;

    let got = tokio::time::timeout(Duration::from_secs(4), accept_task)
        .await
        .unwrap()
        .unwrap();
    // The replayed connection was masked: backend got the hello verbatim.
    assert_eq!(got, client_hello);
}
|
||||
|
||||
/// A ClientHello whose header promises 600 body bytes but delivers only 37
/// before EOF must be forwarded to the mask backend exactly as received.
#[tokio::test]
async fn blackhat_campaign_07_truncated_clienthello_exact_prefix_is_forwarded() {
    // Header claims 600 bytes; only 37 follow.
    let mut payload = vec![0u8; 5 + 37];
    payload[0] = 0x16;
    payload[1] = 0x03;
    payload[2] = 0x01;
    payload[3..5].copy_from_slice(&600u16.to_be_bytes());
    payload[5..].fill(0x71);

    run_invalid_tls_capture(Arc::new(ProxyConfig::default()), payload.clone(), payload).await;
}
|
||||
|
||||
/// A bare record header advertising an out-of-bounds 0xFFFF length, with no
/// body at all, must still be relayed to the mask backend byte-for-byte.
#[tokio::test]
async fn blackhat_campaign_08_out_of_bounds_len_forwards_header_only() {
    let header = vec![0x16, 0x03, 0x01, 0xFF, 0xFF];
    run_invalid_tls_capture(Arc::new(ProxyConfig::default()), header.clone(), header).await;
}
|
||||
|
||||
/// Deliver the probe in three fragments (2-byte header start, 3 more header
/// bytes, then 11 body bytes of a claimed 600-byte body) and close. The mask
/// backend must receive exactly the 16 bytes that were actually seen — the
/// proxy must not wait for, or fabricate, the missing body.
#[tokio::test]
async fn blackhat_campaign_09_fragmented_header_then_partial_body_masks_seen_bytes_only() {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();

    let mut cfg = ProxyConfig::default();
    cfg.censorship.mask = true;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = backend_addr.port();
    cfg.censorship.mask_unix_sock = None;

    // The exact 16 bytes the client will manage to send before EOF.
    let expected = {
        let mut x = vec![0u8; 5 + 11];
        x[0] = 0x16;
        x[1] = 0x03;
        x[2] = 0x01;
        x[3..5].copy_from_slice(&600u16.to_be_bytes());
        x[5..].fill(0xCC);
        x
    };

    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();
        let mut got = vec![0u8; expected.len()];
        stream.read_exact(&mut got).await.unwrap();
        got
    });

    let (server_side, mut client_side) = duplex(65536);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.9:55009".parse().unwrap(),
        Arc::new(cfg),
        Arc::new(Stats::new()),
        new_upstream_manager(Arc::new(Stats::new())),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));

    // Three writes split the header mid-field; 0x02 0x58 == 600 big-endian.
    client_side.write_all(&[0x16, 0x03]).await.unwrap();
    client_side.write_all(&[0x01, 0x02, 0x58]).await.unwrap();
    client_side.write_all(&vec![0xCC; 11]).await.unwrap();
    client_side.shutdown().await.unwrap();

    let got = tokio::time::timeout(Duration::from_secs(4), accept_task)
        .await
        .unwrap()
        .unwrap();
    // 5 header + 11 body bytes — nothing more, nothing less.
    assert_eq!(got.len(), 16);

    let result = tokio::time::timeout(Duration::from_secs(4), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());
}
|
||||
|
||||
/// With `client_handshake` timeout set to 0 and a forced 700ms ServerHello
/// delay, an invalid probe must still complete without bumping the
/// handshake-timeout counter, and the handler must demonstrably honor the
/// configured delay (elapsed >= 650ms).
#[tokio::test]
async fn blackhat_campaign_10_zero_handshake_timeout_with_delay_still_avoids_timeout_counter() {
    let mut cfg = ProxyConfig::default();
    cfg.general.beobachten = false;
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    // Port 1 is intentionally unreachable; this test only inspects timing
    // and counters, not backend delivery.
    cfg.censorship.mask_port = 1;
    cfg.timeouts.client_handshake = 0;
    // Pin the delay window to exactly 700ms (min == max).
    cfg.censorship.server_hello_delay_min_ms = 700;
    cfg.censorship.server_hello_delay_max_ms = 700;

    let stats = Arc::new(Stats::new());
    let (server_side, mut client_side) = duplex(4096);
    let started = Instant::now();

    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.10:55010".parse().unwrap(),
        Arc::new(cfg),
        stats.clone(),
        new_upstream_manager(Arc::new(Stats::new())),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));

    // Well-formed header, garbage 700-byte body — fails hello validation.
    let mut invalid = vec![0u8; 5 + 700];
    invalid[0] = 0x16;
    invalid[1] = 0x03;
    invalid[2] = 0x01;
    invalid[3..5].copy_from_slice(&700u16.to_be_bytes());
    invalid[5..].fill(0x66);

    client_side.write_all(&invalid).await.unwrap();
    client_side.shutdown().await.unwrap();

    let result = tokio::time::timeout(Duration::from_secs(4), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());
    assert_eq!(stats.get_handshake_timeouts(), 0);
    // 650ms (not 700) gives a little slack against timer granularity.
    assert!(started.elapsed() >= Duration::from_millis(650));
}
|
||||
|
||||
/// 24 concurrent invalid-TLS probes (headers advertising bodies that are
/// never sent, each with a distinct marker byte) must each be relayed
/// verbatim to the mask backend, with no cross-session mix-ups and no
/// handshake-timeout counts.
#[tokio::test]
async fn blackhat_campaign_11_parallel_bad_tls_probes_all_masked_without_timeouts() {
    let n = 24usize;
    // Real TCP listener stands in for the mask backend.
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();

    let mut cfg = ProxyConfig::default();
    cfg.censorship.mask = true;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_port = backend_addr.port();

    let stats = Arc::new(Stats::new());
    // Backend collects the first 5 bytes (the TLS header) of every masked session.
    let accept_task = tokio::spawn(async move {
        let mut seen = HashSet::new();
        for _ in 0..n {
            let (mut stream, _) = listener.accept().await.unwrap();
            let mut hdr = [0u8; 5];
            stream.read_exact(&mut hdr).await.unwrap();
            seen.insert(hdr.to_vec());
        }
        seen
    });

    let mut tasks = Vec::new();
    for i in 0..n {
        // Header claims a 0xFF00 + i byte body that never arrives; the low
        // length byte doubles as a per-session marker.
        let mut hdr = [0u8; 5];
        hdr[0] = 0x16;
        hdr[1] = 0x03;
        hdr[2] = 0x01;
        hdr[3] = 0xFF;
        hdr[4] = i as u8;

        let cfg = Arc::new(cfg.clone());
        let stats = stats.clone();
        tasks.push(tokio::spawn(async move {
            let (server_side, mut client_side) = duplex(4096);
            let handler = tokio::spawn(handle_client_stream(
                server_side,
                format!("198.51.100.11:{}", 56000 + i).parse().unwrap(),
                cfg,
                stats,
                new_upstream_manager(Arc::new(Stats::new())),
                Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
                Arc::new(BufferPool::new()),
                Arc::new(SecureRandom::new()),
                None,
                Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
                None,
                Arc::new(UserIpTracker::new()),
                Arc::new(BeobachtenStore::new()),
                false,
            ));
            client_side.write_all(&hdr).await.unwrap();
            client_side.shutdown().await.unwrap();
            let result = tokio::time::timeout(Duration::from_secs(4), handler)
                .await
                .unwrap()
                .unwrap();
            assert!(result.is_ok());
            // Return the header so the main task can build the expected set.
            hdr.to_vec()
        }));
    }

    let mut expected = HashSet::new();
    for t in tasks {
        expected.insert(t.await.unwrap());
    }

    let seen = tokio::time::timeout(Duration::from_secs(6), accept_task)
        .await
        .unwrap()
        .unwrap();
    // Every probe header must appear at the backend exactly as sent.
    assert_eq!(seen, expected);
    assert_eq!(stats.get_handshake_timeouts(), 0);
}
|
||||
|
||||
/// 16 concurrent sessions that pass the faketls ClientHello but then fail
/// the inner MTProto handshake: each session's unique trailing
/// application-data record must reach the mask backend intact, proving
/// sessions stay isolated under the fallback path.
#[tokio::test]
async fn blackhat_campaign_12_parallel_tls_success_mtproto_fail_sessions_keep_isolation() {
    let sessions = 16usize;
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();

    // Per-session unique trailing record: payload differs in both byte value and length.
    let mut expected = HashSet::new();
    for i in 0..sessions {
        let rec = wrap_tls_application_data(&vec![i as u8; 8 + i]);
        expected.insert(rec);
    }

    // Backend reads exactly one complete TLS record per session.
    let accept_task = tokio::spawn(async move {
        let mut got_set = HashSet::new();
        for _ in 0..sessions {
            let (mut stream, _) = listener.accept().await.unwrap();
            let mut head = [0u8; 5];
            stream.read_exact(&mut head).await.unwrap();
            let len = u16::from_be_bytes([head[3], head[4]]) as usize;
            let mut rec = vec![0u8; 5 + len];
            rec[..5].copy_from_slice(&head);
            stream.read_exact(&mut rec[5..]).await.unwrap();
            got_set.insert(rec);
        }
        got_set
    });

    let mut tasks = Vec::new();
    for i in 0..sessions {
        let mut harness =
            build_mask_harness("abababababababababababababababab", backend_addr.port());
        // NOTE(review): build_mask_harness is already given this port, so the
        // clone-and-reassign below looks redundant — confirm against its impl.
        let mut cfg = (*harness.config).clone();
        cfg.censorship.mask_port = backend_addr.port();
        harness.config = Arc::new(cfg);
        tasks.push(tokio::spawn(async move {
            // 16 x 0xAB matches the "abab…" hex secret used for the harness.
            let secret = [0xABu8; 16];
            let hello =
                make_valid_tls_client_hello(&secret, 100 + i as u32, 600, 0x40 + (i as u8 % 10));
            // HANDSHAKE_LEN of zeros: right size, wrong content — MTProto reject.
            let bad = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
            let tail = wrap_tls_application_data(&vec![i as u8; 8 + i]);
            let (server_side, mut client_side) = duplex(131072);
            let handler = tokio::spawn(handle_client_stream(
                server_side,
                format!("198.51.100.12:{}", 56100 + i).parse().unwrap(),
                harness.config,
                harness.stats,
                harness.upstream_manager,
                harness.replay_checker,
                harness.buffer_pool,
                harness.rng,
                None,
                harness.route_runtime,
                None,
                harness.ip_tracker,
                harness.beobachten,
                false,
            ));

            client_side.write_all(&hello).await.unwrap();
            // Consume the ServerHello so the handshake can proceed.
            let mut head = [0u8; 5];
            client_side.read_exact(&mut head).await.unwrap();
            read_and_discard_tls_record_body(&mut client_side, head).await;
            client_side.write_all(&bad).await.unwrap();
            client_side.write_all(&tail).await.unwrap();
            client_side.shutdown().await.unwrap();

            let result = tokio::time::timeout(Duration::from_secs(5), handler)
                .await
                .unwrap()
                .unwrap();
            assert!(result.is_ok());
            // Return the trailing record for the produced-set check below.
            tail
        }));
    }

    let mut produced = HashSet::new();
    for t in tasks {
        produced.insert(t.await.unwrap());
    }

    let observed = tokio::time::timeout(Duration::from_secs(8), accept_task)
        .await
        .unwrap()
        .unwrap();

    // Sanity: sessions generated the expected records, and the backend saw
    // exactly that set — no merging, loss, or cross-session leakage.
    assert_eq!(produced, expected);
    assert_eq!(observed, expected);
}
|
||||
|
||||
/// Mask backend pointing at a closed port (127.0.0.1:1) plus a 1 s
/// handshake budget: failure to reach the backend must end the session
/// cleanly rather than escalate into a handshake-timeout count.
#[tokio::test]
async fn blackhat_campaign_13_backend_down_does_not_escalate_to_handshake_timeout() {
    let mut cfg = ProxyConfig::default();
    cfg.censorship.mask = true;
    cfg.censorship.mask_unix_sock = None;
    cfg.censorship.mask_host = Some("127.0.0.1".to_string());
    cfg.censorship.mask_port = 1;
    cfg.timeouts.client_handshake = 1;

    let stats = Arc::new(Stats::new());
    let (server_side, mut client_side) = duplex(4096);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.13:55013".parse().unwrap(),
        Arc::new(cfg),
        stats.clone(),
        new_upstream_manager(Arc::new(Stats::new())),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));

    // Header-only probe promising 0xFF00 bytes that never arrive.
    let bad = vec![0x16, 0x03, 0x01, 0xFF, 0x00];
    client_side.write_all(&bad).await.unwrap();
    client_side.shutdown().await.unwrap();

    let result = tokio::time::timeout(Duration::from_secs(4), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());
    assert_eq!(stats.get_handshake_timeouts(), 0);
}
|
||||
|
||||
/// With masking disabled entirely, an invalid TLS probe must still be
/// handled cleanly: the handler returns Ok and the handshake-timeout
/// counter stays at zero.
#[tokio::test]
async fn blackhat_campaign_14_masking_disabled_path_finishes_cleanly() {
    let mut cfg = ProxyConfig::default();
    cfg.censorship.mask = false;
    cfg.timeouts.client_handshake = 1;

    let stats = Arc::new(Stats::new());
    let (server_side, mut client_side) = duplex(4096);
    let handler = tokio::spawn(handle_client_stream(
        server_side,
        "198.51.100.14:55014".parse().unwrap(),
        Arc::new(cfg),
        stats.clone(),
        new_upstream_manager(Arc::new(Stats::new())),
        Arc::new(ReplayChecker::new(128, Duration::from_secs(60))),
        Arc::new(BufferPool::new()),
        Arc::new(SecureRandom::new()),
        None,
        Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        None,
        Arc::new(UserIpTracker::new()),
        Arc::new(BeobachtenStore::new()),
        false,
    ));

    // Header-only probe with an implausible 0xFFF0 advertised length.
    let bad = vec![0x16, 0x03, 0x01, 0xFF, 0xF0];
    client_side.write_all(&bad).await.unwrap();
    client_side.shutdown().await.unwrap();

    let result = tokio::time::timeout(Duration::from_secs(4), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());
    assert_eq!(stats.get_handshake_timeouts(), 0);
}
|
||||
|
||||
/// Deterministic light fuzz over TLS record lengths: 20 probes with
/// LCG-derived advertised lengths (every third forced past
/// MAX_TLS_PLAINTEXT_SIZE) and partially-sent bodies, each of which must
/// be captured verbatim by the mask backend.
#[tokio::test]
async fn blackhat_campaign_15_light_fuzz_tls_lengths_and_fragmentation() {
    // Fixed seed keeps the fuzz reproducible across runs.
    let mut seed = 0x9E3779B97F4A7C15u64;

    for idx in 0..20u16 {
        // LCG step for the advertised record length.
        seed = seed.wrapping_mul(6364136223846793005).wrapping_add(1);
        let mut tls_len = (seed as usize) % 20000;
        if idx % 3 == 0 {
            // Force an oversized (invalid) record length on every third probe.
            tls_len = MAX_TLS_PLAINTEXT_SIZE + 1 + (tls_len % 1024);
        }

        // Send a short partial body (≤ 28 bytes, always less than advertised)
        // only when the advertised length is in the plausible hello range;
        // otherwise send the bare header.
        let body_to_send =
            if (MIN_TLS_CLIENT_HELLO_SIZE..=MAX_TLS_PLAINTEXT_SIZE).contains(&tls_len) {
                (seed as usize % 29).min(tls_len.saturating_sub(1))
            } else {
                0
            };

        let mut probe = vec![0u8; 5 + body_to_send];
        probe[0] = 0x16;
        probe[1] = 0x03;
        probe[2] = 0x01;
        probe[3..5].copy_from_slice(&(tls_len as u16).to_be_bytes());
        // Body bytes from a second, independent LCG stream.
        for b in &mut probe[5..] {
            seed = seed
                .wrapping_mul(2862933555777941757)
                .wrapping_add(3037000493);
            *b = (seed >> 24) as u8;
        }

        // The backend must observe the probe byte-for-byte.
        let expected = probe.clone();
        run_invalid_tls_capture(Arc::new(ProxyConfig::default()), probe, expected).await;
    }
}
|
||||
|
||||
/// Mixed burst of 18 concurrent probes — alternating truncated-garbage
/// records and header-only probes — must all be captured verbatim and
/// finish without panics.
#[tokio::test]
async fn blackhat_campaign_16_mixed_probe_burst_stress_finishes_without_panics() {
    let cases = 18usize;
    let mut tasks = Vec::new();

    for i in 0..cases {
        tasks.push(tokio::spawn(async move {
            if i % 2 == 0 {
                // Claims 600 bytes but delivers at most 12 garbage bytes.
                let mut probe = vec![0u8; 5 + (i % 13)];
                probe[0] = 0x16;
                probe[1] = 0x03;
                probe[2] = 0x01;
                probe[3..5].copy_from_slice(&600u16.to_be_bytes());
                probe[5..].fill((0x90 + i as u8) ^ 0x5A);
                run_invalid_tls_capture(Arc::new(ProxyConfig::default()), probe.clone(), probe)
                    .await;
            } else {
                // Bare header with an implausible 0xFF00 + i advertised length.
                let hdr = vec![0x16, 0x03, 0x01, 0xFF, i as u8];
                run_invalid_tls_capture(Arc::new(ProxyConfig::default()), hdr.clone(), hdr).await;
            }
        }));
    }

    // Joining surfaces any panic raised inside the spawned probes.
    for task in tasks {
        task.await.unwrap();
    }
}
|
||||
255
src/proxy/tests/client_masking_budget_security_tests.rs
Normal file
255
src/proxy/tests/client_masking_budget_security_tests.rs
Normal file
@@ -0,0 +1,255 @@
|
||||
use super::*;
|
||||
use crate::config::{UpstreamConfig, UpstreamType};
|
||||
use crate::crypto::sha256_hmac;
|
||||
use crate::protocol::constants::{HANDSHAKE_LEN, TLS_VERSION};
|
||||
use crate::protocol::tls;
|
||||
use tokio::io::{AsyncReadExt, AsyncWriteExt, duplex};
|
||||
use tokio::net::TcpListener;
|
||||
use tokio::time::{Duration, Instant};
|
||||
|
||||
/// Bundle of all the shared components `handle_client_stream` takes, so
/// tests can spawn the full client pipeline without repeating setup.
struct PipelineHarness {
    config: Arc<ProxyConfig>,
    stats: Arc<Stats>,
    upstream_manager: Arc<UpstreamManager>,
    replay_checker: Arc<ReplayChecker>,
    buffer_pool: Arc<BufferPool>,
    rng: Arc<SecureRandom>,
    route_runtime: Arc<RouteRuntimeController>,
    ip_tracker: Arc<UserIpTracker>,
    beobachten: Arc<BeobachtenStore>,
}
|
||||
|
||||
/// Builds a `PipelineHarness` around `config` with a single enabled
/// direct (no intermediate proxy) upstream and fresh shared state.
fn build_harness(config: ProxyConfig) -> PipelineHarness {
    let config = Arc::new(config);
    let stats = Arc::new(Stats::new());
    let upstream_manager = Arc::new(UpstreamManager::new(
        vec![UpstreamConfig {
            upstream_type: UpstreamType::Direct {
                interface: None,
                bind_addresses: None,
            },
            weight: 1,
            enabled: true,
            scopes: String::new(),
            selected_scope: String::new(),
        }],
        // NOTE(review): the four `1`s below are positional tuning values
        // (likely limits/intervals) — confirm their meaning against
        // `UpstreamManager::new` before changing any of them.
        1,
        1,
        1,
        1,
        false,
        stats.clone(),
    ));

    PipelineHarness {
        config,
        stats,
        upstream_manager,
        replay_checker: Arc::new(ReplayChecker::new(256, Duration::from_secs(60))),
        buffer_pool: Arc::new(BufferPool::new()),
        rng: Arc::new(SecureRandom::new()),
        route_runtime: Arc::new(RouteRuntimeController::new(RelayRouteMode::Direct)),
        ip_tracker: Arc::new(UserIpTracker::new()),
        beobachten: Arc::new(BeobachtenStore::new()),
    }
}
|
||||
|
||||
fn make_valid_tls_client_hello(secret: &[u8], timestamp: u32, tls_len: usize, fill: u8) -> Vec<u8> {
|
||||
assert!(
|
||||
tls_len <= u16::MAX as usize,
|
||||
"TLS length must fit into record header"
|
||||
);
|
||||
|
||||
let total_len = 5 + tls_len;
|
||||
let mut handshake = vec![fill; total_len];
|
||||
|
||||
handshake[0] = 0x16;
|
||||
handshake[1] = 0x03;
|
||||
handshake[2] = 0x01;
|
||||
handshake[3..5].copy_from_slice(&(tls_len as u16).to_be_bytes());
|
||||
|
||||
let session_id_len: usize = 32;
|
||||
handshake[tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN] = session_id_len as u8;
|
||||
|
||||
handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN].fill(0);
|
||||
let computed = sha256_hmac(secret, &handshake);
|
||||
let mut digest = computed;
|
||||
let ts = timestamp.to_le_bytes();
|
||||
for i in 0..4 {
|
||||
digest[28 + i] ^= ts[i];
|
||||
}
|
||||
|
||||
handshake[tls::TLS_DIGEST_POS..tls::TLS_DIGEST_POS + tls::TLS_DIGEST_LEN]
|
||||
.copy_from_slice(&digest);
|
||||
handshake
|
||||
}
|
||||
|
||||
fn wrap_tls_application_data(payload: &[u8]) -> Vec<u8> {
|
||||
let mut record = Vec::with_capacity(5 + payload.len());
|
||||
record.push(0x17);
|
||||
record.extend_from_slice(&TLS_VERSION);
|
||||
record.extend_from_slice(&(payload.len() as u16).to_be_bytes());
|
||||
record.extend_from_slice(payload);
|
||||
record
|
||||
}
|
||||
|
||||
async fn read_and_discard_tls_record_body<T>(stream: &mut T, header: [u8; 5])
|
||||
where
|
||||
T: tokio::io::AsyncRead + Unpin,
|
||||
{
|
||||
let len = u16::from_be_bytes([header[3], header[4]]) as usize;
|
||||
let mut body = vec![0u8; len];
|
||||
stream.read_exact(&mut body).await.unwrap();
|
||||
}
|
||||
|
||||
/// With `client_handshake = 0` and a fixed 730 ms reject delay, the
/// bad-client masking fallback must run outside any handshake-timeout
/// budget: the handler finishes Ok, the timeout counter stays at zero,
/// and the delay remains observable from the client side.
#[tokio::test]
async fn masking_runs_outside_handshake_timeout_budget_with_high_reject_delay() {
    let mut config = ProxyConfig::default();
    config.general.beobachten = false;
    config.censorship.mask = true;
    config.censorship.mask_unix_sock = None;
    // Mask backend at 127.0.0.1:1 — almost certainly a closed port.
    config.censorship.mask_host = Some("127.0.0.1".to_string());
    config.censorship.mask_port = 1;
    // 0 presumably means "no handshake deadline" — TODO confirm semantics.
    config.timeouts.client_handshake = 0;
    // min == max pins the reject delay to exactly 730 ms.
    config.censorship.server_hello_delay_min_ms = 730;
    config.censorship.server_hello_delay_max_ms = 730;

    let harness = build_harness(config);
    let stats = harness.stats.clone();

    let (server_side, mut client_side) = duplex(4096);
    let peer: SocketAddr = "198.51.100.241:56541".parse().unwrap();

    let handler = tokio::spawn(handle_client_stream(
        server_side,
        peer,
        harness.config,
        harness.stats,
        harness.upstream_manager,
        harness.replay_checker,
        harness.buffer_pool,
        harness.rng,
        None,
        harness.route_runtime,
        None,
        harness.ip_tracker,
        harness.beobachten,
        false,
    ));

    // Well-formed TLS header with 600 bytes of 0x44 filler: framed like a
    // record, but not a valid ClientHello.
    let mut invalid_hello = vec![0u8; 5 + 600];
    invalid_hello[0] = 0x16;
    invalid_hello[1] = 0x03;
    invalid_hello[2] = 0x01;
    invalid_hello[3..5].copy_from_slice(&600u16.to_be_bytes());
    invalid_hello[5..].fill(0x44);

    let started = Instant::now();
    client_side.write_all(&invalid_hello).await.unwrap();
    client_side.shutdown().await.unwrap();

    let result = tokio::time::timeout(Duration::from_secs(3), handler)
        .await
        .unwrap()
        .unwrap();

    assert!(
        result.is_ok(),
        "bad-client fallback must not be canceled by handshake timeout"
    );
    assert_eq!(
        stats.get_handshake_timeouts(),
        0,
        "masking fallback path must not increment handshake timeout counter"
    );
    // 700 ms (not the full 730) leaves slack for timer coarseness.
    assert!(
        started.elapsed() >= Duration::from_millis(700),
        "configured reject delay should still be visible before masking"
    );
}
|
||||
|
||||
/// A client that completes the faketls handshake but then sends an invalid
/// MTProto handshake record must be handed to the mask backend WITHOUT the
/// original ClientHello being replayed: the backend must see only the
/// post-handshake trailing records.
#[tokio::test]
async fn tls_mtproto_bad_client_does_not_reinject_clienthello_into_mask_backend() {
    let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
    let backend_addr = listener.local_addr().unwrap();

    let mut config = ProxyConfig::default();
    config.general.beobachten = false;
    config.censorship.mask = true;
    config.censorship.mask_unix_sock = None;
    config.censorship.mask_host = Some("127.0.0.1".to_string());
    config.censorship.mask_port = backend_addr.port();
    // 0 presumably disables any PROXY-protocol prefix — TODO confirm.
    config.censorship.mask_proxy_protocol = 0;
    config.access.ignore_time_skew = true;
    // 32 hex chars = 16 bytes of 0xD0, matching `secret` below.
    config.access.users.insert(
        "user".to_string(),
        "d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0d0".to_string(),
    );

    let harness = build_harness(config);

    let secret = [0xD0u8; 16];
    let client_hello = make_valid_tls_client_hello(&secret, 0, 600, 0x41);
    // Correct record size, zeroed content: passes framing, fails MTProto auth.
    let invalid_mtproto_record = wrap_tls_application_data(&vec![0u8; HANDSHAKE_LEN]);
    let trailing_record = wrap_tls_application_data(b"no-clienthello-reinject");
    let expected_trailing = trailing_record.clone();

    // Backend asserts that the FIRST bytes it receives are the trailing
    // record — i.e. neither the ClientHello nor the invalid record leaked.
    let accept_task = tokio::spawn(async move {
        let (mut stream, _) = listener.accept().await.unwrap();

        let mut got = vec![0u8; expected_trailing.len()];
        stream.read_exact(&mut got).await.unwrap();
        assert_eq!(
            got, expected_trailing,
            "mask backend must receive only post-handshake trailing TLS records"
        );
    });

    let (server_side, mut client_side) = duplex(131072);
    let peer: SocketAddr = "198.51.100.242:56542".parse().unwrap();

    let handler = tokio::spawn(handle_client_stream(
        server_side,
        peer,
        harness.config,
        harness.stats,
        harness.upstream_manager,
        harness.replay_checker,
        harness.buffer_pool,
        harness.rng,
        None,
        harness.route_runtime,
        None,
        harness.ip_tracker,
        harness.beobachten,
        false,
    ));

    client_side.write_all(&client_hello).await.unwrap();

    // Consume the proxy's ServerHello (must be a handshake record, 0x16).
    let mut tls_response_head = [0u8; 5];
    client_side
        .read_exact(&mut tls_response_head)
        .await
        .unwrap();
    assert_eq!(tls_response_head[0], 0x16);
    read_and_discard_tls_record_body(&mut client_side, tls_response_head).await;

    client_side
        .write_all(&invalid_mtproto_record)
        .await
        .unwrap();
    client_side.write_all(&trailing_record).await.unwrap();

    // Wait for the backend-side assertion before tearing the client down.
    tokio::time::timeout(Duration::from_secs(3), accept_task)
        .await
        .unwrap()
        .unwrap();

    drop(client_side);
    let result = tokio::time::timeout(Duration::from_secs(3), handler)
        .await
        .unwrap()
        .unwrap();
    assert!(result.is_ok());
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user