Compare commits


86 Commits

Author SHA1 Message Date
Alexey
9de8b2f0bf Update release.yml 2026-03-22 10:36:54 +03:00
Alexey
4e5b67bae8 Update release.yml 2026-03-22 10:28:06 +03:00
Alexey
73f218b62a Update release.yml 2026-03-22 00:27:16 +03:00
Alexey
13ff3af1db Update release.yml 2026-03-22 00:18:54 +03:00
Alexey
77f717e3d1 Merge pull request #534 from telemt/workflow
Update release.yml
2026-03-22 00:16:11 +03:00
Alexey
db3e246390 Update release.yml 2026-03-22 00:15:56 +03:00
Alexey
b74ba38d40 Merge pull request #533 from telemt/workflow
Workflow
2026-03-22 00:10:38 +03:00
Alexey
269fce839f Update Dockerfile 2026-03-22 00:10:19 +03:00
Alexey
5a4072c964 Update release.yml 2026-03-22 00:08:16 +03:00
Alexey
a95678988a Merge pull request #530 from telemt/workflow
Update release.yml
2026-03-21 21:45:23 +03:00
Alexey
b17482ede3 Update release.yml 2026-03-21 21:45:01 +03:00
Alexey
e7a1d26e6e Merge pull request #526 from telemt/workflow
Update release.yml
2026-03-21 16:48:53 +03:00
Alexey
b91c6cb339 Update release.yml 2026-03-21 16:48:42 +03:00
Alexey
c4e7f54cbe Merge pull request #524 from telemt/workflow
Update release.yml
2026-03-21 16:31:15 +03:00
Alexey
f85205d48d Update release.yml 2026-03-21 16:31:05 +03:00
Alexey
d767ec02ee Update release.yml 2026-03-21 16:24:06 +03:00
Alexey
88a4c652b6 Merge pull request #523 from telemt/workflow
Update release.yml
2026-03-21 16:18:48 +03:00
Alexey
ea2d964502 Update release.yml 2026-03-21 16:18:24 +03:00
Alexey
3055637571 Merge pull request #522 from telemt/workflow
Update release.yml
2026-03-21 16:04:56 +03:00
Alexey
19b84b9d73 Update release.yml 2026-03-21 16:03:54 +03:00
Alexey
6ead8b1922 Merge pull request #521 from telemt/workflow
Update release.yml
2026-03-21 15:58:36 +03:00
Alexey
63aa1038c0 Update release.yml 2026-03-21 15:58:25 +03:00
Alexey
24594e648e Merge pull request #519 from telemt/workflow
Update release.yml
2026-03-21 15:21:47 +03:00
Alexey
e8b38ea860 Update release.yml 2026-03-21 15:21:25 +03:00
Alexey
f3598cf309 Merge pull request #514 from M1h4n1k/patch-1
docs: fix typo in ru QUICK_START
2026-03-21 10:22:52 +03:00
Michael Karpov
777b15b1da Update section title for Docker usage
Changed the section title from 'Запуск в Docker Compose' (Running in Docker Compose) to 'Запуск без Docker Compose' (Running without Docker Compose).
2026-03-20 22:23:36 +02:00
Alexey
99ba2f7bbc Add Shadowsocks upstream support: merge pull request #430 from hunmar/feat/shadowsocks-upstream
Add Shadowsocks upstream support
2026-03-20 18:35:28 +03:00
Maxim Myalin
e14dd07220 Merge branch 'main' into feat/shadowsocks-upstream 2026-03-20 17:08:47 +03:00
Maxim Myalin
d93a4fbd53 Merge remote-tracking branch 'origin/main' into feat/shadowsocks-upstream
# Conflicts:
#	src/tls_front/fetcher.rs
2026-03-20 17:07:47 +03:00
Alexey
2798039ab8 Merge pull request #507 from dzhus/patch-2
Fix typo in systemd service metadata
2026-03-20 17:04:41 +03:00
Alexey
342b0119dd Merge pull request #509 from telemt/bump
Update Cargo.toml
2026-03-20 16:27:39 +03:00
Alexey
2605929b93 Update Cargo.toml 2026-03-20 16:26:57 +03:00
Alexey
36814b6355 ME Draining on Dual-Stack + TLS Fetcher Upstream Selection: merge pull request #508 from telemt/flow
ME Draining on Dual-Stack + TLS Fetcher Upstream Selection
2026-03-20 16:24:17 +03:00
Alexey
269ba537ad ME Draining on Dual-Stack
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-20 16:07:12 +03:00
Alexey
5c0eb6dbe8 TLS Fetcher Upstream Selection
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-20 16:05:24 +03:00
Maxim Myalin
66867d3f5b Merge branch 'main' into feat/shadowsocks-upstream
# Conflicts:
#	Cargo.lock
#	src/api/runtime_stats.rs
2026-03-20 15:22:36 +03:00
Dmitry Dzhus
db36945293 Fix typo in systemd service metadata 2026-03-20 12:00:41 +00:00
Alexey
dd07fa9453 Merge pull request #505 from telemt/flow-me
Teardown Monitoring in API and Metrics
2026-03-20 12:59:39 +03:00
Alexey
bb1a372ac4 Merge branch 'main' into flow-me 2026-03-20 12:59:32 +03:00
Alexey
6661401a34 Merge pull request #506 from telemt/about-releases
Update README.md
2026-03-20 12:59:09 +03:00
Alexey
cd65fb432b Update README.md 2026-03-20 12:58:55 +03:00
Alexey
caf0717789 Merge branch 'main' into flow-me 2026-03-20 12:57:27 +03:00
Alexey
4a610d83a3 Update Cargo.toml
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-20 12:56:13 +03:00
Alexey
aba4205dcc Teardown Monitoring in Metrics
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-20 12:46:35 +03:00
Alexey
ef9b7b1492 Teardown Monitoring in API
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-20 12:45:53 +03:00
Alexey
d112f15b90 ME Writers Anti-stuck + Quarantine fixes + ME Writers Advanced Cleanup + Authoritative Teardown + Orphan Watchdog + Force-Close Safety Policy: merge pull request #504 from telemt/flow-me
ME Writers Anti-stuck + Quarantine fixes + ME Writers Advanced Cleanup + Authoritative Teardown + Orphan Watchdog + Force-Close Safety Policy
2026-03-20 12:41:45 +03:00
Alexey
b55b264345 Merge branch 'main' into flow-me 2026-03-20 12:20:51 +03:00
Alexey
f61d25ebe0 Authoritative Teardown + Orphan Watchdog + Force-Close Safety Policy
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-20 12:11:47 +03:00
Alexey
ed4d1167dd ME Writers Advanced Cleanup
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-20 12:09:23 +03:00
Alexey
dc6948cf39 Merge pull request #502 from telemt/about-releases
Update README.md
2026-03-20 11:25:19 +03:00
Alexey
4f11aa0772 Update README.md 2026-03-20 11:25:07 +03:00
Alexey
e40361b171 Cargo.toml + Cargo.lock
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-20 00:45:04 +03:00
Alexey
1c6c73beda ME Writers Anti-stuck and Quarantine fixes
Co-Authored-By: Nook Scheel <nook@live.ru>
2026-03-20 00:41:40 +03:00
Alexey
67dc1e8d18 Merge pull request #498 from telemt/bump
Update Cargo.toml
2026-03-19 18:25:14 +03:00
Alexey
ad8ada33c9 Update Cargo.toml 2026-03-19 18:24:01 +03:00
Alexey
bbb201b433 Instadrain + Hard-remove for long draining-state: merge pull request #497 from telemt/flow-stuck-writer
Instadrain + Hard-remove for long draining-state
2026-03-19 18:23:38 +03:00
Alexey
8d1faece60 Instadrain + Hard-remove for long draining-state
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-19 17:45:17 +03:00
Alexey
a603505f90 Merge pull request #492 from temandroid/main
fix(docker): expose port 9091 and allow external API access
2026-03-19 17:32:49 +03:00
Alexey
f8c42c324f Merge pull request #494 from Dimasssss/patch-1
Update install.sh
2026-03-19 17:32:05 +03:00
Alexey
dd8ef4d996 Merge branch 'main' into feat/shadowsocks-upstream 2026-03-19 17:19:01 +03:00
Dimasssss
dc3363aa0d Update install.sh 2026-03-19 16:23:32 +03:00
Alexey
f655924323 Update health.rs
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-19 16:15:00 +03:00
TEMAndroid
05c066c676 fix(docker): expose port 9091 and allow external API access
Add 9091 port mapping to compose.yml to make the REST API reachable
from outside the container. Previously only port 9090 (metrics) was
published, making the documented curl commands non-functional.

fixes #434
2026-03-19 15:54:01 +03:00
Alexey
1e000c2e7e ME Writer stuck-up in draining-state fixes: merge pull request #491 from telemt/flow-stuck-writer
ME Writer stuck-up in draining-state fixes
2026-03-19 14:44:43 +03:00
Alexey
fa17e719f6 Merge pull request #490 from telemt/bump
Update Cargo.toml
2026-03-19 14:43:15 +03:00
Alexey
ae3ced8e7c Update Cargo.toml 2026-03-19 14:42:59 +03:00
Alexey
3279f6d46a Cleanup-path as non-blocking
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-19 14:07:20 +03:00
Alexey
6f9aef7bb4 ME Writer stuck-up in draining-state fixes
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-19 13:08:35 +03:00
Alexey
049db1196f Merge pull request #487 from telemt/code-of-conduct
Update CODE_OF_CONDUCT.md
2026-03-19 01:18:14 +03:00
Alexey
c8ffc23cf7 Update CODE_OF_CONDUCT.md 2026-03-19 01:18:02 +03:00
Alexey
f230f2ce0e Merge pull request #486 from telemt/code-of-conduct
Create CODE_OF_CONDUCT.md
2026-03-19 00:59:50 +03:00
Alexey
bdac6e3480 Create CODE_OF_CONDUCT.md 2026-03-19 00:59:37 +03:00
Alexey
a4e9746dc7 Merge pull request #485 from Dimasssss/patch-3
Update install.sh
2026-03-19 00:43:14 +03:00
Dimasssss
c47495d671 Update install.sh
Restored the old functionality and added new features:
- Restored automatic creation of a config with a secret
- Restored automatic creation of the service
- Added removal of the service and telemt via `install.sh uninstall`
- Added full removal including the config via `install.sh --purge`
- Added installing a specific version via `install.sh 3.3.15`
2026-03-19 00:36:02 +03:00
Alexey
5ae3a90d5e Merge pull request #483 from Dimasssss/patch-1
Update CONFIG_PARAMS.en.md
2026-03-18 23:02:33 +03:00
Alexey
901a0b7c23 Runtime guardrails: merge pull request #484 from telemt/flow-runtime
Runtime guardrails
2026-03-18 22:37:17 +03:00
Alexey
03891db0c9 Update Cargo.toml
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-18 22:36:33 +03:00
Alexey
89e5668c7e Runtime guardrails
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-18 22:33:41 +03:00
Dimasssss
1935455256 Update CONFIG_PARAMS.en.md 2026-03-18 18:20:23 +03:00
Alexey
1544e3fcff Merge pull request #481 from telemt/bump
Update Cargo.toml
2026-03-18 13:58:39 +03:00
Alexey
85295a9961 Update Cargo.toml 2026-03-18 13:58:27 +03:00
Alexey
a54f807a45 Hot-Reload fixes: merge pull request #480 from telemt/flow-user
Hot-Reload fixes
2026-03-18 13:57:58 +03:00
Alexey
31f6258c47 Hot-Reload fixes
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-18 13:54:59 +03:00
Maxim Myalin
062464175e Merge branch 'main' into feat/shadowsocks-upstream 2026-03-18 12:38:23 +03:00
Maxim Myalin
a5983c17d3 Add Docker build context ignore file 2026-03-18 12:36:48 +03:00
Maxim Myalin
def42f0baa Add Shadowsocks upstream support 2026-03-18 12:36:44 +03:00
55 changed files with 5928 additions and 1069 deletions

8
.dockerignore Normal file

@@ -0,0 +1,8 @@
.git
.github
target
.kilocode
cache
tlsfront
*.tar
*.tar.gz

release.yml

@@ -6,36 +6,34 @@ on:
- '[0-9]+.[0-9]+.[0-9]+'
workflow_dispatch:
concurrency:
group: release-${{ github.ref }}
cancel-in-progress: true
permissions:
contents: read
packages: write
env:
CARGO_TERM_COLOR: always
BINARY_NAME: telemt
jobs:
build:
name: Build ${{ matrix.target }}
# ==========================
# GNU / glibc
# ==========================
build-gnu:
name: GNU ${{ matrix.target }}
runs-on: ubuntu-latest
permissions:
contents: read
strategy:
fail-fast: false
matrix:
include:
- target: x86_64-unknown-linux-gnu
artifact_name: telemt
asset_name: telemt-x86_64-linux-gnu
asset: telemt-x86_64-linux-gnu
- target: aarch64-unknown-linux-gnu
artifact_name: telemt
asset_name: telemt-aarch64-linux-gnu
- target: x86_64-unknown-linux-musl
artifact_name: telemt
asset_name: telemt-x86_64-linux-musl
- target: aarch64-unknown-linux-musl
artifact_name: telemt
asset_name: telemt-aarch64-linux-musl
asset: telemt-aarch64-linux-gnu
steps:
- uses: actions/checkout@v4
@@ -43,12 +41,20 @@ jobs:
- uses: dtolnay/rust-toolchain@v1
with:
toolchain: stable
targets: ${{ matrix.target }}
targets: |
x86_64-unknown-linux-gnu
aarch64-unknown-linux-gnu
- name: Install cross-compilation tools
- name: Install deps
run: |
sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu
sudo apt-get install -y \
build-essential \
clang \
lld \
pkg-config \
gcc-aarch64-linux-gnu \
g++-aarch64-linux-gnu
- uses: actions/cache@v4
with:
@@ -56,41 +62,183 @@ jobs:
~/.cargo/registry
~/.cargo/git
target
key: ${{ runner.os }}-${{ matrix.target }}-cargo-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-${{ matrix.target }}-cargo-
key: gnu-${{ matrix.target }}-${{ hashFiles('**/Cargo.lock') }}
- name: Install cross
run: cargo install cross --git https://github.com/cross-rs/cross
- name: Build Release
env:
RUSTFLAGS: ${{ contains(matrix.target, 'musl') && '-C target-feature=+crt-static' || '' }}
run: cross build --release --target ${{ matrix.target }}
- name: Package binary
- name: Build
run: |
cd target/${{ matrix.target }}/release
tar -czvf ${{ matrix.asset_name }}.tar.gz ${{ matrix.artifact_name }}
sha256sum ${{ matrix.asset_name }}.tar.gz > ${{ matrix.asset_name }}.sha256
if [ "${{ matrix.target }}" = "aarch64-unknown-linux-gnu" ]; then
export CC=aarch64-linux-gnu-gcc
export CXX=aarch64-linux-gnu-g++
export CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc
export CXX_aarch64_unknown_linux_gnu=aarch64-linux-gnu-g++
export RUSTFLAGS="-C linker=aarch64-linux-gnu-gcc"
else
export CC=clang
export CXX=clang++
export CC_x86_64_unknown_linux_gnu=clang
export CXX_x86_64_unknown_linux_gnu=clang++
export RUSTFLAGS="-C linker=clang -C link-arg=-fuse-ld=lld"
fi
cargo build --release --target ${{ matrix.target }}
- name: Package
run: |
mkdir -p dist
BIN=target/${{ matrix.target }}/release/${{ env.BINARY_NAME }}
cp "$BIN" dist/${{ env.BINARY_NAME }}-${{ matrix.target }}
cd dist
tar -czf ${{ matrix.asset }}.tar.gz ${{ env.BINARY_NAME }}-${{ matrix.target }}
sha256sum ${{ matrix.asset }}.tar.gz > ${{ matrix.asset }}.sha256
- uses: actions/upload-artifact@v4
with:
name: ${{ matrix.asset_name }}
name: ${{ matrix.asset }}
path: |
target/${{ matrix.target }}/release/${{ matrix.asset_name }}.tar.gz
target/${{ matrix.target }}/release/${{ matrix.asset_name }}.sha256
dist/${{ matrix.asset }}.tar.gz
dist/${{ matrix.asset }}.sha256
build-docker-image:
needs: build
# ==========================
# MUSL
# ==========================
build-musl:
name: MUSL ${{ matrix.target }}
runs-on: ubuntu-latest
permissions:
contents: read
packages: write
container:
image: rust:slim-bookworm
strategy:
fail-fast: false
matrix:
include:
- target: x86_64-unknown-linux-musl
asset: telemt-x86_64-linux-musl
- target: aarch64-unknown-linux-musl
asset: telemt-aarch64-linux-musl
steps:
- uses: actions/checkout@v4
- name: Install deps
run: |
apt-get update
apt-get install -y \
musl-tools \
pkg-config \
curl
# 💾 cache toolchain
- uses: actions/cache@v4
if: matrix.target == 'aarch64-unknown-linux-musl'
with:
path: ~/.musl-aarch64
key: musl-toolchain-aarch64-v1
# 🔥 reliable installation
- name: Install aarch64 musl toolchain
if: matrix.target == 'aarch64-unknown-linux-musl'
run: |
set -e
TOOLCHAIN_DIR="$HOME/.musl-aarch64"
ARCHIVE="aarch64-linux-musl-cross.tgz"
if [ -x "$TOOLCHAIN_DIR/bin/aarch64-linux-musl-gcc" ]; then
echo "✅ musl toolchain already installed"
else
echo "⬇️ downloading musl toolchain..."
download() {
url="$1"
echo "→ trying $url"
curl -fL \
--retry 5 \
--retry-delay 3 \
--connect-timeout 10 \
--max-time 120 \
-o "$ARCHIVE" "$url" && return 0
return 1
}
download "https://musl.cc/$ARCHIVE" || \
download "https://more.musl.cc/$ARCHIVE" || \
{ echo "❌ failed to download musl toolchain"; exit 1; }
mkdir -p "$TOOLCHAIN_DIR"
tar -xzf "$ARCHIVE" --strip-components=1 -C "$TOOLCHAIN_DIR"
fi
echo "$TOOLCHAIN_DIR/bin" >> $GITHUB_PATH
- name: Add rust target
run: rustup target add ${{ matrix.target }}
- uses: actions/cache@v4
with:
path: |
/usr/local/cargo/registry
/usr/local/cargo/git
target
key: musl-${{ matrix.target }}-${{ hashFiles('**/Cargo.lock') }}
- name: Build
run: |
if [ "${{ matrix.target }}" = "aarch64-unknown-linux-musl" ]; then
export CC=aarch64-linux-musl-gcc
export CC_aarch64_unknown_linux_musl=aarch64-linux-musl-gcc
export RUSTFLAGS="-C target-feature=+crt-static -C linker=aarch64-linux-musl-gcc"
else
export CC=musl-gcc
export CC_x86_64_unknown_linux_musl=musl-gcc
export RUSTFLAGS="-C target-feature=+crt-static"
fi
cargo build --release --target ${{ matrix.target }}
- name: Package
run: |
mkdir -p dist
BIN=target/${{ matrix.target }}/release/${{ env.BINARY_NAME }}
cp "$BIN" dist/${{ env.BINARY_NAME }}-${{ matrix.target }}
cd dist
tar -czf ${{ matrix.asset }}.tar.gz ${{ env.BINARY_NAME }}-${{ matrix.target }}
sha256sum ${{ matrix.asset }}.tar.gz > ${{ matrix.asset }}.sha256
- uses: actions/upload-artifact@v4
with:
name: ${{ matrix.asset }}
path: |
dist/${{ matrix.asset }}.tar.gz
dist/${{ matrix.asset }}.sha256
# ==========================
# Docker
# ==========================
docker:
name: Docker
runs-on: ubuntu-latest
needs: [build-gnu, build-musl]
continue-on-error: true
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
path: artifacts
- name: Extract binaries
run: |
mkdir dist
find artifacts -name "*.tar.gz" -exec tar -xzf {} -C dist \;
cp dist/telemt-x86_64-unknown-linux-musl dist/telemt || true
- uses: docker/setup-qemu-action@v3
- uses: docker/setup-buildx-action@v3
@@ -105,35 +253,43 @@ jobs:
id: vars
run: echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
- name: Build and push
- name: Build & Push
uses: docker/build-push-action@v6
with:
context: .
push: true
platforms: linux/amd64,linux/arm64
tags: |
ghcr.io/${{ github.repository }}:${{ steps.vars.outputs.VERSION }}
ghcr.io/${{ github.repository }}:latest
build-args: |
BINARY=dist/telemt
# ==========================
# Release
# ==========================
release:
name: Create Release
needs: build
name: Release
runs-on: ubuntu-latest
needs: [build-gnu, build-musl]
permissions:
contents: write
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/download-artifact@v4
with:
path: artifacts
- name: Flatten artifacts
run: |
mkdir dist
find artifacts -type f -exec cp {} dist/ \;
- name: Create Release
uses: softprops/action-gh-release@v2
with:
files: artifacts/**/*
files: dist/*
generate_release_notes: true
draft: false
prerelease: ${{ contains(github.ref, '-rc') || contains(github.ref, '-beta') || contains(github.ref, '-alpha') }}
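
For reference, a minimal sketch of verifying a downloaded release asset produced by the Package steps above; the release tag is illustrative, while the checksum and tarball layout follow the workflow (`sha256sum <asset>.tar.gz` written next to the tarball, binary named `telemt-<target>` inside):

```sh
# Hypothetical local verification of a release asset; tag 3.3.28 is illustrative.
ASSET=telemt-x86_64-linux-gnu
BASE=https://github.com/telemt/telemt/releases/download/3.3.28

curl -fLO "$BASE/$ASSET.tar.gz"
curl -fLO "$BASE/$ASSET.sha256"

# The .sha256 file was written with `sha256sum $ASSET.tar.gz`, so -c can verify it.
sha256sum -c "$ASSET.sha256"

# The tarball contains the binary named telemt-<target>, per the Package step.
tar -xzf "$ASSET.tar.gz"
ls -l telemt-x86_64-unknown-linux-gnu
```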

208
CODE_OF_CONDUCT.md Normal file

@@ -0,0 +1,208 @@
# Code of Conduct
## 1. Purpose
Telemt exists to solve technical problems.
Telemt is open to contributors who want to learn, improve and build meaningful systems together.
It is a place for building, testing, reasoning, documenting, and improving systems.
Discussions that advance this work are in scope. Discussions that divert it are not.
Technology has consequences. Responsibility is inherent.
> **Zweck bestimmt die Form.**
> Purpose defines form.
---
## 2. Principles
* **Technical over emotional**
Arguments are grounded in data, logs, reproducible cases, or clear reasoning.
* **Clarity over noise**
Communication is structured, concise, and relevant.
* **Openness with standards**
Participation is open. The work remains disciplined.
* **Independence of judgment**
Claims are evaluated on technical merit, not affiliation or posture.
* **Responsibility over capability**
Capability does not justify careless use.
* **Cooperation over friction**
Progress depends on coordination, mutual support, and honest review.
* **Good intent, rigorous method**
Assume good intent, but require rigor.
> **Aussagen gelten nach ihrer Begründung.**
> Claims are weighed by evidence.
---
## 3. Expected Behavior
Participants are expected to:
* Communicate directly and respectfully
* Support claims with evidence
* Stay within technical scope
* Accept critique and provide it constructively
* Reduce noise, duplication, and ambiguity
* Help others reach correct and reproducible outcomes
* Act in a way that improves the system as a whole
Precision is learned.
New contributors are welcome. They are expected to grow into these standards. Existing contributors are expected to make that growth possible.
> **Wer behauptet, belegt.**
> Whoever claims, proves.
---
## 4. Unacceptable Behavior
The following is not allowed:
* Personal attacks, insults, harassment, or intimidation
* Repeatedly derailing discussion away from Telemt's purpose
* Spam, flooding, or repeated low-quality input
* Misinformation presented as fact
* Attempts to degrade, destabilize, or exhaust Telemt or its participants
* Use of Telemt or its spaces to enable harm
Telemt is not a venue for disputes that displace technical work.
Such discussions may be closed, removed, or redirected.
> **Störung ist kein Beitrag.**
> Disruption is not contribution.
---
## 5. Security and Misuse
Telemt is intended for responsible use.
* Do not use it to plan, coordinate, or execute harm
* Do not publish vulnerabilities without responsible disclosure
* Report security issues privately where possible
Security is both technical and behavioral.
> **Verantwortung endet nicht am Code.**
> Responsibility does not end at the code.
---
## 6. Openness
Telemt is open to contributors of different backgrounds, experience levels, and working styles.
Standards are public, legible, and applied to the work itself.
Questions are welcome. Careful disagreement is welcome. Honest correction is welcome.
Gatekeeping by obscurity, status signaling, or hostility is not.
---
## 7. Scope
This Code of Conduct applies to all official spaces:
* Source repositories (issues, pull requests, discussions)
* Documentation
* Communication channels associated with Telemt
---
## 8. Maintainer Stewardship
Maintainers are responsible for final decisions in matters of conduct, scope, and direction.
This responsibility is stewardship: preserving continuity, protecting signal, maintaining standards, and keeping Telemt workable for others.
Judgment should be exercised with restraint, consistency, and institutional responsibility.
Not every decision requires extended debate.
Not every intervention requires public explanation.
All decisions are expected to serve the durability, clarity, and integrity of Telemt.
> **Ordnung ist Voraussetzung der Funktion.**
> Order is the precondition of function.
---
## 9. Enforcement
Maintainers may act to preserve the integrity of Telemt, including by:
* Removing content
* Locking discussions
* Rejecting contributions
* Restricting or banning participants
Actions are taken to maintain function, continuity, and signal quality.
Where possible, correction is preferred to exclusion.
Where necessary, exclusion is preferred to decay.
---
## 10. Final
Telemt is built on discipline, structure, and shared intent.
Signal over noise.
Facts over opinion.
Systems over rhetoric.
Work is collective.
Outcomes are shared.
Responsibility is distributed.
Precision is learned.
Rigor is expected.
Help is part of the work.
> **Ordnung ist Voraussetzung der Freiheit.**
> Order is the precondition of freedom.
If you contribute — contribute with care.
If you speak — speak with substance.
If you engage — engage constructively.
---
## 11. After All
Systems outlive intentions.
What is built will be used.
What is released will propagate.
What is maintained will define the future state.
There is no neutral infrastructure, only infrastructure shaped well or poorly.
> **Jedes System trägt Verantwortung.**
> Every system carries responsibility.
Stability requires discipline.
Freedom requires structure.
Trust requires honesty.
In the end, the system reflects its contributors.

903
Cargo.lock generated

File diff suppressed because it is too large

Cargo.toml

@@ -1,6 +1,6 @@
[package]
name = "telemt"
version = "3.3.21"
version = "3.3.28"
edition = "2024"
[dependencies]
@@ -26,6 +26,7 @@ zeroize = { version = "1.8", features = ["derive"] }
# Network
socket2 = { version = "0.5", features = ["all"] }
nix = { version = "0.28", default-features = false, features = ["net"] }
shadowsocks = { version = "1.24", features = ["aead-cipher-2022"] }
# Serialization
serde = { version = "1.0", features = ["derive"] }
@@ -40,6 +41,7 @@ tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
parking_lot = "0.12"
dashmap = "5.5"
arc-swap = "1.7"
lru = "0.16"
rand = "0.9"
chrono = { version = "0.4", features = ["serde"] }
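
As a side note, the two dependency additions in this hunk could equivalently be made with `cargo add` (a sketch; versions as pinned in the diff):

```sh
# Mirrors the manifest edits shown above.
cargo add shadowsocks@1.24 --features aead-cipher-2022
cargo add arc-swap@1.7
```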

Dockerfile

@@ -1,3 +1,5 @@
# syntax=docker/dockerfile:1
# ==========================
# Stage 1: Build
# ==========================
@@ -5,39 +7,91 @@ FROM rust:1.88-slim-bookworm AS builder
RUN apt-get update && apt-get install -y --no-install-recommends \
pkg-config \
ca-certificates \
&& rm -rf /var/lib/apt/lists/*
WORKDIR /build
# Depcache
COPY Cargo.toml Cargo.lock* ./
RUN mkdir src && echo 'fn main() {}' > src/main.rs && \
cargo build --release 2>/dev/null || true && \
rm -rf src
# Build
COPY . .
RUN cargo build --release && strip target/release/telemt
# ==========================
# Stage 2: Runtime
# Stage 2: Compress (strip + UPX)
# ==========================
FROM debian:bookworm-slim
FROM debian:12-slim AS minimal
RUN apt-get update && apt-get install -y --no-install-recommends \
upx \
binutils \
&& rm -rf /var/lib/apt/lists/*
COPY --from=builder /build/target/release/telemt /telemt
RUN strip /telemt || true
RUN upx --best --lzma /telemt || true
# ==========================
# Stage 3: Debug base
# ==========================
FROM debian:12-slim AS debug-base
RUN apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
tzdata \
curl \
iproute2 \
busybox \
&& rm -rf /var/lib/apt/lists/*
RUN useradd -r -s /usr/sbin/nologin telemt
# ==========================
# Stage 4: Debug image
# ==========================
FROM debug-base AS debug
WORKDIR /app
COPY --from=builder /build/target/release/telemt /app/telemt
COPY --from=minimal /telemt /app/telemt
COPY config.toml /app/config.toml
RUN chown -R telemt:telemt /app
USER telemt
USER root
EXPOSE 443
EXPOSE 9090
EXPOSE 9091
ENTRYPOINT ["/app/telemt"]
CMD ["config.toml"]
# ==========================
# Stage 5: Production (distroless)
# ==========================
FROM gcr.io/distroless/base-debian12 AS prod
WORKDIR /app
COPY --from=minimal /telemt /app/telemt
COPY config.toml /app/config.toml
# TLS + timezone + shell
COPY --from=debug-base /etc/ssl/certs /etc/ssl/certs
COPY --from=debug-base /usr/share/zoneinfo /usr/share/zoneinfo
COPY --from=debug-base /bin/busybox /bin/busybox
RUN ["/bin/busybox", "--install", "-s", "/bin"]
# distroless user
USER nonroot:nonroot
EXPOSE 443
EXPOSE 9090
EXPOSE 9091
ENTRYPOINT ["/app/telemt"]
CMD ["config.toml"]

README.md

@@ -19,9 +19,9 @@
### 🇷🇺 RU
#### Release 3.3.15 Semistable
#### About releases
[3.3.15](https://github.com/telemt/telemt/releases/tag/3.3.15) has proven in production to be one of the most stable releases and is recommended when cutting-edge features are not critical!
[3.3.27](https://github.com/telemt/telemt/releases/tag/3.3.27) offers a balance of stability and cutting-edge functionality, along with the latest security and bug fixes
We welcome your feedback and improvement suggestions — especially regarding the **API**, **statistics**, and **UX**
@@ -40,9 +40,9 @@
### 🇬🇧 EN
#### Release 3.3.15 Semistable
#### About releases
[3.3.15](https://github.com/telemt/telemt/releases/tag/3.3.15) has, based on its production track record, been recognized as one of the most stable releases and is recommended when cutting-edge features are not essential!
[3.3.27](https://github.com/telemt/telemt/releases/tag/3.3.27) provides a balance of stability and advanced functionality, as well as the latest security and bug fixes
We are looking forward to your feedback and improvement proposals — especially regarding **API**, **statistics**, **UX**

compose.yml

@@ -7,6 +7,7 @@ services:
ports:
- "443:443"
- "127.0.0.1:9090:9090"
- "127.0.0.1:9091:9091"
# Allow caching 'proxy-secret' in read-only container
working_dir: /run/telemt
volumes:
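
A quick reachability sketch for the newly published port (the concrete REST routes live in the API documentation; this only confirms the host can reach the container):

```sh
docker compose up -d

# 9090 (metrics) was already published; 9091 (REST API) is new in this change.
# Any HTTP status code at all proves the port mapping works.
curl -s -o /dev/null -w 'metrics: %{http_code}\n' http://127.0.0.1:9090/
curl -s -o /dev/null -w 'api:     %{http_code}\n' http://127.0.0.1:9091/
```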


@@ -497,13 +497,14 @@ Note: the request contract is defined, but the corresponding route currently ret
| `direct_total` | `usize` | Direct-route upstream entries. |
| `socks4_total` | `usize` | SOCKS4 upstream entries. |
| `socks5_total` | `usize` | SOCKS5 upstream entries. |
| `shadowsocks_total` | `usize` | Shadowsocks upstream entries. |
#### `RuntimeUpstreamQualityUpstreamData`
| Field | Type | Description |
| --- | --- | --- |
| `upstream_id` | `usize` | Runtime upstream index. |
| `route_kind` | `string` | `direct`, `socks4`, `socks5`. |
| `address` | `string` | Upstream address (`direct` literal for direct route kind). |
| `route_kind` | `string` | `direct`, `socks4`, `socks5`, `shadowsocks`. |
| `address` | `string` | Upstream address (`direct` literal for direct route kind, `host:port` only for proxied upstreams). |
| `weight` | `u16` | Selection weight. |
| `scopes` | `string` | Configured scope selector. |
| `healthy` | `bool` | Current health flag. |
@@ -757,13 +758,14 @@ Note: the request contract is defined, but the corresponding route currently ret
| `direct_total` | `usize` | Number of direct upstream entries. |
| `socks4_total` | `usize` | Number of SOCKS4 upstream entries. |
| `socks5_total` | `usize` | Number of SOCKS5 upstream entries. |
| `shadowsocks_total` | `usize` | Number of Shadowsocks upstream entries. |
#### `UpstreamStatus`
| Field | Type | Description |
| --- | --- | --- |
| `upstream_id` | `usize` | Runtime upstream index. |
| `route_kind` | `string` | Upstream route kind: `direct`, `socks4`, `socks5`. |
| `address` | `string` | Upstream address (`direct` for direct route kind). Authentication fields are intentionally omitted. |
| `route_kind` | `string` | Upstream route kind: `direct`, `socks4`, `socks5`, `shadowsocks`. |
| `address` | `string` | Upstream address (`direct` for direct route kind, `host:port` for Shadowsocks). Authentication fields are intentionally omitted. |
| `weight` | `u16` | Selection weight. |
| `scopes` | `string` | Configured scope selector string. |
| `healthy` | `bool` | Current health flag. |

CONFIG_PARAMS.en.md

@@ -8,282 +8,287 @@ This document lists all configuration keys accepted by `config.toml`.
## Top-level keys
| Parameter | Type | Description |
|---|---|---|
| include | `String` (special directive) | Includes another TOML file with `include = "relative/or/absolute/path.toml"`; includes are processed recursively before parsing. |
| show_link | `"*" \| String[]` | Legacy top-level link visibility selector (`"*"` for all users or explicit usernames list). |
| dc_overrides | `Map<String, String[]>` | Overrides DC endpoints for non-standard DCs; key is DC id string, value is `ip:port` list. |
| default_dc | `u8` | Default DC index used for unmapped non-standard DCs. |
| Parameter | Type | Default | Constraints / validation | Description |
|---|---|---|---|---|
| include | `String` (special directive) | `null` | — | Includes another TOML file with `include = "relative/or/absolute/path.toml"`; includes are processed recursively before parsing. |
| show_link | `"*" \| String[]` | `[]` (`ShowLink::None`) | — | Legacy top-level link visibility selector (`"*"` for all users or explicit usernames list). |
| dc_overrides | `Map<String, String[]>` | `{}` | — | Overrides DC endpoints for non-standard DCs; key is DC id string, value is `ip:port` list. |
| default_dc | `u8 \| null` | `null` (effective fallback: `2` in ME routing) | — | Default DC index used for unmapped non-standard DCs. |
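Putting the top-level keys together, a minimal illustrative fragment (paths, usernames, DC ids, and endpoints are placeholders, not real values):

```sh
# Sketch of the top-level keys documented above, written to an example file.
cat > config.example.toml <<'EOF'
include = "secrets/users.toml"   # processed recursively before parsing
show_link = ["alice", "bob"]     # or "*" for all users
default_dc = 2                   # fallback for unmapped non-standard DCs

# Key is the DC id as a string; value is an ip:port list.
[dc_overrides]
"10" = ["192.0.2.10:443", "192.0.2.11:443"]
EOF
```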
## [general]
| Parameter | Type | Description |
|---|---|---|
| data_path | `String` | Optional runtime data directory path. |
| prefer_ipv6 | `bool` | Prefer IPv6 where applicable in runtime logic. |
| fast_mode | `bool` | Enables fast-path optimizations for traffic processing. |
| use_middle_proxy | `bool` | Enables Middle Proxy mode. |
| proxy_secret_path | `String` | Path to proxy secret binary; can be auto-downloaded if absent. |
| proxy_config_v4_cache_path | `String` | Optional cache path for raw `getProxyConfig` (IPv4) snapshot. |
| proxy_config_v6_cache_path | `String` | Optional cache path for raw `getProxyConfigV6` (IPv6) snapshot. |
| ad_tag | `String` | Global fallback ad tag (32 hex characters). |
| middle_proxy_nat_ip | `IpAddr` | Explicit public IP override for NAT environments. |
| middle_proxy_nat_probe | `bool` | Enables NAT probing for Middle Proxy KDF/public address discovery. |
| middle_proxy_nat_stun | `String` | Deprecated legacy single STUN server for NAT probing. |
| middle_proxy_nat_stun_servers | `String[]` | Deprecated legacy STUN list for NAT probing fallback. |
| stun_nat_probe_concurrency | `usize` | Maximum concurrent STUN probes during NAT detection. |
| middle_proxy_pool_size | `usize` | Target size of active Middle Proxy writer pool. |
| middle_proxy_warm_standby | `usize` | Number of warm standby Middle-End connections. |
| me_init_retry_attempts | `u32` | Startup retries for ME pool initialization (`0` means unlimited). |
| me2dc_fallback | `bool` | Allows fallback from ME mode to direct DC when ME startup fails. |
| me_keepalive_enabled | `bool` | Enables ME keepalive padding frames. |
| me_keepalive_interval_secs | `u64` | Keepalive interval in seconds. |
| me_keepalive_jitter_secs | `u64` | Keepalive jitter in seconds. |
| me_keepalive_payload_random | `bool` | Randomizes keepalive payload bytes instead of zero payload. |
| rpc_proxy_req_every | `u64` | Interval for service `RPC_PROXY_REQ` activity signals (`0` disables). |
| me_writer_cmd_channel_capacity | `usize` | Capacity of per-writer command channel. |
| me_route_channel_capacity | `usize` | Capacity of per-connection ME response route channel. |
| me_c2me_channel_capacity | `usize` | Capacity of per-client command queue (client reader -> ME sender). |
| me_reader_route_data_wait_ms | `u64` | Bounded wait for routing ME DATA to per-connection queue (`0` = no wait). |
| me_d2c_flush_batch_max_frames | `usize` | Max ME->client frames coalesced before flush. |
| me_d2c_flush_batch_max_bytes | `usize` | Max ME->client payload bytes coalesced before flush. |
| me_d2c_flush_batch_max_delay_us | `u64` | Max microsecond wait for coalescing more ME->client frames (`0` disables timed coalescing). |
| me_d2c_ack_flush_immediate | `bool` | Flushes client writer immediately after quick-ack write. |
| direct_relay_copy_buf_c2s_bytes | `usize` | Copy buffer size for client->DC direction in direct relay. |
| direct_relay_copy_buf_s2c_bytes | `usize` | Copy buffer size for DC->client direction in direct relay. |
| crypto_pending_buffer | `usize` | Max pending ciphertext buffer per client writer (bytes). |
| max_client_frame | `usize` | Maximum allowed client MTProto frame size (bytes). |
| desync_all_full | `bool` | Emits full crypto-desync forensic logs for every event. |
| beobachten | `bool` | Enables per-IP forensic observation buckets. |
| beobachten_minutes | `u64` | Retention window (minutes) for per-IP observation buckets. |
| beobachten_flush_secs | `u64` | Snapshot flush interval (seconds) for observation output file. |
| beobachten_file | `String` | Observation snapshot output file path. |
| hardswap | `bool` | Enables hard-swap generation switching for ME pool updates. |
| me_warmup_stagger_enabled | `bool` | Enables staggered warmup for extra ME writers. |
| me_warmup_step_delay_ms | `u64` | Base delay between warmup connections (ms). |
| me_warmup_step_jitter_ms | `u64` | Jitter for warmup delay (ms). |
| me_reconnect_max_concurrent_per_dc | `u32` | Max concurrent reconnect attempts per DC. |
| me_reconnect_backoff_base_ms | `u64` | Base reconnect backoff in ms. |
| me_reconnect_backoff_cap_ms | `u64` | Cap reconnect backoff in ms. |
| me_reconnect_fast_retry_count | `u32` | Number of fast retry attempts before backoff. |
| me_single_endpoint_shadow_writers | `u8` | Additional reserve writers for one-endpoint DC groups. |
| me_single_endpoint_outage_mode_enabled | `bool` | Enables aggressive outage recovery for one-endpoint DC groups. |
| me_single_endpoint_outage_disable_quarantine | `bool` | Ignores endpoint quarantine in one-endpoint outage mode. |
| me_single_endpoint_outage_backoff_min_ms | `u64` | Minimum reconnect backoff in outage mode (ms). |
| me_single_endpoint_outage_backoff_max_ms | `u64` | Maximum reconnect backoff in outage mode (ms). |
| me_single_endpoint_shadow_rotate_every_secs | `u64` | Periodic shadow writer rotation interval (`0` disables). |
| me_floor_mode | `"static" \| "adaptive"` | Writer floor policy mode. |
| me_adaptive_floor_idle_secs | `u64` | Idle time before adaptive floor may reduce one-endpoint target. |
| me_adaptive_floor_min_writers_single_endpoint | `u8` | Minimum adaptive writer target for one-endpoint DC groups. |
| me_adaptive_floor_min_writers_multi_endpoint | `u8` | Minimum adaptive writer target for multi-endpoint DC groups. |
| me_adaptive_floor_recover_grace_secs | `u64` | Grace period to hold static floor after activity. |
| me_adaptive_floor_writers_per_core_total | `u16` | Global writer budget per logical CPU core in adaptive mode. |
| me_adaptive_floor_cpu_cores_override | `u16` | Manual CPU core count override (`0` uses auto-detection). |
| me_adaptive_floor_max_extra_writers_single_per_core | `u16` | Per-core max extra writers above base floor for one-endpoint DCs. |
| me_adaptive_floor_max_extra_writers_multi_per_core | `u16` | Per-core max extra writers above base floor for multi-endpoint DCs. |
| me_adaptive_floor_max_active_writers_per_core | `u16` | Hard cap for active ME writers per logical CPU core. |
| me_adaptive_floor_max_warm_writers_per_core | `u16` | Hard cap for warm ME writers per logical CPU core. |
| me_adaptive_floor_max_active_writers_global | `u32` | Hard global cap for active ME writers. |
| me_adaptive_floor_max_warm_writers_global | `u32` | Hard global cap for warm ME writers. |
| upstream_connect_retry_attempts | `u32` | Connect attempts for selected upstream before error/fallback. |
| upstream_connect_retry_backoff_ms | `u64` | Delay between upstream connect attempts (ms). |
| upstream_connect_budget_ms | `u64` | Total wall-clock budget for one upstream connect request (ms). |
| upstream_unhealthy_fail_threshold | `u32` | Consecutive failed requests before upstream is marked unhealthy. |
| upstream_connect_failfast_hard_errors | `bool` | Skips additional retries for hard non-transient connect errors. |
| stun_iface_mismatch_ignore | `bool` | Ignores STUN/interface mismatch and keeps Middle Proxy mode. |
| unknown_dc_log_path | `String` | File path for unknown-DC request logging (`null` disables file path). |
| unknown_dc_file_log_enabled | `bool` | Enables unknown-DC file logging. |
| log_level | `"debug" \| "verbose" \| "normal" \| "silent"` | Runtime logging verbosity. |
| disable_colors | `bool` | Disables ANSI colors in logs. |
| me_socks_kdf_policy | `"strict" \| "compat"` | SOCKS-bound KDF fallback policy for ME handshake. |
| me_route_backpressure_base_timeout_ms | `u64` | Base backpressure timeout for route-channel send (ms). |
| me_route_backpressure_high_timeout_ms | `u64` | High backpressure timeout when queue occupancy exceeds watermark (ms). |
| me_route_backpressure_high_watermark_pct | `u8` | Queue occupancy threshold (%) for high timeout mode. |
| me_health_interval_ms_unhealthy | `u64` | Health monitor interval while writer coverage is degraded (ms). |
| me_health_interval_ms_healthy | `u64` | Health monitor interval while writer coverage is healthy (ms). |
| me_admission_poll_ms | `u64` | Poll interval for conditional-admission checks (ms). |
| me_warn_rate_limit_ms | `u64` | Cooldown for repetitive ME warning logs (ms). |
| me_route_no_writer_mode | `"async_recovery_failfast" \| "inline_recovery_legacy" \| "hybrid_async_persistent"` | Route behavior when no writer is immediately available. |
| me_route_no_writer_wait_ms | `u64` | Max wait in async-recovery failfast mode (ms). |
| me_route_inline_recovery_attempts | `u32` | Inline recovery attempts in legacy mode. |
| me_route_inline_recovery_wait_ms | `u64` | Max inline recovery wait in legacy mode (ms). |
| fast_mode_min_tls_record | `usize` | Minimum TLS record size when fast-mode coalescing is enabled (`0` disables). |
| update_every | `u64` | Unified interval for config/secret updater tasks. |
| me_reinit_every_secs | `u64` | Periodic ME pool reinitialization interval (seconds). |
| me_hardswap_warmup_delay_min_ms | `u64` | Minimum delay between hardswap warmup connects (ms). |
| me_hardswap_warmup_delay_max_ms | `u64` | Maximum delay between hardswap warmup connects (ms). |
| me_hardswap_warmup_extra_passes | `u8` | Additional warmup passes per hardswap cycle. |
| me_hardswap_warmup_pass_backoff_base_ms | `u64` | Base backoff between hardswap warmup passes (ms). |
| me_config_stable_snapshots | `u8` | Number of identical config snapshots required before apply. |
| me_config_apply_cooldown_secs | `u64` | Cooldown between applied ME map updates (seconds). |
| me_snapshot_require_http_2xx | `bool` | Requires 2xx HTTP responses for applying config snapshots. |
| me_snapshot_reject_empty_map | `bool` | Rejects empty config snapshots. |
| me_snapshot_min_proxy_for_lines | `u32` | Minimum parsed `proxy_for` rows required to accept snapshot. |
| proxy_secret_stable_snapshots | `u8` | Number of identical secret snapshots required before runtime rotation. |
| proxy_secret_rotate_runtime | `bool` | Enables runtime proxy-secret rotation from remote source. |
| me_secret_atomic_snapshot | `bool` | Keeps selector and secret bytes from the same snapshot atomically. |
| proxy_secret_len_max | `usize` | Maximum allowed proxy-secret length (bytes). |
| me_pool_drain_ttl_secs | `u64` | Drain TTL for stale ME writers after endpoint-map changes (seconds). |
| me_pool_drain_threshold | `u64` | Max draining stale writers before batch force-close (`0` disables threshold cleanup). |
| me_bind_stale_mode | `"never" \| "ttl" \| "always"` | Policy for new binds on stale draining writers. |
| me_bind_stale_ttl_secs | `u64` | TTL for stale bind allowance when stale mode is `ttl`. |
| me_pool_min_fresh_ratio | `f32` | Minimum desired-DC fresh coverage ratio before draining stale writers. |
| me_reinit_drain_timeout_secs | `u64` | Force-close timeout for stale writers after endpoint-map changes (`0` disables force-close). |
| proxy_secret_auto_reload_secs | `u64` | Deprecated legacy secret reload interval (fallback when `update_every` is not set). |
| proxy_config_auto_reload_secs | `u64` | Deprecated legacy config reload interval (fallback when `update_every` is not set). |
| me_reinit_singleflight | `bool` | Serializes ME reinit cycles across trigger sources. |
| me_reinit_trigger_channel | `usize` | Trigger queue capacity for reinit scheduler. |
| me_reinit_coalesce_window_ms | `u64` | Trigger coalescing window before starting reinit (ms). |
| me_deterministic_writer_sort | `bool` | Enables deterministic candidate sort for writer binding path. |
| me_writer_pick_mode | `"sorted_rr" \| "p2c"` | Writer selection mode for route bind path. |
| me_writer_pick_sample_size | `u8` | Number of candidates sampled by picker in `p2c` mode. |
| ntp_check | `bool` | Enables NTP drift check at startup. |
| ntp_servers | `String[]` | NTP servers used for drift check. |
| auto_degradation_enabled | `bool` | Enables automatic degradation from ME to direct DC. |
| degradation_min_unavailable_dc_groups | `u8` | Minimum unavailable ME DC groups required before degrading. |
| Parameter | Type | Default | Constraints / validation | Description |
|---|---|---|---|---|
| data_path | `String \| null` | `null` | — | Optional runtime data directory path. |
| prefer_ipv6 | `bool` | `false` | — | Prefer IPv6 where applicable in runtime logic. |
| fast_mode | `bool` | `true` | — | Enables fast-path optimizations for traffic processing. |
| use_middle_proxy | `bool` | `true` | none | Enables ME transport mode; if `false`, runtime falls back to direct DC routing. |
| proxy_secret_path | `String \| null` | `"proxy-secret"` | Path may be `null`. | Path to Telegram infrastructure proxy-secret file used by ME handshake logic. |
| proxy_config_v4_cache_path | `String \| null` | `"cache/proxy-config-v4.txt"` | — | Optional cache path for raw `getProxyConfig` (IPv4) snapshot. |
| proxy_config_v6_cache_path | `String \| null` | `"cache/proxy-config-v6.txt"` | — | Optional cache path for raw `getProxyConfigV6` (IPv6) snapshot. |
| ad_tag | `String \| null` | `null` | — | Global fallback ad tag (32 hex characters). |
| middle_proxy_nat_ip | `IpAddr \| null` | `null` | Must be a valid IP when set. | Manual public NAT IP override used as ME address material when set. |
| middle_proxy_nat_probe | `bool` | `true` | Auto-forced to `true` when `use_middle_proxy = true`. | Enables ME NAT probing; runtime may force it on when ME mode is active. |
| middle_proxy_nat_stun | `String \| null` | `null` | Deprecated. Use `network.stun_servers`. | Deprecated legacy single STUN server for NAT probing. |
| middle_proxy_nat_stun_servers | `String[]` | `[]` | Deprecated. Use `network.stun_servers`. | Deprecated legacy STUN list for NAT probing fallback. |
| stun_nat_probe_concurrency | `usize` | `8` | Must be `> 0`. | Maximum number of parallel STUN probes during NAT/public endpoint discovery. |
| middle_proxy_pool_size | `usize` | `8` | none | Target size of active ME writer pool. |
| middle_proxy_warm_standby | `usize` | `16` | none | Reserved compatibility field in current runtime revision. |
| me_init_retry_attempts | `u32` | `0` | `0..=1_000_000`. | Startup retries for ME pool initialization (`0` means unlimited). |
| me2dc_fallback | `bool` | `true` | — | Allows fallback from ME mode to direct DC when ME startup fails. |
| me_keepalive_enabled | `bool` | `true` | none | Enables periodic ME keepalive/ping traffic. |
| me_keepalive_interval_secs | `u64` | `8` | none | Base ME keepalive interval in seconds. |
| me_keepalive_jitter_secs | `u64` | `2` | none | Keepalive jitter in seconds to reduce synchronized bursts. |
| me_keepalive_payload_random | `bool` | `true` | none | Randomizes keepalive payload bytes instead of fixed zero payload. |
| rpc_proxy_req_every | `u64` | `0` | `0` or `10..=300`. | Interval for service `RPC_PROXY_REQ` activity signals (`0` disables). |
| me_writer_cmd_channel_capacity | `usize` | `4096` | Must be `> 0`. | Capacity of per-writer command channel. |
| me_route_channel_capacity | `usize` | `768` | Must be `> 0`. | Capacity of per-connection ME response route channel. |
| me_c2me_channel_capacity | `usize` | `1024` | Must be `> 0`. | Capacity of per-client command queue (client reader -> ME sender). |
| me_reader_route_data_wait_ms | `u64` | `2` | `0..=20`. | Bounded wait for routing ME DATA to per-connection queue (`0` = no wait). |
| me_d2c_flush_batch_max_frames | `usize` | `32` | `1..=512`. | Max ME->client frames coalesced before flush. |
| me_d2c_flush_batch_max_bytes | `usize` | `131072` | `4096..=2_097_152`. | Max ME->client payload bytes coalesced before flush. |
| me_d2c_flush_batch_max_delay_us | `u64` | `500` | `0..=5000`. | Max microsecond wait for coalescing more ME->client frames (`0` disables timed coalescing). |
| me_d2c_ack_flush_immediate | `bool` | `true` | — | Flushes client writer immediately after quick-ack write. |
| direct_relay_copy_buf_c2s_bytes | `usize` | `65536` | `4096..=1_048_576`. | Copy buffer size for client->DC direction in direct relay. |
| direct_relay_copy_buf_s2c_bytes | `usize` | `262144` | `8192..=2_097_152`. | Copy buffer size for DC->client direction in direct relay. |
| crypto_pending_buffer | `usize` | `262144` | — | Max pending ciphertext buffer per client writer (bytes). |
| max_client_frame | `usize` | `16777216` | — | Maximum allowed client MTProto frame size (bytes). |
| desync_all_full | `bool` | `false` | — | Emits full crypto-desync forensic logs for every event. |
| beobachten | `bool` | `true` | — | Enables per-IP forensic observation buckets. |
| beobachten_minutes | `u64` | `10` | Must be `> 0`. | Retention window (minutes) for per-IP observation buckets. |
| beobachten_flush_secs | `u64` | `15` | Must be `> 0`. | Snapshot flush interval (seconds) for observation output file. |
| beobachten_file | `String` | `"cache/beobachten.txt"` | — | Observation snapshot output file path. |
| hardswap | `bool` | `true` | none | Enables generation-based ME hardswap strategy. |
| me_warmup_stagger_enabled | `bool` | `true` | none | Staggers extra ME warmup dials to avoid connection spikes. |
| me_warmup_step_delay_ms | `u64` | `500` | none | Base delay in milliseconds between warmup dial steps. |
| me_warmup_step_jitter_ms | `u64` | `300` | none | Additional random delay in milliseconds for warmup steps. |
| me_reconnect_max_concurrent_per_dc | `u32` | `8` | none | Limits concurrent reconnect workers per DC during health recovery. |
| me_reconnect_backoff_base_ms | `u64` | `500` | none | Initial reconnect backoff in milliseconds. |
| me_reconnect_backoff_cap_ms | `u64` | `30000` | none | Maximum reconnect backoff cap in milliseconds. |
| me_reconnect_fast_retry_count | `u32` | `16` | none | Immediate retry budget before long backoff behavior applies. |
| me_single_endpoint_shadow_writers | `u8` | `2` | `0..=32`. | Additional reserve writers for one-endpoint DC groups. |
| me_single_endpoint_outage_mode_enabled | `bool` | `true` | — | Enables aggressive outage recovery for one-endpoint DC groups. |
| me_single_endpoint_outage_disable_quarantine | `bool` | `true` | — | Ignores endpoint quarantine in one-endpoint outage mode. |
| me_single_endpoint_outage_backoff_min_ms | `u64` | `250` | Must be `> 0`; also `<= me_single_endpoint_outage_backoff_max_ms`. | Minimum reconnect backoff in outage mode (ms). |
| me_single_endpoint_outage_backoff_max_ms | `u64` | `3000` | Must be `> 0`; also `>= me_single_endpoint_outage_backoff_min_ms`. | Maximum reconnect backoff in outage mode (ms). |
| me_single_endpoint_shadow_rotate_every_secs | `u64` | `900` | — | Periodic shadow writer rotation interval (`0` disables). |
| me_floor_mode | `"static" \| "adaptive"` | `"adaptive"` | — | Writer floor policy mode. |
| me_adaptive_floor_idle_secs | `u64` | `90` | — | Idle time before adaptive floor may reduce one-endpoint target. |
| me_adaptive_floor_min_writers_single_endpoint | `u8` | `1` | `1..=32`. | Minimum adaptive writer target for one-endpoint DC groups. |
| me_adaptive_floor_min_writers_multi_endpoint | `u8` | `1` | `1..=32`. | Minimum adaptive writer target for multi-endpoint DC groups. |
| me_adaptive_floor_recover_grace_secs | `u64` | `180` | — | Grace period to hold static floor after activity. |
| me_adaptive_floor_writers_per_core_total | `u16` | `48` | Must be `> 0`. | Global writer budget per logical CPU core in adaptive mode. |
| me_adaptive_floor_cpu_cores_override | `u16` | `0` | — | Manual CPU core count override (`0` uses auto-detection). |
| me_adaptive_floor_max_extra_writers_single_per_core | `u16` | `1` | — | Per-core max extra writers above base floor for one-endpoint DCs. |
| me_adaptive_floor_max_extra_writers_multi_per_core | `u16` | `2` | — | Per-core max extra writers above base floor for multi-endpoint DCs. |
| me_adaptive_floor_max_active_writers_per_core | `u16` | `64` | Must be `> 0`. | Hard cap for active ME writers per logical CPU core. |
| me_adaptive_floor_max_warm_writers_per_core | `u16` | `64` | Must be `> 0`. | Hard cap for warm ME writers per logical CPU core. |
| me_adaptive_floor_max_active_writers_global | `u32` | `256` | Must be `> 0`. | Hard global cap for active ME writers. |
| me_adaptive_floor_max_warm_writers_global | `u32` | `256` | Must be `> 0`. | Hard global cap for warm ME writers. |
| upstream_connect_retry_attempts | `u32` | `2` | Must be `> 0`. | Connect attempts for selected upstream before error/fallback. |
| upstream_connect_retry_backoff_ms | `u64` | `100` | — | Delay between upstream connect attempts (ms). |
| upstream_connect_budget_ms | `u64` | `3000` | Must be `> 0`. | Total wall-clock budget for one upstream connect request (ms). |
| upstream_unhealthy_fail_threshold | `u32` | `5` | Must be `> 0`. | Consecutive failed requests before upstream is marked unhealthy. |
| upstream_connect_failfast_hard_errors | `bool` | `false` | — | Skips additional retries for hard non-transient connect errors. |
| stun_iface_mismatch_ignore | `bool` | `false` | none | Reserved compatibility flag in current runtime revision. |
| unknown_dc_log_path | `String \| null` | `"unknown-dc.txt"` | — | File path for unknown-DC request logging (`null` disables file path). |
| unknown_dc_file_log_enabled | `bool` | `false` | — | Enables unknown-DC file logging. |
| log_level | `"debug" \| "verbose" \| "normal" \| "silent"` | `"normal"` | — | Runtime logging verbosity. |
| disable_colors | `bool` | `false` | — | Disables ANSI colors in logs. |
| me_socks_kdf_policy | `"strict" \| "compat"` | `"strict"` | — | SOCKS-bound KDF fallback policy for ME handshake. |
| me_route_backpressure_base_timeout_ms | `u64` | `25` | Must be `> 0`. | Base backpressure timeout for route-channel send (ms). |
| me_route_backpressure_high_timeout_ms | `u64` | `120` | Must be `>= me_route_backpressure_base_timeout_ms`. | High backpressure timeout when queue occupancy exceeds watermark (ms). |
| me_route_backpressure_high_watermark_pct | `u8` | `80` | `1..=100`. | Queue occupancy threshold (%) for high timeout mode. |
| me_health_interval_ms_unhealthy | `u64` | `1000` | Must be `> 0`. | Health monitor interval while writer coverage is degraded (ms). |
| me_health_interval_ms_healthy | `u64` | `3000` | Must be `> 0`. | Health monitor interval while writer coverage is healthy (ms). |
| me_admission_poll_ms | `u64` | `1000` | Must be `> 0`. | Poll interval for conditional-admission checks (ms). |
| me_warn_rate_limit_ms | `u64` | `5000` | Must be `> 0`. | Cooldown for repetitive ME warning logs (ms). |
| me_route_no_writer_mode | `"async_recovery_failfast" \| "inline_recovery_legacy" \| "hybrid_async_persistent"` | `"hybrid_async_persistent"` | — | Route behavior when no writer is immediately available. |
| me_route_no_writer_wait_ms | `u64` | `250` | `10..=5000`. | Max wait in async-recovery failfast mode (ms). |
| me_route_inline_recovery_attempts | `u32` | `3` | Must be `> 0`. | Inline recovery attempts in legacy mode. |
| me_route_inline_recovery_wait_ms | `u64` | `3000` | `10..=30000`. | Max inline recovery wait in legacy mode (ms). |
| fast_mode_min_tls_record | `usize` | `0` | — | Minimum TLS record size when fast-mode coalescing is enabled (`0` disables). |
| update_every | `u64 \| null` | `300` | If set: must be `> 0`; if `null`: legacy fallback path is used. | Unified refresh interval for ME config and proxy-secret updater tasks. |
| me_reinit_every_secs | `u64` | `900` | Must be `> 0`. | Periodic interval for zero-downtime ME reinit cycle. |
| me_hardswap_warmup_delay_min_ms | `u64` | `1000` | Must be `<= me_hardswap_warmup_delay_max_ms`. | Lower bound for hardswap warmup dial spacing. |
| me_hardswap_warmup_delay_max_ms | `u64` | `2000` | Must be `> 0`. | Upper bound for hardswap warmup dial spacing. |
| me_hardswap_warmup_extra_passes | `u8` | `3` | Must be within `[0, 10]`. | Additional warmup passes after the base pass in one hardswap cycle. |
| me_hardswap_warmup_pass_backoff_base_ms | `u64` | `500` | Must be `> 0`. | Base backoff between extra hardswap warmup passes. |
| me_config_stable_snapshots | `u8` | `2` | Must be `> 0`. | Number of identical ME config snapshots required before apply. |
| me_config_apply_cooldown_secs | `u64` | `300` | none | Cooldown between applied ME endpoint-map updates. |
| me_snapshot_require_http_2xx | `bool` | `true` | — | Requires 2xx HTTP responses for applying config snapshots. |
| me_snapshot_reject_empty_map | `bool` | `true` | — | Rejects empty config snapshots. |
| me_snapshot_min_proxy_for_lines | `u32` | `1` | Must be `> 0`. | Minimum parsed `proxy_for` rows required to accept snapshot. |
| proxy_secret_stable_snapshots | `u8` | `2` | Must be `> 0`. | Number of identical proxy-secret snapshots required before rotation. |
| proxy_secret_rotate_runtime | `bool` | `true` | none | Enables runtime proxy-secret rotation from updater snapshots. |
| me_secret_atomic_snapshot | `bool` | `true` | — | Keeps selector and secret bytes from the same snapshot atomically. |
| proxy_secret_len_max | `usize` | `256` | Must be within `[32, 4096]`. | Upper length limit for accepted proxy-secret bytes. |
| me_pool_drain_ttl_secs | `u64` | `90` | none | Time window where stale writers remain fallback-eligible after map change. |
| me_pool_drain_threshold | `u64` | `128` | — | Max draining stale writers before batch force-close (`0` disables threshold cleanup). |
| me_pool_drain_soft_evict_enabled | `bool` | `true` | — | Enables gradual soft-eviction of stale writers during drain/reinit instead of immediate hard close. |
| me_pool_drain_soft_evict_grace_secs | `u64` | `30` | `0..=3600`. | Grace period before stale writers become soft-evict candidates. |
| me_pool_drain_soft_evict_per_writer | `u8` | `1` | `1..=16`. | Maximum stale routes soft-evicted per writer in one eviction pass. |
| me_pool_drain_soft_evict_budget_per_core | `u16` | `8` | `1..=64`. | Per-core budget limiting aggregate soft-eviction work per pass. |
| me_pool_drain_soft_evict_cooldown_ms | `u64` | `5000` | Must be `> 0`. | Cooldown between consecutive soft-eviction passes (ms). |
| me_bind_stale_mode | `"never" \| "ttl" \| "always"` | `"ttl"` | — | Policy for new binds on stale draining writers. |
| me_bind_stale_ttl_secs | `u64` | `90` | — | TTL for stale bind allowance when stale mode is `ttl`. |
| me_pool_min_fresh_ratio | `f32` | `0.8` | Must be within `[0.0, 1.0]`. | Minimum fresh desired-DC coverage ratio before stale writers are drained. |
| me_reinit_drain_timeout_secs | `u64` | `120` | `0` disables force-close; if `> 0` and `< me_pool_drain_ttl_secs`, runtime bumps it to TTL. | Force-close timeout for draining stale writers (`0` keeps indefinite draining). |
| proxy_secret_auto_reload_secs | `u64` | `3600` | Deprecated. Use `general.update_every`. | Deprecated legacy secret reload interval (fallback when `update_every` is not set). |
| proxy_config_auto_reload_secs | `u64` | `3600` | Deprecated. Use `general.update_every`. | Deprecated legacy config reload interval (fallback when `update_every` is not set). |
| me_reinit_singleflight | `bool` | `true` | — | Serializes ME reinit cycles across trigger sources. |
| me_reinit_trigger_channel | `usize` | `64` | Must be `> 0`. | Trigger queue capacity for reinit scheduler. |
| me_reinit_coalesce_window_ms | `u64` | `200` | — | Trigger coalescing window before starting reinit (ms). |
| me_deterministic_writer_sort | `bool` | `true` | — | Enables deterministic candidate sort for writer binding path. |
| me_writer_pick_mode | `"sorted_rr" \| "p2c"` | `"p2c"` | — | Writer selection mode for route bind path. |
| me_writer_pick_sample_size | `u8` | `3` | `2..=4`. | Number of candidates sampled by picker in `p2c` mode. |
| ntp_check | `bool` | `true` | — | Enables NTP drift check at startup. |
| ntp_servers | `String[]` | `["pool.ntp.org"]` | — | NTP servers used for drift check. |
| auto_degradation_enabled | `bool` | `true` | — | Reserved compatibility flag in current runtime revision. |
| degradation_min_unavailable_dc_groups | `u8` | `2` | — | Reserved compatibility threshold in current runtime revision. |
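Taken together, the refresh, snapshot, and drain knobs above compose roughly as follows. This is a minimal illustrative sketch using only the documented defaults, not a required configuration:
```toml
[general]
update_every = 300                  # unified ME config + proxy-secret refresh
me_reinit_every_secs = 900          # zero-downtime reinit cycle
me_config_stable_snapshots = 2      # identical snapshots required before apply
me_config_apply_cooldown_secs = 300
me_pool_drain_ttl_secs = 90         # stale writers stay fallback-eligible
me_reinit_drain_timeout_secs = 120  # then force-close draining writers
me_writer_pick_mode = "p2c"         # sampled writer selection
me_writer_pick_sample_size = 3
```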
## [general.modes]
| Parameter | Type | Description |
|---|---|---|
| classic | `bool` | Enables classic MTProxy mode. |
| secure | `bool` | Enables secure mode. |
| tls | `bool` | Enables TLS mode. |
| Parameter | Type | Default | Constraints / validation | Description |
|---|---|---|---|---|
| classic | `bool` | `false` | — | Enables classic MTProxy mode. |
| secure | `bool` | `false` | — | Enables secure mode. |
| tls | `bool` | `true` | — | Enables TLS mode. |
## [general.links]
| Parameter | Type | Description |
|---|---|---|
| show | `"*" \| String[]` | Selects users whose tg:// links are shown at startup. |
| public_host | `String` | Public hostname/IP override for generated tg:// links. |
| public_port | `u16` | Public port override for generated tg:// links. |
| Parameter | Type | Default | Constraints / validation | Description |
|---|---|---|---|---|
| show | `"*" \| String[]` | `"*"` | — | Selects users whose tg:// links are shown at startup. |
| public_host | `String \| null` | `null` | — | Public hostname/IP override for generated tg:// links. |
| public_port | `u16 \| null` | `null` | — | Public port override for generated tg:// links. |
## [general.telemetry]
| Parameter | Type | Description |
|---|---|---|
| core_enabled | `bool` | Enables core hot-path telemetry counters. |
| user_enabled | `bool` | Enables per-user telemetry counters. |
| me_level | `"silent" \| "normal" \| "debug"` | Middle-End telemetry verbosity level. |
| Parameter | Type | Default | Constraints / validation | Description |
|---|---|---|---|---|
| core_enabled | `bool` | `true` | — | Enables core hot-path telemetry counters. |
| user_enabled | `bool` | `true` | — | Enables per-user telemetry counters. |
| me_level | `"silent" \| "normal" \| "debug"` | `"normal"` | — | Middle-End telemetry verbosity level. |
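A combined sketch of the three `[general.*]` sub-sections above, shown with their code defaults; the commented `public_host`/`public_port` values are hypothetical overrides, not defaults:
```toml
[general.modes]
classic = false
secure = false
tls = true

[general.links]
show = "*"                            # print tg:// links for all users at startup
# public_host = "proxy.example.com"   # hypothetical override
# public_port = 443

[general.telemetry]
core_enabled = true
user_enabled = true
me_level = "normal"                   # "silent" | "normal" | "debug"
```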
## [network]
| Parameter | Type | Description |
|---|---|---|
| ipv4 | `bool` | Enables IPv4 networking. |
| ipv6 | `bool` | Enables/disables IPv6 (`null` = auto-detect availability). |
| prefer | `u8` | Preferred IP family for selection (`4` or `6`). |
| multipath | `bool` | Enables multipath behavior where supported. |
| stun_use | `bool` | Global switch for STUN probing. |
| stun_servers | `String[]` | STUN server list for public IP detection. |
| stun_tcp_fallback | `bool` | Enables TCP STUN fallback when UDP STUN is blocked. |
| http_ip_detect_urls | `String[]` | HTTP endpoints used as fallback public IP detectors. |
| cache_public_ip_path | `String` | File path for caching detected public IP. |
| dns_overrides | `String[]` | Runtime DNS overrides in `host:port:ip` format. |
| Parameter | Type | Default | Constraints / validation | Description |
|---|---|---|---|---|
| ipv4 | `bool` | `true` | — | Enables IPv4 networking. |
| ipv6 | `bool` | `false` | — | Enables/disables IPv6 networking. |
| prefer | `u8` | `4` | Must be `4` or `6`. | Preferred IP family for selection (`4` or `6`). |
| multipath | `bool` | `false` | — | Enables multipath behavior where supported. |
| stun_use | `bool` | `true` | — | Global STUN switch; when `false`, STUN probing path is disabled. |
| stun_servers | `String[]` | Built-in STUN list (13 hosts) | Deduplicated; empty values are removed. | Primary STUN server list for NAT/public endpoint discovery. |
| stun_tcp_fallback | `bool` | `true` | — | Enables TCP fallback for STUN when UDP path is blocked. |
| http_ip_detect_urls | `String[]` | `["https://ifconfig.me/ip", "https://api.ipify.org"]` | — | HTTP fallback endpoints for public IP detection when STUN is unavailable. |
| cache_public_ip_path | `String` | `"cache/public_ip.txt"` | — | File path for caching detected public IP. |
| dns_overrides | `String[]` | `[]` | Must match `host:port:ip`; IPv6 must be bracketed. | Runtime DNS overrides in `host:port:ip` format. |
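The `dns_overrides` format is the least obvious entry above, so here is an illustrative `[network]` block with the defaults plus two hypothetical overrides (the hostnames and IPs are placeholders, and this assumes the brackets apply to the IPv6 address in the final field):
```toml
[network]
ipv4 = true
ipv6 = false
prefer = 4                               # 4 or 6
stun_use = true
stun_tcp_fallback = true
http_ip_detect_urls = ["https://ifconfig.me/ip", "https://api.ipify.org"]
cache_public_ip_path = "cache/public_ip.txt"
dns_overrides = [
  "example.org:443:203.0.113.7",         # host:port:ip
  "example.org:443:[2001:db8::7]",       # IPv6 must be bracketed
]
```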
## [server]
| Parameter | Type | Description |
|---|---|---|
| port | `u16` | Main proxy listen port. |
| listen_addr_ipv4 | `String` | IPv4 bind address for TCP listener. |
| listen_addr_ipv6 | `String` | IPv6 bind address for TCP listener. |
| listen_unix_sock | `String` | Unix socket path for listener. |
| listen_unix_sock_perm | `String` | Unix socket permissions in octal string (e.g., `"0666"`). |
| listen_tcp | `bool` | Explicit TCP listener enable/disable override. |
| proxy_protocol | `bool` | Enables HAProxy PROXY protocol parsing on incoming client connections. |
| proxy_protocol_header_timeout_ms | `u64` | Timeout for PROXY protocol header read/parse (ms). |
| metrics_port | `u16` | Metrics endpoint port (enables metrics listener). |
| metrics_listen | `String` | Full metrics bind address (`IP:PORT`), overrides `metrics_port`. |
| metrics_whitelist | `IpNetwork[]` | CIDR whitelist for metrics endpoint access. |
| max_connections | `u32` | Max concurrent client connections (`0` = unlimited). |
| Parameter | Type | Default | Constraints / validation | Description |
|---|---|---|---|---|
| port | `u16` | `443` | — | Main proxy listen port. |
| listen_addr_ipv4 | `String \| null` | `"0.0.0.0"` | — | IPv4 bind address for TCP listener. |
| listen_addr_ipv6 | `String \| null` | `"::"` | — | IPv6 bind address for TCP listener. |
| listen_unix_sock | `String \| null` | `null` | — | Unix socket path for listener. |
| listen_unix_sock_perm | `String \| null` | `null` | — | Unix socket permissions in octal string (e.g., `"0666"`). |
| listen_tcp | `bool \| null` | `null` (auto) | — | Explicit TCP listener enable/disable override. |
| proxy_protocol | `bool` | `false` | — | Enables HAProxy PROXY protocol parsing on incoming client connections. |
| proxy_protocol_header_timeout_ms | `u64` | `500` | Must be `> 0`. | Timeout for PROXY protocol header read/parse (ms). |
| metrics_port | `u16 \| null` | `null` | — | Metrics endpoint port (enables metrics listener). |
| metrics_listen | `String \| null` | `null` | — | Full metrics bind address (`IP:PORT`), overrides `metrics_port`. |
| metrics_whitelist | `IpNetwork[]` | `["127.0.0.1/32", "::1/128"]` | — | CIDR whitelist for metrics endpoint access. |
| max_connections | `u32` | `10000` | — | Max concurrent client connections (`0` = unlimited). |
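For orientation, a minimal `[server]` block using the defaults above; the commented metrics line is a hypothetical example of the `metrics_listen` override:
```toml
[server]
port = 443
listen_addr_ipv4 = "0.0.0.0"
listen_addr_ipv6 = "::"
proxy_protocol = false
proxy_protocol_header_timeout_ms = 500
# metrics_listen = "127.0.0.1:9090"      # hypothetical; overrides metrics_port
metrics_whitelist = ["127.0.0.1/32", "::1/128"]
max_connections = 10000                  # 0 = unlimited
```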
## [server.api]
| Parameter | Type | Description |
|---|---|---|
| enabled | `bool` | Enables control-plane REST API. |
| listen | `String` | API bind address in `IP:PORT` format. |
| whitelist | `IpNetwork[]` | CIDR whitelist allowed to access API. |
| auth_header | `String` | Exact expected `Authorization` header value (empty = disabled). |
| request_body_limit_bytes | `usize` | Maximum accepted HTTP request body size. |
| minimal_runtime_enabled | `bool` | Enables minimal runtime snapshots endpoint logic. |
| minimal_runtime_cache_ttl_ms | `u64` | Cache TTL for minimal runtime snapshots (ms; `0` disables cache). |
| runtime_edge_enabled | `bool` | Enables runtime edge endpoints. |
| runtime_edge_cache_ttl_ms | `u64` | Cache TTL for runtime edge aggregation payloads (ms). |
| runtime_edge_top_n | `usize` | Top-N size for edge connection leaderboard. |
| runtime_edge_events_capacity | `usize` | Ring-buffer capacity for runtime edge events. |
| read_only | `bool` | Rejects mutating API endpoints when enabled. |
| Parameter | Type | Default | Constraints / validation | Description |
|---|---|---|---|---|
| enabled | `bool` | `true` | — | Enables control-plane REST API. |
| listen | `String` | `"0.0.0.0:9091"` | Must be valid `IP:PORT`. | API bind address in `IP:PORT` format. |
| whitelist | `IpNetwork[]` | `["127.0.0.0/8"]` | — | CIDR whitelist allowed to access API. |
| auth_header | `String` | `""` | — | Exact expected `Authorization` header value (empty = disabled). |
| request_body_limit_bytes | `usize` | `65536` | Must be `> 0`. | Maximum accepted HTTP request body size. |
| minimal_runtime_enabled | `bool` | `true` | — | Enables minimal runtime snapshots endpoint logic. |
| minimal_runtime_cache_ttl_ms | `u64` | `1000` | `0..=60000`. | Cache TTL for minimal runtime snapshots (ms; `0` disables cache). |
| runtime_edge_enabled | `bool` | `false` | — | Enables runtime edge endpoints. |
| runtime_edge_cache_ttl_ms | `u64` | `1000` | `0..=60000`. | Cache TTL for runtime edge aggregation payloads (ms). |
| runtime_edge_top_n | `usize` | `10` | `1..=1000`. | Top-N size for edge connection leaderboard. |
| runtime_edge_events_capacity | `usize` | `256` | `16..=4096`. | Ring-buffer capacity for runtime edge events. |
| read_only | `bool` | `false` | — | Rejects mutating API endpoints when enabled. |
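A sketch of the `[server.api]` defaults; note that an empty `auth_header` disables the header check entirely, so any restriction then has to come from `whitelist`:
```toml
[server.api]
enabled = true
listen = "0.0.0.0:9091"               # must parse as IP:PORT
whitelist = ["127.0.0.0/8"]
auth_header = ""                      # empty string disables auth
request_body_limit_bytes = 65536
minimal_runtime_cache_ttl_ms = 1000   # 0 disables caching
read_only = false                     # true rejects mutating endpoints
```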
## [[server.listeners]]
| Parameter | Type | Description |
|---|---|---|
| ip | `IpAddr` | Listener bind IP. |
| announce | `String` | Public IP/domain announced in proxy links (priority over `announce_ip`). |
| announce_ip | `IpAddr` | Deprecated legacy announce IP (migrated to `announce` if needed). |
| proxy_protocol | `bool` | Per-listener override for PROXY protocol enable flag. |
| reuse_allow | `bool` | Enables `SO_REUSEPORT` for multi-instance bind sharing. |
| Parameter | Type | Default | Constraints / validation | Description |
|---|---|---|---|---|
| ip | `IpAddr` | — | — | Listener bind IP. |
| announce | `String \| null` | — | — | Public IP/domain announced in proxy links (priority over `announce_ip`). |
| announce_ip | `IpAddr \| null` | — | — | Deprecated legacy announce IP (migrated to `announce` if needed). |
| proxy_protocol | `bool \| null` | `null` | — | Per-listener override for PROXY protocol enable flag. |
| reuse_allow | `bool` | `false` | — | Enables `SO_REUSEPORT` for multi-instance bind sharing. |
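An illustrative listener entry (the IP and domain are placeholders); `announce` wins over the deprecated `announce_ip` when both are present:
```toml
[[server.listeners]]
ip = "203.0.113.10"              # placeholder bind IP
announce = "proxy.example.com"   # placeholder; preferred over announce_ip
reuse_allow = true               # SO_REUSEPORT for multi-instance bind sharing
```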
## [timeouts]
| Parameter | Type | Description |
|---|---|---|
| client_handshake | `u64` | Client handshake timeout. |
| tg_connect | `u64` | Upstream Telegram connect timeout. |
| client_keepalive | `u64` | Client keepalive timeout. |
| client_ack | `u64` | Client ACK timeout. |
| me_one_retry | `u8` | Quick ME reconnect attempts for single-address DC. |
| me_one_timeout_ms | `u64` | Timeout per quick attempt for single-address DC (ms). |
| Parameter | Type | Default | Constraints / validation | Description |
|---|---|---|---|---|
| client_handshake | `u64` | `30` | — | Client handshake timeout. |
| tg_connect | `u64` | `10` | — | Upstream Telegram connect timeout. |
| client_keepalive | `u64` | `15` | — | Client keepalive timeout. |
| client_ack | `u64` | `90` | — | Client ACK timeout. |
| me_one_retry | `u8` | `12` | — | Fast reconnect attempts budget for single-endpoint DC scenarios. |
| me_one_timeout_ms | `u64` | `1200` | — | Timeout in milliseconds for each quick single-endpoint reconnect attempt. |
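A sketch with the defaults above. The table states no units for the first four values, so the seconds interpretation here is an assumption; the `me_one_*` pair is explicitly attempts and milliseconds:
```toml
[timeouts]
client_handshake = 30      # presumably seconds; units not stated in the table
tg_connect = 10
client_keepalive = 15
client_ack = 90
me_one_retry = 12          # quick reconnect attempts for single-endpoint DCs
me_one_timeout_ms = 1200   # per-attempt timeout, milliseconds
```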
## [censorship]
| Parameter | Type | Description |
|---|---|---|
| tls_domain | `String` | Primary TLS domain used in fake TLS handshake profile. |
| tls_domains | `String[]` | Additional TLS domains for generating multiple links. |
| mask | `bool` | Enables masking/fronting relay mode. |
| mask_host | `String` | Upstream mask host for TLS fronting relay. |
| mask_port | `u16` | Upstream mask port for TLS fronting relay. |
| mask_unix_sock | `String` | Unix socket path for mask backend instead of TCP host/port. |
| fake_cert_len | `usize` | Length of synthetic certificate payload when emulation data is unavailable. |
| tls_emulation | `bool` | Enables certificate/TLS behavior emulation from cached real fronts. |
| tls_front_dir | `String` | Directory path for TLS front cache storage. |
| server_hello_delay_min_ms | `u64` | Minimum server_hello delay for anti-fingerprint behavior (ms). |
| server_hello_delay_max_ms | `u64` | Maximum server_hello delay for anti-fingerprint behavior (ms). |
| tls_new_session_tickets | `u8` | Number of `NewSessionTicket` messages to emit after handshake. |
| tls_full_cert_ttl_secs | `u64` | TTL for sending full cert payload per (domain, client IP) tuple. |
| alpn_enforce | `bool` | Enforces ALPN echo behavior based on client preference. |
| mask_proxy_protocol | `u8` | PROXY protocol mode for mask backend (`0` disabled, `1` v1, `2` v2). |
| Parameter | Type | Default | Constraints / validation | Description |
|---|---|---|---|---|
| tls_domain | `String` | `"petrovich.ru"` | — | Primary TLS domain used in fake TLS handshake profile. |
| tls_domains | `String[]` | `[]` | — | Additional TLS domains for generating multiple links. |
| mask | `bool` | `true` | — | Enables masking/fronting relay mode. |
| mask_host | `String \| null` | `null` | — | Upstream mask host for TLS fronting relay. |
| mask_port | `u16` | `443` | — | Upstream mask port for TLS fronting relay. |
| mask_unix_sock | `String \| null` | `null` | — | Unix socket path for mask backend instead of TCP host/port. |
| fake_cert_len | `usize` | `2048` | — | Length of synthetic certificate payload when emulation data is unavailable. |
| tls_emulation | `bool` | `true` | — | Enables certificate/TLS behavior emulation from cached real fronts. |
| tls_front_dir | `String` | `"tlsfront"` | — | Directory path for TLS front cache storage. |
| server_hello_delay_min_ms | `u64` | `0` | — | Minimum server_hello delay for anti-fingerprint behavior (ms). |
| server_hello_delay_max_ms | `u64` | `0` | — | Maximum server_hello delay for anti-fingerprint behavior (ms). |
| tls_new_session_tickets | `u8` | `0` | — | Number of `NewSessionTicket` messages to emit after handshake. |
| tls_full_cert_ttl_secs | `u64` | `90` | — | TTL for sending full cert payload per (domain, client IP) tuple. |
| alpn_enforce | `bool` | `true` | — | Enforces ALPN echo behavior based on client preference. |
| mask_proxy_protocol | `u8` | `0` | — | PROXY protocol mode for mask backend (`0` disabled, `1` v1, `2` v2). |
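A hedged `[censorship]` sketch built from the defaults; `mask_host` is commented out with a placeholder value since its default is `null`:
```toml
[censorship]
tls_domain = "petrovich.ru"        # primary fake-TLS SNI profile
tls_domains = []                   # extra domains produce extra links
mask = true
# mask_host = "front.example.net"  # placeholder fronting backend
mask_port = 443
tls_emulation = true
tls_front_dir = "tlsfront"
server_hello_delay_min_ms = 0
server_hello_delay_max_ms = 0
tls_full_cert_ttl_secs = 90
alpn_enforce = true
mask_proxy_protocol = 0            # 0 = disabled, 1 = v1, 2 = v2
```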
## [access]
| Parameter | Type | Description |
|---|---|---|
| users | `Map<String, String>` | Username -> 32-hex secret mapping. |
| user_ad_tags | `Map<String, String>` | Per-user ad tags (32 hex chars). |
| user_max_tcp_conns | `Map<String, usize>` | Per-user maximum concurrent TCP connections. |
| user_expirations | `Map<String, DateTime<Utc>>` | Per-user account expiration timestamps. |
| user_data_quota | `Map<String, u64>` | Per-user data quota limits. |
| user_max_unique_ips | `Map<String, usize>` | Per-user unique source IP limits. |
| user_max_unique_ips_global_each | `usize` | Global fallback per-user unique IP limit when no per-user override exists. |
| user_max_unique_ips_mode | `"active_window" \| "time_window" \| "combined"` | Unique source IP limit accounting mode. |
| user_max_unique_ips_window_secs | `u64` | Recent-window size for unique IP accounting (seconds). |
| replay_check_len | `usize` | Replay check storage length. |
| replay_window_secs | `u64` | Replay protection time window in seconds. |
| ignore_time_skew | `bool` | Ignores client/server timestamp skew in replay validation. |
| Parameter | Type | Default | Constraints / validation | TOML shape example | Description |
|---|---|---|---|---|---|
| users | `Map<String, String>` | `{"default": "000…000"}` | Secret must be 32 hex characters. | `[access.users]`<br>`user = "32-hex secret"`<br>`user2 = "32-hex secret"` | User credentials map used for client authentication. |
| user_ad_tags | `Map<String, String>` | `{}` | Every value must be exactly 32 hex characters. | `[access.user_ad_tags]`<br>`user = "32-hex ad_tag"` | Per-user ad tags used as override over `general.ad_tag`. |
| user_max_tcp_conns | `Map<String, usize>` | `{}` | — | `[access.user_max_tcp_conns]`<br>`user = 500` | Per-user maximum concurrent TCP connections. |
| user_expirations | `Map<String, DateTime<Utc>>` | `{}` | Timestamp must be valid RFC3339/ISO-8601 datetime. | `[access.user_expirations]`<br>`user = "2026-12-31T23:59:59Z"` | Per-user account expiration timestamps. |
| user_data_quota | `Map<String, u64>` | `{}` | — | `[access.user_data_quota]`<br>`user = 1073741824` | Per-user traffic quota in bytes. |
| user_max_unique_ips | `Map<String, usize>` | `{}` | — | `[access.user_max_unique_ips]`<br>`user = 16` | Per-user unique source IP limits. |
| user_max_unique_ips_global_each | `usize` | `0` | — | `user_max_unique_ips_global_each = 0` | Global fallback used when `[access.user_max_unique_ips]` has no per-user override. |
| user_max_unique_ips_mode | `"active_window" \| "time_window" \| "combined"` | `"active_window"` | — | `user_max_unique_ips_mode = "active_window"` | Unique source IP limit accounting mode. |
| user_max_unique_ips_window_secs | `u64` | `30` | Must be `> 0`. | `user_max_unique_ips_window_secs = 30` | Window size (seconds) used by unique-IP accounting modes that use time windows. |
| replay_check_len | `usize` | `65536` | — | `replay_check_len = 65536` | Replay-protection storage length. |
| replay_window_secs | `u64` | `1800` | — | `replay_window_secs = 1800` | Replay-protection window in seconds. |
| ignore_time_skew | `bool` | `false` | — | `ignore_time_skew = false` | Disables client/server timestamp skew checks in replay validation when enabled. |
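Putting the per-user maps together, a sketch with a placeholder user `alice` and a placeholder 32-hex secret; the scalar keys keep their defaults:
```toml
[access]
user_max_unique_ips_mode = "active_window"
user_max_unique_ips_window_secs = 30
replay_check_len = 65536
replay_window_secs = 1800
ignore_time_skew = false

[access.users]
alice = "00112233445566778899aabbccddeeff"   # placeholder 32-hex secret

[access.user_expirations]
alice = "2026-12-31T23:59:59Z"               # RFC3339 timestamp

[access.user_max_tcp_conns]
alice = 500
```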
## [[upstreams]]
| Parameter | Type | Description |
|---|---|---|
| type | `"direct" \| "socks4" \| "socks5"` | Upstream transport type selector. |
| weight | `u16` | Weighted selection coefficient for this upstream. |
| enabled | `bool` | Enables/disables this upstream entry. |
| scopes | `String` | Comma-separated scope tags for routing. |
| interface | `String` | Optional outgoing interface name (`direct`, `socks4`, `socks5`). |
| bind_addresses | `String[]` | Optional source bind addresses for `direct` upstream. |
| address | `String` | Upstream proxy address (`host:port`) for SOCKS upstreams. |
| user_id | `String` | SOCKS4 user ID (only for `type = "socks4"`). |
| username | `String` | SOCKS5 username (only for `type = "socks5"`). |
| password | `String` | SOCKS5 password (only for `type = "socks5"`). |
| Parameter | Type | Default | Constraints / validation | Description |
|---|---|---|---|---|
| type | `"direct" \| "socks4" \| "socks5"` | — | Required field. | Upstream transport type selector. |
| weight | `u16` | `1` | — | Base weight used by weighted-random upstream selection. |
| enabled | `bool` | `true` | — | Disabled entries are excluded from upstream selection at runtime. |
| scopes | `String` | `""` | — | Comma-separated scope tags used for request-level upstream filtering. |
| interface | `String \| null` | `null` | Optional; type-specific runtime rules apply. | Optional outbound interface/local bind hint (supported with type-specific rules). |
| bind_addresses | `String[] \| null` | `null` | Applies to `type = "direct"`. | Optional explicit local source bind addresses for `type = "direct"`. |
| address | `String` | — | Required for `type = "socks4"` and `type = "socks5"`. | SOCKS server endpoint (`host:port` or `ip:port`) for SOCKS upstream types. |
| user_id | `String \| null` | `null` | Only for `type = "socks4"`. | SOCKS4 CONNECT user ID (`type = "socks4"` only). |
| username | `String \| null` | `null` | Only for `type = "socks5"`. | SOCKS5 username (`type = "socks5"` only). |
| password | `String \| null` | `null` | Only for `type = "socks5"`. | SOCKS5 password (`type = "socks5"` only). |
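Finally, a mixed sketch pairing a `direct` upstream with a SOCKS5 one; the address and credentials are placeholders, and disabled entries would simply drop out of selection:
```toml
[[upstreams]]
type = "direct"
weight = 2
enabled = true

[[upstreams]]
type = "socks5"
address = "192.0.2.10:1080"   # placeholder SOCKS server
username = "user"             # placeholder credentials
password = "pass"
weight = 1
enabled = true
```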

View File

@@ -120,3 +120,17 @@ password = "pass" # Password for Auth on SOCKS-server
weight = 1 # Set Weight for Scenarios
enabled = true
```
#### Shadowsocks as Upstream
Requires `use_middle_proxy = false`.
```toml
[general]
use_middle_proxy = false
[[upstreams]]
type = "shadowsocks"
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@1.2.3.4:8388"
weight = 1
enabled = true
```

View File

@@ -121,3 +121,16 @@ weight = 1 # Set Weight for Scenarios
enabled = true
```
#### Shadowsocks as Upstream
Requires `use_middle_proxy = false`.
```toml
[general]
use_middle_proxy = false
[[upstreams]]
type = "shadowsocks"
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@1.2.3.4:8388"
weight = 1
enabled = true
```

View File

@@ -181,6 +181,8 @@ docker compose down
docker build -t telemt:local .
docker run --name telemt --restart unless-stopped \
-p 443:443 \
-p 9090:9090 \
-p 9091:9091 \
-e RUST_LOG=info \
-v "$PWD/config.toml:/app/config.toml:ro" \
--read-only \

View File

@@ -178,11 +178,13 @@ docker compose down
> - By default, ports 443:443 are published and the container runs with dropped privileges (only `NET_BIND_SERVICE` is added)
> - If you really need host networking (usually only required for some IPv6 configurations), uncomment `network_mode: host`
**Running in Docker Compose**
**Running without Docker Compose**
```bash
docker build -t telemt:local .
docker run --name telemt --restart unless-stopped \
-p 443:443 \
-p 9090:9090 \
-p 9091:9091 \
-e RUST_LOG=info \
-v "$PWD/config.toml:/app/config.toml:ro" \
--read-only \

View File

@@ -82,7 +82,7 @@ Die unten angegebenen `Default`-Werte sind Code-Defaults (bei fehlendem Schlüss
| Field | Applies to | Type | Required | Default | Meaning |
|---|---|---|---|---|---|
| `[[upstreams]].type` | all upstreams | `"direct" \| "socks4" \| "socks5"` | yes | n/a | Upstream transport type. |
| `[[upstreams]].type` | all upstreams | `"direct" \| "socks4" \| "socks5" \| "shadowsocks"` | yes | n/a | Upstream transport type. |
| `[[upstreams]].weight` | all upstreams | `u16` | no | `1` | Base weight for weighted-random selection. |
| `[[upstreams]].enabled` | all upstreams | `bool` | no | `true` | Disabled entries are ignored at startup. |
| `[[upstreams]].scopes` | all upstreams | `String` | no | `""` | Comma-separated scope tags for request routing. |
@@ -95,6 +95,8 @@ Die unten angegebenen `Default`-Werte sind Code-Defaults (bei fehlendem Schlüss
| `interface` | `socks5` | `Option<String>` | no | `null` | Used only when `address` is given as `ip:port`. |
| `username` | `socks5` | `Option<String>` | no | `null` | SOCKS5 username. |
| `password` | `socks5` | `Option<String>` | no | `null` | SOCKS5 password. |
| `url` | `shadowsocks` | `String` | yes | n/a | Shadowsocks SIP002 URL (`ss://...`). Only `host:port` is exposed in runtime APIs. |
| `interface` | `shadowsocks` | `Option<String>` | no | `null` | Optional outgoing bind interface or literal local IP. |
### Runtime rules (important)
@@ -115,6 +117,7 @@ Die unten angegebenen `Default`-Werte sind Code-Defaults (bei fehlendem Schlüss
8. In ME mode, the selected upstream is also used for the ME TCP dial path.
9. In ME mode, `direct` with bind/interface makes STUN reflection bind-aware for KDF address material.
10. In ME mode, SOCKS `BND.ADDR/BND.PORT` is used for KDF when valid/public and of the same IP family.
11. `shadowsocks` upstreams require `general.use_middle_proxy = false`. With ME mode enabled, config loading fails immediately.
## Upstream Configuration Examples
@@ -150,7 +153,20 @@ weight = 2
enabled = true
```
### Example 4: Mixed upstreams with scopes
### Example 4: Shadowsocks upstream
```toml
[general]
use_middle_proxy = false
[[upstreams]]
type = "shadowsocks"
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@198.51.100.50:8388"
weight = 2
enabled = true
```
### Example 5: Mixed upstreams with scopes
```toml
[[upstreams]]

View File

@@ -82,7 +82,7 @@ Defaults below are code defaults (used when a key is omitted), not necessarily v
| Field | Applies to | Type | Required | Default | Meaning |
|---|---|---|---|---|---|
| `[[upstreams]].type` | all upstreams | `"direct" \| "socks4" \| "socks5"` | yes | n/a | Upstream transport type. |
| `[[upstreams]].type` | all upstreams | `"direct" \| "socks4" \| "socks5" \| "shadowsocks"` | yes | n/a | Upstream transport type. |
| `[[upstreams]].weight` | all upstreams | `u16` | no | `1` | Base weight for weighted-random selection. |
| `[[upstreams]].enabled` | all upstreams | `bool` | no | `true` | Disabled entries are ignored at startup. |
| `[[upstreams]].scopes` | all upstreams | `String` | no | `""` | Comma-separated scope tags for request-level routing. |
@@ -95,6 +95,8 @@ Defaults below are code defaults (used when a key is omitted), not necessarily v
| `interface` | `socks5` | `Option<String>` | no | `null` | Used only for SOCKS server `ip:port` dial path. |
| `username` | `socks5` | `Option<String>` | no | `null` | SOCKS5 username auth. |
| `password` | `socks5` | `Option<String>` | no | `null` | SOCKS5 password auth. |
| `url` | `shadowsocks` | `String` | yes | n/a | Shadowsocks SIP002 URL (`ss://...`). Only `host:port` is exposed in runtime APIs. |
| `interface` | `shadowsocks` | `Option<String>` | no | `null` | Optional outgoing bind interface or literal local IP. |
### Runtime rules (important)
@@ -115,6 +117,7 @@ Defaults below are code defaults (used when a key is omitted), not necessarily v
8. In ME mode, the selected upstream is also used for ME TCP dial path.
9. In ME mode for `direct` upstream with bind/interface, STUN reflection logic is bind-aware for KDF source material.
10. In ME mode for SOCKS upstream, SOCKS `BND.ADDR/BND.PORT` is used for KDF when it is valid/public for the same family.
11. `shadowsocks` upstreams require `general.use_middle_proxy = false`. Config load fails fast if ME mode is enabled.
## Upstream Configuration Examples
@@ -150,7 +153,20 @@ weight = 2
enabled = true
```
### Example 4: Mixed upstreams with scopes
### Example 4: Shadowsocks upstream
```toml
[general]
use_middle_proxy = false
[[upstreams]]
type = "shadowsocks"
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@198.51.100.50:8388"
weight = 2
enabled = true
```
### Example 5: Mixed upstreams with scopes
```toml
[[upstreams]]

View File

@@ -82,7 +82,7 @@
| Field | Applies to | Type | Required | Default | Purpose |
|---|---|---|---|---|---|
| `[[upstreams]].type` | all upstreams | `"direct" \| "socks4" \| "socks5"` | yes | n/a | Upstream transport type. |
| `[[upstreams]].type` | all upstreams | `"direct" \| "socks4" \| "socks5" \| "shadowsocks"` | yes | n/a | Upstream transport type. |
| `[[upstreams]].weight` | all upstreams | `u16` | no | `1` | Base weight in weighted-random selection. |
| `[[upstreams]].enabled` | all upstreams | `bool` | no | `true` | Disabled entries are ignored at startup. |
| `[[upstreams]].scopes` | all upstreams | `String` | no | `""` | Comma-separated scope tokens for routing. |
@@ -95,6 +95,8 @@
| `interface` | `socks5` | `Option<String>` | no | `null` | Used only if `address` is given as `ip:port`. |
| `username` | `socks5` | `Option<String>` | no | `null` | SOCKS5 auth login. |
| `password` | `socks5` | `Option<String>` | no | `null` | SOCKS5 auth password. |
| `url` | `shadowsocks` | `String` | yes | n/a | Shadowsocks SIP002 URL (`ss://...`). Only `host:port` is exposed in the runtime API. |
| `interface` | `shadowsocks` | `Option<String>` | no | `null` | Optional outgoing bind interface or literal local IP. |
### Runtime rules
@@ -115,6 +117,7 @@
8. In ME mode, the selected upstream is also used for the ME TCP dial path.
9. In ME mode, for a `direct` upstream with bind/interface, STUN reflection is performed bind-aware for KDF material.
10. In ME mode, for a SOCKS upstream, `BND.ADDR/BND.PORT` is used for KDF if the address is valid/public and matches the IP family.
11. A `shadowsocks` upstream requires `general.use_middle_proxy = false`. With ME mode enabled, the config is rejected at load.
## Upstream Configuration Examples
@@ -150,7 +153,20 @@ weight = 2
enabled = true
```
### Example 4: mixed upstreams with scopes
### Example 4: Shadowsocks upstream
```toml
[general]
use_middle_proxy = false
[[upstreams]]
type = "shadowsocks"
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@198.51.100.50:8388"
weight = 2
enabled = true
```
### Example 5: mixed upstreams with scopes
```toml
[[upstreams]]

View File

@@ -3,113 +3,554 @@ set -eu
REPO="${REPO:-telemt/telemt}"
BIN_NAME="${BIN_NAME:-telemt}"
VERSION="${1:-${VERSION:-latest}}"
INSTALL_DIR="${INSTALL_DIR:-/usr/local/bin}"
INSTALL_DIR="${INSTALL_DIR:-/bin}"
CONFIG_DIR="${CONFIG_DIR:-/etc/telemt}"
CONFIG_FILE="${CONFIG_FILE:-${CONFIG_DIR}/telemt.toml}"
WORK_DIR="${WORK_DIR:-/opt/telemt}"
TLS_DOMAIN="${TLS_DOMAIN:-petrovich.ru}"
SERVICE_NAME="telemt"
TEMP_DIR=""
SUDO=""
CONFIG_PARENT_DIR=""
SERVICE_START_FAILED=0
ACTION="install"
TARGET_VERSION="${VERSION:-latest}"
while [ $# -gt 0 ]; do
case "$1" in
-h|--help) ACTION="help"; shift ;;
uninstall|--uninstall)
if [ "$ACTION" != "purge" ]; then ACTION="uninstall"; fi
shift ;;
purge|--purge) ACTION="purge"; shift ;;
install|--install) ACTION="install"; shift ;;
-*) printf '[ERROR] Unknown option: %s\n' "$1" >&2; exit 1 ;;
*)
if [ "$ACTION" = "install" ]; then TARGET_VERSION="$1"
else printf '[WARNING] Ignoring extra argument: %s\n' "$1" >&2; fi
shift ;;
esac
done
say() {
printf '%s\n' "$*"
if [ "$#" -eq 0 ] || [ -z "${1:-}" ]; then
printf '\n'
else
printf '[INFO] %s\n' "$*"
fi
}
die() { printf '[ERROR] %s\n' "$*" >&2; exit 1; }
write_root() { $SUDO sh -c 'cat > "$1"' _ "$1"; }
cleanup() {
if [ -n "${TEMP_DIR:-}" ] && [ -d "$TEMP_DIR" ]; then
rm -rf -- "$TEMP_DIR"
fi
}
trap cleanup EXIT INT TERM
show_help() {
say "Usage: $0 [ <version> | install | uninstall | purge | --help ]"
say " <version> Install specific version (e.g. 3.3.15, default: latest)"
say " install Install the latest version"
say " uninstall Remove the binary and service (keeps config and user)"
say " purge Remove everything including configuration, data, and user"
exit 0
}
die() {
printf 'Error: %s\n' "$*" >&2
exit 1
check_os_entity() {
if command -v getent >/dev/null 2>&1; then getent "$1" "$2" >/dev/null 2>&1
else grep -q "^${2}:" "/etc/$1" 2>/dev/null; fi
}
need_cmd() {
command -v "$1" >/dev/null 2>&1 || die "required command not found: $1"
normalize_path() {
printf '%s\n' "$1" | tr -s '/' | sed 's|/$||; s|^$|/|'
}
detect_os() {
os="$(uname -s)"
case "$os" in
Linux) printf 'linux\n' ;;
OpenBSD) printf 'openbsd\n' ;;
*) printf '%s\n' "$os" ;;
get_realpath() {
path_in="$1"
case "$path_in" in /*) ;; *) path_in="$(pwd)/$path_in" ;; esac
if command -v realpath >/dev/null 2>&1; then
if realpath_out="$(realpath -m "$path_in" 2>/dev/null)"; then
printf '%s\n' "$realpath_out"
return
fi
fi
if command -v readlink >/dev/null 2>&1; then
resolved_path="$(readlink -f "$path_in" 2>/dev/null || true)"
if [ -n "$resolved_path" ]; then
printf '%s\n' "$resolved_path"
return
fi
fi
d="${path_in%/*}"; b="${path_in##*/}"
if [ -z "$d" ]; then d="/"; fi
if [ "$d" = "$path_in" ]; then d="/"; b="$path_in"; fi
if [ -d "$d" ]; then
abs_d="$(cd "$d" >/dev/null 2>&1 && pwd || true)"
if [ -n "$abs_d" ]; then
if [ "$b" = "." ] || [ -z "$b" ]; then printf '%s\n' "$abs_d"
elif [ "$abs_d" = "/" ]; then printf '/%s\n' "$b"
else printf '%s/%s\n' "$abs_d" "$b"; fi
else
normalize_path "$path_in"
fi
else
normalize_path "$path_in"
fi
}
get_svc_mgr() {
if command -v systemctl >/dev/null 2>&1 && [ -d /run/systemd/system ]; then echo "systemd"
elif command -v rc-service >/dev/null 2>&1; then echo "openrc"
else echo "none"; fi
}
verify_common() {
[ -n "$BIN_NAME" ] || die "BIN_NAME cannot be empty."
[ -n "$INSTALL_DIR" ] || die "INSTALL_DIR cannot be empty."
[ -n "$CONFIG_DIR" ] || die "CONFIG_DIR cannot be empty."
[ -n "$CONFIG_FILE" ] || die "CONFIG_FILE cannot be empty."
case "${INSTALL_DIR}${CONFIG_DIR}${WORK_DIR}${CONFIG_FILE}" in
*[!a-zA-Z0-9_./-]*) die "Invalid characters in paths. Only alphanumeric, _, ., -, and / allowed." ;;
esac
case "$TARGET_VERSION" in *[!a-zA-Z0-9_.-]*) die "Invalid characters in version." ;; esac
case "$BIN_NAME" in *[!a-zA-Z0-9_-]*) die "Invalid characters in BIN_NAME." ;; esac
INSTALL_DIR="$(get_realpath "$INSTALL_DIR")"
CONFIG_DIR="$(get_realpath "$CONFIG_DIR")"
WORK_DIR="$(get_realpath "$WORK_DIR")"
CONFIG_FILE="$(get_realpath "$CONFIG_FILE")"
CONFIG_PARENT_DIR="${CONFIG_FILE%/*}"
if [ -z "$CONFIG_PARENT_DIR" ]; then CONFIG_PARENT_DIR="/"; fi
if [ "$CONFIG_PARENT_DIR" = "$CONFIG_FILE" ]; then CONFIG_PARENT_DIR="."; fi
if [ "$(id -u)" -eq 0 ]; then
SUDO=""
else
command -v sudo >/dev/null 2>&1 || die "This script requires root or sudo. Neither found."
SUDO="sudo"
if ! sudo -n true 2>/dev/null; then
if ! [ -t 0 ]; then
die "sudo requires a password, but no TTY detected. Aborting to prevent hang."
fi
fi
fi
if [ -n "$SUDO" ]; then
if $SUDO sh -c '[ -d "$1" ]' _ "$CONFIG_FILE"; then
die "Safety check failed: CONFIG_FILE '$CONFIG_FILE' is a directory."
fi
elif [ -d "$CONFIG_FILE" ]; then
die "Safety check failed: CONFIG_FILE '$CONFIG_FILE' is a directory."
fi
for path in "$CONFIG_DIR" "$CONFIG_PARENT_DIR" "$WORK_DIR"; do
check_path="$(get_realpath "$path")"
case "$check_path" in
/|/bin|/sbin|/usr|/usr/bin|/usr/sbin|/usr/local|/usr/local/bin|/usr/local/sbin|/usr/local/etc|/usr/local/share|/etc|/var|/var/lib|/var/log|/var/run|/home|/root|/tmp|/lib|/lib64|/opt|/run|/boot|/dev|/sys|/proc)
die "Safety check failed: '$path' (resolved to '$check_path') is a critical system directory." ;;
esac
done
check_install_dir="$(get_realpath "$INSTALL_DIR")"
case "$check_install_dir" in
/|/etc|/var|/home|/root|/tmp|/usr|/usr/local|/opt|/boot|/dev|/sys|/proc|/run)
die "Safety check failed: INSTALL_DIR '$INSTALL_DIR' is a critical system directory." ;;
esac
for cmd in id uname grep find rm chown chmod mv mktemp mkdir tr dd sed ps head sleep cat tar gzip rmdir; do
command -v "$cmd" >/dev/null 2>&1 || die "Required command not found: $cmd"
done
}
verify_install_deps() {
command -v curl >/dev/null 2>&1 || command -v wget >/dev/null 2>&1 || die "Neither curl nor wget is installed."
command -v cp >/dev/null 2>&1 || command -v install >/dev/null 2>&1 || die "Need cp or install"
if ! command -v setcap >/dev/null 2>&1; then
if command -v apk >/dev/null 2>&1; then
$SUDO apk add --no-cache libcap-utils >/dev/null 2>&1 || $SUDO apk add --no-cache libcap >/dev/null 2>&1 || true
elif command -v apt-get >/dev/null 2>&1; then
$SUDO apt-get update -q >/dev/null 2>&1 || true
$SUDO apt-get install -y -q libcap2-bin >/dev/null 2>&1 || true
elif command -v dnf >/dev/null 2>&1; then $SUDO dnf install -y -q libcap >/dev/null 2>&1 || true
elif command -v yum >/dev/null 2>&1; then $SUDO yum install -y -q libcap >/dev/null 2>&1 || true
fi
fi
}
detect_arch() {
arch="$(uname -m)"
case "$arch" in
x86_64|amd64) printf 'x86_64\n' ;;
aarch64|arm64) printf 'aarch64\n' ;;
*) die "unsupported architecture: $arch" ;;
sys_arch="$(uname -m)"
case "$sys_arch" in
x86_64|amd64) echo "x86_64" ;;
aarch64|arm64) echo "aarch64" ;;
*) die "Unsupported architecture: $sys_arch" ;;
esac
}
detect_libc() {
case "$(ldd --version 2>&1 || true)" in
*musl*) printf 'musl\n' ;;
*) printf 'gnu\n' ;;
esac
for f in /lib/ld-musl-*.so.* /lib64/ld-musl-*.so.*; do
if [ -e "$f" ]; then echo "musl"; return 0; fi
done
if grep -qE '^ID="?alpine"?' /etc/os-release 2>/dev/null; then echo "musl"; return 0; fi
if command -v ldd >/dev/null 2>&1 && (ldd --version 2>&1 || true) | grep -qi musl; then echo "musl"; return 0; fi
echo "gnu"
}
fetch_to_stdout() {
url="$1"
if command -v curl >/dev/null 2>&1; then
curl -fsSL "$url"
elif command -v wget >/dev/null 2>&1; then
wget -qO- "$url"
else
die "neither curl nor wget is installed"
fetch_file() {
if command -v curl >/dev/null 2>&1; then curl -fsSL "$1" -o "$2"
else wget -q -O "$2" "$1"; fi
}
ensure_user_group() {
nologin_bin="$(command -v nologin 2>/dev/null || command -v false 2>/dev/null || echo /bin/false)"
if ! check_os_entity group telemt; then
if command -v groupadd >/dev/null 2>&1; then $SUDO groupadd -r telemt
elif command -v addgroup >/dev/null 2>&1; then $SUDO addgroup -S telemt
else die "Cannot create group"; fi
fi
if ! check_os_entity passwd telemt; then
if command -v useradd >/dev/null 2>&1; then
$SUDO useradd -r -g telemt -d "$WORK_DIR" -s "$nologin_bin" -c "Telemt Proxy" telemt
elif command -v adduser >/dev/null 2>&1; then
if adduser --help 2>&1 | grep -q -- '-S'; then
$SUDO adduser -S -D -H -h "$WORK_DIR" -s "$nologin_bin" -G telemt telemt
else
$SUDO adduser --system --home "$WORK_DIR" --shell "$nologin_bin" --no-create-home --ingroup telemt --disabled-password telemt
fi
else die "Cannot create user"; fi
fi
}
setup_dirs() {
$SUDO mkdir -p "$WORK_DIR" "$CONFIG_DIR" "$CONFIG_PARENT_DIR" || die "Failed to create directories"
$SUDO chown telemt:telemt "$WORK_DIR" && $SUDO chmod 750 "$WORK_DIR"
$SUDO chown root:telemt "$CONFIG_DIR" && $SUDO chmod 750 "$CONFIG_DIR"
if [ "$CONFIG_PARENT_DIR" != "$CONFIG_DIR" ] && [ "$CONFIG_PARENT_DIR" != "." ] && [ "$CONFIG_PARENT_DIR" != "/" ]; then
$SUDO chown root:telemt "$CONFIG_PARENT_DIR" && $SUDO chmod 750 "$CONFIG_PARENT_DIR"
fi
}
stop_service() {
svc="$(get_svc_mgr)"
if [ "$svc" = "systemd" ] && systemctl is-active --quiet "$SERVICE_NAME" 2>/dev/null; then
$SUDO systemctl stop "$SERVICE_NAME" 2>/dev/null || true
elif [ "$svc" = "openrc" ] && rc-service "$SERVICE_NAME" status >/dev/null 2>&1; then
$SUDO rc-service "$SERVICE_NAME" stop 2>/dev/null || true
fi
}
install_binary() {
src="$1"
dst="$2"
bin_src="$1"; bin_dst="$2"
if [ -e "$INSTALL_DIR" ] && [ ! -d "$INSTALL_DIR" ]; then
die "'$INSTALL_DIR' is not a directory."
fi
if [ -w "$INSTALL_DIR" ] || { [ ! -e "$INSTALL_DIR" ] && [ -w "$(dirname "$INSTALL_DIR")" ]; }; then
mkdir -p "$INSTALL_DIR"
install -m 0755 "$src" "$dst"
elif command -v sudo >/dev/null 2>&1; then
sudo mkdir -p "$INSTALL_DIR"
sudo install -m 0755 "$src" "$dst"
$SUDO mkdir -p "$INSTALL_DIR" || die "Failed to create install directory"
if command -v install >/dev/null 2>&1; then
$SUDO install -m 0755 "$bin_src" "$bin_dst" || die "Failed to install binary"
else
die "cannot write to $INSTALL_DIR and sudo is not available"
$SUDO rm -f "$bin_dst" 2>/dev/null || true
$SUDO cp "$bin_src" "$bin_dst" && $SUDO chmod 0755 "$bin_dst" || die "Failed to copy binary"
fi
$SUDO sh -c '[ -x "$1" ]' _ "$bin_dst" || die "Binary not executable: $bin_dst"
if command -v setcap >/dev/null 2>&1; then
$SUDO setcap cap_net_bind_service=+ep "$bin_dst" 2>/dev/null || true
fi
}
need_cmd uname
need_cmd tar
need_cmd mktemp
need_cmd grep
need_cmd install
generate_secret() {
secret="$(command -v openssl >/dev/null 2>&1 && openssl rand -hex 16 2>/dev/null || true)"
if [ -z "$secret" ] || [ "${#secret}" -ne 32 ]; then
if command -v od >/dev/null 2>&1; then secret="$(dd if=/dev/urandom bs=16 count=1 2>/dev/null | od -An -tx1 | tr -d ' \n')"
elif command -v hexdump >/dev/null 2>&1; then secret="$(dd if=/dev/urandom bs=16 count=1 2>/dev/null | hexdump -e '1/1 "%02x"')"
elif command -v xxd >/dev/null 2>&1; then secret="$(dd if=/dev/urandom bs=16 count=1 2>/dev/null | xxd -p | tr -d '\n')"
fi
fi
if [ "${#secret}" -eq 32 ]; then echo "$secret"; else return 1; fi
}
ARCH="$(detect_arch)"
OS="$(detect_os)"
generate_config_content() {
escaped_tls_domain="$(printf '%s\n' "$TLS_DOMAIN" | tr -d '[:cntrl:]' | sed 's/\\/\\\\/g; s/"/\\"/g')"
if [ "$OS" != "linux" ]; then
case "$OS" in
openbsd)
die "install.sh installs only Linux release artifacts. On OpenBSD, build from source (see docs/OPENBSD.en.md)."
;;
*)
die "unsupported operating system for install.sh: $OS"
;;
esac
fi
cat <<EOF
[general]
use_middle_proxy = false
LIBC="$(detect_libc)"
[general.modes]
classic = false
secure = false
tls = true
case "$VERSION" in
latest)
URL="https://github.com/$REPO/releases/latest/download/${BIN_NAME}-${ARCH}-linux-${LIBC}.tar.gz"
;;
*)
URL="https://github.com/$REPO/releases/download/${VERSION}/${BIN_NAME}-${ARCH}-linux-${LIBC}.tar.gz"
[server]
port = 443
[server.api]
enabled = true
listen = "127.0.0.1:9091"
whitelist = ["127.0.0.1/32"]
[censorship]
tls_domain = "${escaped_tls_domain}"
[access.users]
hello = "$1"
EOF
}
install_config() {
if [ -n "$SUDO" ]; then
if $SUDO sh -c '[ -f "$1" ]' _ "$CONFIG_FILE"; then
say " -> Config already exists at $CONFIG_FILE. Skipping creation."
return 0
fi
elif [ -f "$CONFIG_FILE" ]; then
say " -> Config already exists at $CONFIG_FILE. Skipping creation."
return 0
fi
toml_secret="$(generate_secret)" || die "Failed to generate secret."
generate_config_content "$toml_secret" | write_root "$CONFIG_FILE" || die "Failed to install config"
$SUDO chown root:telemt "$CONFIG_FILE" && $SUDO chmod 640 "$CONFIG_FILE"
say " -> Config created successfully."
say " -> Generated secret for default user 'hello': $toml_secret"
}
generate_systemd_content() {
cat <<EOF
[Unit]
Description=Telemt
After=network-online.target
Wants=network-online.target
[Service]
Type=simple
User=telemt
Group=telemt
WorkingDirectory=$WORK_DIR
ExecStart="${INSTALL_DIR}/${BIN_NAME}" "${CONFIG_FILE}"
Restart=on-failure
LimitNOFILE=65536
AmbientCapabilities=CAP_NET_BIND_SERVICE
CapabilityBoundingSet=CAP_NET_BIND_SERVICE
[Install]
WantedBy=multi-user.target
EOF
}
generate_openrc_content() {
cat <<EOF
#!/sbin/openrc-run
name="$SERVICE_NAME"
description="Telemt Proxy Service"
command="${INSTALL_DIR}/${BIN_NAME}"
command_args="${CONFIG_FILE}"
command_background=true
command_user="telemt:telemt"
pidfile="/run/\${RC_SVCNAME}.pid"
directory="${WORK_DIR}"
rc_ulimit="-n 65536"
depend() { need net; use logger; }
EOF
}
install_service() {
svc="$(get_svc_mgr)"
if [ "$svc" = "systemd" ]; then
generate_systemd_content | write_root "/etc/systemd/system/${SERVICE_NAME}.service"
$SUDO chown root:root "/etc/systemd/system/${SERVICE_NAME}.service" && $SUDO chmod 644 "/etc/systemd/system/${SERVICE_NAME}.service"
$SUDO systemctl daemon-reload || true
$SUDO systemctl enable "$SERVICE_NAME" || true
if ! $SUDO systemctl start "$SERVICE_NAME"; then
say "[WARNING] Failed to start service"
SERVICE_START_FAILED=1
fi
elif [ "$svc" = "openrc" ]; then
generate_openrc_content | write_root "/etc/init.d/${SERVICE_NAME}"
$SUDO chown root:root "/etc/init.d/${SERVICE_NAME}" && $SUDO chmod 0755 "/etc/init.d/${SERVICE_NAME}"
$SUDO rc-update add "$SERVICE_NAME" default 2>/dev/null || true
if ! $SUDO rc-service "$SERVICE_NAME" start 2>/dev/null; then
say "[WARNING] Failed to start service"
SERVICE_START_FAILED=1
fi
else
cmd="\"${INSTALL_DIR}/${BIN_NAME}\" \"${CONFIG_FILE}\""
if [ -n "$SUDO" ]; then
say " -> Service manager not found. Start manually: sudo -u telemt $cmd"
else
say " -> Service manager not found. Start manually: su -s /bin/sh telemt -c '$cmd'"
fi
fi
}
kill_user_procs() {
if command -v pkill >/dev/null 2>&1; then
$SUDO pkill -u telemt "$BIN_NAME" 2>/dev/null || true
sleep 1
$SUDO pkill -9 -u telemt "$BIN_NAME" 2>/dev/null || true
else
if command -v pgrep >/dev/null 2>&1; then
pids="$(pgrep -u telemt 2>/dev/null || true)"
else
pids="$(ps -u telemt -o pid= 2>/dev/null || true)"
fi
if [ -n "$pids" ]; then
for pid in $pids; do
case "$pid" in ''|*[!0-9]*) continue ;; *) $SUDO kill "$pid" 2>/dev/null || true ;; esac
done
sleep 1
for pid in $pids; do
case "$pid" in ''|*[!0-9]*) continue ;; *) $SUDO kill -9 "$pid" 2>/dev/null || true ;; esac
done
fi
fi
}
uninstall() {
say "Starting uninstallation of $BIN_NAME..."
say ">>> Stage 1: Stopping services"
stop_service
say ">>> Stage 2: Removing service configuration"
svc="$(get_svc_mgr)"
if [ "$svc" = "systemd" ]; then
$SUDO systemctl disable "$SERVICE_NAME" 2>/dev/null || true
$SUDO rm -f "/etc/systemd/system/${SERVICE_NAME}.service"
$SUDO systemctl daemon-reload 2>/dev/null || true
elif [ "$svc" = "openrc" ]; then
$SUDO rc-update del "$SERVICE_NAME" 2>/dev/null || true
$SUDO rm -f "/etc/init.d/${SERVICE_NAME}"
fi
say ">>> Stage 3: Terminating user processes"
kill_user_procs
say ">>> Stage 4: Removing binary"
$SUDO rm -f "${INSTALL_DIR}/${BIN_NAME}"
if [ "$ACTION" = "purge" ]; then
say ">>> Stage 5: Purging configuration, data, and user"
$SUDO rm -rf "$CONFIG_DIR" "$WORK_DIR"
$SUDO rm -f "$CONFIG_FILE"
if [ "$CONFIG_PARENT_DIR" != "$CONFIG_DIR" ] && [ "$CONFIG_PARENT_DIR" != "." ] && [ "$CONFIG_PARENT_DIR" != "/" ]; then
$SUDO rmdir "$CONFIG_PARENT_DIR" 2>/dev/null || true
fi
$SUDO userdel telemt 2>/dev/null || $SUDO deluser telemt 2>/dev/null || true
$SUDO groupdel telemt 2>/dev/null || $SUDO delgroup telemt 2>/dev/null || true
else
say "Note: Configuration and user kept. Run with 'purge' to remove completely."
fi
printf '\n====================================================================\n'
printf ' UNINSTALLATION COMPLETE\n'
printf '====================================================================\n\n'
exit 0
}
case "$ACTION" in
help) show_help ;;
uninstall|purge) verify_common; uninstall ;;
install)
say "Starting installation of $BIN_NAME (Version: $TARGET_VERSION)"
say ">>> Stage 1: Verifying environment and dependencies"
verify_common; verify_install_deps
if [ "$TARGET_VERSION" != "latest" ]; then
TARGET_VERSION="${TARGET_VERSION#v}"
fi
ARCH="$(detect_arch)"; LIBC="$(detect_libc)"
FILE_NAME="${BIN_NAME}-${ARCH}-linux-${LIBC}.tar.gz"
if [ "$TARGET_VERSION" = "latest" ]; then
DL_URL="https://github.com/${REPO}/releases/latest/download/${FILE_NAME}"
else
DL_URL="https://github.com/${REPO}/releases/download/${TARGET_VERSION}/${FILE_NAME}"
fi
say ">>> Stage 2: Downloading archive"
TEMP_DIR="$(mktemp -d)" || die "Temp directory creation failed"
if [ -z "$TEMP_DIR" ] || [ ! -d "$TEMP_DIR" ]; then
die "Temp directory is invalid or was not created"
fi
fetch_file "$DL_URL" "${TEMP_DIR}/${FILE_NAME}" || die "Download failed"
say ">>> Stage 3: Extracting archive"
if ! gzip -dc "${TEMP_DIR}/${FILE_NAME}" | tar -xf - -C "$TEMP_DIR" 2>/dev/null; then
die "Extraction failed (downloaded archive might be invalid or 404)."
fi
EXTRACTED_BIN="$(find "$TEMP_DIR" -type f -name "$BIN_NAME" -print 2>/dev/null | head -n 1 || true)"
[ -n "$EXTRACTED_BIN" ] || die "Binary '$BIN_NAME' not found in archive"
say ">>> Stage 4: Setting up environment (User, Group, Directories)"
ensure_user_group; setup_dirs; stop_service
say ">>> Stage 5: Installing binary"
install_binary "$EXTRACTED_BIN" "${INSTALL_DIR}/${BIN_NAME}"
say ">>> Stage 6: Generating configuration"
install_config
say ">>> Stage 7: Installing and starting service"
install_service
if [ "${SERVICE_START_FAILED:-0}" -eq 1 ]; then
printf '\n====================================================================\n'
printf ' INSTALLATION COMPLETED WITH WARNINGS\n'
printf '====================================================================\n\n'
printf 'The service was installed but failed to start automatically.\n'
printf 'Please check the logs to determine the issue.\n\n'
else
printf '\n====================================================================\n'
printf ' INSTALLATION SUCCESS\n'
printf '====================================================================\n\n'
fi
svc="$(get_svc_mgr)"
if [ "$svc" = "systemd" ]; then
printf 'To check the status of your proxy service, run:\n'
printf ' systemctl status %s\n\n' "$SERVICE_NAME"
elif [ "$svc" = "openrc" ]; then
printf 'To check the status of your proxy service, run:\n'
printf ' rc-service %s status\n\n' "$SERVICE_NAME"
fi
printf 'To get your user connection links (for Telegram), run:\n'
if command -v jq >/dev/null 2>&1; then
printf ' curl -s http://127.0.0.1:9091/v1/users | jq -r '\''.data[] | "User: \\(.username)\\n\\(.links.tls[0] // empty)\\n"'\''\n'
else
printf ' curl -s http://127.0.0.1:9091/v1/users\n'
printf ' (Tip: Install '\''jq'\'' for a much cleaner output)\n'
fi
printf '\n====================================================================\n'
;;
esac
TMPDIR="$(mktemp -d)"
trap 'rm -rf "$TMPDIR"' EXIT INT TERM
say "Installing $BIN_NAME ($VERSION) for $ARCH-linux-$LIBC..."
fetch_to_stdout "$URL" | tar -xzf - -C "$TMPDIR"
[ -f "$TMPDIR/$BIN_NAME" ] || die "archive did not contain $BIN_NAME"
install_binary "$TMPDIR/$BIN_NAME" "$INSTALL_DIR/$BIN_NAME"
say "Installed: $INSTALL_DIR/$BIN_NAME"
"$INSTALL_DIR/$BIN_NAME" --version 2>/dev/null || true

View File

@@ -134,6 +134,7 @@ pub(super) struct UpstreamSummaryData {
pub(super) direct_total: usize,
pub(super) socks4_total: usize,
pub(super) socks5_total: usize,
pub(super) shadowsocks_total: usize,
}
#[derive(Serialize, Clone)]
@@ -205,6 +206,16 @@ pub(super) struct ZeroPoolData {
pub(super) refill_failed_total: u64,
pub(super) writer_restored_same_endpoint_total: u64,
pub(super) writer_restored_fallback_total: u64,
pub(super) teardown_attempt_total_normal: u64,
pub(super) teardown_attempt_total_hard_detach: u64,
pub(super) teardown_success_total_normal: u64,
pub(super) teardown_success_total_hard_detach: u64,
pub(super) teardown_timeout_total: u64,
pub(super) teardown_escalation_total: u64,
pub(super) teardown_noop_total: u64,
pub(super) teardown_cleanup_side_effect_failures_total: u64,
pub(super) teardown_duration_count_total: u64,
pub(super) teardown_duration_sum_seconds_total: f64,
}
#[derive(Serialize, Clone)]
@@ -364,6 +375,7 @@ pub(super) struct MinimalMeRuntimeData {
pub(super) me_reconnect_backoff_cap_ms: u64,
pub(super) me_reconnect_fast_retry_count: u32,
pub(super) me_pool_drain_ttl_secs: u64,
pub(super) me_instadrain: bool,
pub(super) me_pool_drain_soft_evict_enabled: bool,
pub(super) me_pool_drain_soft_evict_grace_secs: u64,
pub(super) me_pool_drain_soft_evict_per_writer: u8,

View File

@@ -4,6 +4,9 @@ use std::time::{SystemTime, UNIX_EPOCH};
use serde::Serialize;
use crate::config::ProxyConfig;
use crate::stats::{
MeWriterCleanupSideEffectStep, MeWriterTeardownMode, MeWriterTeardownReason, Stats,
};
use super::ApiShared;
@@ -98,6 +101,50 @@ pub(super) struct RuntimeMeQualityCountersData {
pub(super) reconnect_success_total: u64,
}
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityTeardownAttemptData {
pub(super) reason: &'static str,
pub(super) mode: &'static str,
pub(super) total: u64,
}
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityTeardownSuccessData {
pub(super) mode: &'static str,
pub(super) total: u64,
}
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityTeardownSideEffectData {
pub(super) step: &'static str,
pub(super) total: u64,
}
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityTeardownDurationBucketData {
pub(super) le_seconds: &'static str,
pub(super) total: u64,
}
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityTeardownDurationData {
pub(super) mode: &'static str,
pub(super) count: u64,
pub(super) sum_seconds: f64,
pub(super) buckets: Vec<RuntimeMeQualityTeardownDurationBucketData>,
}
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityTeardownData {
pub(super) attempts: Vec<RuntimeMeQualityTeardownAttemptData>,
pub(super) success: Vec<RuntimeMeQualityTeardownSuccessData>,
pub(super) timeout_total: u64,
pub(super) escalation_total: u64,
pub(super) noop_total: u64,
pub(super) cleanup_side_effect_failures: Vec<RuntimeMeQualityTeardownSideEffectData>,
pub(super) duration: Vec<RuntimeMeQualityTeardownDurationData>,
}
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityRouteDropData {
pub(super) no_conn_total: u64,
@@ -107,6 +154,25 @@ pub(super) struct RuntimeMeQualityRouteDropData {
pub(super) queue_full_high_total: u64,
}
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityFamilyStateData {
pub(super) family: &'static str,
pub(super) state: &'static str,
pub(super) state_since_epoch_secs: u64,
#[serde(skip_serializing_if = "Option::is_none")]
pub(super) suppressed_until_epoch_secs: Option<u64>,
pub(super) fail_streak: u32,
pub(super) recover_success_streak: u32,
}
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityDrainGateData {
pub(super) route_quorum_ok: bool,
pub(super) redundancy_ok: bool,
pub(super) block_reason: &'static str,
pub(super) updated_at_epoch_secs: u64,
}
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityDcRttData {
pub(super) dc: i16,
@@ -120,7 +186,10 @@ pub(super) struct RuntimeMeQualityDcRttData {
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityPayload {
pub(super) counters: RuntimeMeQualityCountersData,
pub(super) teardown: RuntimeMeQualityTeardownData,
pub(super) route_drops: RuntimeMeQualityRouteDropData,
pub(super) family_states: Vec<RuntimeMeQualityFamilyStateData>,
pub(super) drain_gate: RuntimeMeQualityDrainGateData,
pub(super) dc_rtt: Vec<RuntimeMeQualityDcRttData>,
}
@@ -159,6 +228,7 @@ pub(super) struct RuntimeUpstreamQualitySummaryData {
pub(super) direct_total: usize,
pub(super) socks4_total: usize,
pub(super) socks5_total: usize,
pub(super) shadowsocks_total: usize,
}
#[derive(Serialize)]
@@ -361,6 +431,19 @@ pub(super) async fn build_runtime_me_quality_data(shared: &ApiShared) -> Runtime
};
let status = pool.api_status_snapshot().await;
let family_states = pool
.api_family_state_snapshot()
.into_iter()
.map(|entry| RuntimeMeQualityFamilyStateData {
family: entry.family,
state: entry.state,
state_since_epoch_secs: entry.state_since_epoch_secs,
suppressed_until_epoch_secs: entry.suppressed_until_epoch_secs,
fail_streak: entry.fail_streak,
recover_success_streak: entry.recover_success_streak,
})
.collect();
let drain_gate_snapshot = pool.api_drain_gate_snapshot();
RuntimeMeQualityData {
enabled: true,
reason: None,
@@ -374,6 +457,7 @@ pub(super) async fn build_runtime_me_quality_data(shared: &ApiShared) -> Runtime
reconnect_attempt_total: shared.stats.get_me_reconnect_attempts(),
reconnect_success_total: shared.stats.get_me_reconnect_success(),
},
teardown: build_runtime_me_teardown_data(shared),
route_drops: RuntimeMeQualityRouteDropData {
no_conn_total: shared.stats.get_me_route_drop_no_conn(),
channel_closed_total: shared.stats.get_me_route_drop_channel_closed(),
@@ -381,6 +465,13 @@ pub(super) async fn build_runtime_me_quality_data(shared: &ApiShared) -> Runtime
queue_full_base_total: shared.stats.get_me_route_drop_queue_full_base(),
queue_full_high_total: shared.stats.get_me_route_drop_queue_full_high(),
},
family_states,
drain_gate: RuntimeMeQualityDrainGateData {
route_quorum_ok: drain_gate_snapshot.route_quorum_ok,
redundancy_ok: drain_gate_snapshot.redundancy_ok,
block_reason: drain_gate_snapshot.block_reason,
updated_at_epoch_secs: drain_gate_snapshot.updated_at_epoch_secs,
},
dc_rtt: status
.dcs
.into_iter()
@@ -397,6 +488,81 @@ pub(super) async fn build_runtime_me_quality_data(shared: &ApiShared) -> Runtime
}
}
fn build_runtime_me_teardown_data(shared: &ApiShared) -> RuntimeMeQualityTeardownData {
let attempts = MeWriterTeardownReason::ALL
.iter()
.copied()
.flat_map(|reason| {
MeWriterTeardownMode::ALL
.iter()
.copied()
.map(move |mode| RuntimeMeQualityTeardownAttemptData {
reason: reason.as_str(),
mode: mode.as_str(),
total: shared.stats.get_me_writer_teardown_attempt_total(reason, mode),
})
})
.collect();
let success = MeWriterTeardownMode::ALL
.iter()
.copied()
.map(|mode| RuntimeMeQualityTeardownSuccessData {
mode: mode.as_str(),
total: shared.stats.get_me_writer_teardown_success_total(mode),
})
.collect();
let cleanup_side_effect_failures = MeWriterCleanupSideEffectStep::ALL
.iter()
.copied()
.map(|step| RuntimeMeQualityTeardownSideEffectData {
step: step.as_str(),
total: shared
.stats
.get_me_writer_cleanup_side_effect_failures_total(step),
})
.collect();
let duration = MeWriterTeardownMode::ALL
.iter()
.copied()
.map(|mode| {
let count = shared.stats.get_me_writer_teardown_duration_count(mode);
let mut buckets: Vec<RuntimeMeQualityTeardownDurationBucketData> = Stats::me_writer_teardown_duration_bucket_labels()
.iter()
.enumerate()
.map(|(bucket_idx, label)| RuntimeMeQualityTeardownDurationBucketData {
le_seconds: label,
total: shared
.stats
.get_me_writer_teardown_duration_bucket_total(mode, bucket_idx),
})
.collect();
buckets.push(RuntimeMeQualityTeardownDurationBucketData {
le_seconds: "+Inf",
total: count,
});
RuntimeMeQualityTeardownDurationData {
mode: mode.as_str(),
count,
sum_seconds: shared.stats.get_me_writer_teardown_duration_sum_seconds(mode),
buckets,
}
})
.collect();
RuntimeMeQualityTeardownData {
attempts,
success,
timeout_total: shared.stats.get_me_writer_teardown_timeout_total(),
escalation_total: shared.stats.get_me_writer_teardown_escalation_total(),
noop_total: shared.stats.get_me_writer_teardown_noop_total(),
cleanup_side_effect_failures,
duration,
}
}
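The duration histogram above follows the Prometheus cumulative-bucket convention: every bucket counts observations at or below its boundary, and the synthetic "+Inf" bucket appended at the end (total: count) must equal the total observation count. A minimal invariant check, hypothetical and not part of the diff:

// Cumulative buckets never decrease, and the trailing "+Inf"
// bucket must equal the observation count.
fn cumulative_histogram_is_consistent(buckets: &[u64], count: u64) -> bool {
    buckets.windows(2).all(|pair| pair[0] <= pair[1])
        && buckets.last().copied().unwrap_or(count) == count
}

// e.g. [3, 7, 9, 9] with count = 9 is consistent;
// [3, 7, 10, 9] is not, because the series decreases.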
pub(super) async fn build_runtime_upstream_quality_data(
shared: &ApiShared,
) -> RuntimeUpstreamQualityData {
@@ -406,7 +572,9 @@ pub(super) async fn build_runtime_upstream_quality_data(
connect_attempt_total: shared.stats.get_upstream_connect_attempt_total(),
connect_success_total: shared.stats.get_upstream_connect_success_total(),
connect_fail_total: shared.stats.get_upstream_connect_fail_total(),
connect_failfast_hard_error_total: shared.stats.get_upstream_connect_failfast_hard_error_total(),
connect_failfast_hard_error_total: shared
.stats
.get_upstream_connect_failfast_hard_error_total(),
};
let Some(snapshot) = shared.upstream_manager.try_api_snapshot() else {
@@ -446,6 +614,7 @@ pub(super) async fn build_runtime_upstream_quality_data(
direct_total: snapshot.summary.direct_total,
socks4_total: snapshot.summary.socks4_total,
socks5_total: snapshot.summary.socks5_total,
shadowsocks_total: snapshot.summary.shadowsocks_total,
}),
upstreams: Some(
snapshot
@@ -457,6 +626,7 @@ pub(super) async fn build_runtime_upstream_quality_data(
crate::transport::UpstreamRouteKind::Direct => "direct",
crate::transport::UpstreamRouteKind::Socks4 => "socks4",
crate::transport::UpstreamRouteKind::Socks5 => "socks5",
crate::transport::UpstreamRouteKind::Shadowsocks => "shadowsocks",
},
address: upstream.address,
weight: upstream.weight,
@@ -476,7 +646,9 @@ pub(super) async fn build_runtime_upstream_quality_data(
crate::transport::upstream::IpPreference::PreferV6 => "prefer_v6",
crate::transport::upstream::IpPreference::PreferV4 => "prefer_v4",
crate::transport::upstream::IpPreference::BothWork => "both_work",
crate::transport::upstream::IpPreference::Unavailable => "unavailable",
crate::transport::upstream::IpPreference::Unavailable => {
"unavailable"
}
},
})
.collect(),
@@ -514,14 +686,18 @@ pub(super) async fn build_runtime_nat_stun_data(shared: &ApiShared) -> RuntimeNa
live_total: snapshot.live_servers.len(),
},
reflection: RuntimeNatStunReflectionBlockData {
v4: snapshot.reflection_v4.map(|entry| RuntimeNatStunReflectionData {
addr: entry.addr.to_string(),
age_secs: entry.age_secs,
}),
v6: snapshot.reflection_v6.map(|entry| RuntimeNatStunReflectionData {
addr: entry.addr.to_string(),
age_secs: entry.age_secs,
}),
v4: snapshot
.reflection_v4
.map(|entry| RuntimeNatStunReflectionData {
addr: entry.addr.to_string(),
age_secs: entry.age_secs,
}),
v6: snapshot
.reflection_v6
.map(|entry| RuntimeNatStunReflectionData {
addr: entry.addr.to_string(),
age_secs: entry.age_secs,
}),
},
stun_backoff_remaining_ms: snapshot.stun_backoff_remaining_ms,
}),

View File

@@ -1,5 +1,5 @@
use std::net::IpAddr;
use std::collections::HashMap;
use std::net::IpAddr;
use std::sync::{Mutex, OnceLock};
use std::time::{SystemTime, UNIX_EPOCH};
@@ -7,8 +7,8 @@ use serde::Serialize;
use crate::config::{ProxyConfig, UpstreamType};
use crate::network::probe::{detect_interface_ipv4, detect_interface_ipv6, is_bogon};
use crate::transport::middle_proxy::{bnd_snapshot, timeskew_snapshot, upstream_bnd_snapshots};
use crate::transport::UpstreamRouteKind;
use crate::transport::middle_proxy::{bnd_snapshot, timeskew_snapshot, upstream_bnd_snapshots};
use super::ApiShared;
@@ -262,8 +262,8 @@ fn update_kdf_ewma(now_epoch_secs: u64, total_errors: u64) -> f64 {
let delta_errors = total_errors.saturating_sub(guard.last_total_errors);
let instant_rate_per_min = (delta_errors as f64) * 60.0 / (dt_secs as f64);
let alpha = 1.0 - f64::exp(-(dt_secs as f64) / KDF_EWMA_TAU_SECS);
guard.ewma_errors_per_min = guard.ewma_errors_per_min
+ alpha * (instant_rate_per_min - guard.ewma_errors_per_min);
guard.ewma_errors_per_min =
guard.ewma_errors_per_min + alpha * (instant_rate_per_min - guard.ewma_errors_per_min);
guard.last_epoch_secs = now_epoch_secs;
guard.last_total_errors = total_errors;
guard.ewma_errors_per_min
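The reformatted EWMA update handles irregular sampling intervals: alpha = 1 - exp(-dt/tau) grows with the gap since the last sample, so stale estimates yield faster to the instantaneous rate. A worked sketch with an assumed tau of 60 s (the actual KDF_EWMA_TAU_SECS value is outside this hunk):

// Irregular-interval EWMA, same shape as update_kdf_ewma above.
fn ewma_step(prev: f64, instant_rate: f64, dt_secs: f64, tau_secs: f64) -> f64 {
    let alpha = 1.0 - (-dt_secs / tau_secs).exp();
    prev + alpha * (instant_rate - prev)
}

fn main() {
    // 5 new errors over 30 s -> instantaneous rate of 10 errors/min.
    let instant = 5.0 * 60.0 / 30.0;
    // dt = 30 s with tau = 60 s gives alpha of about 0.393.
    let next = ewma_step(0.0, instant, 30.0, 60.0);
    println!("{next:.2}"); // prints 3.93 errors/min
}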
@@ -284,6 +284,7 @@ fn map_route_kind(value: UpstreamRouteKind) -> &'static str {
UpstreamRouteKind::Direct => "direct",
UpstreamRouteKind::Socks4 => "socks4",
UpstreamRouteKind::Socks5 => "socks5",
UpstreamRouteKind::Shadowsocks => "shadowsocks",
}
}
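Adding the Shadowsocks arm here is mandatory, not optional: map_route_kind is duplicated across several API modules in this diff, and Rust's exhaustive matching turns the new enum variant into a compile error at every copy. A possible consolidation, hypothetical and not in the diff, assuming the label method can live on the enum itself:

impl UpstreamRouteKind {
    // Single source of truth for the wire label of each route kind.
    pub fn as_str(&self) -> &'static str {
        match self {
            UpstreamRouteKind::Direct => "direct",
            UpstreamRouteKind::Socks4 => "socks4",
            UpstreamRouteKind::Socks5 => "socks5",
            UpstreamRouteKind::Shadowsocks => "shadowsocks",
        }
    }
}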

View File

@@ -1,7 +1,7 @@
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
use crate::config::ApiConfig;
use crate::stats::Stats;
use crate::stats::{MeWriterTeardownMode, Stats};
use crate::transport::upstream::IpPreference;
use crate::transport::UpstreamRouteKind;
@@ -106,6 +106,29 @@ pub(super) fn build_zero_all_data(stats: &Stats, configured_users: usize) -> Zer
refill_failed_total: stats.get_me_refill_failed_total(),
writer_restored_same_endpoint_total: stats.get_me_writer_restored_same_endpoint_total(),
writer_restored_fallback_total: stats.get_me_writer_restored_fallback_total(),
teardown_attempt_total_normal: stats
.get_me_writer_teardown_attempt_total_by_mode(MeWriterTeardownMode::Normal),
teardown_attempt_total_hard_detach: stats
.get_me_writer_teardown_attempt_total_by_mode(MeWriterTeardownMode::HardDetach),
teardown_success_total_normal: stats
.get_me_writer_teardown_success_total(MeWriterTeardownMode::Normal),
teardown_success_total_hard_detach: stats
.get_me_writer_teardown_success_total(MeWriterTeardownMode::HardDetach),
teardown_timeout_total: stats.get_me_writer_teardown_timeout_total(),
teardown_escalation_total: stats.get_me_writer_teardown_escalation_total(),
teardown_noop_total: stats.get_me_writer_teardown_noop_total(),
teardown_cleanup_side_effect_failures_total: stats
.get_me_writer_cleanup_side_effect_failures_total_all(),
teardown_duration_count_total: stats
.get_me_writer_teardown_duration_count(MeWriterTeardownMode::Normal)
.saturating_add(
stats.get_me_writer_teardown_duration_count(MeWriterTeardownMode::HardDetach),
),
teardown_duration_sum_seconds_total: stats
.get_me_writer_teardown_duration_sum_seconds(MeWriterTeardownMode::Normal)
+ stats.get_me_writer_teardown_duration_sum_seconds(
MeWriterTeardownMode::HardDetach,
),
},
desync: ZeroDesyncData {
secure_padding_invalid_total: stats.get_secure_padding_invalid(),
@@ -138,7 +161,8 @@ fn build_zero_upstream_data(stats: &Stats) -> ZeroUpstreamData {
.get_upstream_connect_duration_success_bucket_501_1000ms(),
connect_duration_success_bucket_gt_1000ms: stats
.get_upstream_connect_duration_success_bucket_gt_1000ms(),
connect_duration_fail_bucket_le_100ms: stats.get_upstream_connect_duration_fail_bucket_le_100ms(),
connect_duration_fail_bucket_le_100ms: stats
.get_upstream_connect_duration_fail_bucket_le_100ms(),
connect_duration_fail_bucket_101_500ms: stats
.get_upstream_connect_duration_fail_bucket_101_500ms(),
connect_duration_fail_bucket_501_1000ms: stats
@@ -180,6 +204,7 @@ pub(super) fn build_upstreams_data(shared: &ApiShared, api_cfg: &ApiConfig) -> U
direct_total: snapshot.summary.direct_total,
socks4_total: snapshot.summary.socks4_total,
socks5_total: snapshot.summary.socks5_total,
shadowsocks_total: snapshot.summary.shadowsocks_total,
};
let upstreams = snapshot
.upstreams
@@ -395,8 +420,7 @@ async fn get_minimal_payload_cached(
adaptive_floor_min_writers_multi_endpoint: runtime
.adaptive_floor_min_writers_multi_endpoint,
adaptive_floor_recover_grace_secs: runtime.adaptive_floor_recover_grace_secs,
adaptive_floor_writers_per_core_total: runtime
.adaptive_floor_writers_per_core_total,
adaptive_floor_writers_per_core_total: runtime.adaptive_floor_writers_per_core_total,
adaptive_floor_cpu_cores_override: runtime.adaptive_floor_cpu_cores_override,
adaptive_floor_max_extra_writers_single_per_core: runtime
.adaptive_floor_max_extra_writers_single_per_core,
@@ -404,12 +428,9 @@ async fn get_minimal_payload_cached(
.adaptive_floor_max_extra_writers_multi_per_core,
adaptive_floor_max_active_writers_per_core: runtime
.adaptive_floor_max_active_writers_per_core,
adaptive_floor_max_warm_writers_per_core: runtime
.adaptive_floor_max_warm_writers_per_core,
adaptive_floor_max_active_writers_global: runtime
.adaptive_floor_max_active_writers_global,
adaptive_floor_max_warm_writers_global: runtime
.adaptive_floor_max_warm_writers_global,
adaptive_floor_max_warm_writers_per_core: runtime.adaptive_floor_max_warm_writers_per_core,
adaptive_floor_max_active_writers_global: runtime.adaptive_floor_max_active_writers_global,
adaptive_floor_max_warm_writers_global: runtime.adaptive_floor_max_warm_writers_global,
adaptive_floor_cpu_cores_detected: runtime.adaptive_floor_cpu_cores_detected,
adaptive_floor_cpu_cores_effective: runtime.adaptive_floor_cpu_cores_effective,
adaptive_floor_global_cap_raw: runtime.adaptive_floor_global_cap_raw,
@@ -431,6 +452,7 @@ async fn get_minimal_payload_cached(
me_reconnect_backoff_cap_ms: runtime.me_reconnect_backoff_cap_ms,
me_reconnect_fast_retry_count: runtime.me_reconnect_fast_retry_count,
me_pool_drain_ttl_secs: runtime.me_pool_drain_ttl_secs,
me_instadrain: runtime.me_instadrain,
me_pool_drain_soft_evict_enabled: runtime.me_pool_drain_soft_evict_enabled,
me_pool_drain_soft_evict_grace_secs: runtime.me_pool_drain_soft_evict_grace_secs,
me_pool_drain_soft_evict_per_writer: runtime.me_pool_drain_soft_evict_per_writer,
@@ -527,6 +549,7 @@ fn map_route_kind(value: UpstreamRouteKind) -> &'static str {
UpstreamRouteKind::Direct => "direct",
UpstreamRouteKind::Socks4 => "socks4",
UpstreamRouteKind::Socks5 => "socks5",
UpstreamRouteKind::Shadowsocks => "shadowsocks",
}
}

View File

@@ -198,8 +198,15 @@ desync_all_full = false
update_every = 43200
hardswap = false
me_pool_drain_ttl_secs = 90
me_instadrain = false
me_pool_drain_threshold = 32
me_pool_drain_soft_evict_grace_secs = 10
me_pool_drain_soft_evict_per_writer = 2
me_pool_drain_soft_evict_budget_per_core = 16
me_pool_drain_soft_evict_cooldown_ms = 1000
me_bind_stale_mode = "never"
me_pool_min_fresh_ratio = 0.8
me_reinit_drain_timeout_secs = 120
me_reinit_drain_timeout_secs = 90
[network]
ipv4 = true
@@ -261,7 +268,7 @@ fn generate_systemd_unit(exe_path: &Path, config_path: &Path) -> String {
format!(
r#"[Unit]
Description=Telemt MTProxy
Documentation=https://github.com/nicepkg/telemt
Documentation=https://github.com/telemt/telemt
After=network-online.target
Wants=network-online.target

View File

@@ -36,12 +36,16 @@ const DEFAULT_ME_HEALTH_INTERVAL_MS_UNHEALTHY: u64 = 1000;
const DEFAULT_ME_HEALTH_INTERVAL_MS_HEALTHY: u64 = 3000;
const DEFAULT_ME_ADMISSION_POLL_MS: u64 = 1000;
const DEFAULT_ME_WARN_RATE_LIMIT_MS: u64 = 5000;
const DEFAULT_ME_ROUTE_HYBRID_MAX_WAIT_MS: u64 = 3000;
const DEFAULT_ME_ROUTE_BLOCKING_SEND_TIMEOUT_MS: u64 = 250;
const DEFAULT_ME_C2ME_SEND_TIMEOUT_MS: u64 = 4000;
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_ENABLED: bool = true;
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_GRACE_SECS: u64 = 30;
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_PER_WRITER: u8 = 1;
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_BUDGET_PER_CORE: u16 = 8;
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_COOLDOWN_MS: u64 = 5000;
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_GRACE_SECS: u64 = 10;
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_PER_WRITER: u8 = 2;
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_BUDGET_PER_CORE: u16 = 16;
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_COOLDOWN_MS: u64 = 1000;
const DEFAULT_USER_MAX_UNIQUE_IPS_WINDOW_SECS: u64 = 30;
const DEFAULT_ACCEPT_PERMIT_TIMEOUT_MS: u64 = 250;
const DEFAULT_UPSTREAM_CONNECT_RETRY_ATTEMPTS: u32 = 2;
const DEFAULT_UPSTREAM_UNHEALTHY_FAIL_THRESHOLD: u32 = 5;
const DEFAULT_UPSTREAM_CONNECT_BUDGET_MS: u64 = 3000;
@@ -61,6 +65,10 @@ pub(crate) fn default_tls_domain() -> String {
"petrovich.ru".to_string()
}
pub(crate) fn default_tls_fetch_scope() -> String {
String::new()
}
pub(crate) fn default_mask_port() -> u16 {
443
}
@@ -156,6 +164,10 @@ pub(crate) fn default_server_max_connections() -> u32 {
10_000
}
pub(crate) fn default_accept_permit_timeout_ms() -> u64 {
DEFAULT_ACCEPT_PERMIT_TIMEOUT_MS
}
pub(crate) fn default_prefer_4() -> u8 {
4
}
@@ -380,6 +392,18 @@ pub(crate) fn default_me_warn_rate_limit_ms() -> u64 {
DEFAULT_ME_WARN_RATE_LIMIT_MS
}
pub(crate) fn default_me_route_hybrid_max_wait_ms() -> u64 {
DEFAULT_ME_ROUTE_HYBRID_MAX_WAIT_MS
}
pub(crate) fn default_me_route_blocking_send_timeout_ms() -> u64 {
DEFAULT_ME_ROUTE_BLOCKING_SEND_TIMEOUT_MS
}
pub(crate) fn default_me_c2me_send_timeout_ms() -> u64 {
DEFAULT_ME_C2ME_SEND_TIMEOUT_MS
}
pub(crate) fn default_upstream_connect_retry_attempts() -> u32 {
DEFAULT_UPSTREAM_CONNECT_RETRY_ATTEMPTS
}
@@ -586,15 +610,19 @@ pub(crate) fn default_proxy_secret_len_max() -> usize {
}
pub(crate) fn default_me_reinit_drain_timeout_secs() -> u64 {
120
90
}
pub(crate) fn default_me_pool_drain_ttl_secs() -> u64 {
90
}
pub(crate) fn default_me_instadrain() -> bool {
false
}
pub(crate) fn default_me_pool_drain_threshold() -> u64 {
128
32
}
pub(crate) fn default_me_pool_drain_soft_evict_enabled() -> bool {

View File

@@ -39,6 +39,7 @@ use super::load::{LoadedConfig, ProxyConfig};
const HOT_RELOAD_STABLE_SNAPSHOTS: u8 = 2;
const HOT_RELOAD_DEBOUNCE: Duration = Duration::from_millis(50);
const HOT_RELOAD_STABLE_RECHECK: Duration = Duration::from_millis(75);
// ── Hot fields ────────────────────────────────────────────────────────────────
@@ -55,6 +56,7 @@ pub struct HotFields {
pub me_reinit_coalesce_window_ms: u64,
pub hardswap: bool,
pub me_pool_drain_ttl_secs: u64,
pub me_instadrain: bool,
pub me_pool_drain_threshold: u64,
pub me_pool_drain_soft_evict_enabled: bool,
pub me_pool_drain_soft_evict_grace_secs: u64,
@@ -142,6 +144,7 @@ impl HotFields {
me_reinit_coalesce_window_ms: cfg.general.me_reinit_coalesce_window_ms,
hardswap: cfg.general.hardswap,
me_pool_drain_ttl_secs: cfg.general.me_pool_drain_ttl_secs,
me_instadrain: cfg.general.me_instadrain,
me_pool_drain_threshold: cfg.general.me_pool_drain_threshold,
me_pool_drain_soft_evict_enabled: cfg.general.me_pool_drain_soft_evict_enabled,
me_pool_drain_soft_evict_grace_secs: cfg.general.me_pool_drain_soft_evict_grace_secs,
@@ -379,6 +382,14 @@ impl ReloadState {
self.applied_snapshot_hash = Some(hash);
self.reset_candidate();
}
fn pending_candidate(&self) -> Option<(u64, u8)> {
let hash = self.candidate_snapshot_hash?;
if self.candidate_hits < HOT_RELOAD_STABLE_SNAPSHOTS {
return Some((hash, self.candidate_hits));
}
None
}
}
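pending_candidate only reports a snapshot that still needs confirmations; once candidate_hits reaches HOT_RELOAD_STABLE_SNAPSHOTS the snapshot is applied and nothing is pending. A compact sketch of that counting rule (hypothetical mirror, assuming hits grow by one per identical observation):

const STABLE_SNAPSHOTS: u8 = 2;

fn pending(hits: u8) -> Option<u8> {
    (hits < STABLE_SNAPSHOTS).then_some(hits)
}

fn main() {
    assert_eq!(pending(0), Some(0)); // first sighting: recheck needed
    assert_eq!(pending(1), Some(1)); // one confirmation: still pending
    assert_eq!(pending(2), None);    // stable: applied, nothing pending
}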
fn normalize_watch_path(path: &Path) -> PathBuf {
@@ -468,6 +479,7 @@ fn overlay_hot_fields(old: &ProxyConfig, new: &ProxyConfig) -> ProxyConfig {
cfg.general.me_reinit_coalesce_window_ms = new.general.me_reinit_coalesce_window_ms;
cfg.general.hardswap = new.general.hardswap;
cfg.general.me_pool_drain_ttl_secs = new.general.me_pool_drain_ttl_secs;
cfg.general.me_instadrain = new.general.me_instadrain;
cfg.general.me_pool_drain_threshold = new.general.me_pool_drain_threshold;
cfg.general.me_pool_drain_soft_evict_enabled = new.general.me_pool_drain_soft_evict_enabled;
cfg.general.me_pool_drain_soft_evict_grace_secs =
@@ -603,12 +615,15 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
|| old.server.listen_tcp != new.server.listen_tcp
|| old.server.listen_unix_sock != new.server.listen_unix_sock
|| old.server.listen_unix_sock_perm != new.server.listen_unix_sock_perm
|| old.server.max_connections != new.server.max_connections
|| old.server.accept_permit_timeout_ms != new.server.accept_permit_timeout_ms
{
warned = true;
warn!("config reload: server listener settings changed; restart required");
}
if old.censorship.tls_domain != new.censorship.tls_domain
|| old.censorship.tls_domains != new.censorship.tls_domains
|| old.censorship.tls_fetch_scope != new.censorship.tls_fetch_scope
|| old.censorship.mask != new.censorship.mask
|| old.censorship.mask_host != new.censorship.mask_host
|| old.censorship.mask_port != new.censorship.mask_port
@@ -662,6 +677,9 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
}
if old.general.me_route_no_writer_mode != new.general.me_route_no_writer_mode
|| old.general.me_route_no_writer_wait_ms != new.general.me_route_no_writer_wait_ms
|| old.general.me_route_hybrid_max_wait_ms != new.general.me_route_hybrid_max_wait_ms
|| old.general.me_route_blocking_send_timeout_ms
!= new.general.me_route_blocking_send_timeout_ms
|| old.general.me_route_inline_recovery_attempts
!= new.general.me_route_inline_recovery_attempts
|| old.general.me_route_inline_recovery_wait_ms
@@ -670,6 +688,10 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
warned = true;
warn!("config reload: general.me_route_no_writer_* changed; restart required");
}
if old.general.me_c2me_send_timeout_ms != new.general.me_c2me_send_timeout_ms {
warned = true;
warn!("config reload: general.me_c2me_send_timeout_ms changed; restart required");
}
if old.general.unknown_dc_log_path != new.general.unknown_dc_log_path
|| old.general.unknown_dc_file_log_enabled != new.general.unknown_dc_file_log_enabled
{
@@ -851,6 +873,12 @@ fn log_changes(
old_hot.me_pool_drain_ttl_secs, new_hot.me_pool_drain_ttl_secs,
);
}
if old_hot.me_instadrain != new_hot.me_instadrain {
info!(
"config reload: me_instadrain: {} → {}",
old_hot.me_instadrain, new_hot.me_instadrain,
);
}
if old_hot.me_pool_drain_threshold != new_hot.me_pool_drain_threshold {
info!(
@@ -1253,6 +1281,73 @@ fn reload_config(
Some(next_manifest)
}
async fn reload_with_internal_stable_rechecks(
config_path: &PathBuf,
config_tx: &watch::Sender<Arc<ProxyConfig>>,
log_tx: &watch::Sender<LogLevel>,
detected_ip_v4: Option<IpAddr>,
detected_ip_v6: Option<IpAddr>,
reload_state: &mut ReloadState,
) -> Option<WatchManifest> {
let mut next_manifest = reload_config(
config_path,
config_tx,
log_tx,
detected_ip_v4,
detected_ip_v6,
reload_state,
);
let mut rechecks_left = HOT_RELOAD_STABLE_SNAPSHOTS.saturating_sub(1);
while rechecks_left > 0 {
let Some((snapshot_hash, candidate_hits)) = reload_state.pending_candidate() else {
break;
};
info!(
snapshot_hash,
candidate_hits,
required_hits = HOT_RELOAD_STABLE_SNAPSHOTS,
rechecks_left,
recheck_delay_ms = HOT_RELOAD_STABLE_RECHECK.as_millis(),
"config reload: scheduling internal stable recheck"
);
tokio::time::sleep(HOT_RELOAD_STABLE_RECHECK).await;
let recheck_manifest = reload_config(
config_path,
config_tx,
log_tx,
detected_ip_v4,
detected_ip_v6,
reload_state,
);
if recheck_manifest.is_some() {
next_manifest = recheck_manifest;
}
if reload_state.is_applied(snapshot_hash) {
info!(
snapshot_hash,
"config reload: applied after internal stable recheck"
);
break;
}
if reload_state.pending_candidate().is_none() {
info!(
snapshot_hash,
"config reload: internal stable recheck aborted"
);
break;
}
rechecks_left = rechecks_left.saturating_sub(1);
}
next_manifest
}
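With the constants declared earlier (HOT_RELOAD_STABLE_SNAPSHOTS = 2, 50 ms debounce, 75 ms recheck), a single external filesystem event now settles in roughly 125 ms without waiting for a second event, assuming reload_config itself is fast. A sketch of that arithmetic:

const STABLE_SNAPSHOTS: u64 = 2;
const DEBOUNCE_MS: u64 = 50;
const RECHECK_MS: u64 = 75;

fn main() {
    // One debounce wait plus (snapshots - 1) internal rechecks.
    let settle_ms = DEBOUNCE_MS + (STABLE_SNAPSHOTS - 1) * RECHECK_MS;
    assert_eq!(settle_ms, 125);
}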
// ── Public API ────────────────────────────────────────────────────────────────
/// Spawn the hot-reload watcher task.
@@ -1376,14 +1471,16 @@ pub fn spawn_config_watcher(
tokio::time::sleep(HOT_RELOAD_DEBOUNCE).await;
while notify_rx.try_recv().is_ok() {}
if let Some(next_manifest) = reload_config(
if let Some(next_manifest) = reload_with_internal_stable_rechecks(
&config_path,
&config_tx,
&log_tx,
detected_ip_v4,
detected_ip_v6,
&mut reload_state,
) {
)
.await
{
apply_watch_manifest(
inotify_watcher.as_mut(),
poll_watcher.as_mut(),
@@ -1540,6 +1637,35 @@ mod tests {
let _ = std::fs::remove_file(path);
}
#[tokio::test]
async fn reload_cycle_applies_after_single_external_event() {
let initial_tag = "10101010101010101010101010101010";
let final_tag = "20202020202020202020202020202020";
let path = temp_config_path("telemt_hot_reload_single_event");
write_reload_config(&path, Some(initial_tag), None);
let initial_cfg = Arc::new(ProxyConfig::load(&path).unwrap());
let initial_hash = ProxyConfig::load_with_metadata(&path).unwrap().rendered_hash;
let (config_tx, _config_rx) = watch::channel(initial_cfg.clone());
let (log_tx, _log_rx) = watch::channel(initial_cfg.general.log_level.clone());
let mut reload_state = ReloadState::new(Some(initial_hash));
write_reload_config(&path, Some(final_tag), None);
reload_with_internal_stable_rechecks(
&path,
&config_tx,
&log_tx,
None,
None,
&mut reload_state,
)
.await
.unwrap();
assert_eq!(config_tx.borrow().general.ad_tag.as_deref(), Some(final_tag));
let _ = std::fs::remove_file(path);
}
#[test]
fn reload_keeps_hot_apply_when_non_hot_fields_change() {
let initial_tag = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";

View File

@@ -6,8 +6,9 @@ use std::net::{IpAddr, SocketAddr};
use std::path::{Path, PathBuf};
use rand::Rng;
use serde::{Deserialize, Serialize};
use shadowsocks::config::ServerConfig as ShadowsocksServerConfig;
use tracing::warn;
use serde::{Serialize, Deserialize};
use crate::error::{ProxyError, Result};
@@ -122,13 +123,37 @@ fn sanitize_ad_tag(ad_tag: &mut Option<String>) {
};
if !is_valid_ad_tag(tag) {
warn!(
"Invalid general.ad_tag value, expected exactly 32 hex chars; ad_tag is disabled"
);
warn!("Invalid general.ad_tag value, expected exactly 32 hex chars; ad_tag is disabled");
*ad_tag = None;
}
}
fn validate_upstreams(config: &ProxyConfig) -> Result<()> {
let has_enabled_shadowsocks = config.upstreams.iter().any(|upstream| {
upstream.enabled && matches!(upstream.upstream_type, UpstreamType::Shadowsocks { .. })
});
if has_enabled_shadowsocks && config.general.use_middle_proxy {
return Err(ProxyError::Config(
"shadowsocks upstreams require general.use_middle_proxy = false".to_string(),
));
}
for upstream in &config.upstreams {
if let UpstreamType::Shadowsocks { url, .. } = &upstream.upstream_type {
let parsed = ShadowsocksServerConfig::from_url(url)
.map_err(|error| ProxyError::Config(format!("invalid shadowsocks url: {error}")))?;
if parsed.plugin().is_some() {
return Err(ProxyError::Config(
"shadowsocks plugins are not supported".to_string(),
));
}
}
}
Ok(())
}
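validate_upstreams enforces two rules for the new upstream type: Shadowsocks only works in direct mode (general.use_middle_proxy = false), and URLs carrying a plugin are rejected. A minimal upstream block that passes both checks, written as a raw string in the style of the tests below (the ss:// URL matches TEST_SHADOWSOCKS_URL; the censorship/access sections required for a full load are omitted):

const VALID_SS_UPSTREAM: &str = r#"
[general]
use_middle_proxy = false

[[upstreams]]
type = "shadowsocks"
url = "ss://2022-blake3-aes-256-gcm:MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDE=@127.0.0.1:8388"
"#;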
// ============= Main Config =============
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
@@ -180,7 +205,8 @@ impl ProxyConfig {
pub(crate) fn load_with_metadata<P: AsRef<Path>>(path: P) -> Result<LoadedConfig> {
let path = path.as_ref();
let content = std::fs::read_to_string(path).map_err(|e| ProxyError::Config(e.to_string()))?;
let content =
std::fs::read_to_string(path).map_err(|e| ProxyError::Config(e.to_string()))?;
let base_dir = path.parent().unwrap_or(Path::new("."));
let mut source_files = BTreeSet::new();
source_files.insert(normalize_config_path(path));
@@ -207,15 +233,17 @@ impl ProxyConfig {
.map(|table| table.contains_key("stun_servers"))
.unwrap_or(false);
let mut config: ProxyConfig =
parsed_toml.try_into().map_err(|e| ProxyError::Config(e.to_string()))?;
let mut config: ProxyConfig = parsed_toml
.try_into()
.map_err(|e| ProxyError::Config(e.to_string()))?;
if !update_every_is_explicit && (legacy_secret_is_explicit || legacy_config_is_explicit) {
config.general.update_every = None;
}
let legacy_nat_stun = config.general.middle_proxy_nat_stun.take();
let legacy_nat_stun_servers = std::mem::take(&mut config.general.middle_proxy_nat_stun_servers);
let legacy_nat_stun_servers =
std::mem::take(&mut config.general.middle_proxy_nat_stun_servers);
let legacy_nat_stun_used = legacy_nat_stun.is_some() || !legacy_nat_stun_servers.is_empty();
if stun_servers_is_explicit {
let mut explicit_stun_servers = Vec::new();
@@ -225,7 +253,9 @@ impl ProxyConfig {
config.network.stun_servers = explicit_stun_servers;
if legacy_nat_stun_used {
warn!("general.middle_proxy_nat_stun and general.middle_proxy_nat_stun_servers are ignored because network.stun_servers is explicitly set");
warn!(
"general.middle_proxy_nat_stun and general.middle_proxy_nat_stun_servers are ignored because network.stun_servers is explicitly set"
);
}
} else {
// Keep the default STUN pool unless network.stun_servers is explicitly overridden.
@@ -240,7 +270,9 @@ impl ProxyConfig {
config.network.stun_servers = unified_stun_servers;
if legacy_nat_stun_used {
warn!("general.middle_proxy_nat_stun and general.middle_proxy_nat_stun_servers are deprecated; use network.stun_servers");
warn!(
"general.middle_proxy_nat_stun and general.middle_proxy_nat_stun_servers are deprecated; use network.stun_servers"
);
}
}
@@ -346,6 +378,12 @@ impl ProxyConfig {
));
}
if config.general.me_c2me_send_timeout_ms > 60_000 {
return Err(ProxyError::Config(
"general.me_c2me_send_timeout_ms must be within [0, 60000]".to_string(),
));
}
if config.general.me_reader_route_data_wait_ms > 20 {
return Err(ProxyError::Config(
"general.me_reader_route_data_wait_ms must be within [0, 20]".to_string(),
@@ -372,13 +410,15 @@ impl ProxyConfig {
if !(4096..=1024 * 1024).contains(&config.general.direct_relay_copy_buf_c2s_bytes) {
return Err(ProxyError::Config(
"general.direct_relay_copy_buf_c2s_bytes must be within [4096, 1048576]".to_string(),
"general.direct_relay_copy_buf_c2s_bytes must be within [4096, 1048576]"
.to_string(),
));
}
if !(8192..=2 * 1024 * 1024).contains(&config.general.direct_relay_copy_buf_s2c_bytes) {
return Err(ProxyError::Config(
"general.direct_relay_copy_buf_s2c_bytes must be within [8192, 2097152]".to_string(),
"general.direct_relay_copy_buf_s2c_bytes must be within [8192, 2097152]"
.to_string(),
));
}
@@ -606,6 +646,11 @@ impl ProxyConfig {
"general.me_route_backpressure_base_timeout_ms must be > 0".to_string(),
));
}
if config.general.me_route_backpressure_base_timeout_ms > 5000 {
return Err(ProxyError::Config(
"general.me_route_backpressure_base_timeout_ms must be within [1, 5000]".to_string(),
));
}
if config.general.me_route_backpressure_high_timeout_ms
< config.general.me_route_backpressure_base_timeout_ms
@@ -614,10 +659,16 @@ impl ProxyConfig {
"general.me_route_backpressure_high_timeout_ms must be >= general.me_route_backpressure_base_timeout_ms".to_string(),
));
}
if config.general.me_route_backpressure_high_timeout_ms > 5000 {
return Err(ProxyError::Config(
"general.me_route_backpressure_high_timeout_ms must be within [1, 5000]".to_string(),
));
}
if !(1..=100).contains(&config.general.me_route_backpressure_high_watermark_pct) {
return Err(ProxyError::Config(
"general.me_route_backpressure_high_watermark_pct must be within [1, 100]".to_string(),
"general.me_route_backpressure_high_watermark_pct must be within [1, 100]"
.to_string(),
));
}
@@ -627,6 +678,18 @@ impl ProxyConfig {
));
}
if !(50..=60_000).contains(&config.general.me_route_hybrid_max_wait_ms) {
return Err(ProxyError::Config(
"general.me_route_hybrid_max_wait_ms must be within [50, 60000]".to_string(),
));
}
if config.general.me_route_blocking_send_timeout_ms > 5000 {
return Err(ProxyError::Config(
"general.me_route_blocking_send_timeout_ms must be within [0, 5000]".to_string(),
));
}
if !(2..=4).contains(&config.general.me_writer_pick_sample_size) {
return Err(ProxyError::Config(
"general.me_writer_pick_sample_size must be within [2, 4]".to_string(),
@@ -687,6 +750,12 @@ impl ProxyConfig {
));
}
if config.server.accept_permit_timeout_ms > 60_000 {
return Err(ProxyError::Config(
"server.accept_permit_timeout_ms must be within [0, 60000]".to_string(),
));
}
if config.general.effective_me_pool_force_close_secs() > 0
&& config.general.effective_me_pool_force_close_secs()
< config.general.me_pool_drain_ttl_secs
@@ -745,6 +814,9 @@ impl ProxyConfig {
config.censorship.mask_host = Some(config.censorship.tls_domain.clone());
}
// Normalize optional TLS fetch scope: whitespace-only values disable scoped routing.
config.censorship.tls_fetch_scope = config.censorship.tls_fetch_scope.trim().to_string();
// Merge primary + extra TLS domains, deduplicate (primary always first).
if !config.censorship.tls_domains.is_empty() {
let mut all = Vec::with_capacity(1 + config.censorship.tls_domains.len());
@@ -779,11 +851,15 @@ impl ProxyConfig {
crate::network::dns_overrides::validate_entries(&config.network.dns_overrides)?;
if config.general.use_middle_proxy && config.network.ipv6 == Some(true) {
warn!("IPv6 with Middle Proxy is experimental and may cause KDF address mismatch; consider disabling IPv6 or ME");
warn!(
"IPv6 with Middle Proxy is experimental and may cause KDF address mismatch; consider disabling IPv6 or ME"
);
}
// Random fake_cert_len only when default is in use.
if !config.censorship.tls_emulation && config.censorship.fake_cert_len == default_fake_cert_len() {
if !config.censorship.tls_emulation
&& config.censorship.fake_cert_len == default_fake_cert_len()
{
config.censorship.fake_cert_len = rand::rng().gen_range(1024..4096);
}
@@ -793,8 +869,7 @@ impl ProxyConfig {
let listen_tcp = config.server.listen_tcp.unwrap_or_else(|| {
if config.server.listen_unix_sock.is_some() {
// Unix socket present: TCP only if user explicitly set addresses or listeners.
config.server.listen_addr_ipv4.is_some()
|| !config.server.listeners.is_empty()
config.server.listen_addr_ipv4.is_some() || !config.server.listeners.is_empty()
} else {
true
}
@@ -802,7 +877,9 @@ impl ProxyConfig {
// Migration: Populate listeners if empty (skip when listen_tcp = false).
if config.server.listeners.is_empty() && listen_tcp {
let ipv4_str = config.server.listen_addr_ipv4
let ipv4_str = config
.server
.listen_addr_ipv4
.as_deref()
.unwrap_or("0.0.0.0");
if let Ok(ipv4) = ipv4_str.parse::<IpAddr>() {
@@ -844,7 +921,10 @@ impl ProxyConfig {
// Migration: Populate upstreams if empty (Default Direct).
if config.upstreams.is_empty() {
config.upstreams.push(UpstreamConfig {
upstream_type: UpstreamType::Direct { interface: None, bind_addresses: None },
upstream_type: UpstreamType::Direct {
interface: None,
bind_addresses: None,
},
weight: 1,
enabled: true,
scopes: String::new(),
@@ -858,6 +938,8 @@ impl ProxyConfig {
.entry("203".to_string())
.or_insert_with(|| vec!["91.105.192.100:443".to_string()]);
validate_upstreams(&config)?;
Ok(LoadedConfig {
config,
source_files: source_files.into_iter().collect(),
@@ -904,6 +986,9 @@ impl ProxyConfig {
mod tests {
use super::*;
const TEST_SHADOWSOCKS_URL: &str =
"ss://2022-blake3-aes-256-gcm:MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDE=@127.0.0.1:8388";
#[test]
fn serde_defaults_remain_unchanged_for_present_sections() {
let toml = r#"
@@ -933,10 +1018,7 @@ mod tests {
cfg.general.me_init_retry_attempts,
default_me_init_retry_attempts()
);
assert_eq!(
cfg.general.me2dc_fallback,
default_me2dc_fallback()
);
assert_eq!(cfg.general.me2dc_fallback, default_me2dc_fallback());
assert_eq!(
cfg.general.proxy_config_v4_cache_path,
default_proxy_config_v4_cache_path()
@@ -1245,11 +1327,12 @@ mod tests {
let path = dir.join("telemt_dc_override_test.toml");
std::fs::write(&path, toml).unwrap();
let cfg = ProxyConfig::load(&path).unwrap();
assert!(cfg
.dc_overrides
.get("203")
.map(|v| v.contains(&"91.105.192.100:443".to_string()))
.unwrap_or(false));
assert!(
cfg.dc_overrides
.get("203")
.map(|v| v.contains(&"91.105.192.100:443".to_string()))
.unwrap_or(false)
);
let _ = std::fs::remove_file(path);
}
@@ -1436,11 +1519,9 @@ mod tests {
let path = dir.join("telemt_me_adaptive_floor_min_writers_out_of_range_test.toml");
std::fs::write(&path, toml).unwrap();
let err = ProxyConfig::load(&path).unwrap_err().to_string();
assert!(
err.contains(
"general.me_adaptive_floor_min_writers_single_endpoint must be within [1, 32]"
)
);
assert!(err.contains(
"general.me_adaptive_floor_min_writers_single_endpoint must be within [1, 32]"
));
let _ = std::fs::remove_file(path);
}
@@ -1600,6 +1681,47 @@ mod tests {
let _ = std::fs::remove_file(path_valid);
}
#[test]
fn me_route_backpressure_base_timeout_ms_out_of_range_is_rejected() {
let toml = r#"
[general]
me_route_backpressure_base_timeout_ms = 5001
[censorship]
tls_domain = "example.com"
[access.users]
user = "00000000000000000000000000000000"
"#;
let dir = std::env::temp_dir();
let path = dir.join("telemt_me_route_backpressure_base_timeout_ms_out_of_range_test.toml");
std::fs::write(&path, toml).unwrap();
let err = ProxyConfig::load(&path).unwrap_err().to_string();
assert!(err.contains("general.me_route_backpressure_base_timeout_ms must be within [1, 5000]"));
let _ = std::fs::remove_file(path);
}
#[test]
fn me_route_backpressure_high_timeout_ms_out_of_range_is_rejected() {
let toml = r#"
[general]
me_route_backpressure_base_timeout_ms = 100
me_route_backpressure_high_timeout_ms = 5001
[censorship]
tls_domain = "example.com"
[access.users]
user = "00000000000000000000000000000000"
"#;
let dir = std::env::temp_dir();
let path = dir.join("telemt_me_route_backpressure_high_timeout_ms_out_of_range_test.toml");
std::fs::write(&path, toml).unwrap();
let err = ProxyConfig::load(&path).unwrap_err().to_string();
assert!(err.contains("general.me_route_backpressure_high_timeout_ms must be within [1, 5000]"));
let _ = std::fs::remove_file(path);
}
#[test]
fn me_route_no_writer_wait_ms_out_of_range_is_rejected() {
let toml = r#"
@@ -1962,6 +2084,45 @@ mod tests {
let _ = std::fs::remove_file(path);
}
#[test]
fn force_close_default_matches_drain_ttl() {
let toml = r#"
[censorship]
tls_domain = "example.com"
[access.users]
user = "00000000000000000000000000000000"
"#;
let dir = std::env::temp_dir();
let path = dir.join("telemt_force_close_default_test.toml");
std::fs::write(&path, toml).unwrap();
let cfg = ProxyConfig::load(&path).unwrap();
assert_eq!(cfg.general.me_reinit_drain_timeout_secs, 90);
assert_eq!(cfg.general.effective_me_pool_force_close_secs(), 90);
let _ = std::fs::remove_file(path);
}
#[test]
fn force_close_zero_uses_runtime_safety_fallback() {
let toml = r#"
[general]
me_reinit_drain_timeout_secs = 0
[censorship]
tls_domain = "example.com"
[access.users]
user = "00000000000000000000000000000000"
"#;
let dir = std::env::temp_dir();
let path = dir.join("telemt_force_close_zero_fallback_test.toml");
std::fs::write(&path, toml).unwrap();
let cfg = ProxyConfig::load(&path).unwrap();
assert_eq!(cfg.general.me_reinit_drain_timeout_secs, 0);
assert_eq!(cfg.general.effective_me_pool_force_close_secs(), 300);
let _ = std::fs::remove_file(path);
}
#[test]
fn force_close_bumped_when_below_drain_ttl() {
let toml = r#"
@@ -1983,6 +2144,59 @@ mod tests {
let _ = std::fs::remove_file(path);
}
#[test]
fn tls_fetch_scope_default_is_empty() {
let toml = r#"
[censorship]
tls_domain = "example.com"
[access.users]
user = "00000000000000000000000000000000"
"#;
let dir = std::env::temp_dir();
let path = dir.join("telemt_tls_fetch_scope_default_test.toml");
std::fs::write(&path, toml).unwrap();
let cfg = ProxyConfig::load(&path).unwrap();
assert!(cfg.censorship.tls_fetch_scope.is_empty());
let _ = std::fs::remove_file(path);
}
#[test]
fn tls_fetch_scope_is_trimmed_during_load() {
let toml = r#"
[censorship]
tls_domain = "example.com"
tls_fetch_scope = " me "
[access.users]
user = "00000000000000000000000000000000"
"#;
let dir = std::env::temp_dir();
let path = dir.join("telemt_tls_fetch_scope_trim_test.toml");
std::fs::write(&path, toml).unwrap();
let cfg = ProxyConfig::load(&path).unwrap();
assert_eq!(cfg.censorship.tls_fetch_scope, "me");
let _ = std::fs::remove_file(path);
}
#[test]
fn tls_fetch_scope_whitespace_becomes_empty() {
let toml = r#"
[censorship]
tls_domain = "example.com"
tls_fetch_scope = " "
[access.users]
user = "00000000000000000000000000000000"
"#;
let dir = std::env::temp_dir();
let path = dir.join("telemt_tls_fetch_scope_blank_test.toml");
std::fs::write(&path, toml).unwrap();
let cfg = ProxyConfig::load(&path).unwrap();
assert!(cfg.censorship.tls_fetch_scope.is_empty());
let _ = std::fs::remove_file(path);
}
#[test]
fn invalid_ad_tag_is_disabled_during_load() {
let toml = r#"
@@ -2026,6 +2240,124 @@ mod tests {
let _ = std::fs::remove_file(path);
}
#[test]
fn shadowsocks_upstream_url_loads_successfully() {
let toml = format!(
r#"
[general]
use_middle_proxy = false
[censorship]
tls_domain = "example.com"
[access.users]
user = "00000000000000000000000000000000"
[[upstreams]]
type = "shadowsocks"
url = "{url}"
interface = "127.0.0.2"
"#,
url = TEST_SHADOWSOCKS_URL,
);
let dir = std::env::temp_dir();
let path = dir.join("telemt_shadowsocks_valid_test.toml");
std::fs::write(&path, toml).unwrap();
let cfg = ProxyConfig::load(&path).unwrap();
assert!(matches!(
&cfg.upstreams[0].upstream_type,
UpstreamType::Shadowsocks { url, interface }
if url == TEST_SHADOWSOCKS_URL && interface.as_deref() == Some("127.0.0.2")
));
let _ = std::fs::remove_file(path);
}
#[test]
fn shadowsocks_requires_direct_mode() {
let toml = format!(
r#"
[general]
use_middle_proxy = true
[censorship]
tls_domain = "example.com"
[access.users]
user = "00000000000000000000000000000000"
[[upstreams]]
type = "shadowsocks"
url = "{url}"
"#,
url = TEST_SHADOWSOCKS_URL,
);
let dir = std::env::temp_dir();
let path = dir.join("telemt_shadowsocks_me_reject_test.toml");
std::fs::write(&path, toml).unwrap();
let err = ProxyConfig::load(&path).unwrap_err().to_string();
assert!(err.contains("shadowsocks upstreams require general.use_middle_proxy = false"));
let _ = std::fs::remove_file(path);
}
#[test]
fn invalid_shadowsocks_url_is_rejected() {
let toml = r#"
[general]
use_middle_proxy = false
[censorship]
tls_domain = "example.com"
[access.users]
user = "00000000000000000000000000000000"
[[upstreams]]
type = "shadowsocks"
url = "not-a-valid-ss-url"
"#;
let dir = std::env::temp_dir();
let path = dir.join("telemt_shadowsocks_invalid_url_test.toml");
std::fs::write(&path, toml).unwrap();
let err = ProxyConfig::load(&path).unwrap_err().to_string();
assert!(err.contains("invalid shadowsocks url"));
let _ = std::fs::remove_file(path);
}
#[test]
fn shadowsocks_plugins_are_rejected() {
let toml = format!(
r#"
[general]
use_middle_proxy = false
[censorship]
tls_domain = "example.com"
[access.users]
user = "00000000000000000000000000000000"
[[upstreams]]
type = "shadowsocks"
url = "{url}?plugin=obfs-local%3Bobfs%3Dhttp"
"#,
url = TEST_SHADOWSOCKS_URL,
);
let dir = std::env::temp_dir();
let path = dir.join("telemt_shadowsocks_plugin_reject_test.toml");
std::fs::write(&path, toml).unwrap();
let err = ProxyConfig::load(&path).unwrap_err().to_string();
assert!(err.contains("shadowsocks plugins are not supported"));
let _ = std::fs::remove_file(path);
}
#[test]
fn invalid_user_ad_tag_reports_access_user_ad_tags_key() {
let toml = r#"

View File

@@ -135,8 +135,8 @@ impl MeSocksKdfPolicy {
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
#[serde(rename_all = "lowercase")]
pub enum MeBindStaleMode {
Never,
#[default]
Never,
Ttl,
Always,
}
@@ -462,6 +462,11 @@ pub struct GeneralConfig {
#[serde(default = "default_me_c2me_channel_capacity")]
pub me_c2me_channel_capacity: usize,
/// Maximum wait in milliseconds for enqueueing C2ME commands when the queue is full.
/// `0` keeps legacy unbounded wait behavior.
#[serde(default = "default_me_c2me_send_timeout_ms")]
pub me_c2me_send_timeout_ms: u64,
/// Bounded wait in milliseconds for routing ME DATA to the per-connection queue.
/// `0` keeps legacy no-wait behavior.
#[serde(default = "default_me_reader_route_data_wait_ms")]
@@ -716,6 +721,15 @@ pub struct GeneralConfig {
#[serde(default = "default_me_route_no_writer_wait_ms")]
pub me_route_no_writer_wait_ms: u64,
/// Maximum cumulative wait in milliseconds for hybrid no-writer mode before failfast.
#[serde(default = "default_me_route_hybrid_max_wait_ms")]
pub me_route_hybrid_max_wait_ms: u64,
/// Maximum wait in milliseconds for blocking ME writer channel send fallback.
/// `0` keeps legacy unbounded wait behavior.
#[serde(default = "default_me_route_blocking_send_timeout_ms")]
pub me_route_blocking_send_timeout_ms: u64,
/// Number of inline recovery attempts in legacy mode.
#[serde(default = "default_me_route_inline_recovery_attempts")]
pub me_route_inline_recovery_attempts: u32,
@@ -798,6 +812,10 @@ pub struct GeneralConfig {
#[serde(default = "default_me_pool_drain_ttl_secs")]
pub me_pool_drain_ttl_secs: u64,
/// Force-remove any draining writer on the next cleanup tick, regardless of age/deadline.
#[serde(default = "default_me_instadrain")]
pub me_instadrain: bool,
/// Maximum allowed number of draining ME writers before the oldest ones are force-closed in batches.
/// Set to 0 to disable threshold-based draining cleanup and keep timeout-only behavior.
#[serde(default = "default_me_pool_drain_threshold")]
@@ -837,7 +855,7 @@ pub struct GeneralConfig {
pub me_pool_min_fresh_ratio: f32,
/// Drain timeout in seconds for stale ME writers after endpoint map changes.
/// Set to 0 to keep stale writers draining indefinitely (no force-close).
/// Set to 0 to use the runtime safety fallback timeout.
#[serde(default = "default_me_reinit_drain_timeout_secs")]
pub me_reinit_drain_timeout_secs: u64,
@@ -921,6 +939,7 @@ impl Default for GeneralConfig {
me_writer_cmd_channel_capacity: default_me_writer_cmd_channel_capacity(),
me_route_channel_capacity: default_me_route_channel_capacity(),
me_c2me_channel_capacity: default_me_c2me_channel_capacity(),
me_c2me_send_timeout_ms: default_me_c2me_send_timeout_ms(),
me_reader_route_data_wait_ms: default_me_reader_route_data_wait_ms(),
me_d2c_flush_batch_max_frames: default_me_d2c_flush_batch_max_frames(),
me_d2c_flush_batch_max_bytes: default_me_d2c_flush_batch_max_bytes(),
@@ -936,24 +955,38 @@ impl Default for GeneralConfig {
me_reconnect_backoff_cap_ms: default_reconnect_backoff_cap_ms(),
me_reconnect_fast_retry_count: default_me_reconnect_fast_retry_count(),
me_single_endpoint_shadow_writers: default_me_single_endpoint_shadow_writers(),
me_single_endpoint_outage_mode_enabled: default_me_single_endpoint_outage_mode_enabled(),
me_single_endpoint_outage_disable_quarantine: default_me_single_endpoint_outage_disable_quarantine(),
me_single_endpoint_outage_backoff_min_ms: default_me_single_endpoint_outage_backoff_min_ms(),
me_single_endpoint_outage_backoff_max_ms: default_me_single_endpoint_outage_backoff_max_ms(),
me_single_endpoint_shadow_rotate_every_secs: default_me_single_endpoint_shadow_rotate_every_secs(),
me_single_endpoint_outage_mode_enabled: default_me_single_endpoint_outage_mode_enabled(
),
me_single_endpoint_outage_disable_quarantine:
default_me_single_endpoint_outage_disable_quarantine(),
me_single_endpoint_outage_backoff_min_ms:
default_me_single_endpoint_outage_backoff_min_ms(),
me_single_endpoint_outage_backoff_max_ms:
default_me_single_endpoint_outage_backoff_max_ms(),
me_single_endpoint_shadow_rotate_every_secs:
default_me_single_endpoint_shadow_rotate_every_secs(),
me_floor_mode: MeFloorMode::default(),
me_adaptive_floor_idle_secs: default_me_adaptive_floor_idle_secs(),
me_adaptive_floor_min_writers_single_endpoint: default_me_adaptive_floor_min_writers_single_endpoint(),
me_adaptive_floor_min_writers_multi_endpoint: default_me_adaptive_floor_min_writers_multi_endpoint(),
me_adaptive_floor_min_writers_single_endpoint:
default_me_adaptive_floor_min_writers_single_endpoint(),
me_adaptive_floor_min_writers_multi_endpoint:
default_me_adaptive_floor_min_writers_multi_endpoint(),
me_adaptive_floor_recover_grace_secs: default_me_adaptive_floor_recover_grace_secs(),
me_adaptive_floor_writers_per_core_total: default_me_adaptive_floor_writers_per_core_total(),
me_adaptive_floor_writers_per_core_total:
default_me_adaptive_floor_writers_per_core_total(),
me_adaptive_floor_cpu_cores_override: default_me_adaptive_floor_cpu_cores_override(),
me_adaptive_floor_max_extra_writers_single_per_core: default_me_adaptive_floor_max_extra_writers_single_per_core(),
me_adaptive_floor_max_extra_writers_multi_per_core: default_me_adaptive_floor_max_extra_writers_multi_per_core(),
me_adaptive_floor_max_active_writers_per_core: default_me_adaptive_floor_max_active_writers_per_core(),
me_adaptive_floor_max_warm_writers_per_core: default_me_adaptive_floor_max_warm_writers_per_core(),
me_adaptive_floor_max_active_writers_global: default_me_adaptive_floor_max_active_writers_global(),
me_adaptive_floor_max_warm_writers_global: default_me_adaptive_floor_max_warm_writers_global(),
me_adaptive_floor_max_extra_writers_single_per_core:
default_me_adaptive_floor_max_extra_writers_single_per_core(),
me_adaptive_floor_max_extra_writers_multi_per_core:
default_me_adaptive_floor_max_extra_writers_multi_per_core(),
me_adaptive_floor_max_active_writers_per_core:
default_me_adaptive_floor_max_active_writers_per_core(),
me_adaptive_floor_max_warm_writers_per_core:
default_me_adaptive_floor_max_warm_writers_per_core(),
me_adaptive_floor_max_active_writers_global:
default_me_adaptive_floor_max_active_writers_global(),
me_adaptive_floor_max_warm_writers_global:
default_me_adaptive_floor_max_warm_writers_global(),
upstream_connect_retry_attempts: default_upstream_connect_retry_attempts(),
upstream_connect_retry_backoff_ms: default_upstream_connect_retry_backoff_ms(),
upstream_connect_budget_ms: default_upstream_connect_budget_ms(),
@@ -968,13 +1001,16 @@ impl Default for GeneralConfig {
me_socks_kdf_policy: MeSocksKdfPolicy::Strict,
me_route_backpressure_base_timeout_ms: default_me_route_backpressure_base_timeout_ms(),
me_route_backpressure_high_timeout_ms: default_me_route_backpressure_high_timeout_ms(),
me_route_backpressure_high_watermark_pct: default_me_route_backpressure_high_watermark_pct(),
me_route_backpressure_high_watermark_pct:
default_me_route_backpressure_high_watermark_pct(),
me_health_interval_ms_unhealthy: default_me_health_interval_ms_unhealthy(),
me_health_interval_ms_healthy: default_me_health_interval_ms_healthy(),
me_admission_poll_ms: default_me_admission_poll_ms(),
me_warn_rate_limit_ms: default_me_warn_rate_limit_ms(),
me_route_no_writer_mode: MeRouteNoWriterMode::default(),
me_route_no_writer_wait_ms: default_me_route_no_writer_wait_ms(),
me_route_hybrid_max_wait_ms: default_me_route_hybrid_max_wait_ms(),
me_route_blocking_send_timeout_ms: default_me_route_blocking_send_timeout_ms(),
me_route_inline_recovery_attempts: default_me_route_inline_recovery_attempts(),
me_route_inline_recovery_wait_ms: default_me_route_inline_recovery_wait_ms(),
links: LinksConfig::default(),
@@ -992,7 +1028,8 @@ impl Default for GeneralConfig {
me_hardswap_warmup_delay_min_ms: default_me_hardswap_warmup_delay_min_ms(),
me_hardswap_warmup_delay_max_ms: default_me_hardswap_warmup_delay_max_ms(),
me_hardswap_warmup_extra_passes: default_me_hardswap_warmup_extra_passes(),
me_hardswap_warmup_pass_backoff_base_ms: default_me_hardswap_warmup_pass_backoff_base_ms(),
me_hardswap_warmup_pass_backoff_base_ms:
default_me_hardswap_warmup_pass_backoff_base_ms(),
me_config_stable_snapshots: default_me_config_stable_snapshots(),
me_config_apply_cooldown_secs: default_me_config_apply_cooldown_secs(),
me_snapshot_require_http_2xx: default_me_snapshot_require_http_2xx(),
@@ -1003,6 +1040,7 @@ impl Default for GeneralConfig {
me_secret_atomic_snapshot: default_me_secret_atomic_snapshot(),
proxy_secret_len_max: default_proxy_secret_len_max(),
me_pool_drain_ttl_secs: default_me_pool_drain_ttl_secs(),
me_instadrain: default_me_instadrain(),
me_pool_drain_threshold: default_me_pool_drain_threshold(),
me_pool_drain_soft_evict_enabled: default_me_pool_drain_soft_evict_enabled(),
me_pool_drain_soft_evict_grace_secs: default_me_pool_drain_soft_evict_grace_secs(),
@@ -1035,8 +1073,10 @@ impl GeneralConfig {
/// Resolve the active updater interval for ME infrastructure refresh tasks.
/// `update_every` has priority, otherwise legacy proxy_*_auto_reload_secs are used.
pub fn effective_update_every_secs(&self) -> u64 {
self.update_every
.unwrap_or_else(|| self.proxy_secret_auto_reload_secs.min(self.proxy_config_auto_reload_secs))
self.update_every.unwrap_or_else(|| {
self.proxy_secret_auto_reload_secs
.min(self.proxy_config_auto_reload_secs)
})
}
/// Resolve periodic zero-downtime reinit interval for ME writers.
@@ -1046,8 +1086,13 @@ impl GeneralConfig {
/// Resolve force-close timeout for stale writers.
/// `me_reinit_drain_timeout_secs` remains backward-compatible alias.
/// A configured `0` uses the runtime safety fallback (300s).
pub fn effective_me_pool_force_close_secs(&self) -> u64 {
self.me_reinit_drain_timeout_secs
if self.me_reinit_drain_timeout_secs == 0 {
300
} else {
self.me_reinit_drain_timeout_secs
}
}
}
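effective_me_pool_force_close_secs now reads a configured 0 as "use the 300 s runtime safety fallback" rather than "never force-close", matching the updated doc comment above. A behavior sketch:

// Mirror of the fallback rule above, for illustration.
fn effective_force_close_secs(configured: u64) -> u64 {
    if configured == 0 { 300 } else { configured }
}

fn main() {
    assert_eq!(effective_force_close_secs(0), 300); // safety fallback
    assert_eq!(effective_force_close_secs(90), 90); // explicit value wins
}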
@@ -1207,6 +1252,11 @@ pub struct ServerConfig {
/// 0 means unlimited.
#[serde(default = "default_server_max_connections")]
pub max_connections: u32,
/// Maximum wait in milliseconds while acquiring a connection slot permit.
/// `0` keeps legacy unbounded wait behavior.
#[serde(default = "default_accept_permit_timeout_ms")]
pub accept_permit_timeout_ms: u64,
}
impl Default for ServerConfig {
@@ -1226,6 +1276,7 @@ impl Default for ServerConfig {
api: ApiConfig::default(),
listeners: Vec::new(),
max_connections: default_server_max_connections(),
accept_permit_timeout_ms: default_accept_permit_timeout_ms(),
}
}
}
@@ -1275,6 +1326,11 @@ pub struct AntiCensorshipConfig {
#[serde(default)]
pub tls_domains: Vec<String>,
/// Upstream scope used for TLS front metadata fetches.
/// An empty value keeps the default upstream routing behavior.
#[serde(default = "default_tls_fetch_scope")]
pub tls_fetch_scope: String,
#[serde(default = "default_true")]
pub mask: bool,
@@ -1332,6 +1388,7 @@ impl Default for AntiCensorshipConfig {
Self {
tls_domain: default_tls_domain(),
tls_domains: Vec::new(),
tls_fetch_scope: default_tls_fetch_scope(),
mask: default_true(),
mask_host: None,
mask_port: default_mask_port(),
@@ -1437,6 +1494,11 @@ pub enum UpstreamType {
#[serde(default)]
password: Option<String>,
},
Shadowsocks {
url: String,
#[serde(default)]
interface: Option<String>,
},
}
#[derive(Debug, Clone, Serialize, Deserialize)]
@@ -1517,7 +1579,10 @@ impl ShowLink {
}
impl Serialize for ShowLink {
fn serialize<S: serde::Serializer>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> {
fn serialize<S: serde::Serializer>(
&self,
serializer: S,
) -> std::result::Result<S::Ok, S::Error> {
match self {
ShowLink::None => Vec::<String>::new().serialize(serializer),
ShowLink::All => serializer.serialize_str("*"),
@@ -1527,7 +1592,9 @@ impl Serialize for ShowLink {
}
impl<'de> Deserialize<'de> for ShowLink {
fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> std::result::Result<Self, D::Error> {
fn deserialize<D: serde::Deserializer<'de>>(
deserializer: D,
) -> std::result::Result<Self, D::Error> {
use serde::de;
struct ShowLinkVisitor;
@@ -1543,14 +1610,14 @@ impl<'de> Deserialize<'de> for ShowLink {
if v == "*" {
Ok(ShowLink::All)
} else {
Err(de::Error::invalid_value(
de::Unexpected::Str(v),
&r#""*""#,
))
Err(de::Error::invalid_value(de::Unexpected::Str(v), &r#""*""#))
}
}
fn visit_seq<A: de::SeqAccess<'de>>(self, mut seq: A) -> std::result::Result<ShowLink, A::Error> {
fn visit_seq<A: de::SeqAccess<'de>>(
self,
mut seq: A,
) -> std::result::Result<ShowLink, A::Error> {
let mut names = Vec::new();
while let Some(name) = seq.next_element::<String>()? {
names.push(name);

View File

@@ -205,6 +205,7 @@ pub(crate) fn format_uptime(total_secs: u64) -> String {
format!("{} / {} seconds", parts.join(", "), total_secs)
}
#[allow(dead_code)]
pub(crate) async fn wait_until_admission_open(admission_rx: &mut watch::Receiver<bool>) -> bool {
loop {
if *admission_rx.borrow() {

View File

@@ -24,7 +24,7 @@ use crate::transport::{
ListenOptions, UpstreamManager, create_listener, find_listener_processes,
};
use super::helpers::{is_expected_handshake_eof, print_proxy_links, wait_until_admission_open};
use super::helpers::{is_expected_handshake_eof, print_proxy_links};
pub(crate) struct BoundListeners {
pub(crate) listeners: Vec<(TcpListener, bool)>,
@@ -195,7 +195,7 @@ pub(crate) async fn bind_listeners(
has_unix_listener = true;
let mut config_rx_unix: watch::Receiver<Arc<ProxyConfig>> = config_rx.clone();
let mut admission_rx_unix = admission_rx.clone();
let admission_rx_unix = admission_rx.clone();
let stats = stats.clone();
let upstream_manager = upstream_manager.clone();
let replay_checker = replay_checker.clone();
@@ -212,17 +212,44 @@ pub(crate) async fn bind_listeners(
let unix_conn_counter = Arc::new(std::sync::atomic::AtomicU64::new(1));
loop {
if !wait_until_admission_open(&mut admission_rx_unix).await {
warn!("Conditional-admission gate channel closed for unix listener");
break;
}
match unix_listener.accept().await {
Ok((stream, _)) => {
let permit = match max_connections_unix.clone().acquire_owned().await {
Ok(permit) => permit,
Err(_) => {
error!("Connection limiter is closed");
break;
if !*admission_rx_unix.borrow() {
drop(stream);
continue;
}
let accept_permit_timeout_ms = config_rx_unix
.borrow()
.server
.accept_permit_timeout_ms;
let permit = if accept_permit_timeout_ms == 0 {
match max_connections_unix.clone().acquire_owned().await {
Ok(permit) => permit,
Err(_) => {
error!("Connection limiter is closed");
break;
}
}
} else {
match tokio::time::timeout(
Duration::from_millis(accept_permit_timeout_ms),
max_connections_unix.clone().acquire_owned(),
)
.await
{
Ok(Ok(permit)) => permit,
Ok(Err(_)) => {
error!("Connection limiter is closed");
break;
}
Err(_) => {
debug!(
timeout_ms = accept_permit_timeout_ms,
"Dropping accepted unix connection: permit wait timeout"
);
drop(stream);
continue;
}
}
};
let conn_id =
@@ -312,7 +339,7 @@ pub(crate) fn spawn_tcp_accept_loops(
) {
for (listener, listener_proxy_protocol) in listeners {
let mut config_rx: watch::Receiver<Arc<ProxyConfig>> = config_rx.clone();
let mut admission_rx_tcp = admission_rx.clone();
let admission_rx_tcp = admission_rx.clone();
let stats = stats.clone();
let upstream_manager = upstream_manager.clone();
let replay_checker = replay_checker.clone();
@@ -327,17 +354,46 @@ pub(crate) fn spawn_tcp_accept_loops(
tokio::spawn(async move {
loop {
if !wait_until_admission_open(&mut admission_rx_tcp).await {
warn!("Conditional-admission gate channel closed for tcp listener");
break;
}
match listener.accept().await {
Ok((stream, peer_addr)) => {
let permit = match max_connections_tcp.clone().acquire_owned().await {
Ok(permit) => permit,
Err(_) => {
error!("Connection limiter is closed");
break;
if !*admission_rx_tcp.borrow() {
debug!(peer = %peer_addr, "Admission gate closed, dropping connection");
drop(stream);
continue;
}
let accept_permit_timeout_ms = config_rx
.borrow()
.server
.accept_permit_timeout_ms;
let permit = if accept_permit_timeout_ms == 0 {
match max_connections_tcp.clone().acquire_owned().await {
Ok(permit) => permit,
Err(_) => {
error!("Connection limiter is closed");
break;
}
}
} else {
match tokio::time::timeout(
Duration::from_millis(accept_permit_timeout_ms),
max_connections_tcp.clone().acquire_owned(),
)
.await
{
Ok(Ok(permit)) => permit,
Ok(Err(_)) => {
error!("Connection limiter is closed");
break;
}
Err(_) => {
debug!(
peer = %peer_addr,
timeout_ms = accept_permit_timeout_ms,
"Dropping accepted connection: permit wait timeout"
);
drop(stream);
continue;
}
}
};
let config = config_rx.borrow_and_update().clone();
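Both accept loops now share the same admission shape: re-check the gate after accept, then bound the semaphore wait by server.accept_permit_timeout_ms, where 0 keeps the legacy unbounded wait. An isolated sketch of the bounded acquire (simplified: it maps both "limiter closed" and "timed out" to None, whereas the loops above log and break on the former):

use std::sync::Arc;
use std::time::Duration;
use tokio::sync::{OwnedSemaphorePermit, Semaphore};

// None means the permit was not obtained and the freshly accepted
// connection should be dropped.
async fn acquire_permit(
    limiter: Arc<Semaphore>,
    timeout_ms: u64,
) -> Option<OwnedSemaphorePermit> {
    if timeout_ms == 0 {
        limiter.acquire_owned().await.ok() // legacy unbounded wait
    } else {
        tokio::time::timeout(
            Duration::from_millis(timeout_ms),
            limiter.acquire_owned(),
        )
        .await
        .ok()? // outer Err: timeout elapsed
        .ok() // inner Err: limiter closed
    }
}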

View File

@@ -237,6 +237,7 @@ pub(crate) async fn initialize_me_pool(
config.general.me_adaptive_floor_max_warm_writers_global,
config.general.hardswap,
config.general.me_pool_drain_ttl_secs,
config.general.me_instadrain,
config.general.me_pool_drain_threshold,
config.general.me_pool_drain_soft_evict_enabled,
config.general.me_pool_drain_soft_evict_grace_secs,
@@ -267,6 +268,8 @@ pub(crate) async fn initialize_me_pool(
config.general.me_warn_rate_limit_ms,
config.general.me_route_no_writer_mode,
config.general.me_route_no_writer_wait_ms,
config.general.me_route_hybrid_max_wait_ms,
config.general.me_route_blocking_send_timeout_ms,
config.general.me_route_inline_recovery_attempts,
config.general.me_route_inline_recovery_wait_ms,
);
@@ -329,18 +332,76 @@ pub(crate) async fn initialize_me_pool(
"Middle-End pool initialized successfully"
);
let pool_health = pool_bg.clone();
let rng_health = rng_bg.clone();
let min_conns = pool_size;
tokio::spawn(async move {
crate::transport::middle_proxy::me_health_monitor(
pool_health,
rng_health,
min_conns,
)
.await;
});
break;
// ── Supervised background tasks ──────────────────
// Each task runs inside a nested tokio::spawn so
// that a panic is caught via JoinHandle and the
// outer loop restarts the task automatically.
let pool_health = pool_bg.clone();
let rng_health = rng_bg.clone();
let min_conns = pool_size;
tokio::spawn(async move {
loop {
let p = pool_health.clone();
let r = rng_health.clone();
let res = tokio::spawn(async move {
crate::transport::middle_proxy::me_health_monitor(
p, r, min_conns,
)
.await;
})
.await;
match res {
Ok(()) => warn!("me_health_monitor exited unexpectedly, restarting"),
Err(e) => {
error!(error = %e, "me_health_monitor panicked, restarting in 1s");
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
}
});
let pool_drain_enforcer = pool_bg.clone();
tokio::spawn(async move {
loop {
let p = pool_drain_enforcer.clone();
let res = tokio::spawn(async move {
crate::transport::middle_proxy::me_drain_timeout_enforcer(p).await;
})
.await;
match res {
Ok(()) => warn!("me_drain_timeout_enforcer exited unexpectedly, restarting"),
Err(e) => {
error!(error = %e, "me_drain_timeout_enforcer panicked, restarting in 1s");
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
}
});
let pool_watchdog = pool_bg.clone();
tokio::spawn(async move {
loop {
let p = pool_watchdog.clone();
let res = tokio::spawn(async move {
crate::transport::middle_proxy::me_zombie_writer_watchdog(p).await;
})
.await;
match res {
Ok(()) => warn!("me_zombie_writer_watchdog exited unexpectedly, restarting"),
Err(e) => {
error!(error = %e, "me_zombie_writer_watchdog panicked, restarting in 1s");
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
}
});
// CRITICAL: keep the current-thread runtime
// alive. Without this, block_on() returns,
// the Runtime is dropped, and ALL spawned
// background tasks (health monitor, drain
// enforcer, zombie watchdog) are silently
// cancelled — causing the draining-writer
// leak that brought us here.
std::future::pending::<()>().await;
unreachable!();
}
Err(e) => {
startup_tracker_bg.set_me_last_error(Some(e.to_string())).await;
@@ -398,16 +459,65 @@ pub(crate) async fn initialize_me_pool(
"Middle-End pool initialized successfully"
);
let pool_clone = pool.clone();
let rng_clone = rng.clone();
let min_conns = pool_size;
tokio::spawn(async move {
crate::transport::middle_proxy::me_health_monitor(
pool_clone, rng_clone, min_conns,
)
.await;
});
// ── Supervised background tasks ──────────────────
let pool_clone = pool.clone();
let rng_clone = rng.clone();
let min_conns = pool_size;
tokio::spawn(async move {
loop {
let p = pool_clone.clone();
let r = rng_clone.clone();
let res = tokio::spawn(async move {
crate::transport::middle_proxy::me_health_monitor(
p, r, min_conns,
)
.await;
})
.await;
match res {
Ok(()) => warn!("me_health_monitor exited unexpectedly, restarting"),
Err(e) => {
error!(error = %e, "me_health_monitor panicked, restarting in 1s");
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
}
});
let pool_drain_enforcer = pool.clone();
tokio::spawn(async move {
loop {
let p = pool_drain_enforcer.clone();
let res = tokio::spawn(async move {
crate::transport::middle_proxy::me_drain_timeout_enforcer(p).await;
})
.await;
match res {
Ok(()) => warn!("me_drain_timeout_enforcer exited unexpectedly, restarting"),
Err(e) => {
error!(error = %e, "me_drain_timeout_enforcer panicked, restarting in 1s");
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
}
});
let pool_watchdog = pool.clone();
tokio::spawn(async move {
loop {
let p = pool_watchdog.clone();
let res = tokio::spawn(async move {
crate::transport::middle_proxy::me_zombie_writer_watchdog(p).await;
})
.await;
match res {
Ok(()) => warn!("me_zombie_writer_watchdog exited unexpectedly, restarting"),
Err(e) => {
error!(error = %e, "me_zombie_writer_watchdog panicked, restarting in 1s");
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
}
});
break Some(pool);
}
Err(e) => {
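The supervision boilerplate above repeats three times per branch; a condensed sketch of the underlying pattern, with a hypothetical helper that is not part of this codebase:

use std::future::Future;
use std::time::Duration;
use tracing::{error, warn};

/// Restart-on-panic supervisor: the nested spawn turns a panic into a
/// JoinError instead of unwinding through the supervisor loop itself.
fn supervise<F, Fut>(name: &'static str, mut make_task: F)
where
    F: FnMut() -> Fut + Send + 'static,
    Fut: Future<Output = ()> + Send + 'static,
{
    tokio::spawn(async move {
        loop {
            match tokio::spawn(make_task()).await {
                Ok(()) => warn!(task = name, "task exited unexpectedly, restarting"),
                Err(e) => {
                    error!(task = name, error = %e, "task panicked, restarting in 1s");
                    tokio::time::sleep(Duration::from_secs(1)).await;
                }
            }
        }
    });
}

On the current-thread path this is also why the code parks on std::future::pending::<()>(): once block_on() returns and the runtime drops, every supervised loop is cancelled with it.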


@@ -38,12 +38,15 @@ pub(crate) async fn bootstrap_tls_front(
.clone()
.unwrap_or_else(|| config.censorship.tls_domain.clone());
let mask_unix_sock = config.censorship.mask_unix_sock.clone();
let tls_fetch_scope = (!config.censorship.tls_fetch_scope.is_empty())
.then(|| config.censorship.tls_fetch_scope.clone());
let fetch_timeout = Duration::from_secs(5);
let cache_initial = cache.clone();
let domains_initial = tls_domains.to_vec();
let host_initial = mask_host.clone();
let unix_sock_initial = mask_unix_sock.clone();
let scope_initial = tls_fetch_scope.clone();
let upstream_initial = upstream_manager.clone();
tokio::spawn(async move {
let mut join = tokio::task::JoinSet::new();
@@ -51,6 +54,7 @@ pub(crate) async fn bootstrap_tls_front(
let cache_domain = cache_initial.clone();
let host_domain = host_initial.clone();
let unix_sock_domain = unix_sock_initial.clone();
let scope_domain = scope_initial.clone();
let upstream_domain = upstream_initial.clone();
join.spawn(async move {
match crate::tls_front::fetcher::fetch_real_tls(
@@ -59,6 +63,7 @@ pub(crate) async fn bootstrap_tls_front(
&domain,
fetch_timeout,
Some(upstream_domain),
scope_domain.as_deref(),
proxy_protocol,
unix_sock_domain.as_deref(),
)
@@ -100,6 +105,7 @@ pub(crate) async fn bootstrap_tls_front(
let domains_refresh = tls_domains.to_vec();
let host_refresh = mask_host.clone();
let unix_sock_refresh = mask_unix_sock.clone();
let scope_refresh = tls_fetch_scope.clone();
let upstream_refresh = upstream_manager.clone();
tokio::spawn(async move {
loop {
@@ -112,6 +118,7 @@ pub(crate) async fn bootstrap_tls_front(
let cache_domain = cache_refresh.clone();
let host_domain = host_refresh.clone();
let unix_sock_domain = unix_sock_refresh.clone();
let scope_domain = scope_refresh.clone();
let upstream_domain = upstream_refresh.clone();
join.spawn(async move {
match crate::tls_front::fetcher::fetch_real_tls(
@@ -120,6 +127,7 @@ pub(crate) async fn bootstrap_tls_front(
&domain,
fetch_timeout,
Some(upstream_domain),
scope_domain.as_deref(),
proxy_protocol,
unix_sock_domain.as_deref(),
)
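The scope plumbing above hinges on treating an empty tls_fetch_scope string as "unset"; a sketch of that normalisation with an illustrative function name:

fn scope_opt(raw: &str) -> Option<String> {
    // Mirrors `(!s.is_empty()).then(|| s.clone())` from the hunk above.
    (!raw.is_empty()).then(|| raw.to_string())
}

#[cfg(test)]
mod tests {
    use super::scope_opt;

    #[test]
    fn empty_scope_is_none() {
        assert_eq!(scope_opt(""), None);
        assert_eq!(scope_opt("isp-a").as_deref(), Some("isp-a"));
    }
}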


@@ -16,7 +16,9 @@ use tracing::{info, warn, debug};
use crate::config::ProxyConfig;
use crate::ip_tracker::UserIpTracker;
use crate::stats::beobachten::BeobachtenStore;
use crate::stats::Stats;
use crate::stats::{
MeWriterCleanupSideEffectStep, MeWriterTeardownMode, MeWriterTeardownReason, Stats,
};
use crate::transport::{ListenOptions, create_listener};
pub async fn serve(
@@ -1692,6 +1694,57 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
let _ = writeln!(
out,
"# HELP telemt_me_writer_close_signal_drop_total Close-signal drops for already-removed ME writers"
);
let _ = writeln!(out, "# TYPE telemt_me_writer_close_signal_drop_total counter");
let _ = writeln!(
out,
"telemt_me_writer_close_signal_drop_total {}",
if me_allows_normal {
stats.get_me_writer_close_signal_drop_total()
} else {
0
}
);
let _ = writeln!(
out,
"# HELP telemt_me_writer_close_signal_channel_full_total Close-signal drops caused by full writer command channels"
);
let _ = writeln!(
out,
"# TYPE telemt_me_writer_close_signal_channel_full_total counter"
);
let _ = writeln!(
out,
"telemt_me_writer_close_signal_channel_full_total {}",
if me_allows_normal {
stats.get_me_writer_close_signal_channel_full_total()
} else {
0
}
);
let _ = writeln!(
out,
"# HELP telemt_me_draining_writers_reap_progress_total Draining-writer removals processed by reap cleanup"
);
let _ = writeln!(
out,
"# TYPE telemt_me_draining_writers_reap_progress_total counter"
);
let _ = writeln!(
out,
"telemt_me_draining_writers_reap_progress_total {}",
if me_allows_normal {
stats.get_me_draining_writers_reap_progress_total()
} else {
0
}
);
let _ = writeln!(out, "# HELP telemt_me_writer_removed_total Total ME writer removals");
let _ = writeln!(out, "# TYPE telemt_me_writer_removed_total counter");
let _ = writeln!(
@@ -1719,6 +1772,169 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
let _ = writeln!(
out,
"# HELP telemt_me_writer_teardown_attempt_total ME writer teardown attempts by reason and mode"
);
let _ = writeln!(out, "# TYPE telemt_me_writer_teardown_attempt_total counter");
for reason in MeWriterTeardownReason::ALL {
for mode in MeWriterTeardownMode::ALL {
let _ = writeln!(
out,
"telemt_me_writer_teardown_attempt_total{{reason=\"{}\",mode=\"{}\"}} {}",
reason.as_str(),
mode.as_str(),
if me_allows_normal {
stats.get_me_writer_teardown_attempt_total(reason, mode)
} else {
0
}
);
}
}
let _ = writeln!(
out,
"# HELP telemt_me_writer_teardown_success_total ME writer teardown successes by mode"
);
let _ = writeln!(out, "# TYPE telemt_me_writer_teardown_success_total counter");
for mode in MeWriterTeardownMode::ALL {
let _ = writeln!(
out,
"telemt_me_writer_teardown_success_total{{mode=\"{}\"}} {}",
mode.as_str(),
if me_allows_normal {
stats.get_me_writer_teardown_success_total(mode)
} else {
0
}
);
}
let _ = writeln!(
out,
"# HELP telemt_me_writer_teardown_timeout_total Teardown operations that timed out"
);
let _ = writeln!(out, "# TYPE telemt_me_writer_teardown_timeout_total counter");
let _ = writeln!(
out,
"telemt_me_writer_teardown_timeout_total {}",
if me_allows_normal {
stats.get_me_writer_teardown_timeout_total()
} else {
0
}
);
let _ = writeln!(
out,
"# HELP telemt_me_writer_teardown_escalation_total Watchdog teardown escalations to hard detach"
);
let _ = writeln!(
out,
"# TYPE telemt_me_writer_teardown_escalation_total counter"
);
let _ = writeln!(
out,
"telemt_me_writer_teardown_escalation_total {}",
if me_allows_normal {
stats.get_me_writer_teardown_escalation_total()
} else {
0
}
);
let _ = writeln!(
out,
"# HELP telemt_me_writer_teardown_noop_total Teardown operations that became no-op"
);
let _ = writeln!(out, "# TYPE telemt_me_writer_teardown_noop_total counter");
let _ = writeln!(
out,
"telemt_me_writer_teardown_noop_total {}",
if me_allows_normal {
stats.get_me_writer_teardown_noop_total()
} else {
0
}
);
let _ = writeln!(
out,
"# HELP telemt_me_writer_teardown_duration_seconds ME writer teardown latency histogram by mode"
);
let _ = writeln!(
out,
"# TYPE telemt_me_writer_teardown_duration_seconds histogram"
);
let bucket_labels = Stats::me_writer_teardown_duration_bucket_labels();
for mode in MeWriterTeardownMode::ALL {
for (bucket_idx, label) in bucket_labels.iter().enumerate() {
let _ = writeln!(
out,
"telemt_me_writer_teardown_duration_seconds_bucket{{mode=\"{}\",le=\"{}\"}} {}",
mode.as_str(),
label,
if me_allows_normal {
stats.get_me_writer_teardown_duration_bucket_total(mode, bucket_idx)
} else {
0
}
);
}
let _ = writeln!(
out,
"telemt_me_writer_teardown_duration_seconds_bucket{{mode=\"{}\",le=\"+Inf\"}} {}",
mode.as_str(),
if me_allows_normal {
stats.get_me_writer_teardown_duration_count(mode)
} else {
0
}
);
let _ = writeln!(
out,
"telemt_me_writer_teardown_duration_seconds_sum{{mode=\"{}\"}} {:.6}",
mode.as_str(),
if me_allows_normal {
stats.get_me_writer_teardown_duration_sum_seconds(mode)
} else {
0.0
}
);
let _ = writeln!(
out,
"telemt_me_writer_teardown_duration_seconds_count{{mode=\"{}\"}} {}",
mode.as_str(),
if me_allows_normal {
stats.get_me_writer_teardown_duration_count(mode)
} else {
0
}
);
}
let _ = writeln!(
out,
"# HELP telemt_me_writer_cleanup_side_effect_failures_total Failed cleanup side effects by step"
);
let _ = writeln!(
out,
"# TYPE telemt_me_writer_cleanup_side_effect_failures_total counter"
);
for step in MeWriterCleanupSideEffectStep::ALL {
let _ = writeln!(
out,
"telemt_me_writer_cleanup_side_effect_failures_total{{step=\"{}\"}} {}",
step.as_str(),
if me_allows_normal {
stats.get_me_writer_cleanup_side_effect_failures_total(step)
} else {
0
}
);
}
let _ = writeln!(out, "# HELP telemt_me_refill_triggered_total Immediate ME refill runs started");
let _ = writeln!(out, "# TYPE telemt_me_refill_triggered_total counter");
let _ = writeln!(
@@ -2124,6 +2340,24 @@ mod tests {
assert!(output.contains("# TYPE telemt_me_rpc_proxy_req_signal_sent_total counter"));
assert!(output.contains("# TYPE telemt_me_idle_close_by_peer_total counter"));
assert!(output.contains("# TYPE telemt_me_writer_removed_total counter"));
assert!(output.contains("# TYPE telemt_me_writer_teardown_attempt_total counter"));
assert!(output.contains("# TYPE telemt_me_writer_teardown_success_total counter"));
assert!(output.contains("# TYPE telemt_me_writer_teardown_timeout_total counter"));
assert!(output.contains("# TYPE telemt_me_writer_teardown_escalation_total counter"));
assert!(output.contains("# TYPE telemt_me_writer_teardown_noop_total counter"));
assert!(output.contains(
"# TYPE telemt_me_writer_teardown_duration_seconds histogram"
));
assert!(output.contains(
"# TYPE telemt_me_writer_cleanup_side_effect_failures_total counter"
));
assert!(output.contains("# TYPE telemt_me_writer_close_signal_drop_total counter"));
assert!(output.contains(
"# TYPE telemt_me_writer_close_signal_channel_full_total counter"
));
assert!(output.contains(
"# TYPE telemt_me_draining_writers_reap_progress_total counter"
));
assert!(output.contains("# TYPE telemt_pool_drain_soft_evict_total counter"));
assert!(output.contains("# TYPE telemt_pool_drain_soft_evict_writer_total counter"));
assert!(output.contains(
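The histogram block follows the Prometheus exposition rules: _bucket values are cumulative, the le="+Inf" bucket equals _count (which is why the rendering above reuses get_me_writer_teardown_duration_count for the +Inf line), and _sum is emitted in seconds. A minimal sketch with a made-up metric name:

use std::fmt::Write;

/// Render one histogram series; `cumulative` pairs each upper-bound label
/// with the cumulative hit count up to and including that bucket.
fn render_histogram(out: &mut String, cumulative: &[(&str, u64)], count: u64, sum: f64) {
    for (le, total) in cumulative {
        let _ = writeln!(out, "demo_seconds_bucket{{le=\"{le}\"}} {total}");
    }
    let _ = writeln!(out, "demo_seconds_bucket{{le=\"+Inf\"}} {count}");
    let _ = writeln!(out, "demo_seconds_sum {sum:.6}");
    let _ = writeln!(out, "demo_seconds_count {count}");
}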


@@ -3,8 +3,7 @@ use std::io::Write;
use std::net::SocketAddr;
use std::sync::Arc;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
use tokio::net::TcpStream;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadHalf, WriteHalf, split};
use tokio::sync::watch;
use tracing::{debug, info, warn};
@@ -15,7 +14,7 @@ use crate::protocol::constants::*;
use crate::proxy::handshake::{HandshakeSuccess, encrypt_tg_nonce_with_ciphers, generate_tg_nonce};
use crate::proxy::relay::relay_bidirectional;
use crate::proxy::route_mode::{
RelayRouteMode, RouteCutoverState, ROUTE_SWITCH_ERROR_MSG, affected_cutover_state,
ROUTE_SWITCH_ERROR_MSG, RelayRouteMode, RouteCutoverState, affected_cutover_state,
cutover_stagger_delay,
};
use crate::proxy::adaptive_buffers;
@@ -56,7 +55,11 @@ where
);
let tg_stream = upstream_manager
.connect(dc_addr, Some(success.dc_idx), user.strip_prefix("scope_").filter(|s| !s.is_empty()))
.connect(
dc_addr,
Some(success.dc_idx),
user.strip_prefix("scope_").filter(|s| !s.is_empty()),
)
.await?;
debug!(peer = %success.peer, dc_addr = %dc_addr, "Connected, performing TG handshake");
@@ -93,11 +96,9 @@ where
);
tokio::pin!(relay_result);
let relay_result = loop {
if let Some(cutover) = affected_cutover_state(
&route_rx,
RelayRouteMode::Direct,
route_snapshot.generation,
) {
if let Some(cutover) =
affected_cutover_state(&route_rx, RelayRouteMode::Direct, route_snapshot.generation)
{
let delay = cutover_stagger_delay(session_id, cutover.generation);
warn!(
user = %user,
@@ -148,7 +149,9 @@ fn get_dc_addr_static(dc_idx: i16, config: &ProxyConfig) -> Result<SocketAddr> {
for addr_str in addrs {
match addr_str.parse::<SocketAddr>() {
Ok(addr) => parsed.push(addr),
Err(_) => warn!(dc_idx = dc_idx, addr_str = %addr_str, "Invalid DC override address in config, ignoring"),
Err(_) => {
warn!(dc_idx = dc_idx, addr_str = %addr_str, "Invalid DC override address in config, ignoring")
}
}
}
@@ -170,7 +173,10 @@ fn get_dc_addr_static(dc_idx: i16, config: &ProxyConfig) -> Result<SocketAddr> {
// Unknown DC requested by client without override: log and fall back.
if !config.dc_overrides.contains_key(&dc_key) {
warn!(dc_idx = dc_idx, "Requested non-standard DC with no override; falling back to default cluster");
warn!(
dc_idx = dc_idx,
"Requested non-standard DC with no override; falling back to default cluster"
);
if config.general.unknown_dc_file_log_enabled
&& let Some(path) = &config.general.unknown_dc_log_path
&& let Ok(handle) = tokio::runtime::Handle::try_current()
@@ -204,15 +210,15 @@ fn get_dc_addr_static(dc_idx: i16, config: &ProxyConfig) -> Result<SocketAddr> {
))
}
async fn do_tg_handshake_static(
mut stream: TcpStream,
async fn do_tg_handshake_static<S>(
mut stream: S,
success: &HandshakeSuccess,
config: &ProxyConfig,
rng: &SecureRandom,
) -> Result<(
CryptoReader<tokio::net::tcp::OwnedReadHalf>,
CryptoWriter<tokio::net::tcp::OwnedWriteHalf>,
)> {
) -> Result<(CryptoReader<ReadHalf<S>>, CryptoWriter<WriteHalf<S>>)>
where
S: AsyncRead + AsyncWrite + Unpin,
{
let (nonce, _tg_enc_key, _tg_enc_iv, _tg_dec_key, _tg_dec_iv) = generate_tg_nonce(
success.proto_tag,
success.dc_idx,
@@ -235,7 +241,7 @@ async fn do_tg_handshake_static(
stream.write_all(&encrypted_nonce).await?;
stream.flush().await?;
let (read_half, write_half) = stream.into_split();
let (read_half, write_half) = split(stream);
let max_pending = config.general.crypto_pending_buffer;
Ok((
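Switching from TcpStream::into_split to tokio::io::split is what lets do_tg_handshake_static accept any S: AsyncRead + AsyncWrite + Unpin, such as a TLS-wrapped stream. A toy illustration:

use tokio::io::{AsyncRead, AsyncWrite, ReadHalf, WriteHalf, split};

/// Works for TcpStream, UnixStream, TLS streams, in-memory duplex pipes,
/// and anything else implementing the two traits.
fn split_any<S>(stream: S) -> (ReadHalf<S>, WriteHalf<S>)
where
    S: AsyncRead + AsyncWrite + Unpin,
{
    split(stream)
}

The trade-off: the generic split synchronises the two halves internally, whereas into_split on a TcpStream avoids that cost; generality is bought with a small per-I/O overhead.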


@@ -222,6 +222,7 @@ fn should_yield_c2me_sender(sent_since_yield: usize, has_backlog: bool) -> bool
async fn enqueue_c2me_command(
tx: &mpsc::Sender<C2MeCommand>,
cmd: C2MeCommand,
send_timeout: Duration,
) -> std::result::Result<(), mpsc::error::SendError<C2MeCommand>> {
match tx.try_send(cmd) {
Ok(()) => Ok(()),
@@ -231,7 +232,17 @@ async fn enqueue_c2me_command(
if tx.capacity() <= C2ME_SOFT_PRESSURE_MIN_FREE_SLOTS {
tokio::task::yield_now().await;
}
tx.send(cmd).await
if send_timeout.is_zero() {
return tx.send(cmd).await;
}
match tokio::time::timeout(send_timeout, tx.reserve()).await {
Ok(Ok(permit)) => {
permit.send(cmd);
Ok(())
}
Ok(Err(_)) => Err(mpsc::error::SendError(cmd)),
Err(_) => Err(mpsc::error::SendError(cmd)),
}
}
}
}
@@ -355,6 +366,7 @@ where
.general
.me_c2me_channel_capacity
.max(C2ME_CHANNEL_CAPACITY_FALLBACK);
let c2me_send_timeout = Duration::from_millis(config.general.me_c2me_send_timeout_ms);
let (c2me_tx, mut c2me_rx) = mpsc::channel::<C2MeCommand>(c2me_channel_capacity);
let me_pool_c2me = me_pool.clone();
let effective_tag = effective_tag;
@@ -363,15 +375,42 @@ where
while let Some(cmd) = c2me_rx.recv().await {
match cmd {
C2MeCommand::Data { payload, flags } => {
me_pool_c2me.send_proxy_req(
conn_id,
success.dc_idx,
peer,
translated_local_addr,
payload.as_ref(),
flags,
effective_tag.as_deref(),
).await?;
if c2me_send_timeout.is_zero() {
me_pool_c2me
.send_proxy_req(
conn_id,
success.dc_idx,
peer,
translated_local_addr,
payload.as_ref(),
flags,
effective_tag.as_deref(),
)
.await?;
} else {
match tokio::time::timeout(
c2me_send_timeout,
me_pool_c2me.send_proxy_req(
conn_id,
success.dc_idx,
peer,
translated_local_addr,
payload.as_ref(),
flags,
effective_tag.as_deref(),
),
)
.await
{
Ok(send_result) => send_result?,
Err(_) => {
return Err(ProxyError::Proxy(format!(
"ME send timeout after {}ms",
c2me_send_timeout.as_millis()
)));
}
}
}
sent_since_yield = sent_since_yield.saturating_add(1);
if should_yield_c2me_sender(sent_since_yield, !c2me_rx.is_empty()) {
sent_since_yield = 0;
@@ -555,7 +594,7 @@ where
loop {
if session_lease.is_stale() {
stats.increment_reconnect_stale_close_total();
let _ = enqueue_c2me_command(&c2me_tx, C2MeCommand::Close).await;
let _ = enqueue_c2me_command(&c2me_tx, C2MeCommand::Close, c2me_send_timeout).await;
main_result = Err(ProxyError::Proxy("Session evicted by reconnect".to_string()));
break;
}
@@ -573,7 +612,7 @@ where
"Cutover affected middle session, closing client connection"
);
tokio::time::sleep(delay).await;
let _ = enqueue_c2me_command(&c2me_tx, C2MeCommand::Close).await;
let _ = enqueue_c2me_command(&c2me_tx, C2MeCommand::Close, c2me_send_timeout).await;
main_result = Err(ProxyError::Proxy(ROUTE_SWITCH_ERROR_MSG.to_string()));
break;
}
@@ -607,9 +646,13 @@ where
flags |= RPC_FLAG_NOT_ENCRYPTED;
}
// Keep client read loop lightweight: route heavy ME send path via a dedicated task.
if enqueue_c2me_command(&c2me_tx, C2MeCommand::Data { payload, flags })
.await
.is_err()
if enqueue_c2me_command(
&c2me_tx,
C2MeCommand::Data { payload, flags },
c2me_send_timeout,
)
.await
.is_err()
{
main_result = Err(ProxyError::Proxy("ME sender channel closed".into()));
break;
@@ -618,7 +661,12 @@ where
Ok(None) => {
debug!(conn_id, "Client EOF");
client_closed = true;
let _ = enqueue_c2me_command(&c2me_tx, C2MeCommand::Close).await;
let _ = enqueue_c2me_command(
&c2me_tx,
C2MeCommand::Close,
c2me_send_timeout,
)
.await;
break;
}
Err(e) => {
@@ -993,6 +1041,7 @@ mod tests {
payload: Bytes::from_static(&[1, 2, 3]),
flags: 0,
},
TokioDuration::from_millis(50),
)
.await
.unwrap();
@@ -1028,6 +1077,7 @@ mod tests {
payload: Bytes::from_static(&[7, 7]),
flags: 7,
},
TokioDuration::from_millis(100),
)
.await
.unwrap();
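A self-contained sketch of the try-then-reserve discipline used by enqueue_c2me_command, generic over any command type; the helper name is made up:

use std::time::Duration;
use tokio::sync::mpsc;

async fn send_bounded<T>(
    tx: &mpsc::Sender<T>,
    msg: T,
    timeout: Duration,
) -> Result<(), mpsc::error::SendError<T>> {
    match tx.try_send(msg) {
        Ok(()) => Ok(()),
        Err(mpsc::error::TrySendError::Closed(m)) => Err(mpsc::error::SendError(m)),
        Err(mpsc::error::TrySendError::Full(m)) => {
            if timeout.is_zero() {
                // Zero keeps the legacy behaviour: block until a slot frees up.
                return tx.send(m).await;
            }
            match tokio::time::timeout(timeout, tx.reserve()).await {
                Ok(Ok(permit)) => {
                    permit.send(m);
                    Ok(())
                }
                Ok(Err(_)) | Err(_) => Err(mpsc::error::SendError(m)),
            }
        }
    }
}

The try_send fast path keeps the hot loop free of awaits; only a full channel pays for reserve() and the timer.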


@@ -19,6 +19,137 @@ use tracing::debug;
use crate::config::{MeTelemetryLevel, MeWriterPickMode};
use self::telemetry::TelemetryPolicy;
const ME_WRITER_TEARDOWN_MODE_COUNT: usize = 2;
const ME_WRITER_TEARDOWN_REASON_COUNT: usize = 11;
const ME_WRITER_CLEANUP_SIDE_EFFECT_STEP_COUNT: usize = 2;
const ME_WRITER_TEARDOWN_DURATION_BUCKET_COUNT: usize = 12;
const ME_WRITER_TEARDOWN_DURATION_BUCKET_BOUNDS_MICROS: [u64; ME_WRITER_TEARDOWN_DURATION_BUCKET_COUNT] = [
1_000,
5_000,
10_000,
25_000,
50_000,
100_000,
250_000,
500_000,
1_000_000,
2_500_000,
5_000_000,
10_000_000,
];
const ME_WRITER_TEARDOWN_DURATION_BUCKET_LABELS: [&str; ME_WRITER_TEARDOWN_DURATION_BUCKET_COUNT] = [
"0.001",
"0.005",
"0.01",
"0.025",
"0.05",
"0.1",
"0.25",
"0.5",
"1",
"2.5",
"5",
"10",
];
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
#[repr(u8)]
pub enum MeWriterTeardownMode {
Normal = 0,
HardDetach = 1,
}
impl MeWriterTeardownMode {
pub const ALL: [Self; ME_WRITER_TEARDOWN_MODE_COUNT] =
[Self::Normal, Self::HardDetach];
pub const fn as_str(self) -> &'static str {
match self {
Self::Normal => "normal",
Self::HardDetach => "hard_detach",
}
}
const fn idx(self) -> usize {
self as usize
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
#[repr(u8)]
pub enum MeWriterTeardownReason {
ReaderExit = 0,
WriterTaskExit = 1,
PingSendFail = 2,
SignalSendFail = 3,
RouteChannelClosed = 4,
CloseRpcChannelClosed = 5,
PruneClosedWriter = 6,
ReapTimeoutExpired = 7,
ReapThresholdForce = 8,
ReapEmpty = 9,
WatchdogStuckDraining = 10,
}
impl MeWriterTeardownReason {
pub const ALL: [Self; ME_WRITER_TEARDOWN_REASON_COUNT] = [
Self::ReaderExit,
Self::WriterTaskExit,
Self::PingSendFail,
Self::SignalSendFail,
Self::RouteChannelClosed,
Self::CloseRpcChannelClosed,
Self::PruneClosedWriter,
Self::ReapTimeoutExpired,
Self::ReapThresholdForce,
Self::ReapEmpty,
Self::WatchdogStuckDraining,
];
pub const fn as_str(self) -> &'static str {
match self {
Self::ReaderExit => "reader_exit",
Self::WriterTaskExit => "writer_task_exit",
Self::PingSendFail => "ping_send_fail",
Self::SignalSendFail => "signal_send_fail",
Self::RouteChannelClosed => "route_channel_closed",
Self::CloseRpcChannelClosed => "close_rpc_channel_closed",
Self::PruneClosedWriter => "prune_closed_writer",
Self::ReapTimeoutExpired => "reap_timeout_expired",
Self::ReapThresholdForce => "reap_threshold_force",
Self::ReapEmpty => "reap_empty",
Self::WatchdogStuckDraining => "watchdog_stuck_draining",
}
}
const fn idx(self) -> usize {
self as usize
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
#[repr(u8)]
pub enum MeWriterCleanupSideEffectStep {
CloseSignalChannelFull = 0,
CloseSignalChannelClosed = 1,
}
impl MeWriterCleanupSideEffectStep {
pub const ALL: [Self; ME_WRITER_CLEANUP_SIDE_EFFECT_STEP_COUNT] =
[Self::CloseSignalChannelFull, Self::CloseSignalChannelClosed];
pub const fn as_str(self) -> &'static str {
match self {
Self::CloseSignalChannelFull => "close_signal_channel_full",
Self::CloseSignalChannelClosed => "close_signal_channel_closed",
}
}
const fn idx(self) -> usize {
self as usize
}
}
// ============= Stats =============
#[derive(Default)]
@@ -123,8 +254,23 @@ pub struct Stats {
pool_drain_soft_evict_total: AtomicU64,
pool_drain_soft_evict_writer_total: AtomicU64,
pool_stale_pick_total: AtomicU64,
me_writer_close_signal_drop_total: AtomicU64,
me_writer_close_signal_channel_full_total: AtomicU64,
me_draining_writers_reap_progress_total: AtomicU64,
me_writer_removed_total: AtomicU64,
me_writer_removed_unexpected_total: AtomicU64,
me_writer_teardown_attempt_total:
[[AtomicU64; ME_WRITER_TEARDOWN_MODE_COUNT]; ME_WRITER_TEARDOWN_REASON_COUNT],
me_writer_teardown_success_total: [AtomicU64; ME_WRITER_TEARDOWN_MODE_COUNT],
me_writer_teardown_timeout_total: AtomicU64,
me_writer_teardown_escalation_total: AtomicU64,
me_writer_teardown_noop_total: AtomicU64,
me_writer_cleanup_side_effect_failures_total:
[AtomicU64; ME_WRITER_CLEANUP_SIDE_EFFECT_STEP_COUNT],
me_writer_teardown_duration_bucket_hits:
[[AtomicU64; ME_WRITER_TEARDOWN_DURATION_BUCKET_COUNT + 1]; ME_WRITER_TEARDOWN_MODE_COUNT],
me_writer_teardown_duration_sum_micros: [AtomicU64; ME_WRITER_TEARDOWN_MODE_COUNT],
me_writer_teardown_duration_count: [AtomicU64; ME_WRITER_TEARDOWN_MODE_COUNT],
me_refill_triggered_total: AtomicU64,
me_refill_skipped_inflight_total: AtomicU64,
me_refill_failed_total: AtomicU64,
@@ -734,6 +880,24 @@ impl Stats {
self.pool_stale_pick_total.fetch_add(1, Ordering::Relaxed);
}
}
pub fn increment_me_writer_close_signal_drop_total(&self) {
if self.telemetry_me_allows_normal() {
self.me_writer_close_signal_drop_total
.fetch_add(1, Ordering::Relaxed);
}
}
pub fn increment_me_writer_close_signal_channel_full_total(&self) {
if self.telemetry_me_allows_normal() {
self.me_writer_close_signal_channel_full_total
.fetch_add(1, Ordering::Relaxed);
}
}
pub fn increment_me_draining_writers_reap_progress_total(&self) {
if self.telemetry_me_allows_normal() {
self.me_draining_writers_reap_progress_total
.fetch_add(1, Ordering::Relaxed);
}
}
pub fn increment_me_writer_removed_total(&self) {
if self.telemetry_me_allows_debug() {
self.me_writer_removed_total.fetch_add(1, Ordering::Relaxed);
@@ -744,6 +908,74 @@ impl Stats {
self.me_writer_removed_unexpected_total.fetch_add(1, Ordering::Relaxed);
}
}
pub fn increment_me_writer_teardown_attempt_total(
&self,
reason: MeWriterTeardownReason,
mode: MeWriterTeardownMode,
) {
if self.telemetry_me_allows_normal() {
self.me_writer_teardown_attempt_total[reason.idx()][mode.idx()]
.fetch_add(1, Ordering::Relaxed);
}
}
pub fn increment_me_writer_teardown_success_total(&self, mode: MeWriterTeardownMode) {
if self.telemetry_me_allows_normal() {
self.me_writer_teardown_success_total[mode.idx()].fetch_add(1, Ordering::Relaxed);
}
}
pub fn increment_me_writer_teardown_timeout_total(&self) {
if self.telemetry_me_allows_normal() {
self.me_writer_teardown_timeout_total
.fetch_add(1, Ordering::Relaxed);
}
}
pub fn increment_me_writer_teardown_escalation_total(&self) {
if self.telemetry_me_allows_normal() {
self.me_writer_teardown_escalation_total
.fetch_add(1, Ordering::Relaxed);
}
}
pub fn increment_me_writer_teardown_noop_total(&self) {
if self.telemetry_me_allows_normal() {
self.me_writer_teardown_noop_total
.fetch_add(1, Ordering::Relaxed);
}
}
pub fn increment_me_writer_cleanup_side_effect_failures_total(
&self,
step: MeWriterCleanupSideEffectStep,
) {
if self.telemetry_me_allows_normal() {
self.me_writer_cleanup_side_effect_failures_total[step.idx()]
.fetch_add(1, Ordering::Relaxed);
}
}
pub fn observe_me_writer_teardown_duration(
&self,
mode: MeWriterTeardownMode,
duration: Duration,
) {
if !self.telemetry_me_allows_normal() {
return;
}
let duration_micros = duration.as_micros().min(u64::MAX as u128) as u64;
let mut bucket_idx = ME_WRITER_TEARDOWN_DURATION_BUCKET_COUNT;
for (idx, upper_bound_micros) in ME_WRITER_TEARDOWN_DURATION_BUCKET_BOUNDS_MICROS
.iter()
.copied()
.enumerate()
{
if duration_micros <= upper_bound_micros {
bucket_idx = idx;
break;
}
}
self.me_writer_teardown_duration_bucket_hits[mode.idx()][bucket_idx]
.fetch_add(1, Ordering::Relaxed);
self.me_writer_teardown_duration_sum_micros[mode.idx()]
.fetch_add(duration_micros, Ordering::Relaxed);
self.me_writer_teardown_duration_count[mode.idx()].fetch_add(1, Ordering::Relaxed);
}
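The bucket search above is a first-match linear scan over ascending bounds, with an implicit overflow slot at index BUCKET_COUNT. An equivalent standalone sketch with illustrative bounds:

const BOUNDS_MICROS: [u64; 3] = [1_000, 10_000, 100_000];

fn bucket_index(duration_micros: u64) -> usize {
    // First upper bound >= the observation wins; larger observations land
    // in the extra overflow bucket at index BOUNDS_MICROS.len().
    BOUNDS_MICROS
        .iter()
        .position(|&upper| duration_micros <= upper)
        .unwrap_or(BOUNDS_MICROS.len())
}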
pub fn increment_me_refill_triggered_total(&self) {
if self.telemetry_me_allows_debug() {
self.me_refill_triggered_total.fetch_add(1, Ordering::Relaxed);
@@ -1259,12 +1491,96 @@ impl Stats {
pub fn get_pool_stale_pick_total(&self) -> u64 {
self.pool_stale_pick_total.load(Ordering::Relaxed)
}
pub fn get_me_writer_close_signal_drop_total(&self) -> u64 {
self.me_writer_close_signal_drop_total.load(Ordering::Relaxed)
}
pub fn get_me_writer_close_signal_channel_full_total(&self) -> u64 {
self.me_writer_close_signal_channel_full_total
.load(Ordering::Relaxed)
}
pub fn get_me_draining_writers_reap_progress_total(&self) -> u64 {
self.me_draining_writers_reap_progress_total
.load(Ordering::Relaxed)
}
pub fn get_me_writer_removed_total(&self) -> u64 {
self.me_writer_removed_total.load(Ordering::Relaxed)
}
pub fn get_me_writer_removed_unexpected_total(&self) -> u64 {
self.me_writer_removed_unexpected_total.load(Ordering::Relaxed)
}
pub fn get_me_writer_teardown_attempt_total(
&self,
reason: MeWriterTeardownReason,
mode: MeWriterTeardownMode,
) -> u64 {
self.me_writer_teardown_attempt_total[reason.idx()][mode.idx()]
.load(Ordering::Relaxed)
}
pub fn get_me_writer_teardown_attempt_total_by_mode(&self, mode: MeWriterTeardownMode) -> u64 {
MeWriterTeardownReason::ALL
.iter()
.copied()
.map(|reason| self.get_me_writer_teardown_attempt_total(reason, mode))
.sum()
}
pub fn get_me_writer_teardown_success_total(&self, mode: MeWriterTeardownMode) -> u64 {
self.me_writer_teardown_success_total[mode.idx()].load(Ordering::Relaxed)
}
pub fn get_me_writer_teardown_timeout_total(&self) -> u64 {
self.me_writer_teardown_timeout_total.load(Ordering::Relaxed)
}
pub fn get_me_writer_teardown_escalation_total(&self) -> u64 {
self.me_writer_teardown_escalation_total
.load(Ordering::Relaxed)
}
pub fn get_me_writer_teardown_noop_total(&self) -> u64 {
self.me_writer_teardown_noop_total.load(Ordering::Relaxed)
}
pub fn get_me_writer_cleanup_side_effect_failures_total(
&self,
step: MeWriterCleanupSideEffectStep,
) -> u64 {
self.me_writer_cleanup_side_effect_failures_total[step.idx()]
.load(Ordering::Relaxed)
}
pub fn get_me_writer_cleanup_side_effect_failures_total_all(&self) -> u64 {
MeWriterCleanupSideEffectStep::ALL
.iter()
.copied()
.map(|step| self.get_me_writer_cleanup_side_effect_failures_total(step))
.sum()
}
pub fn me_writer_teardown_duration_bucket_labels(
) -> &'static [&'static str; ME_WRITER_TEARDOWN_DURATION_BUCKET_COUNT] {
&ME_WRITER_TEARDOWN_DURATION_BUCKET_LABELS
}
pub fn get_me_writer_teardown_duration_bucket_hits(
&self,
mode: MeWriterTeardownMode,
bucket_idx: usize,
) -> u64 {
self.me_writer_teardown_duration_bucket_hits[mode.idx()][bucket_idx]
.load(Ordering::Relaxed)
}
pub fn get_me_writer_teardown_duration_bucket_total(
&self,
mode: MeWriterTeardownMode,
bucket_idx: usize,
) -> u64 {
let capped_idx = bucket_idx.min(ME_WRITER_TEARDOWN_DURATION_BUCKET_COUNT);
let mut total = 0u64;
for idx in 0..=capped_idx {
total = total.saturating_add(self.get_me_writer_teardown_duration_bucket_hits(mode, idx));
}
total
}
pub fn get_me_writer_teardown_duration_count(&self, mode: MeWriterTeardownMode) -> u64 {
self.me_writer_teardown_duration_count[mode.idx()].load(Ordering::Relaxed)
}
pub fn get_me_writer_teardown_duration_sum_seconds(&self, mode: MeWriterTeardownMode) -> f64 {
self.me_writer_teardown_duration_sum_micros[mode.idx()].load(Ordering::Relaxed) as f64
/ 1_000_000.0
}
pub fn get_me_refill_triggered_total(&self) -> u64 {
self.me_refill_triggered_total.load(Ordering::Relaxed)
}
@@ -1768,6 +2084,79 @@ mod tests {
assert_eq!(stats.get_me_keepalive_sent(), 0);
assert_eq!(stats.get_me_route_drop_queue_full(), 0);
}
#[test]
fn test_teardown_counters_and_duration() {
let stats = Stats::new();
stats.increment_me_writer_teardown_attempt_total(
MeWriterTeardownReason::ReaderExit,
MeWriterTeardownMode::Normal,
);
stats.increment_me_writer_teardown_success_total(MeWriterTeardownMode::Normal);
stats.observe_me_writer_teardown_duration(
MeWriterTeardownMode::Normal,
Duration::from_millis(3),
);
stats.increment_me_writer_cleanup_side_effect_failures_total(
MeWriterCleanupSideEffectStep::CloseSignalChannelFull,
);
assert_eq!(
stats.get_me_writer_teardown_attempt_total(
MeWriterTeardownReason::ReaderExit,
MeWriterTeardownMode::Normal
),
1
);
assert_eq!(
stats.get_me_writer_teardown_success_total(MeWriterTeardownMode::Normal),
1
);
assert_eq!(
stats.get_me_writer_teardown_duration_count(MeWriterTeardownMode::Normal),
1
);
assert!(
stats.get_me_writer_teardown_duration_sum_seconds(MeWriterTeardownMode::Normal) > 0.0
);
assert_eq!(
stats.get_me_writer_cleanup_side_effect_failures_total(
MeWriterCleanupSideEffectStep::CloseSignalChannelFull
),
1
);
}
#[test]
fn test_teardown_counters_respect_me_silent() {
let stats = Stats::new();
stats.apply_telemetry_policy(TelemetryPolicy {
core_enabled: true,
user_enabled: true,
me_level: MeTelemetryLevel::Silent,
});
stats.increment_me_writer_teardown_attempt_total(
MeWriterTeardownReason::ReaderExit,
MeWriterTeardownMode::Normal,
);
stats.increment_me_writer_teardown_timeout_total();
stats.observe_me_writer_teardown_duration(
MeWriterTeardownMode::Normal,
Duration::from_millis(1),
);
assert_eq!(
stats.get_me_writer_teardown_attempt_total(
MeWriterTeardownReason::ReaderExit,
MeWriterTeardownMode::Normal
),
0
);
assert_eq!(stats.get_me_writer_teardown_timeout_total(), 0);
assert_eq!(
stats.get_me_writer_teardown_duration_count(MeWriterTeardownMode::Normal),
0
);
}
#[test]
fn test_replay_checker_basic() {


@@ -7,33 +7,29 @@ use tokio::net::TcpStream;
#[cfg(unix)]
use tokio::net::UnixStream;
use tokio::time::timeout;
use tokio_rustls::client::TlsStream;
use tokio_rustls::TlsConnector;
use tokio_rustls::client::TlsStream;
use tracing::{debug, warn};
use rustls::client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier};
use rustls::client::ClientConfig;
use rustls::client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier};
use rustls::pki_types::{CertificateDer, ServerName, UnixTime};
use rustls::{DigitallySignedStruct, Error as RustlsError};
use x509_parser::prelude::FromDer;
use x509_parser::certificate::X509Certificate;
use x509_parser::prelude::FromDer;
use crate::crypto::SecureRandom;
use crate::network::dns_overrides::resolve_socket_addr;
use crate::protocol::constants::{
TLS_RECORD_APPLICATION, TLS_RECORD_CHANGE_CIPHER, TLS_RECORD_HANDSHAKE,
};
use crate::transport::proxy_protocol::{ProxyProtocolV1Builder, ProxyProtocolV2Builder};
use crate::tls_front::types::{
ParsedCertificateInfo,
ParsedServerHello,
TlsBehaviorProfile,
TlsCertPayload,
TlsExtension,
TlsFetchResult,
TlsProfileSource,
ParsedCertificateInfo, ParsedServerHello, TlsBehaviorProfile, TlsCertPayload, TlsExtension,
TlsFetchResult, TlsProfileSource,
};
use crate::transport::UpstreamStream;
use crate::transport::proxy_protocol::{ProxyProtocolV1Builder, ProxyProtocolV2Builder};
/// No-op verifier: accept any certificate (we only need lengths and metadata).
#[derive(Debug)]
@@ -144,21 +140,27 @@ fn build_client_hello(sni: &str, rng: &SecureRandom) -> Vec<u8> {
exts.extend_from_slice(&0x000au16.to_be_bytes());
exts.extend_from_slice(&((2 + groups.len() * 2) as u16).to_be_bytes());
exts.extend_from_slice(&(groups.len() as u16 * 2).to_be_bytes());
for g in groups { exts.extend_from_slice(&g.to_be_bytes()); }
for g in groups {
exts.extend_from_slice(&g.to_be_bytes());
}
// signature_algorithms
let sig_algs: [u16; 4] = [0x0804, 0x0805, 0x0403, 0x0503]; // rsa_pss_rsae_sha256/384, ecdsa_secp256r1_sha256, rsa_pkcs1_sha256
exts.extend_from_slice(&0x000du16.to_be_bytes());
exts.extend_from_slice(&((2 + sig_algs.len() * 2) as u16).to_be_bytes());
exts.extend_from_slice(&(sig_algs.len() as u16 * 2).to_be_bytes());
for a in sig_algs { exts.extend_from_slice(&a.to_be_bytes()); }
for a in sig_algs {
exts.extend_from_slice(&a.to_be_bytes());
}
// supported_versions (TLS1.3 + TLS1.2)
let versions: [u16; 2] = [0x0304, 0x0303];
exts.extend_from_slice(&0x002bu16.to_be_bytes());
exts.extend_from_slice(&((1 + versions.len() * 2) as u16).to_be_bytes());
exts.push((versions.len() * 2) as u8);
for v in versions { exts.extend_from_slice(&v.to_be_bytes()); }
for v in versions {
exts.extend_from_slice(&v.to_be_bytes());
}
// key_share (x25519)
let key = gen_key_share(rng);
@@ -273,7 +275,10 @@ fn parse_server_hello(body: &[u8]) -> Option<ParsedServerHello> {
pos += 4;
let data = body.get(pos..pos + elen)?.to_vec();
pos += elen;
extensions.push(TlsExtension { ext_type: etype, data });
extensions.push(TlsExtension {
ext_type: etype,
data,
});
}
Some(ParsedServerHello {
@@ -394,37 +399,42 @@ async fn connect_tcp_with_upstream(
port: u16,
connect_timeout: Duration,
upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
) -> Result<TcpStream> {
scope: Option<&str>,
) -> Result<UpstreamStream> {
if let Some(manager) = upstream {
if let Some(addr) = resolve_socket_addr(host, port) {
match manager.connect(addr, None, None).await {
match manager.connect(addr, None, scope).await {
Ok(stream) => return Ok(stream),
Err(e) => {
warn!(
host = %host,
port = port,
scope = ?scope,
error = %e,
"Upstream connect failed, using direct connect"
);
}
}
} else if let Ok(mut addrs) = tokio::net::lookup_host((host, port)).await {
if let Some(addr) = addrs.find(|a| a.is_ipv4()) {
match manager.connect(addr, None, None).await {
Ok(stream) => return Ok(stream),
Err(e) => {
warn!(
host = %host,
port = port,
error = %e,
"Upstream connect failed, using direct connect"
);
}
} else if let Ok(mut addrs) = tokio::net::lookup_host((host, port)).await
&& let Some(addr) = addrs.find(|a| a.is_ipv4())
{
match manager.connect(addr, None, scope).await {
Ok(stream) => return Ok(stream),
Err(e) => {
warn!(
host = %host,
port = port,
scope = ?scope,
error = %e,
"Upstream connect failed, using direct connect"
);
}
}
}
}
connect_with_dns_override(host, port, connect_timeout).await
Ok(UpstreamStream::Tcp(
connect_with_dns_override(host, port, connect_timeout).await?,
))
}
fn encode_tls13_certificate_message(cert_chain_der: &[Vec<u8>]) -> Option<Vec<u8>> {
@@ -443,9 +453,7 @@ fn encode_tls13_certificate_message(cert_chain_der: &[Vec<u8>]) -> Option<Vec<u8
}
// Certificate = context_len(1) + certificate_list_len(3) + entries
let body_len = 1usize
.checked_add(3)?
.checked_add(certificate_list.len())?;
let body_len = 1usize.checked_add(3)?.checked_add(certificate_list.len())?;
let mut message = Vec::with_capacity(4 + body_len);
message.push(0x0b); // HandshakeType::certificate
@@ -537,6 +545,7 @@ async fn fetch_via_raw_tls(
sni: &str,
connect_timeout: Duration,
upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
scope: Option<&str>,
proxy_protocol: u8,
unix_sock: Option<&str>,
) -> Result<TlsFetchResult> {
@@ -549,7 +558,8 @@ async fn fetch_via_raw_tls(
sock = %sock_path,
"Raw TLS fetch using mask unix socket"
);
return fetch_via_raw_tls_stream(stream, sni, connect_timeout, proxy_protocol).await;
return fetch_via_raw_tls_stream(stream, sni, connect_timeout, proxy_protocol)
.await;
}
Ok(Err(e)) => {
warn!(
@@ -572,7 +582,7 @@ async fn fetch_via_raw_tls(
#[cfg(not(unix))]
let _ = unix_sock;
let stream = connect_tcp_with_upstream(host, port, connect_timeout, upstream).await?;
let stream = connect_tcp_with_upstream(host, port, connect_timeout, upstream, scope).await?;
fetch_via_raw_tls_stream(stream, sni, connect_timeout, proxy_protocol).await
}
@@ -616,12 +626,13 @@ where
.map(|slice| slice.to_vec())
.unwrap_or_default();
let cert_chain_der: Vec<Vec<u8>> = certs.iter().map(|c| c.as_ref().to_vec()).collect();
let cert_payload = encode_tls13_certificate_message(&cert_chain_der).map(|certificate_message| {
TlsCertPayload {
cert_chain_der: cert_chain_der.clone(),
certificate_message,
}
});
let cert_payload =
encode_tls13_certificate_message(&cert_chain_der).map(|certificate_message| {
TlsCertPayload {
cert_chain_der: cert_chain_der.clone(),
certificate_message,
}
});
let total_cert_len = cert_payload
.as_ref()
@@ -675,6 +686,7 @@ async fn fetch_via_rustls(
sni: &str,
connect_timeout: Duration,
upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
scope: Option<&str>,
proxy_protocol: u8,
unix_sock: Option<&str>,
) -> Result<TlsFetchResult> {
@@ -710,7 +722,7 @@ async fn fetch_via_rustls(
#[cfg(not(unix))]
let _ = unix_sock;
let stream = connect_tcp_with_upstream(host, port, connect_timeout, upstream).await?;
let stream = connect_tcp_with_upstream(host, port, connect_timeout, upstream, scope).await?;
fetch_via_rustls_stream(stream, host, sni, proxy_protocol).await
}
@@ -726,6 +738,7 @@ pub async fn fetch_real_tls(
sni: &str,
connect_timeout: Duration,
upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
scope: Option<&str>,
proxy_protocol: u8,
unix_sock: Option<&str>,
) -> Result<TlsFetchResult> {
@@ -735,6 +748,7 @@ pub async fn fetch_real_tls(
sni,
connect_timeout,
upstream.clone(),
scope,
proxy_protocol,
unix_sock,
)
@@ -753,6 +767,7 @@ pub async fn fetch_real_tls(
sni,
connect_timeout,
upstream,
scope,
proxy_protocol,
unix_sock,
)
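Every extension written by build_client_hello follows the same frame: a 2-byte type, a 2-byte length, then the body. The real code inlines this; a hypothetical helper makes the framing explicit:

fn push_extension(buf: &mut Vec<u8>, ext_type: u16, body: &[u8]) {
    buf.extend_from_slice(&ext_type.to_be_bytes());
    buf.extend_from_slice(&(body.len() as u16).to_be_bytes());
    buf.extend_from_slice(body);
}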


@@ -298,6 +298,7 @@ async fn run_update_cycle(
pool.update_runtime_reinit_policy(
cfg.general.hardswap,
cfg.general.me_pool_drain_ttl_secs,
cfg.general.me_instadrain,
cfg.general.me_pool_drain_threshold,
cfg.general.me_pool_drain_soft_evict_enabled,
cfg.general.me_pool_drain_soft_evict_grace_secs,
@@ -530,6 +531,7 @@ pub async fn me_config_updater(
pool.update_runtime_reinit_policy(
cfg.general.hardswap,
cfg.general.me_pool_drain_ttl_secs,
cfg.general.me_instadrain,
cfg.general.me_pool_drain_threshold,
cfg.general.me_pool_drain_soft_evict_enabled,
cfg.general.me_pool_drain_soft_evict_grace_secs,


@@ -10,8 +10,10 @@ use tracing::{debug, info, warn};
use crate::config::MeFloorMode;
use crate::crypto::SecureRandom;
use crate::network::IpFamily;
use crate::stats::MeWriterTeardownReason;
use super::MePool;
use super::pool::{MeFamilyRuntimeState, MeWriter};
const JITTER_FRAC_NUM: u64 = 2; // jitter up to 50% of backoff
#[allow(dead_code)]
@@ -30,6 +32,35 @@ const HEALTH_DRAIN_CLOSE_BUDGET_MIN: usize = 16;
const HEALTH_DRAIN_CLOSE_BUDGET_MAX: usize = 256;
const HEALTH_DRAIN_SOFT_EVICT_BUDGET_MIN: usize = 8;
const HEALTH_DRAIN_SOFT_EVICT_BUDGET_MAX: usize = 256;
const HEALTH_DRAIN_REAP_OPPORTUNISTIC_INTERVAL_SECS: u64 = 1;
const HEALTH_DRAIN_TIMEOUT_ENFORCER_INTERVAL_SECS: u64 = 1;
const FAMILY_SUPPRESS_FAIL_STREAK_THRESHOLD: u32 = 6;
const FAMILY_SUPPRESS_WINDOW_SECS: u64 = 120;
const FAMILY_RECOVER_PROBE_INTERVAL_SECS: u64 = 5;
const FAMILY_RECOVER_SUCCESS_STREAK_REQUIRED: u32 = 3;
#[derive(Debug, Clone)]
struct FamilyCircuitState {
state: MeFamilyRuntimeState,
state_since_at: Instant,
suppressed_until: Option<Instant>,
next_probe_at: Instant,
fail_streak: u32,
recover_success_streak: u32,
}
impl FamilyCircuitState {
fn new(now: Instant) -> Self {
Self {
state: MeFamilyRuntimeState::Healthy,
state_since_at: now,
suppressed_until: None,
next_probe_at: now,
fail_streak: 0,
recover_success_streak: 0,
}
}
}
#[derive(Debug, Clone)]
struct DcFloorPlanEntry {
@@ -69,6 +100,25 @@ pub async fn me_health_monitor(pool: Arc<MePool>, rng: Arc<SecureRandom>, _min_c
let mut floor_warn_next_allowed: HashMap<(i32, IpFamily), Instant> = HashMap::new();
let mut drain_warn_next_allowed: HashMap<u64, Instant> = HashMap::new();
let mut drain_soft_evict_next_allowed: HashMap<u64, Instant> = HashMap::new();
let mut family_v4_circuit = FamilyCircuitState::new(Instant::now());
let mut family_v6_circuit = FamilyCircuitState::new(Instant::now());
let init_epoch_secs = MePool::now_epoch_secs();
pool.set_family_runtime_state(
IpFamily::V4,
family_v4_circuit.state,
init_epoch_secs,
0,
family_v4_circuit.fail_streak,
family_v4_circuit.recover_success_streak,
);
pool.set_family_runtime_state(
IpFamily::V6,
family_v6_circuit.state,
init_epoch_secs,
0,
family_v6_circuit.fail_streak,
family_v6_circuit.recover_success_streak,
);
let mut degraded_interval = true;
loop {
let interval = if degraded_interval {
@@ -84,7 +134,9 @@ pub async fn me_health_monitor(pool: Arc<MePool>, rng: Arc<SecureRandom>, _min_c
&mut drain_soft_evict_next_allowed,
)
.await;
let v4_degraded = check_family(
let now = Instant::now();
let now_epoch_secs = MePool::now_epoch_secs();
let v4_degraded_raw = check_family(
IpFamily::V4,
&pool,
&rng,
@@ -99,29 +151,252 @@ pub async fn me_health_monitor(pool: Arc<MePool>, rng: Arc<SecureRandom>, _min_c
&mut adaptive_idle_since,
&mut adaptive_recover_until,
&mut floor_warn_next_allowed,
&mut drain_warn_next_allowed,
&mut drain_soft_evict_next_allowed,
)
.await;
let v6_degraded = check_family(
IpFamily::V6,
let v4_degraded = apply_family_circuit_result(
&pool,
&rng,
&mut backoff,
&mut next_attempt,
&mut inflight,
&mut outage_backoff,
&mut outage_next_attempt,
&mut single_endpoint_outage,
&mut shadow_rotate_deadline,
&mut idle_refresh_next_attempt,
&mut adaptive_idle_since,
&mut adaptive_recover_until,
&mut floor_warn_next_allowed,
)
.await;
IpFamily::V4,
&mut family_v4_circuit,
Some(v4_degraded_raw),
false,
now,
now_epoch_secs,
);
let v6_check_ran = should_run_family_check(&mut family_v6_circuit, now);
let v6_degraded_raw = if v6_check_ran {
check_family(
IpFamily::V6,
&pool,
&rng,
&mut backoff,
&mut next_attempt,
&mut inflight,
&mut outage_backoff,
&mut outage_next_attempt,
&mut single_endpoint_outage,
&mut shadow_rotate_deadline,
&mut idle_refresh_next_attempt,
&mut adaptive_idle_since,
&mut adaptive_recover_until,
&mut floor_warn_next_allowed,
&mut drain_warn_next_allowed,
&mut drain_soft_evict_next_allowed,
)
.await
} else {
false
};
let v6_degraded = apply_family_circuit_result(
&pool,
IpFamily::V6,
&mut family_v6_circuit,
if v6_check_ran {
Some(v6_degraded_raw)
} else {
None
},
true,
now,
now_epoch_secs,
);
degraded_interval = v4_degraded || v6_degraded;
}
}
pub async fn me_drain_timeout_enforcer(pool: Arc<MePool>) {
let mut drain_warn_next_allowed: HashMap<u64, Instant> = HashMap::new();
let mut drain_soft_evict_next_allowed: HashMap<u64, Instant> = HashMap::new();
loop {
tokio::time::sleep(Duration::from_secs(
HEALTH_DRAIN_TIMEOUT_ENFORCER_INTERVAL_SECS,
))
.await;
reap_draining_writers(
&pool,
&mut drain_warn_next_allowed,
&mut drain_soft_evict_next_allowed,
)
.await;
}
}
fn should_run_family_check(circuit: &mut FamilyCircuitState, now: Instant) -> bool {
match circuit.state {
MeFamilyRuntimeState::Suppressed => {
if now < circuit.next_probe_at {
return false;
}
circuit.next_probe_at =
now + Duration::from_secs(FAMILY_RECOVER_PROBE_INTERVAL_SECS);
true
}
_ => true,
}
}
fn apply_family_circuit_result(
pool: &Arc<MePool>,
family: IpFamily,
circuit: &mut FamilyCircuitState,
degraded: Option<bool>,
allow_suppress: bool,
now: Instant,
now_epoch_secs: u64,
) -> bool {
let Some(degraded) = degraded else {
// Preserve suppression state when probe tick is intentionally skipped.
return false;
};
let previous_state = circuit.state;
match circuit.state {
MeFamilyRuntimeState::Suppressed => {
if degraded {
circuit.fail_streak = circuit.fail_streak.saturating_add(1);
circuit.recover_success_streak = 0;
let until = now + Duration::from_secs(FAMILY_SUPPRESS_WINDOW_SECS);
circuit.suppressed_until = Some(until);
circuit.state_since_at = now;
warn!(
?family,
fail_streak = circuit.fail_streak,
suppress_secs = FAMILY_SUPPRESS_WINDOW_SECS,
"ME family remains suppressed due to ongoing failures"
);
} else {
circuit.fail_streak = 0;
circuit.recover_success_streak = 1;
circuit.state = MeFamilyRuntimeState::Recovering;
}
}
MeFamilyRuntimeState::Recovering => {
if degraded {
circuit.fail_streak = circuit.fail_streak.saturating_add(1);
if allow_suppress {
circuit.state = MeFamilyRuntimeState::Suppressed;
let until = now + Duration::from_secs(FAMILY_SUPPRESS_WINDOW_SECS);
circuit.suppressed_until = Some(until);
circuit.next_probe_at =
now + Duration::from_secs(FAMILY_RECOVER_PROBE_INTERVAL_SECS);
warn!(
?family,
fail_streak = circuit.fail_streak,
suppress_secs = FAMILY_SUPPRESS_WINDOW_SECS,
"ME family temporarily suppressed after repeated degradation"
);
} else {
circuit.state = MeFamilyRuntimeState::Degraded;
}
} else {
circuit.recover_success_streak = circuit.recover_success_streak.saturating_add(1);
if circuit.recover_success_streak >= FAMILY_RECOVER_SUCCESS_STREAK_REQUIRED {
circuit.fail_streak = 0;
circuit.recover_success_streak = 0;
circuit.suppressed_until = None;
circuit.state = MeFamilyRuntimeState::Healthy;
info!(
?family,
"ME family suppression lifted after stable recovery probes"
);
}
}
}
_ => {
if degraded {
circuit.fail_streak = circuit.fail_streak.saturating_add(1);
circuit.recover_success_streak = 0;
circuit.state = MeFamilyRuntimeState::Degraded;
if allow_suppress && circuit.fail_streak >= FAMILY_SUPPRESS_FAIL_STREAK_THRESHOLD {
circuit.state = MeFamilyRuntimeState::Suppressed;
let until = now + Duration::from_secs(FAMILY_SUPPRESS_WINDOW_SECS);
circuit.suppressed_until = Some(until);
circuit.next_probe_at =
now + Duration::from_secs(FAMILY_RECOVER_PROBE_INTERVAL_SECS);
warn!(
?family,
fail_streak = circuit.fail_streak,
suppress_secs = FAMILY_SUPPRESS_WINDOW_SECS,
"ME family temporarily suppressed after repeated degradation"
);
}
} else {
circuit.fail_streak = 0;
circuit.recover_success_streak = 0;
circuit.suppressed_until = None;
circuit.state = MeFamilyRuntimeState::Healthy;
}
}
}
if previous_state != circuit.state {
circuit.state_since_at = now;
}
let suppressed_until_epoch_secs = circuit
.suppressed_until
.and_then(|until| {
if until > now {
Some(
now_epoch_secs
.saturating_add(until.saturating_duration_since(now).as_secs()),
)
} else {
None
}
})
.unwrap_or(0);
let state_since_epoch_secs = if previous_state == circuit.state {
pool.family_runtime_state_since_epoch_secs(family)
} else {
now_epoch_secs
};
pool.set_family_runtime_state(
family,
circuit.state,
state_since_epoch_secs,
suppressed_until_epoch_secs,
circuit.fail_streak,
circuit.recover_success_streak,
);
!matches!(circuit.state, MeFamilyRuntimeState::Suppressed) && degraded
}
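Stripped of bookkeeping, apply_family_circuit_result is a four-state circuit breaker; a condensed sketch using the same thresholds (six consecutive failures to suppress, three clean probes to recover), omitting the allow_suppress and epoch plumbing:

#[derive(Clone, Copy, PartialEq, Debug)]
enum Circuit {
    Healthy,
    Degraded,
    Suppressed,
    Recovering,
}

fn step(state: Circuit, fails: &mut u32, oks: &mut u32, degraded: bool) -> Circuit {
    match (state, degraded) {
        (Circuit::Suppressed, false) => {
            *fails = 0;
            *oks = 1;
            Circuit::Recovering
        }
        (Circuit::Recovering, false) => {
            *oks += 1;
            if *oks >= 3 { *oks = 0; Circuit::Healthy } else { Circuit::Recovering }
        }
        (_, false) => {
            *fails = 0;
            *oks = 0;
            Circuit::Healthy
        }
        (Circuit::Suppressed, true) | (Circuit::Recovering, true) => {
            *fails += 1;
            *oks = 0;
            Circuit::Suppressed
        }
        (_, true) => {
            *fails += 1;
            if *fails >= 6 { Circuit::Suppressed } else { Circuit::Degraded }
        }
    }
}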
fn draining_writer_timeout_expired(
pool: &MePool,
writer: &MeWriter,
now_epoch_secs: u64,
drain_ttl_secs: u64,
) -> bool {
if pool
.me_instadrain
.load(std::sync::atomic::Ordering::Relaxed)
{
return true;
}
let deadline_epoch_secs = writer
.drain_deadline_epoch_secs
.load(std::sync::atomic::Ordering::Relaxed);
if deadline_epoch_secs != 0 {
return now_epoch_secs >= deadline_epoch_secs;
}
if drain_ttl_secs == 0 {
return false;
}
let drain_started_at_epoch_secs = writer
.draining_started_at_epoch_secs
.load(std::sync::atomic::Ordering::Relaxed);
if drain_started_at_epoch_secs == 0 {
return false;
}
now_epoch_secs.saturating_sub(drain_started_at_epoch_secs) > drain_ttl_secs
}
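Read top to bottom, the predicate establishes a strict precedence: me_instadrain expires every draining writer immediately; otherwise a non-zero drain_deadline_epoch_secs decides; otherwise a zero TTL means the writer never expires by age; otherwise it expires once its drain age exceeds the TTL.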
pub(super) async fn reap_draining_writers(
pool: &Arc<MePool>,
warn_next_allowed: &mut HashMap<u64, Instant>,
@@ -137,11 +412,16 @@ pub(super) async fn reap_draining_writers(
let activity = pool.registry.writer_activity_snapshot().await;
let mut draining_writers = Vec::new();
let mut empty_writer_ids = Vec::<u64>::new();
let mut timeout_expired_writer_ids = Vec::<u64>::new();
let mut force_close_writer_ids = Vec::<u64>::new();
for writer in writers {
if !writer.draining.load(std::sync::atomic::Ordering::Relaxed) {
continue;
}
if draining_writer_timeout_expired(pool, &writer, now_epoch_secs, drain_ttl_secs) {
timeout_expired_writer_ids.push(writer.id);
continue;
}
if activity
.bound_clients_by_writer
.get(&writer.id)
@@ -207,14 +487,6 @@ pub(super) async fn reap_draining_writers(
"ME draining writer remains non-empty past drain TTL"
);
}
let deadline_epoch_secs = writer
.drain_deadline_epoch_secs
.load(std::sync::atomic::Ordering::Relaxed);
if deadline_epoch_secs != 0 && now_epoch_secs >= deadline_epoch_secs {
warn!(writer_id = writer.id, "Drain timeout, force-closing");
force_close_writer_ids.push(writer.id);
active_draining_writer_ids.remove(&writer.id);
}
}
warn_next_allowed.retain(|writer_id, _| active_draining_writer_ids.contains(writer_id));
@@ -299,11 +571,22 @@ pub(super) async fn reap_draining_writers(
}
}
let close_budget = health_drain_close_budget();
let mut closed_writer_ids = HashSet::<u64>::new();
for writer_id in timeout_expired_writer_ids {
if !closed_writer_ids.insert(writer_id) {
continue;
}
pool.stats.increment_pool_force_close_total();
pool.remove_writer_and_close_clients(writer_id, MeWriterTeardownReason::ReapTimeoutExpired)
.await;
pool.stats
.increment_me_draining_writers_reap_progress_total();
}
let requested_force_close = force_close_writer_ids.len();
let requested_empty_close = empty_writer_ids.len();
let requested_close_total = requested_force_close.saturating_add(requested_empty_close);
let mut closed_writer_ids = HashSet::<u64>::new();
let close_budget = health_drain_close_budget();
let mut closed_total = 0usize;
for writer_id in force_close_writer_ids {
if closed_total >= close_budget {
@@ -313,7 +596,10 @@ pub(super) async fn reap_draining_writers(
continue;
}
pool.stats.increment_pool_force_close_total();
pool.remove_writer_and_close_clients(writer_id).await;
pool.remove_writer_and_close_clients(writer_id, MeWriterTeardownReason::ReapThresholdForce)
.await;
pool.stats
.increment_me_draining_writers_reap_progress_total();
closed_total = closed_total.saturating_add(1);
}
for writer_id in empty_writer_ids {
@@ -323,7 +609,10 @@ pub(super) async fn reap_draining_writers(
if !closed_writer_ids.insert(writer_id) {
continue;
}
pool.remove_writer_and_close_clients(writer_id).await;
pool.remove_writer_and_close_clients(writer_id, MeWriterTeardownReason::ReapEmpty)
.await;
pool.stats
.increment_me_draining_writers_reap_progress_total();
closed_total = closed_total.saturating_add(1);
}
@@ -392,6 +681,8 @@ async fn check_family(
adaptive_idle_since: &mut HashMap<(i32, IpFamily), Instant>,
adaptive_recover_until: &mut HashMap<(i32, IpFamily), Instant>,
floor_warn_next_allowed: &mut HashMap<(i32, IpFamily), Instant>,
drain_warn_next_allowed: &mut HashMap<u64, Instant>,
drain_soft_evict_next_allowed: &mut HashMap<u64, Instant>,
) -> bool {
let enabled = match family {
IpFamily::V4 => pool.decision.ipv4_me,
@@ -472,8 +763,15 @@ async fn check_family(
floor_plan.active_writers_current,
floor_plan.warm_writers_current,
);
let mut next_drain_reap_at = Instant::now();
for (dc, endpoints) in dc_endpoints {
if Instant::now() >= next_drain_reap_at {
reap_draining_writers(pool, drain_warn_next_allowed, drain_soft_evict_next_allowed)
.await;
next_drain_reap_at = Instant::now()
+ Duration::from_secs(HEALTH_DRAIN_REAP_OPPORTUNISTIC_INTERVAL_SECS);
}
if endpoints.is_empty() {
continue;
}
@@ -617,6 +915,12 @@ async fn check_family(
let mut restored = 0usize;
for _ in 0..missing {
if Instant::now() >= next_drain_reap_at {
reap_draining_writers(pool, drain_warn_next_allowed, drain_soft_evict_next_allowed)
.await;
next_drain_reap_at = Instant::now()
+ Duration::from_secs(HEALTH_DRAIN_REAP_OPPORTUNISTIC_INTERVAL_SECS);
}
if reconnect_budget == 0 {
break;
}
@@ -1468,6 +1772,187 @@ async fn maybe_rotate_single_endpoint_shadow(
);
}
/// Last-resort safety net for draining writers stuck past their deadline.
///
/// Runs every `TICK_SECS` and force-closes any draining writer whose
/// `drain_deadline_epoch_secs` has been exceeded by more than a threshold.
///
/// Two thresholds:
/// - `SOFT_THRESHOLD_SECS` (60s): writers with no bound clients
/// - `HARD_THRESHOLD_SECS` (300s): writers WITH bound clients (unconditional)
///
/// Intentionally kept trivial and independent of pool config to minimise
/// the probability of panicking itself. It uses `SystemTime` directly as a
/// fallback clock source, and applies a timeout to every lock acquisition
/// and writer removal so that one stuck writer cannot block the rest.
pub async fn me_zombie_writer_watchdog(pool: Arc<MePool>) {
use std::time::{SystemTime, UNIX_EPOCH};
const TICK_SECS: u64 = 30;
const SOFT_THRESHOLD_SECS: u64 = 60;
const HARD_THRESHOLD_SECS: u64 = 300;
const LOCK_TIMEOUT_SECS: u64 = 5;
const REMOVE_TIMEOUT_SECS: u64 = 10;
const HARD_DETACH_TIMEOUT_STREAK: u8 = 3;
let mut removal_timeout_streak = HashMap::<u64, u8>::new();
loop {
tokio::time::sleep(Duration::from_secs(TICK_SECS)).await;
let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
Ok(d) => d.as_secs(),
Err(_) => continue,
};
// Phase 1: collect zombie IDs under a short read-lock with timeout.
let zombie_ids_with_meta: Vec<(u64, bool)> = {
let Ok(ws) = tokio::time::timeout(
Duration::from_secs(LOCK_TIMEOUT_SECS),
pool.writers.read(),
)
.await
else {
warn!("zombie_watchdog: writers read-lock timeout, skipping tick");
continue;
};
ws.iter()
.filter(|w| w.draining.load(std::sync::atomic::Ordering::Relaxed))
.filter_map(|w| {
let deadline = w
.drain_deadline_epoch_secs
.load(std::sync::atomic::Ordering::Relaxed);
if deadline == 0 {
return None;
}
let overdue = now.saturating_sub(deadline);
if overdue == 0 {
return None;
}
let started = w
.draining_started_at_epoch_secs
.load(std::sync::atomic::Ordering::Relaxed);
let drain_age = now.saturating_sub(started);
if drain_age > HARD_THRESHOLD_SECS {
return Some((w.id, true));
}
if overdue > SOFT_THRESHOLD_SECS {
return Some((w.id, false));
}
None
})
.collect()
};
// read lock released here
if zombie_ids_with_meta.is_empty() {
removal_timeout_streak.clear();
continue;
}
let mut active_zombie_ids = HashSet::<u64>::with_capacity(zombie_ids_with_meta.len());
for (writer_id, _) in &zombie_ids_with_meta {
active_zombie_ids.insert(*writer_id);
}
removal_timeout_streak.retain(|writer_id, _| active_zombie_ids.contains(writer_id));
warn!(
zombie_count = zombie_ids_with_meta.len(),
soft_threshold_secs = SOFT_THRESHOLD_SECS,
hard_threshold_secs = HARD_THRESHOLD_SECS,
"Zombie draining writers detected by watchdog, force-closing"
);
// Phase 2: remove each writer individually with a timeout.
// One stuck removal cannot block the rest.
for (writer_id, had_clients) in &zombie_ids_with_meta {
let result = tokio::time::timeout(
Duration::from_secs(REMOVE_TIMEOUT_SECS),
pool.remove_writer_and_close_clients(
*writer_id,
MeWriterTeardownReason::WatchdogStuckDraining,
),
)
.await;
match result {
Ok(true) => {
removal_timeout_streak.remove(writer_id);
pool.stats.increment_pool_force_close_total();
pool.stats
.increment_me_draining_writers_reap_progress_total();
info!(
writer_id,
had_clients,
"Zombie writer removed by watchdog"
);
}
Ok(false) => {
removal_timeout_streak.remove(writer_id);
debug!(
writer_id,
had_clients,
"Zombie writer watchdog removal became no-op"
);
}
Err(_) => {
pool.stats.increment_me_writer_teardown_timeout_total();
let streak = removal_timeout_streak
.entry(*writer_id)
.and_modify(|value| *value = value.saturating_add(1))
.or_insert(1);
warn!(
writer_id,
had_clients,
timeout_streak = *streak,
"Zombie writer removal timed out"
);
if *streak < HARD_DETACH_TIMEOUT_STREAK {
continue;
}
pool.stats.increment_me_writer_teardown_escalation_total();
let hard_detach = tokio::time::timeout(
Duration::from_secs(REMOVE_TIMEOUT_SECS),
pool.remove_draining_writer_hard_detach(
*writer_id,
MeWriterTeardownReason::WatchdogStuckDraining,
),
)
.await;
match hard_detach {
Ok(true) => {
removal_timeout_streak.remove(writer_id);
pool.stats.increment_pool_force_close_total();
pool.stats
.increment_me_draining_writers_reap_progress_total();
info!(
writer_id,
had_clients,
"Zombie writer hard-detached after repeated timeouts"
);
}
Ok(false) => {
removal_timeout_streak.remove(writer_id);
debug!(
writer_id,
had_clients,
"Zombie hard-detach skipped (writer already gone or no longer draining)"
);
}
Err(_) => {
pool.stats.increment_me_writer_teardown_timeout_total();
warn!(
writer_id,
had_clients,
"Zombie hard-detach timed out, will retry next tick"
);
}
}
}
}
}
}
}
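// Hedged sketch of the phase-1 classification above, assuming the same
// SOFT/HARD thresholds; `ZombieClass` and `classify_zombie` are hypothetical
// names for illustration only, not part of the pool API.
#[derive(Debug, PartialEq, Eq)]
enum ZombieClass {
NotZombie,
Soft,
Hard,
}
fn classify_zombie(
now_epoch_secs: u64,
drain_deadline_epoch_secs: u64,
draining_started_at_epoch_secs: u64,
) -> ZombieClass {
const SOFT_THRESHOLD_SECS: u64 = 60;
const HARD_THRESHOLD_SECS: u64 = 300;
// A zero deadline means draining has not armed a deadline yet.
if drain_deadline_epoch_secs == 0 {
return ZombieClass::NotZombie;
}
let overdue = now_epoch_secs.saturating_sub(drain_deadline_epoch_secs);
if overdue == 0 {
return ZombieClass::NotZombie;
}
// Total drain age trumps the overdue margin: hard-close unconditionally.
let drain_age = now_epoch_secs.saturating_sub(draining_started_at_epoch_secs);
if drain_age > HARD_THRESHOLD_SECS {
return ZombieClass::Hard;
}
if overdue > SOFT_THRESHOLD_SECS {
return ZombieClass::Soft;
}
ZombieClass::NotZombie
}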
#[cfg(test)]
mod tests {
use std::collections::HashMap;
@@ -1479,13 +1964,19 @@ mod tests {
use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;
use super::{
FamilyCircuitState, apply_family_circuit_result, reap_draining_writers,
should_run_family_check,
};
use crate::config::{GeneralConfig, MeRouteNoWriterMode, MeSocksKdfPolicy, MeWriterPickMode};
use crate::crypto::SecureRandom;
use crate::network::IpFamily;
use crate::network::probe::NetworkDecision;
use crate::stats::Stats;
use crate::transport::middle_proxy::codec::WriterCommand;
use crate::transport::middle_proxy::pool::{
MeFamilyRuntimeState, MePool, MeWriter, WriterContour,
};
use crate::transport::middle_proxy::registry::ConnMeta;
async fn make_pool(me_pool_drain_threshold: u64) -> Arc<MePool> {
@@ -1544,6 +2035,7 @@ mod tests {
general.me_adaptive_floor_max_warm_writers_global,
general.hardswap,
general.me_pool_drain_ttl_secs,
general.me_instadrain,
general.me_pool_drain_threshold,
general.me_pool_drain_soft_evict_enabled,
general.me_pool_drain_soft_evict_grace_secs,
@@ -1574,6 +2066,8 @@ mod tests {
general.me_warn_rate_limit_ms,
MeRouteNoWriterMode::default(),
general.me_route_no_writer_wait_ms,
general.me_route_hybrid_max_wait_ms,
general.me_route_blocking_send_timeout_ms,
general.me_route_inline_recovery_attempts,
general.me_route_inline_recovery_wait_ms,
)
@@ -1660,4 +2154,47 @@ mod tests {
assert_eq!(pool.registry.get_writer(conn_b).await.unwrap().writer_id, 20);
assert_eq!(pool.registry.get_writer(conn_c).await.unwrap().writer_id, 30);
}
#[tokio::test]
async fn suppressed_family_probe_skip_preserves_suppressed_state() {
let pool = make_pool(0).await;
let now = Instant::now();
let now_epoch_secs = MePool::now_epoch_secs();
let suppressed_until_epoch_secs = now_epoch_secs.saturating_add(60);
pool.set_family_runtime_state(
IpFamily::V6,
MeFamilyRuntimeState::Suppressed,
now_epoch_secs,
suppressed_until_epoch_secs,
7,
0,
);
let mut circuit = FamilyCircuitState {
state: MeFamilyRuntimeState::Suppressed,
state_since_at: now,
suppressed_until: Some(now + Duration::from_secs(60)),
next_probe_at: now + Duration::from_secs(5),
fail_streak: 7,
recover_success_streak: 0,
};
assert!(!should_run_family_check(&mut circuit, now));
assert!(!apply_family_circuit_result(
&pool,
IpFamily::V6,
&mut circuit,
None,
true,
now,
now_epoch_secs,
));
assert_eq!(circuit.state, MeFamilyRuntimeState::Suppressed);
assert_eq!(circuit.fail_streak, 7);
assert_eq!(circuit.recover_success_streak, 0);
assert_eq!(
pool.family_runtime_state(IpFamily::V6),
MeFamilyRuntimeState::Suppressed,
);
}
}

View File

@@ -81,6 +81,7 @@ async fn make_pool(
general.me_adaptive_floor_max_warm_writers_global,
general.hardswap,
general.me_pool_drain_ttl_secs,
general.me_instadrain,
general.me_pool_drain_threshold,
general.me_pool_drain_soft_evict_enabled,
general.me_pool_drain_soft_evict_grace_secs,
@@ -111,6 +112,8 @@ async fn make_pool(
general.me_warn_rate_limit_ms,
MeRouteNoWriterMode::default(),
general.me_route_no_writer_wait_ms,
general.me_route_hybrid_max_wait_ms,
general.me_route_blocking_send_timeout_ms,
general.me_route_inline_recovery_attempts,
general.me_route_inline_recovery_wait_ms,
);
@@ -211,7 +214,7 @@ async fn reap_draining_writers_respects_threshold_across_multiple_overflow_cycle
insert_draining_writer(
&pool,
writer_id,
now_epoch_secs.saturating_sub(20),
1,
0,
)
@@ -228,7 +231,7 @@ async fn reap_draining_writers_respects_threshold_across_multiple_overflow_cycle
}
assert_eq!(writer_count(&pool).await, threshold as usize);
assert_eq!(sorted_writer_ids(&pool).await, vec![1, 2, 3]);
}
#[tokio::test]
@@ -313,7 +316,12 @@ async fn reap_draining_writers_maintains_warn_state_subset_property_under_bulk_c
let ids = sorted_writer_ids(&pool).await;
for writer_id in ids.into_iter().take(3) {
let _ = pool
.remove_writer_and_close_clients(
writer_id,
crate::stats::MeWriterTeardownReason::ReapEmpty,
)
.await;
}
reap_draining_writers(&pool, &mut warn_next_allowed, &mut soft_evict_next_allowed).await;

View File

@@ -80,6 +80,7 @@ async fn make_pool(
general.me_adaptive_floor_max_warm_writers_global,
general.hardswap,
general.me_pool_drain_ttl_secs,
general.me_instadrain,
general.me_pool_drain_threshold,
general.me_pool_drain_soft_evict_enabled,
general.me_pool_drain_soft_evict_grace_secs,
@@ -110,6 +111,8 @@ async fn make_pool(
general.me_warn_rate_limit_ms,
MeRouteNoWriterMode::default(),
general.me_route_no_writer_wait_ms,
general.me_route_hybrid_max_wait_ms,
general.me_route_blocking_send_timeout_ms,
general.me_route_inline_recovery_attempts,
general.me_route_inline_recovery_wait_ms,
);

View File

@@ -4,6 +4,7 @@ use std::sync::Arc;
use std::sync::atomic::{AtomicBool, AtomicU8, AtomicU32, AtomicU64, Ordering};
use std::time::{Duration, Instant};
use bytes::Bytes;
use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;
@@ -11,7 +12,9 @@ use super::codec::WriterCommand;
use super::health::{health_drain_close_budget, reap_draining_writers};
use super::pool::{MePool, MeWriter, WriterContour};
use super::registry::ConnMeta;
use crate::config::{
GeneralConfig, MeBindStaleMode, MeRouteNoWriterMode, MeSocksKdfPolicy, MeWriterPickMode,
};
use crate::crypto::SecureRandom;
use crate::network::probe::NetworkDecision;
use crate::stats::Stats;
@@ -73,6 +76,7 @@ async fn make_pool(me_pool_drain_threshold: u64) -> Arc<MePool> {
general.me_adaptive_floor_max_warm_writers_global,
general.hardswap,
general.me_pool_drain_ttl_secs,
general.me_instadrain,
general.me_pool_drain_threshold,
general.me_pool_drain_soft_evict_enabled,
general.me_pool_drain_soft_evict_grace_secs,
@@ -103,6 +107,8 @@ async fn make_pool(me_pool_drain_threshold: u64) -> Arc<MePool> {
general.me_warn_rate_limit_ms,
MeRouteNoWriterMode::default(),
general.me_route_no_writer_wait_ms,
general.me_route_hybrid_max_wait_ms,
general.me_route_blocking_send_timeout_ms,
general.me_route_inline_recovery_attempts,
general.me_route_inline_recovery_wait_ms,
)
@@ -177,15 +183,23 @@ async fn current_writer_ids(pool: &Arc<MePool>) -> Vec<u64> {
async fn reap_draining_writers_drops_warn_state_for_removed_writer() {
let pool = make_pool(128).await;
let now_epoch_secs = MePool::now_epoch_secs();
let conn_ids = insert_draining_writer(
&pool,
7,
now_epoch_secs.saturating_sub(180),
1,
now_epoch_secs.saturating_add(3_600),
)
.await;
let mut warn_next_allowed = HashMap::new();
let mut soft_evict_next_allowed = HashMap::new();
reap_draining_writers(&pool, &mut warn_next_allowed, &mut soft_evict_next_allowed).await;
assert!(warn_next_allowed.contains_key(&7));
let _ = pool
.remove_writer_and_close_clients(7, crate::stats::MeWriterTeardownReason::ReapEmpty)
.await;
assert!(pool.registry.get_writer(conn_ids[0]).await.is_none());
reap_draining_writers(&pool, &mut warn_next_allowed, &mut soft_evict_next_allowed).await;
@@ -207,6 +221,89 @@ async fn reap_draining_writers_removes_empty_draining_writers() {
assert_eq!(current_writer_ids(&pool).await, vec![3]);
}
#[tokio::test]
async fn reap_draining_writers_does_not_block_on_stuck_writer_close_signal() {
let pool = make_pool(128).await;
let now_epoch_secs = MePool::now_epoch_secs();
let (blocked_tx, blocked_rx) = mpsc::channel::<WriterCommand>(1);
assert!(
blocked_tx
.try_send(WriterCommand::Data(Bytes::from_static(b"stuck")))
.is_ok()
);
let blocked_rx_guard = tokio::spawn(async move {
let _hold_rx = blocked_rx;
tokio::time::sleep(Duration::from_secs(30)).await;
});
let blocked_writer_id = 90u64;
let blocked_writer = MeWriter {
id: blocked_writer_id,
addr: SocketAddr::new(
IpAddr::V4(Ipv4Addr::LOCALHOST),
4500 + blocked_writer_id as u16,
),
source_ip: IpAddr::V4(Ipv4Addr::LOCALHOST),
writer_dc: 2,
generation: 1,
contour: Arc::new(AtomicU8::new(WriterContour::Draining.as_u8())),
created_at: Instant::now() - Duration::from_secs(blocked_writer_id),
tx: blocked_tx.clone(),
cancel: CancellationToken::new(),
degraded: Arc::new(AtomicBool::new(false)),
rtt_ema_ms_x10: Arc::new(AtomicU32::new(0)),
draining: Arc::new(AtomicBool::new(true)),
draining_started_at_epoch_secs: Arc::new(AtomicU64::new(
now_epoch_secs.saturating_sub(120),
)),
drain_deadline_epoch_secs: Arc::new(AtomicU64::new(0)),
allow_drain_fallback: Arc::new(AtomicBool::new(false)),
};
pool.writers.write().await.push(blocked_writer);
pool.registry
.register_writer(blocked_writer_id, blocked_tx)
.await;
pool.conn_count.fetch_add(1, Ordering::Relaxed);
insert_draining_writer(&pool, 91, now_epoch_secs.saturating_sub(110), 0, 0).await;
let mut warn_next_allowed = HashMap::new();
let mut soft_evict_next_allowed = HashMap::new();
let reap_res = tokio::time::timeout(
Duration::from_millis(500),
reap_draining_writers(&pool, &mut warn_next_allowed, &mut soft_evict_next_allowed),
)
.await;
blocked_rx_guard.abort();
assert!(reap_res.is_ok(), "reap should not block on close signal");
assert!(current_writer_ids(&pool).await.is_empty());
assert_eq!(pool.stats.get_me_writer_close_signal_drop_total(), 2);
assert_eq!(pool.stats.get_me_writer_close_signal_channel_full_total(), 1);
assert_eq!(pool.stats.get_me_draining_writers_reap_progress_total(), 2);
let activity = pool.registry.writer_activity_snapshot().await;
assert!(!activity.bound_clients_by_writer.contains_key(&blocked_writer_id));
assert!(!activity.bound_clients_by_writer.contains_key(&91));
let (probe_conn_id, _rx) = pool.registry.register().await;
assert!(
!pool.registry
.bind_writer(
probe_conn_id,
blocked_writer_id,
ConnMeta {
target_dc: 2,
client_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 6400),
our_addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 443),
proto_flags: 0,
},
)
.await
);
let _ = pool.registry.unregister(probe_conn_id).await;
}
#[tokio::test]
async fn reap_draining_writers_overflow_closes_oldest_non_empty_writers() {
let pool = make_pool(2).await;
@@ -245,17 +342,17 @@ async fn reap_draining_writers_deadline_force_close_applies_under_threshold() {
#[tokio::test]
async fn reap_draining_writers_limits_closes_per_health_tick() {
let pool = make_pool(1).await;
let now_epoch_secs = MePool::now_epoch_secs();
let close_budget = health_drain_close_budget();
let writer_total = close_budget.saturating_add(20);
for writer_id in 1..=writer_total as u64 {
insert_draining_writer(
&pool,
writer_id,
now_epoch_secs.saturating_sub(20),
1,
0,
)
.await;
}
@@ -278,8 +375,8 @@ async fn reap_draining_writers_backlog_drains_across_ticks() {
&pool,
writer_id,
now_epoch_secs.saturating_sub(20),
0,
0,
)
.await;
}
@@ -307,7 +404,7 @@ async fn reap_draining_writers_threshold_backlog_converges_to_threshold() {
insert_draining_writer(
&pool,
writer_id,
now_epoch_secs.saturating_sub(20),
1,
0,
)
@@ -343,27 +440,27 @@ async fn reap_draining_writers_threshold_zero_preserves_non_expired_non_empty_wr
#[tokio::test]
async fn reap_draining_writers_prioritizes_force_close_before_empty_cleanup() {
let pool = make_pool(1).await;
let now_epoch_secs = MePool::now_epoch_secs();
let close_budget = health_drain_close_budget();
for writer_id in 1..=close_budget.saturating_add(1) as u64 {
insert_draining_writer(
&pool,
writer_id,
now_epoch_secs.saturating_sub(20),
1,
0,
)
.await;
}
let empty_writer_id = close_budget.saturating_add(2) as u64;
insert_draining_writer(&pool, empty_writer_id, now_epoch_secs.saturating_sub(20), 0, 0).await;
let mut warn_next_allowed = HashMap::new();
let mut soft_evict_next_allowed = HashMap::new();
reap_draining_writers(&pool, &mut warn_next_allowed, &mut soft_evict_next_allowed).await;
assert_eq!(current_writer_ids(&pool).await, vec![1, empty_writer_id]);
}
#[tokio::test]
@@ -432,7 +529,12 @@ async fn reap_draining_writers_warn_state_never_exceeds_live_draining_population
let existing_writer_ids = current_writer_ids(&pool).await;
for writer_id in existing_writer_ids.into_iter().take(4) {
let _ = pool
.remove_writer_and_close_clients(
writer_id,
crate::stats::MeWriterTeardownReason::ReapEmpty,
)
.await;
}
reap_draining_writers(&pool, &mut warn_next_allowed, &mut soft_evict_next_allowed).await;
assert!(warn_next_allowed.len() <= pool.writers.read().await.len());
@@ -485,7 +587,14 @@ async fn reap_draining_writers_soft_evicts_stuck_writer_with_per_writer_cap() {
.store(1, Ordering::Relaxed);
let now_epoch_secs = MePool::now_epoch_secs();
insert_draining_writer(
&pool,
77,
now_epoch_secs.saturating_sub(240),
3,
now_epoch_secs.saturating_add(3_600),
)
.await;
let mut warn_next_allowed = HashMap::new();
let mut soft_evict_next_allowed = HashMap::new();
@@ -509,7 +618,14 @@ async fn reap_draining_writers_soft_evict_respects_cooldown_per_writer() {
.store(60_000, Ordering::Relaxed);
let now_epoch_secs = MePool::now_epoch_secs();
insert_draining_writer(
&pool,
88,
now_epoch_secs.saturating_sub(240),
3,
now_epoch_secs.saturating_add(3_600),
)
.await;
let mut warn_next_allowed = HashMap::new();
let mut soft_evict_next_allowed = HashMap::new();
@@ -522,12 +638,40 @@ async fn reap_draining_writers_soft_evict_respects_cooldown_per_writer() {
assert_eq!(pool.stats.get_pool_drain_soft_evict_writer_total(), 1);
}
#[tokio::test]
async fn reap_draining_writers_instadrain_removes_non_expired_writers_immediately() {
let pool = make_pool(0).await;
pool.me_instadrain.store(true, Ordering::Relaxed);
let now_epoch_secs = MePool::now_epoch_secs();
insert_draining_writer(&pool, 101, now_epoch_secs.saturating_sub(5), 1, 0).await;
insert_draining_writer(&pool, 102, now_epoch_secs.saturating_sub(4), 1, 0).await;
let mut warn_next_allowed = HashMap::new();
let mut soft_evict_next_allowed = HashMap::new();
reap_draining_writers(&pool, &mut warn_next_allowed, &mut soft_evict_next_allowed).await;
assert!(current_writer_ids(&pool).await.is_empty());
}
#[test]
fn general_config_default_drain_threshold_remains_enabled() {
assert_eq!(GeneralConfig::default().me_pool_drain_threshold, 32);
assert!(GeneralConfig::default().me_pool_drain_soft_evict_enabled);
assert_eq!(
GeneralConfig::default().me_pool_drain_soft_evict_grace_secs,
10
);
assert_eq!(
GeneralConfig::default().me_pool_drain_soft_evict_per_writer,
2
);
assert_eq!(
GeneralConfig::default().me_pool_drain_soft_evict_budget_per_core,
16
);
assert_eq!(
GeneralConfig::default().me_pool_drain_soft_evict_cooldown_ms,
1000
);
assert_eq!(GeneralConfig::default().me_bind_stale_mode, MeBindStaleMode::Never);
}

View File

@@ -30,7 +30,7 @@ mod health_adversarial_tests;
use bytes::Bytes;
pub use health::{me_drain_timeout_enforcer, me_health_monitor, me_zombie_writer_watchdog};
#[allow(unused_imports)]
pub use ping::{run_me_ping, format_sample_line, format_me_route, MePingReport, MePingSample, MePingFamily};
pub use pool::MePool;

View File

@@ -7,6 +7,7 @@ use tokio::net::UdpSocket;
use crate::config::{UpstreamConfig, UpstreamType};
use crate::crypto::SecureRandom;
use crate::error::ProxyError;
use crate::transport::shadowsocks::sanitize_shadowsocks_url;
use crate::transport::{UpstreamEgressInfo, UpstreamRouteKind};
use super::MePool;
@@ -40,7 +41,11 @@ pub fn format_sample_line(sample: &MePingSample) -> String {
let sign = if sample.dc >= 0 { "+" } else { "-" };
let addr = format!("{}:{}", sample.addr.ip(), sample.addr.port());
match (
sample.connect_ms,
sample.handshake_ms.as_ref(),
sample.error.as_ref(),
) {
(Some(conn), Some(hs), None) => format!(
" {sign} {addr}\tPing: {:.0} ms / RPC: {:.0} ms / OK",
conn, hs
@@ -121,6 +126,7 @@ fn route_from_egress(egress: Option<UpstreamEgressInfo>) -> Option<String> {
None => route,
})
}
UpstreamRouteKind::Shadowsocks => Some("shadowsocks".to_string()),
}
}
@@ -232,6 +238,9 @@ pub async fn format_me_route(
}
UpstreamType::Socks4 { address, .. } => format!("socks4://{address}"),
UpstreamType::Socks5 { address, .. } => format!("socks5://{address}"),
UpstreamType::Shadowsocks { url, .. } => sanitize_shadowsocks_url(url)
.map(|address| format!("shadowsocks://{address}"))
.unwrap_or_else(|_| "shadowsocks://invalid".to_string()),
};
}
@@ -254,6 +263,12 @@ pub async fn format_me_route(
if has_socks5 {
kinds.push("socks5");
}
if enabled_upstreams
.iter()
.any(|u| matches!(u.upstream_type, UpstreamType::Shadowsocks { .. }))
{
kinds.push("shadowsocks");
}
format!("mixed upstreams ({})", kinds.join(", "))
}
@@ -335,7 +350,10 @@ pub async fn run_me_ping(pool: &Arc<MePool>, rng: &SecureRandom) -> Vec<MePingRe
Ok((stream, conn_rtt, upstream_egress)) => {
connect_ms = Some(conn_rtt);
route = route_from_egress(upstream_egress);
match pool
.handshake_only(stream, addr, upstream_egress, rng)
.await
{
Ok(hs) => {
handshake_ms = Some(hs.handshake_ms);
// drop halves to close

View File

@@ -18,6 +18,8 @@ use crate::transport::UpstreamManager;
use super::ConnRegistry;
use super::codec::WriterCommand;
const ME_FORCE_CLOSE_SAFETY_FALLBACK_SECS: u64 = 300;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub(super) struct RefillDcKey {
pub dc: i32,
@@ -72,6 +74,64 @@ impl WriterContour {
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub(crate) enum MeFamilyRuntimeState {
Healthy = 0,
Degraded = 1,
Suppressed = 2,
Recovering = 3,
}
impl MeFamilyRuntimeState {
pub(crate) fn from_u8(value: u8) -> Self {
match value {
1 => Self::Degraded,
2 => Self::Suppressed,
3 => Self::Recovering,
_ => Self::Healthy,
}
}
pub(crate) fn as_str(self) -> &'static str {
match self {
Self::Healthy => "healthy",
Self::Degraded => "degraded",
Self::Suppressed => "suppressed",
Self::Recovering => "recovering",
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub(crate) enum MeDrainGateReason {
Open = 0,
CoverageQuorum = 1,
Redundancy = 2,
SuppressionActive = 3,
}
impl MeDrainGateReason {
pub(crate) fn from_u8(value: u8) -> Self {
match value {
1 => Self::CoverageQuorum,
2 => Self::Redundancy,
3 => Self::SuppressionActive,
_ => Self::Open,
}
}
pub(crate) fn as_str(self) -> &'static str {
match self {
Self::Open => "open",
Self::CoverageQuorum => "coverage_quorum",
Self::Redundancy => "redundancy",
Self::SuppressionActive => "suppression_active",
}
}
}
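// Hedged sketch of the u8 round-trip contract both enums rely on; the test
// module name is illustrative. Unknown discriminants intentionally decode to
// the safe defaults (`Healthy`/`Open`), so a stale atomic read can never
// surface an invalid state.
#[cfg(test)]
mod family_state_encoding_sketch {
use super::{MeDrainGateReason, MeFamilyRuntimeState};
#[test]
fn u8_round_trip_falls_back_to_safe_defaults() {
assert_eq!(
MeFamilyRuntimeState::from_u8(MeFamilyRuntimeState::Suppressed as u8),
MeFamilyRuntimeState::Suppressed
);
// Out-of-range values decode to the defaults rather than panicking.
assert_eq!(MeFamilyRuntimeState::from_u8(250), MeFamilyRuntimeState::Healthy);
assert_eq!(MeDrainGateReason::from_u8(250), MeDrainGateReason::Open);
}
}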
#[derive(Debug, Clone)]
pub struct SecretSnapshot {
pub epoch: u64,
@@ -171,6 +231,7 @@ pub struct MePool {
pub(super) endpoint_quarantine: Arc<Mutex<HashMap<SocketAddr, Instant>>>,
pub(super) kdf_material_fingerprint: Arc<RwLock<HashMap<SocketAddr, (u64, u16)>>>,
pub(super) me_pool_drain_ttl_secs: AtomicU64,
pub(super) me_instadrain: AtomicBool,
pub(super) me_pool_drain_threshold: AtomicU64,
pub(super) me_pool_drain_soft_evict_enabled: AtomicBool,
pub(super) me_pool_drain_soft_evict_grace_secs: AtomicU64,
@@ -193,11 +254,27 @@ pub struct MePool {
pub(super) me_reader_route_data_wait_ms: Arc<AtomicU64>,
pub(super) me_route_no_writer_mode: AtomicU8,
pub(super) me_route_no_writer_wait: Duration,
pub(super) me_route_hybrid_max_wait: Duration,
pub(super) me_route_blocking_send_timeout: Duration,
pub(super) me_route_inline_recovery_attempts: u32,
pub(super) me_route_inline_recovery_wait: Duration,
pub(super) me_health_interval_ms_unhealthy: AtomicU64,
pub(super) me_health_interval_ms_healthy: AtomicU64,
pub(super) me_warn_rate_limit_ms: AtomicU64,
pub(super) me_family_v4_runtime_state: AtomicU8,
pub(super) me_family_v6_runtime_state: AtomicU8,
pub(super) me_family_v4_state_since_epoch_secs: AtomicU64,
pub(super) me_family_v6_state_since_epoch_secs: AtomicU64,
pub(super) me_family_v4_suppressed_until_epoch_secs: AtomicU64,
pub(super) me_family_v6_suppressed_until_epoch_secs: AtomicU64,
pub(super) me_family_v4_fail_streak: AtomicU32,
pub(super) me_family_v6_fail_streak: AtomicU32,
pub(super) me_family_v4_recover_success_streak: AtomicU32,
pub(super) me_family_v6_recover_success_streak: AtomicU32,
pub(super) me_last_drain_gate_route_quorum_ok: AtomicBool,
pub(super) me_last_drain_gate_redundancy_ok: AtomicBool,
pub(super) me_last_drain_gate_block_reason: AtomicU8,
pub(super) me_last_drain_gate_updated_at_epoch_secs: AtomicU64,
pub(super) runtime_ready: AtomicBool,
pool_size: usize,
pub(super) preferred_endpoints_by_dc: Arc<RwLock<HashMap<i32, Vec<SocketAddr>>>>,
@@ -226,6 +303,14 @@ impl MePool {
.as_secs()
}
fn normalize_force_close_secs(force_close_secs: u64) -> u64 {
if force_close_secs == 0 {
ME_FORCE_CLOSE_SAFETY_FALLBACK_SECS
} else {
force_close_secs
}
}
pub fn new(
proxy_tag: Option<Vec<u8>>,
proxy_secret: Vec<u8>,
@@ -277,6 +362,7 @@ impl MePool {
me_adaptive_floor_max_warm_writers_global: u32,
hardswap: bool,
me_pool_drain_ttl_secs: u64,
me_instadrain: bool,
me_pool_drain_threshold: u64,
me_pool_drain_soft_evict_enabled: bool,
me_pool_drain_soft_evict_grace_secs: u64,
@@ -307,6 +393,8 @@ impl MePool {
me_warn_rate_limit_ms: u64,
me_route_no_writer_mode: MeRouteNoWriterMode,
me_route_no_writer_wait_ms: u64,
me_route_hybrid_max_wait_ms: u64,
me_route_blocking_send_timeout_ms: u64,
me_route_inline_recovery_attempts: u32,
me_route_inline_recovery_wait_ms: u64,
) -> Arc<Self> {
@@ -458,6 +546,7 @@ impl MePool {
endpoint_quarantine: Arc::new(Mutex::new(HashMap::new())),
kdf_material_fingerprint: Arc::new(RwLock::new(HashMap::new())),
me_pool_drain_ttl_secs: AtomicU64::new(me_pool_drain_ttl_secs),
me_instadrain: AtomicBool::new(me_instadrain),
me_pool_drain_threshold: AtomicU64::new(me_pool_drain_threshold),
me_pool_drain_soft_evict_enabled: AtomicBool::new(me_pool_drain_soft_evict_enabled),
me_pool_drain_soft_evict_grace_secs: AtomicU64::new(me_pool_drain_soft_evict_grace_secs),
@@ -470,7 +559,9 @@ impl MePool {
me_pool_drain_soft_evict_cooldown_ms: AtomicU64::new(
me_pool_drain_soft_evict_cooldown_ms.max(1),
),
me_pool_force_close_secs: AtomicU64::new(Self::normalize_force_close_secs(
me_pool_force_close_secs,
)),
me_pool_min_fresh_ratio_permille: AtomicU32::new(Self::ratio_to_permille(
me_pool_min_fresh_ratio,
)),
@@ -490,11 +581,29 @@ impl MePool {
me_reader_route_data_wait_ms: Arc::new(AtomicU64::new(me_reader_route_data_wait_ms)),
me_route_no_writer_mode: AtomicU8::new(me_route_no_writer_mode.as_u8()),
me_route_no_writer_wait: Duration::from_millis(me_route_no_writer_wait_ms),
me_route_hybrid_max_wait: Duration::from_millis(me_route_hybrid_max_wait_ms),
me_route_blocking_send_timeout: Duration::from_millis(
me_route_blocking_send_timeout_ms,
),
me_route_inline_recovery_attempts,
me_route_inline_recovery_wait: Duration::from_millis(me_route_inline_recovery_wait_ms),
me_health_interval_ms_unhealthy: AtomicU64::new(me_health_interval_ms_unhealthy.max(1)),
me_health_interval_ms_healthy: AtomicU64::new(me_health_interval_ms_healthy.max(1)),
me_warn_rate_limit_ms: AtomicU64::new(me_warn_rate_limit_ms.max(1)),
me_family_v4_runtime_state: AtomicU8::new(MeFamilyRuntimeState::Healthy as u8),
me_family_v6_runtime_state: AtomicU8::new(MeFamilyRuntimeState::Healthy as u8),
me_family_v4_state_since_epoch_secs: AtomicU64::new(Self::now_epoch_secs()),
me_family_v6_state_since_epoch_secs: AtomicU64::new(Self::now_epoch_secs()),
me_family_v4_suppressed_until_epoch_secs: AtomicU64::new(0),
me_family_v6_suppressed_until_epoch_secs: AtomicU64::new(0),
me_family_v4_fail_streak: AtomicU32::new(0),
me_family_v6_fail_streak: AtomicU32::new(0),
me_family_v4_recover_success_streak: AtomicU32::new(0),
me_family_v6_recover_success_streak: AtomicU32::new(0),
me_last_drain_gate_route_quorum_ok: AtomicBool::new(false),
me_last_drain_gate_redundancy_ok: AtomicBool::new(false),
me_last_drain_gate_block_reason: AtomicU8::new(MeDrainGateReason::Open as u8),
me_last_drain_gate_updated_at_epoch_secs: AtomicU64::new(Self::now_epoch_secs()),
runtime_ready: AtomicBool::new(false),
preferred_endpoints_by_dc: Arc::new(RwLock::new(preferred_endpoints_by_dc)),
})
@@ -512,10 +621,158 @@ impl MePool {
self.runtime_ready.load(Ordering::Relaxed)
}
pub(super) fn set_family_runtime_state(
&self,
family: IpFamily,
state: MeFamilyRuntimeState,
state_since_epoch_secs: u64,
suppressed_until_epoch_secs: u64,
fail_streak: u32,
recover_success_streak: u32,
) {
match family {
IpFamily::V4 => {
self.me_family_v4_runtime_state
.store(state as u8, Ordering::Relaxed);
self.me_family_v4_state_since_epoch_secs
.store(state_since_epoch_secs, Ordering::Relaxed);
self.me_family_v4_suppressed_until_epoch_secs
.store(suppressed_until_epoch_secs, Ordering::Relaxed);
self.me_family_v4_fail_streak
.store(fail_streak, Ordering::Relaxed);
self.me_family_v4_recover_success_streak
.store(recover_success_streak, Ordering::Relaxed);
}
IpFamily::V6 => {
self.me_family_v6_runtime_state
.store(state as u8, Ordering::Relaxed);
self.me_family_v6_state_since_epoch_secs
.store(state_since_epoch_secs, Ordering::Relaxed);
self.me_family_v6_suppressed_until_epoch_secs
.store(suppressed_until_epoch_secs, Ordering::Relaxed);
self.me_family_v6_fail_streak
.store(fail_streak, Ordering::Relaxed);
self.me_family_v6_recover_success_streak
.store(recover_success_streak, Ordering::Relaxed);
}
}
}
pub(crate) fn family_runtime_state(&self, family: IpFamily) -> MeFamilyRuntimeState {
match family {
IpFamily::V4 => MeFamilyRuntimeState::from_u8(
self.me_family_v4_runtime_state.load(Ordering::Relaxed),
),
IpFamily::V6 => MeFamilyRuntimeState::from_u8(
self.me_family_v6_runtime_state.load(Ordering::Relaxed),
),
}
}
pub(crate) fn family_runtime_state_since_epoch_secs(&self, family: IpFamily) -> u64 {
match family {
IpFamily::V4 => self
.me_family_v4_state_since_epoch_secs
.load(Ordering::Relaxed),
IpFamily::V6 => self
.me_family_v6_state_since_epoch_secs
.load(Ordering::Relaxed),
}
}
pub(crate) fn family_suppressed_until_epoch_secs(&self, family: IpFamily) -> u64 {
match family {
IpFamily::V4 => self
.me_family_v4_suppressed_until_epoch_secs
.load(Ordering::Relaxed),
IpFamily::V6 => self
.me_family_v6_suppressed_until_epoch_secs
.load(Ordering::Relaxed),
}
}
pub(crate) fn family_fail_streak(&self, family: IpFamily) -> u32 {
match family {
IpFamily::V4 => self.me_family_v4_fail_streak.load(Ordering::Relaxed),
IpFamily::V6 => self.me_family_v6_fail_streak.load(Ordering::Relaxed),
}
}
pub(crate) fn family_recover_success_streak(&self, family: IpFamily) -> u32 {
match family {
IpFamily::V4 => self
.me_family_v4_recover_success_streak
.load(Ordering::Relaxed),
IpFamily::V6 => self
.me_family_v6_recover_success_streak
.load(Ordering::Relaxed),
}
}
pub(crate) fn is_family_temporarily_suppressed(
&self,
family: IpFamily,
now_epoch_secs: u64,
) -> bool {
self.family_suppressed_until_epoch_secs(family) > now_epoch_secs
}
pub(super) fn family_enabled_for_drain_coverage(
&self,
family: IpFamily,
now_epoch_secs: u64,
) -> bool {
let configured = match family {
IpFamily::V4 => self.decision.ipv4_me,
IpFamily::V6 => self.decision.ipv6_me,
};
configured && !self.is_family_temporarily_suppressed(family, now_epoch_secs)
}
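// Gating summary for drain coverage, per family:
// configured && !suppressed -> counted toward coverage
// configured &&  suppressed -> skipped until the suppression window lapses
// !configured               -> never counted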
pub(super) fn set_last_drain_gate(
&self,
route_quorum_ok: bool,
redundancy_ok: bool,
block_reason: MeDrainGateReason,
updated_at_epoch_secs: u64,
) {
self.me_last_drain_gate_route_quorum_ok
.store(route_quorum_ok, Ordering::Relaxed);
self.me_last_drain_gate_redundancy_ok
.store(redundancy_ok, Ordering::Relaxed);
self.me_last_drain_gate_block_reason
.store(block_reason as u8, Ordering::Relaxed);
self.me_last_drain_gate_updated_at_epoch_secs
.store(updated_at_epoch_secs, Ordering::Relaxed);
}
pub(crate) fn last_drain_gate_route_quorum_ok(&self) -> bool {
self.me_last_drain_gate_route_quorum_ok
.load(Ordering::Relaxed)
}
pub(crate) fn last_drain_gate_redundancy_ok(&self) -> bool {
self.me_last_drain_gate_redundancy_ok
.load(Ordering::Relaxed)
}
pub(crate) fn last_drain_gate_block_reason(&self) -> MeDrainGateReason {
MeDrainGateReason::from_u8(
self.me_last_drain_gate_block_reason
.load(Ordering::Relaxed),
)
}
pub(crate) fn last_drain_gate_updated_at_epoch_secs(&self) -> u64 {
self.me_last_drain_gate_updated_at_epoch_secs
.load(Ordering::Relaxed)
}
pub fn update_runtime_reinit_policy(
&self,
hardswap: bool,
drain_ttl_secs: u64,
instadrain: bool,
pool_drain_threshold: u64,
pool_drain_soft_evict_enabled: bool,
pool_drain_soft_evict_grace_secs: u64,
@@ -560,6 +817,7 @@ impl MePool {
self.hardswap.store(hardswap, Ordering::Relaxed);
self.me_pool_drain_ttl_secs
.store(drain_ttl_secs, Ordering::Relaxed);
self.me_instadrain.store(instadrain, Ordering::Relaxed);
self.me_pool_drain_threshold
.store(pool_drain_threshold, Ordering::Relaxed);
self.me_pool_drain_soft_evict_enabled
@@ -574,8 +832,10 @@ impl MePool {
);
self.me_pool_drain_soft_evict_cooldown_ms
.store(pool_drain_soft_evict_cooldown_ms.max(1), Ordering::Relaxed);
self.me_pool_force_close_secs.store(
Self::normalize_force_close_secs(force_close_secs),
Ordering::Relaxed,
);
self.me_pool_min_fresh_ratio_permille
.store(Self::ratio_to_permille(min_fresh_ratio), Ordering::Relaxed);
self.me_hardswap_warmup_delay_min_ms
@@ -720,12 +980,9 @@ impl MePool {
}
pub(super) fn force_close_timeout(&self) -> Option<Duration> {
let secs =
Self::normalize_force_close_secs(self.me_pool_force_close_secs.load(Ordering::Relaxed));
Some(Duration::from_secs(secs))
}
pub(super) fn drain_soft_evict_enabled(&self) -> bool {
@@ -997,9 +1254,10 @@ impl MePool {
}
pub(super) async fn active_coverage_required_total(&self) -> usize {
let now_epoch_secs = Self::now_epoch_secs();
let mut endpoints_by_dc = HashMap::<i32, HashSet<SocketAddr>>::new();
if self.family_enabled_for_drain_coverage(IpFamily::V4, now_epoch_secs) {
let map = self.proxy_map_v4.read().await;
for (dc, addrs) in map.iter() {
let entry = endpoints_by_dc.entry(*dc).or_default();
@@ -1009,7 +1267,7 @@ impl MePool {
}
}
if self.family_enabled_for_drain_coverage(IpFamily::V6, now_epoch_secs) {
let map = self.proxy_map_v6.read().await;
for (dc, addrs) in map.iter() {
let entry = endpoints_by_dc.entry(*dc).or_default();

View File

@@ -74,9 +74,8 @@ impl MePool {
debug!(
%addr,
wait_ms = expiry.saturating_duration_since(now).as_millis(),
"All ME endpoints are quarantined for the DC group; retrying earliest one"
"All ME endpoints are quarantined for the DC group; waiting for quarantine expiry"
);
return vec![addr];
}
Vec::new()
@@ -165,9 +164,10 @@ impl MePool {
}
async fn endpoints_for_dc(&self, target_dc: i32) -> Vec<SocketAddr> {
let now_epoch_secs = Self::now_epoch_secs();
let mut endpoints = HashSet::<SocketAddr>::new();
if self.family_enabled_for_drain_coverage(IpFamily::V4, now_epoch_secs) {
let map = self.proxy_map_v4.read().await;
if let Some(addrs) = map.get(&target_dc) {
for (ip, port) in addrs {
@@ -176,7 +176,7 @@ impl MePool {
}
}
if self.family_enabled_for_drain_coverage(IpFamily::V6, now_epoch_secs) {
let map = self.proxy_map_v6.read().await;
if let Some(addrs) = map.get(&target_dc) {
for (ip, port) in addrs {

View File

@@ -11,8 +11,9 @@ use tracing::{debug, info, warn};
use std::collections::hash_map::DefaultHasher;
use crate::crypto::SecureRandom;
use crate::network::IpFamily;
use super::pool::{MeDrainGateReason, MePool, WriterContour};
const ME_HARDSWAP_PENDING_TTL_SECS: u64 = 1800;
@@ -120,9 +121,10 @@ impl MePool {
}
async fn desired_dc_endpoints(&self) -> HashMap<i32, HashSet<SocketAddr>> {
let now_epoch_secs = Self::now_epoch_secs();
let mut out: HashMap<i32, HashSet<SocketAddr>> = HashMap::new();
if self.family_enabled_for_drain_coverage(IpFamily::V4, now_epoch_secs) {
let map_v4 = self.proxy_map_v4.read().await.clone();
for (dc, addrs) in map_v4 {
let entry = out.entry(dc).or_default();
@@ -132,7 +134,7 @@ impl MePool {
}
}
if self.family_enabled_for_drain_coverage(IpFamily::V6, now_epoch_secs) {
let map_v6 = self.proxy_map_v6.read().await.clone();
for (dc, addrs) in map_v6 {
let entry = out.entry(dc).or_default();
@@ -313,13 +315,23 @@ impl MePool {
pub async fn zero_downtime_reinit_after_map_change(self: &Arc<Self>, rng: &SecureRandom) {
let desired_by_dc = self.desired_dc_endpoints().await;
let now_epoch_secs = Self::now_epoch_secs();
let v4_suppressed = self.is_family_temporarily_suppressed(IpFamily::V4, now_epoch_secs);
let v6_suppressed = self.is_family_temporarily_suppressed(IpFamily::V6, now_epoch_secs);
if desired_by_dc.is_empty() {
warn!("ME endpoint map is empty; skipping stale writer drain");
let reason = if (self.decision.ipv4_me && v4_suppressed)
|| (self.decision.ipv6_me && v6_suppressed)
{
MeDrainGateReason::SuppressionActive
} else {
MeDrainGateReason::CoverageQuorum
};
self.set_last_drain_gate(false, false, reason, now_epoch_secs);
return;
}
let desired_map_hash = Self::desired_map_hash(&desired_by_dc);
let previous_generation = self.current_generation();
let hardswap = self.hardswap.load(Ordering::Relaxed);
let generation = if hardswap {
@@ -390,7 +402,17 @@ impl MePool {
.load(Ordering::Relaxed),
);
let (coverage_ratio, missing_dc) = Self::coverage_ratio(&desired_by_dc, &active_writer_addrs);
let mut route_quorum_ok = coverage_ratio >= min_ratio;
let mut redundancy_ok = missing_dc.is_empty();
let mut redundancy_missing_dc = missing_dc.clone();
let mut gate_coverage_ratio = coverage_ratio;
if !hardswap && coverage_ratio < min_ratio {
self.set_last_drain_gate(
false,
redundancy_ok,
MeDrainGateReason::CoverageQuorum,
now_epoch_secs,
);
warn!(
previous_generation,
generation,
@@ -411,7 +433,17 @@ impl MePool {
.collect();
let (fresh_coverage_ratio, fresh_missing_dc) =
Self::coverage_ratio(&desired_by_dc, &fresh_writer_addrs);
route_quorum_ok = fresh_coverage_ratio >= min_ratio;
redundancy_ok = fresh_missing_dc.is_empty();
redundancy_missing_dc = fresh_missing_dc.clone();
gate_coverage_ratio = fresh_coverage_ratio;
if fresh_coverage_ratio < min_ratio {
self.set_last_drain_gate(
false,
redundancy_ok,
MeDrainGateReason::CoverageQuorum,
now_epoch_secs,
);
warn!(
previous_generation,
generation,
@@ -421,13 +453,16 @@ impl MePool {
);
return;
}
}
self.set_last_drain_gate(route_quorum_ok, redundancy_ok, MeDrainGateReason::Open, now_epoch_secs);
if !redundancy_ok {
warn!(
missing_dc = ?redundancy_missing_dc,
coverage_ratio = format_args!("{gate_coverage_ratio:.3}"),
min_ratio = format_args!("{min_ratio:.3}"),
"ME reinit proceeds with weighted quorum while some DC groups remain uncovered"
);
}
if hardswap {

View File

@@ -1,7 +1,7 @@
use std::collections::HashMap;
use std::time::Instant;
use super::pool::{MeDrainGateReason, MePool, RefillDcKey};
use crate::network::IpFamily;
#[derive(Clone, Debug)]
@@ -36,6 +36,24 @@ pub(crate) struct MeApiNatStunSnapshot {
pub stun_backoff_remaining_ms: Option<u64>,
}
#[derive(Clone, Debug)]
pub(crate) struct MeApiFamilyStateSnapshot {
pub family: &'static str,
pub state: &'static str,
pub state_since_epoch_secs: u64,
pub suppressed_until_epoch_secs: Option<u64>,
pub fail_streak: u32,
pub recover_success_streak: u32,
}
#[derive(Clone, Debug)]
pub(crate) struct MeApiDrainGateSnapshot {
pub route_quorum_ok: bool,
pub redundancy_ok: bool,
pub block_reason: &'static str,
pub updated_at_epoch_secs: u64,
}
impl MePool {
pub(crate) async fn api_refill_snapshot(&self) -> MeApiRefillSnapshot {
let inflight_endpoints_total = self.refill_inflight.lock().await.len();
@@ -125,4 +143,35 @@ impl MePool {
stun_backoff_remaining_ms,
}
}
pub(crate) fn api_family_state_snapshot(&self) -> Vec<MeApiFamilyStateSnapshot> {
[IpFamily::V4, IpFamily::V6]
.into_iter()
.map(|family| {
let state = self.family_runtime_state(family);
let suppressed_until = self.family_suppressed_until_epoch_secs(family);
MeApiFamilyStateSnapshot {
family: match family {
IpFamily::V4 => "v4",
IpFamily::V6 => "v6",
},
state: state.as_str(),
state_since_epoch_secs: self.family_runtime_state_since_epoch_secs(family),
suppressed_until_epoch_secs: (suppressed_until != 0).then_some(suppressed_until),
fail_streak: self.family_fail_streak(family),
recover_success_streak: self.family_recover_success_streak(family),
}
})
.collect()
}
pub(crate) fn api_drain_gate_snapshot(&self) -> MeApiDrainGateSnapshot {
let reason: MeDrainGateReason = self.last_drain_gate_block_reason();
MeApiDrainGateSnapshot {
route_quorum_ok: self.last_drain_gate_route_quorum_ok(),
redundancy_ok: self.last_drain_gate_redundancy_ok(),
block_reason: reason.as_str(),
updated_at_epoch_secs: self.last_drain_gate_updated_at_epoch_secs(),
}
}
}
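// Hedged consumer sketch: how an API handler might flatten the gate snapshot
// into a log line; `render_drain_gate` is a hypothetical helper, not part of
// the crate's HTTP layer.
//
// fn render_drain_gate(pool: &MePool) -> String {
//     let gate = pool.api_drain_gate_snapshot();
//     format!(
//         "quorum_ok={} redundancy_ok={} reason={} updated_at={}",
//         gate.route_quorum_ok,
//         gate.redundancy_ok,
//         gate.block_reason,
//         gate.updated_at_epoch_secs,
//     )
// }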

View File

@@ -126,6 +126,7 @@ pub(crate) struct MeApiRuntimeSnapshot {
pub me_reconnect_backoff_cap_ms: u64,
pub me_reconnect_fast_retry_count: u32,
pub me_pool_drain_ttl_secs: u64,
pub me_instadrain: bool,
pub me_pool_drain_soft_evict_enabled: bool,
pub me_pool_drain_soft_evict_grace_secs: u64,
pub me_pool_drain_soft_evict_per_writer: u8,
@@ -583,6 +584,7 @@ impl MePool {
me_reconnect_backoff_cap_ms: self.me_reconnect_backoff_cap.as_millis() as u64,
me_reconnect_fast_retry_count: self.me_reconnect_fast_retry_count,
me_pool_drain_ttl_secs: self.me_pool_drain_ttl_secs.load(Ordering::Relaxed),
me_instadrain: self.me_instadrain.load(Ordering::Relaxed),
me_pool_drain_soft_evict_enabled: self
.me_pool_drain_soft_evict_enabled
.load(Ordering::Relaxed),

View File

@@ -8,6 +8,7 @@ use bytes::Bytes;
use bytes::BytesMut;
use rand::Rng;
use tokio::sync::mpsc;
use tokio::sync::mpsc::error::TrySendError;
use tokio_util::sync::CancellationToken;
use tracing::{debug, info, warn};
@@ -15,11 +16,13 @@ use crate::config::MeBindStaleMode;
use crate::crypto::SecureRandom;
use crate::error::{ProxyError, Result};
use crate::protocol::constants::{RPC_CLOSE_EXT_U32, RPC_PING_U32};
use crate::stats::{
MeWriterCleanupSideEffectStep, MeWriterTeardownMode, MeWriterTeardownReason,
};
use super::codec::{RpcWriter, WriterCommand};
use super::pool::{MePool, MeWriter, WriterContour};
use super::reader::reader_loop;
use super::registry::BoundConn;
use super::wire::build_proxy_req_payload;
const ME_ACTIVE_PING_SECS: u64 = 25;
@@ -27,6 +30,12 @@ const ME_ACTIVE_PING_JITTER_SECS: i64 = 5;
const ME_IDLE_KEEPALIVE_MAX_SECS: u64 = 5;
const ME_RPC_PROXY_REQ_RESPONSE_WAIT_MS: u64 = 700;
#[derive(Clone, Copy)]
enum WriterRemoveGuardMode {
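/// Remove the writer regardless of its current draining flag.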
Any,
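/// Remove the writer only while it is still marked draining (watchdog hard-detach escalation).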
DrainingOnly,
}
fn is_me_peer_closed_error(error: &ProxyError) -> bool {
matches!(error, ProxyError::Io(ioe) if ioe.kind() == ErrorKind::UnexpectedEof)
}
@@ -43,9 +52,16 @@ impl MePool {
for writer_id in closed_writer_ids {
if self.registry.is_writer_empty(writer_id).await {
let _ = self
.remove_writer_only(writer_id, MeWriterTeardownReason::PruneClosedWriter)
.await;
} else {
let _ = self
.remove_writer_and_close_clients(
writer_id,
MeWriterTeardownReason::PruneClosedWriter,
)
.await;
}
}
}
@@ -142,6 +158,9 @@ impl MePool {
crc_mode: hs.crc_mode,
};
let cancel_wr = cancel.clone();
let cleanup_done = Arc::new(AtomicBool::new(false));
let cleanup_for_writer = cleanup_done.clone();
let pool_writer_task = Arc::downgrade(self);
tokio::spawn(async move {
loop {
tokio::select! {
@@ -159,6 +178,20 @@ impl MePool {
_ = cancel_wr.cancelled() => break,
}
}
if cleanup_for_writer
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed)
.is_ok()
{
if let Some(pool) = pool_writer_task.upgrade() {
pool.remove_writer_and_close_clients(
writer_id,
MeWriterTeardownReason::WriterTaskExit,
)
.await;
} else {
cancel_wr.cancel();
}
}
});
let writer = MeWriter {
id: writer_id,
@@ -195,7 +228,6 @@ impl MePool {
let cancel_ping = cancel.clone();
let tx_ping = tx.clone();
let ping_tracker_ping = ping_tracker.clone();
let cleanup_for_reader = cleanup_done.clone();
let cleanup_for_ping = cleanup_done.clone();
let keepalive_enabled = self.me_keepalive_enabled;
@@ -241,21 +273,29 @@ impl MePool {
stats_reader_close.increment_me_idle_close_by_peer_total();
info!(writer_id, "ME socket closed by peer on idle writer");
}
if cleanup_for_reader
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed)
.is_ok()
{
if let Some(pool) = pool.upgrade() {
pool.remove_writer_and_close_clients(
writer_id,
MeWriterTeardownReason::ReaderExit,
)
.await;
} else {
// Fallback for shutdown races: make writer task exit quickly so stale
// channels are observable by periodic prune.
cancel_reader_token.cancel();
}
}
if let Err(e) = res {
if !idle_close_by_peer {
warn!(error = %e, "ME reader ended");
}
}
let remaining = writers_arc.read().await.len();
debug!(writer_id, remaining, "ME reader task finished");
});
let pool_ping = Arc::downgrade(self);
@@ -312,41 +352,28 @@ impl MePool {
let mut p = Vec::with_capacity(12);
p.extend_from_slice(&RPC_PING_U32.to_le_bytes());
p.extend_from_slice(&sent_id.to_le_bytes());
let now_epoch_ms = std::time::SystemTime::now()
.duration_since(std::time::UNIX_EPOCH)
.unwrap_or_default()
.as_millis() as u64;
let mut run_cleanup = false;
if let Some(pool) = pool_ping.upgrade() {
let last_cleanup_ms = pool
.ping_tracker_last_cleanup_epoch_ms
.load(Ordering::Relaxed);
if now_epoch_ms.saturating_sub(last_cleanup_ms) >= 30_000
&& pool
.ping_tracker_last_cleanup_epoch_ms
.compare_exchange(
last_cleanup_ms,
now_epoch_ms,
Ordering::AcqRel,
Ordering::Relaxed,
)
.is_ok()
{
run_cleanup = true;
}
}
ping_id = ping_id.wrapping_add(1);
stats_ping.increment_me_keepalive_sent();
@@ -363,10 +390,24 @@ impl MePool {
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed)
.is_ok()
{
pool.remove_writer_and_close_clients(
writer_id,
MeWriterTeardownReason::PingSendFail,
)
.await;
}
break;
}
let mut tracker = ping_tracker_ping.lock().await;
if run_cleanup {
let before = tracker.len();
tracker.retain(|_, (ts, _)| ts.elapsed() < Duration::from_secs(120));
let expired = before.saturating_sub(tracker.len());
if expired > 0 {
stats_ping.increment_me_keepalive_timeout_by(expired as u64);
}
}
tracker.insert(sent_id, (std::time::Instant::now(), writer_id));
}
});
@@ -446,7 +487,11 @@ impl MePool {
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed)
.is_ok()
{
pool.remove_writer_and_close_clients(
writer_id,
MeWriterTeardownReason::SignalSendFail,
)
.await;
}
break;
}
@@ -480,7 +525,11 @@ impl MePool {
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed)
.is_ok()
{
pool.remove_writer_and_close_clients(
writer_id,
MeWriterTeardownReason::SignalSendFail,
)
.await;
}
break;
}
@@ -493,23 +542,83 @@ impl MePool {
Ok(())
}
pub(crate) async fn remove_writer_and_close_clients(
self: &Arc<Self>,
writer_id: u64,
reason: MeWriterTeardownReason,
) -> bool {
// Full client cleanup now happens inside `registry.writer_lost` to keep
// writer reap/remove paths strictly non-blocking per connection.
self.remove_writer_with_mode(
writer_id,
reason,
MeWriterTeardownMode::Normal,
WriterRemoveGuardMode::Any,
)
.await
}
pub(super) async fn remove_draining_writer_hard_detach(
self: &Arc<Self>,
writer_id: u64,
reason: MeWriterTeardownReason,
) -> bool {
self.remove_writer_with_mode(
writer_id,
reason,
MeWriterTeardownMode::HardDetach,
WriterRemoveGuardMode::DrainingOnly,
)
.await
}
async fn remove_writer_only(
self: &Arc<Self>,
writer_id: u64,
reason: MeWriterTeardownReason,
) -> bool {
self.remove_writer_with_mode(
writer_id,
reason,
MeWriterTeardownMode::Normal,
WriterRemoveGuardMode::Any,
)
.await
}
// Authoritative teardown primitive shared by normal cleanup and watchdog path.
// Lock-order invariant:
// 1) mutate `writers` under pool write lock,
// 2) release pool lock,
// 3) run registry/metrics/refill side effects.
// `registry.writer_lost` must never run while `writers` lock is held.
async fn remove_writer_with_mode(
self: &Arc<Self>,
writer_id: u64,
reason: MeWriterTeardownReason,
mode: MeWriterTeardownMode,
guard_mode: WriterRemoveGuardMode,
) -> bool {
let started_at = Instant::now();
self.stats
.increment_me_writer_teardown_attempt_total(reason, mode);
let mut close_tx: Option<mpsc::Sender<WriterCommand>> = None;
let mut removed_addr: Option<SocketAddr> = None;
let mut removed_dc: Option<i32> = None;
let mut removed_uptime: Option<Duration> = None;
let mut trigger_refill = false;
let mut removed = false;
{
let mut ws = self.writers.write().await;
if let Some(pos) = ws.iter().position(|w| w.id == writer_id) {
if matches!(guard_mode, WriterRemoveGuardMode::DrainingOnly)
&& !ws[pos].draining.load(Ordering::Relaxed)
{
self.stats.increment_me_writer_teardown_noop_total();
self.stats
.observe_me_writer_teardown_duration(mode, started_at.elapsed());
return false;
}
let w = ws.remove(pos);
let was_draining = w.draining.load(Ordering::Relaxed);
if was_draining {
@@ -526,27 +635,65 @@ impl MePool {
}
close_tx = Some(w.tx.clone());
self.conn_count.fetch_sub(1, Ordering::Relaxed);
removed = true;
}
}
// State invariant:
// - writer is removed from `self.writers` (pool visibility),
// - writer is removed from registry routing/binding maps via `writer_lost`.
// The close command below is only a best-effort accelerator for task shutdown.
// Cleanup progress must never depend on command-channel availability.
let _ = self.registry.writer_lost(writer_id).await;
{
let mut tracker = self.ping_tracker.lock().await;
tracker.retain(|_, (_, wid)| *wid != writer_id);
}
self.rtt_stats.lock().await.remove(&writer_id);
if let Some(tx) = close_tx {
match tx.try_send(WriterCommand::Close) {
Ok(()) => {}
Err(TrySendError::Full(_)) => {
self.stats.increment_me_writer_close_signal_drop_total();
self.stats
.increment_me_writer_close_signal_channel_full_total();
self.stats.increment_me_writer_cleanup_side_effect_failures_total(
MeWriterCleanupSideEffectStep::CloseSignalChannelFull,
);
debug!(
writer_id,
"Skipping close signal for removed writer: command channel is full"
);
}
Err(TrySendError::Closed(_)) => {
self.stats.increment_me_writer_close_signal_drop_total();
self.stats.increment_me_writer_cleanup_side_effect_failures_total(
MeWriterCleanupSideEffectStep::CloseSignalChannelClosed,
);
debug!(
writer_id,
"Skipping close signal for removed writer: command channel is closed"
);
}
}
}
if let Some(addr) = removed_addr {
if let Some(uptime) = removed_uptime {
self.maybe_quarantine_flapping_endpoint(addr, uptime).await;
}
if trigger_refill
&& let Some(writer_dc) = removed_dc
{
self.trigger_immediate_refill_for_dc(addr, writer_dc);
}
}
if removed {
self.stats.increment_me_writer_teardown_success_total(mode);
} else {
self.stats.increment_me_writer_teardown_noop_total();
}
self.stats
.observe_me_writer_teardown_duration(mode, started_at.elapsed());
removed
}
pub(crate) async fn mark_writer_draining_with_timeout(

View File

@@ -8,6 +8,7 @@ use bytes::{Bytes, BytesMut};
use tokio::io::AsyncReadExt;
use tokio::net::TcpStream;
use tokio::sync::{Mutex, mpsc};
use tokio::sync::mpsc::error::TrySendError;
use tokio_util::sync::CancellationToken;
use tracing::{debug, trace, warn};
@@ -173,12 +174,12 @@ pub(crate) async fn reader_loop(
} else if pt == RPC_CLOSE_EXT_U32 && body.len() >= 8 {
let cid = u64::from_le_bytes(body[0..8].try_into().unwrap());
debug!(cid, "RPC_CLOSE_EXT from ME");
let _ = reg.route_nowait(cid, MeResponse::Close).await;
reg.unregister(cid).await;
} else if pt == RPC_CLOSE_CONN_U32 && body.len() >= 8 {
let cid = u64::from_le_bytes(body[0..8].try_into().unwrap());
debug!(cid, "RPC_CLOSE_CONN from ME");
let _ = reg.route_nowait(cid, MeResponse::Close).await;
reg.unregister(cid).await;
} else if pt == RPC_PING_U32 && body.len() >= 8 {
let ping_id = i64::from_le_bytes(body[0..8].try_into().unwrap());
@@ -186,13 +187,15 @@ pub(crate) async fn reader_loop(
let mut pong = Vec::with_capacity(12);
pong.extend_from_slice(&RPC_PONG_U32.to_le_bytes());
pong.extend_from_slice(&ping_id.to_le_bytes());
match tx.try_send(WriterCommand::DataAndFlush(Bytes::from(pong))) {
Ok(()) => {}
Err(TrySendError::Full(_)) => {
debug!(ping_id, "PONG dropped: writer command channel is full");
}
Err(TrySendError::Closed(_)) => {
warn!("PONG send failed: writer channel closed");
break;
}
}
} else if pt == RPC_PONG_U32 && body.len() >= 8 {
let ping_id = i64::from_le_bytes(body[0..8].try_into().unwrap());
@@ -232,6 +235,13 @@ async fn send_close_conn(tx: &mpsc::Sender<WriterCommand>, conn_id: u64) {
let mut p = Vec::with_capacity(12);
p.extend_from_slice(&RPC_CLOSE_CONN_U32.to_le_bytes());
p.extend_from_slice(&conn_id.to_le_bytes());
match tx.try_send(WriterCommand::DataAndFlush(Bytes::from(p))) {
Ok(()) => {}
Err(TrySendError::Full(_)) => {
debug!(conn_id, "ME close_conn signal skipped: writer command channel is full");
}
Err(TrySendError::Closed(_)) => {
debug!(conn_id, "ME close_conn signal skipped: writer command channel is closed");
}
}
}

View File

@@ -169,6 +169,7 @@ impl ConnRegistry {
None
}
#[allow(dead_code)]
pub async fn route(&self, id: u64, resp: MeResponse) -> RouteResult {
let tx = {
let inner = self.inner.read().await;
@@ -445,30 +446,38 @@ impl ConnRegistry {
}
pub async fn writer_lost(&self, writer_id: u64) -> Vec<BoundConn> {
let mut close_txs = Vec::<mpsc::Sender<MeResponse>>::new();
let mut out = Vec::new();
{
let mut inner = self.inner.write().await;
inner.writers.remove(&writer_id);
inner.last_meta_for_writer.remove(&writer_id);
inner.writer_idle_since_epoch_secs.remove(&writer_id);
let conns = inner
.conns_for_writer
.remove(&writer_id)
.unwrap_or_default()
.into_iter()
.collect::<Vec<_>>();
for conn_id in conns {
if inner.writer_for_conn.get(&conn_id).copied() != Some(writer_id) {
continue;
}
inner.writer_for_conn.remove(&conn_id);
if let Some(client_tx) = inner.map.remove(&conn_id) {
close_txs.push(client_tx);
}
if let Some(meta) = inner.meta.remove(&conn_id) {
out.push(BoundConn { conn_id, meta });
}
}
}
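// Close signals are sent only after the registry write-lock above is dropped;
// `try_send` never blocks, so a slow client channel cannot stretch the
// critical section or deadlock against writer teardown.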
for client_tx in close_txs {
let _ = client_tx.try_send(MeResponse::Close);
}
out
}
@@ -491,6 +500,7 @@ impl ConnRegistry {
#[cfg(test)]
mod tests {
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::time::Duration;
use super::ConnMeta;
use super::ConnRegistry;
@@ -663,6 +673,39 @@ mod tests {
assert!(registry.is_writer_empty(20).await);
}
#[tokio::test]
async fn writer_lost_removes_bound_conn_from_registry_and_signals_close() {
let registry = ConnRegistry::new();
let (conn_id, mut rx) = registry.register().await;
let (writer_tx, _writer_rx) = tokio::sync::mpsc::channel(8);
registry.register_writer(10, writer_tx).await;
let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::LOCALHOST), 443);
assert!(
registry
.bind_writer(
conn_id,
10,
ConnMeta {
target_dc: 2,
client_addr: addr,
our_addr: addr,
proto_flags: 0,
},
)
.await
);
let lost = registry.writer_lost(10).await;
assert_eq!(lost.len(), 1);
assert_eq!(lost[0].conn_id, conn_id);
assert!(registry.get_writer(conn_id).await.is_none());
assert!(registry.get_meta(conn_id).await.is_none());
assert_eq!(registry.unregister(conn_id).await, None);
let close = tokio::time::timeout(Duration::from_millis(50), rx.recv()).await;
assert!(matches!(close, Ok(Some(MeResponse::Close))));
}
#[tokio::test]
async fn bind_writer_rejects_unregistered_writer() {
let registry = ConnRegistry::new();

View File

@@ -6,6 +6,7 @@ use std::sync::atomic::Ordering;
use std::time::{Duration, Instant};
use bytes::Bytes;
use tokio::sync::mpsc;
use tokio::sync::mpsc::error::TrySendError;
use tracing::{debug, warn};
@@ -13,6 +14,7 @@ use crate::config::{MeRouteNoWriterMode, MeWriterPickMode};
use crate::error::{ProxyError, Result};
use crate::network::IpFamily;
use crate::protocol::constants::{RPC_CLOSE_CONN_U32, RPC_CLOSE_EXT_U32};
use crate::stats::MeWriterTeardownReason;
use super::MePool;
use super::codec::WriterCommand;
@@ -29,6 +31,29 @@ const PICK_PENALTY_DRAINING: u64 = 600;
const PICK_PENALTY_STALE: u64 = 300;
const PICK_PENALTY_DEGRADED: u64 = 250;
enum TimedSendError<T> {
Closed(T),
Timeout(T),
}
async fn send_writer_command_with_timeout(
tx: &mpsc::Sender<WriterCommand>,
cmd: WriterCommand,
timeout: Duration,
) -> std::result::Result<(), TimedSendError<WriterCommand>> {
if timeout.is_zero() {
return tx.send(cmd).await.map_err(|err| TimedSendError::Closed(err.0));
}
match tokio::time::timeout(timeout, tx.reserve()).await {
Ok(Ok(permit)) => {
permit.send(cmd);
Ok(())
}
Ok(Err(_)) => Err(TimedSendError::Closed(cmd)),
Err(_) => Err(TimedSendError::Timeout(cmd)),
}
}
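// Illustrative sketch (not part of the diff): the reserve-then-timeout pattern
// used above, shown standalone with a plain `u32` payload and tokio's public API.
async fn reserve_with_deadline_demo() {
    use std::time::Duration;
    use tokio::sync::mpsc;
    let (tx, _rx) = mpsc::channel::<u32>(1);
    tx.send(1).await.expect("fills the single slot");
    // reserve() waits for capacity; wrapping it in timeout() bounds that wait,
    // which is how Timeout is distinguished from Closed above.
    match tokio::time::timeout(Duration::from_millis(10), tx.reserve()).await {
        Ok(Ok(permit)) => permit.send(2),                    // capacity freed in time
        Ok(Err(_)) => eprintln!("receiver dropped (Closed)"),
        Err(_) => eprintln!("deadline hit (Timeout); caller keeps the command"),
    }
}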
impl MePool {
/// Send RPC_PROXY_REQ. `tag_override`: per-user ad_tag (from access.user_ad_tags); if None, uses pool default.
pub async fn send_proxy_req(
@@ -78,8 +103,18 @@ impl MePool {
let mut hybrid_last_recovery_at: Option<Instant> = None;
let hybrid_wait_step = self.me_route_no_writer_wait.max(Duration::from_millis(50));
let mut hybrid_wait_current = hybrid_wait_step;
let hybrid_deadline = Instant::now() + self.me_route_hybrid_max_wait;
loop {
if matches!(no_writer_mode, MeRouteNoWriterMode::HybridAsyncPersistent)
&& Instant::now() >= hybrid_deadline
{
self.stats.increment_me_no_writer_failfast_total();
return Err(ProxyError::Proxy(
"No ME writer available in hybrid wait window".into(),
));
}
let mut skip_writer_id: Option<u64> = None;
let current_meta = self
.registry
.get_meta(conn_id)
@@ -90,16 +125,42 @@ impl MePool {
match current.tx.try_send(WriterCommand::Data(current_payload.clone())) {
Ok(()) => return Ok(()),
Err(TrySendError::Full(cmd)) => {
if current.tx.send(cmd).await.is_ok() {
return Ok(());
match send_writer_command_with_timeout(
&current.tx,
cmd,
self.me_route_blocking_send_timeout,
)
.await
{
Ok(()) => return Ok(()),
Err(TimedSendError::Closed(_)) => {
warn!(writer_id = current.writer_id, "ME writer channel closed");
self.remove_writer_and_close_clients(
current.writer_id,
MeWriterTeardownReason::RouteChannelClosed,
)
.await;
continue;
}
Err(TimedSendError::Timeout(_)) => {
debug!(
conn_id,
writer_id = current.writer_id,
timeout_ms = self.me_route_blocking_send_timeout.as_millis()
as u64,
"ME writer send timed out for bound writer, trying reroute"
);
skip_writer_id = Some(current.writer_id);
}
}
warn!(writer_id = current.writer_id, "ME writer channel closed");
self.remove_writer_and_close_clients(current.writer_id).await;
continue;
}
Err(TrySendError::Closed(_)) => {
warn!(writer_id = current.writer_id, "ME writer channel closed");
self.remove_writer_and_close_clients(current.writer_id).await;
self.remove_writer_and_close_clients(
current.writer_id,
MeWriterTeardownReason::RouteChannelClosed,
)
.await;
continue;
}
}
@@ -200,6 +261,9 @@ impl MePool {
.candidate_indices_for_dc(&writers_snapshot, routed_dc, true)
.await;
}
if let Some(skip_writer_id) = skip_writer_id {
candidate_indices.retain(|idx| writers_snapshot[*idx].id != skip_writer_id);
}
if candidate_indices.is_empty() {
let pick_mode = self.writer_pick_mode();
match no_writer_mode {
@@ -403,7 +467,11 @@ impl MePool {
Err(TrySendError::Closed(_)) => {
self.stats.increment_me_writer_pick_closed_total(pick_mode);
warn!(writer_id = w.id, "ME writer channel closed");
self.remove_writer_and_close_clients(w.id).await;
self.remove_writer_and_close_clients(
w.id,
MeWriterTeardownReason::RouteChannelClosed,
)
.await;
continue;
}
}
@@ -422,7 +490,13 @@ impl MePool {
self.stats.increment_me_writer_pick_blocking_fallback_total();
let effective_our_addr = SocketAddr::new(w.source_ip, our_addr.port());
let (payload, meta) = build_routed_payload(effective_our_addr);
match w.tx.send(WriterCommand::Data(payload.clone())).await {
match send_writer_command_with_timeout(
&w.tx,
WriterCommand::Data(payload.clone()),
self.me_route_blocking_send_timeout,
)
.await
{
Ok(()) => {
self.stats
.increment_me_writer_pick_success_fallback_total(pick_mode);
@@ -439,10 +513,23 @@ impl MePool {
}
return Ok(());
}
Err(_) => {
Err(TimedSendError::Closed(_)) => {
self.stats.increment_me_writer_pick_closed_total(pick_mode);
warn!(writer_id = w.id, "ME writer channel closed (blocking)");
self.remove_writer_and_close_clients(w.id).await;
self.remove_writer_and_close_clients(
w.id,
MeWriterTeardownReason::RouteChannelClosed,
)
.await;
}
Err(TimedSendError::Timeout(_)) => {
self.stats.increment_me_writer_pick_full_total(pick_mode);
debug!(
conn_id,
writer_id = w.id,
timeout_ms = self.me_route_blocking_send_timeout.as_millis() as u64,
"ME writer blocking fallback send timed out"
);
}
}
}
@@ -573,13 +660,23 @@ impl MePool {
let mut p = Vec::with_capacity(12);
p.extend_from_slice(&RPC_CLOSE_EXT_U32.to_le_bytes());
p.extend_from_slice(&conn_id.to_le_bytes());
if w.tx
.send(WriterCommand::DataAndFlush(Bytes::from(p)))
.await
.is_err()
{
debug!("ME close write failed");
self.remove_writer_and_close_clients(w.writer_id).await;
match w.tx.try_send(WriterCommand::DataAndFlush(Bytes::from(p))) {
Ok(()) => {}
Err(TrySendError::Full(_)) => {
debug!(
conn_id,
writer_id = w.writer_id,
"ME close skipped: writer command channel is full"
);
}
Err(TrySendError::Closed(_)) => {
debug!("ME close write failed");
self.remove_writer_and_close_clients(
w.writer_id,
MeWriterTeardownReason::CloseRpcChannelClosed,
)
.await;
}
}
} else {
debug!(conn_id, "ME close skipped (writer missing)");
@@ -596,8 +693,12 @@ impl MePool {
p.extend_from_slice(&conn_id.to_le_bytes());
match w.tx.try_send(WriterCommand::DataAndFlush(Bytes::from(p))) {
Ok(()) => {}
Err(TrySendError::Full(cmd)) => {
let _ = tokio::time::timeout(Duration::from_millis(50), w.tx.send(cmd)).await;
Err(TrySendError::Full(_)) => {
debug!(
conn_id,
writer_id = w.writer_id,
"ME close_conn skipped: writer command channel is full"
);
}
Err(TrySendError::Closed(_)) => {
debug!(conn_id, "ME close_conn skipped: writer channel closed");

View File

@@ -2,6 +2,7 @@
pub mod pool;
pub mod proxy_protocol;
pub mod shadowsocks;
pub mod socket;
pub mod socks;
pub mod upstream;
@@ -14,5 +15,8 @@ pub use socket::*;
#[allow(unused_imports)]
pub use socks::*;
#[allow(unused_imports)]
pub use upstream::{DcPingResult, StartupPingResult, UpstreamEgressInfo, UpstreamManager, UpstreamRouteKind};
pub use upstream::{
DcPingResult, StartupPingResult, UpstreamEgressInfo, UpstreamManager, UpstreamRouteKind,
UpstreamStream,
};
pub mod middle_proxy;

View File

@@ -0,0 +1,60 @@
use std::net::{IpAddr, SocketAddr};
use std::time::Duration;
use shadowsocks::{
ProxyClientStream,
config::{ServerConfig, ServerType},
context::Context,
net::ConnectOpts,
};
use crate::error::{ProxyError, Result};
pub(crate) type ShadowsocksStream = ProxyClientStream<shadowsocks::net::TcpStream>;
fn parse_server_config(url: &str, connect_timeout: Duration) -> Result<ServerConfig> {
let mut config = ServerConfig::from_url(url)
.map_err(|error| ProxyError::Config(format!("invalid shadowsocks url: {error}")))?;
if config.plugin().is_some() {
return Err(ProxyError::Config(
"shadowsocks plugins are not supported".to_string(),
));
}
config.set_timeout(connect_timeout);
Ok(config)
}
pub(crate) fn sanitize_shadowsocks_url(url: &str) -> Result<String> {
Ok(parse_server_config(url, Duration::from_secs(1))?
.addr()
.to_string())
}
fn connect_opts_for_interface(interface: &Option<String>) -> ConnectOpts {
let mut opts = ConnectOpts::default();
if let Some(interface) = interface {
if let Ok(ip) = interface.parse::<IpAddr>() {
opts.bind_local_addr = Some(SocketAddr::new(ip, 0));
} else {
opts.bind_interface = Some(interface.clone());
}
}
opts
}
pub(crate) async fn connect_shadowsocks(
url: &str,
interface: &Option<String>,
target: SocketAddr,
connect_timeout: Duration,
) -> Result<ShadowsocksStream> {
let config = parse_server_config(url, connect_timeout)?;
let context = Context::new_shared(ServerType::Local);
let opts = connect_opts_for_interface(interface);
ProxyClientStream::connect_with_opts(context, &config, target, &opts)
.await
.map_err(ProxyError::Io)
}
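// Illustrative usage sketch (not part of the diff), reusing only items defined
// or imported above; the ss:// URL and target address are placeholders.
async fn connect_demo() -> Result<()> {
    let target: SocketAddr = "203.0.113.5:443".parse().expect("static addr");
    let stream = connect_shadowsocks(
        "ss://2022-blake3-aes-256-gcm:BASE64KEY@ss.example.net:8388",
        &None, // no interface or source-IP binding
        target,
        Duration::from_secs(5),
    )
    .await?;
    // The stream speaks plaintext to `target`; the Shadowsocks layer is transparent.
    drop(stream);
    Ok(())
}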

View File

@@ -4,22 +4,28 @@
#![allow(deprecated)]
use rand::Rng;
use std::collections::{BTreeSet, HashMap};
use std::net::{SocketAddr, IpAddr};
use std::net::{IpAddr, SocketAddr};
use std::pin::Pin;
use std::sync::Arc;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::task::{Context, Poll};
use std::time::Duration;
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::net::TcpStream;
use tokio::sync::RwLock;
use tokio::time::Instant;
use rand::Rng;
use tracing::{debug, warn, info, trace};
use tracing::{debug, info, trace, warn};
use crate::config::{UpstreamConfig, UpstreamType};
use crate::error::{Result, ProxyError};
use crate::error::{ProxyError, Result};
use crate::network::dns_overrides::{resolve_socket_addr, split_host_port};
use crate::protocol::constants::{TG_DATACENTERS_V4, TG_DATACENTERS_V6, TG_DATACENTER_PORT};
use crate::protocol::constants::{TG_DATACENTER_PORT, TG_DATACENTERS_V4, TG_DATACENTERS_V6};
use crate::stats::Stats;
use crate::transport::shadowsocks::{
ShadowsocksStream, connect_shadowsocks, sanitize_shadowsocks_url,
};
use crate::transport::socket::{create_outgoing_socket_bound, resolve_interface_ip};
use crate::transport::socks::{connect_socks4, connect_socks5};
@@ -47,7 +53,10 @@ struct LatencyEma {
impl LatencyEma {
const fn new(alpha: f64) -> Self {
Self { value_ms: None, alpha }
Self {
value_ms: None,
alpha,
}
}
fn update(&mut self, sample_ms: f64) {
@@ -131,11 +140,17 @@ impl UpstreamState {
return Some(ms);
}
let (sum, count) = self.dc_latency.iter()
let (sum, count) = self
.dc_latency
.iter()
.filter_map(|l| l.get())
.fold((0.0, 0u32), |(s, c), v| (s + v, c + 1));
if count > 0 { Some(sum / count as f64) } else { None }
if count > 0 {
Some(sum / count as f64)
} else {
None
}
}
}
@@ -158,11 +173,78 @@ pub struct StartupPingResult {
pub both_available: bool,
}
pub enum UpstreamStream {
Tcp(TcpStream),
Shadowsocks(Box<ShadowsocksStream>),
}
impl std::fmt::Debug for UpstreamStream {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Tcp(_) => f.write_str("UpstreamStream::Tcp(..)"),
Self::Shadowsocks(_) => f.write_str("UpstreamStream::Shadowsocks(..)"),
}
}
}
impl UpstreamStream {
pub fn into_tcp(self) -> Result<TcpStream> {
match self {
Self::Tcp(stream) => Ok(stream),
Self::Shadowsocks(_) => Err(ProxyError::Config(
"shadowsocks upstreams are not supported when general.use_middle_proxy = true"
.to_string(),
)),
}
}
}
impl AsyncRead for UpstreamStream {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<std::io::Result<()>> {
match self.get_mut() {
Self::Tcp(stream) => Pin::new(stream).poll_read(cx, buf),
Self::Shadowsocks(stream) => Pin::new(stream.as_mut()).poll_read(cx, buf),
}
}
}
impl AsyncWrite for UpstreamStream {
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<std::io::Result<usize>> {
match self.get_mut() {
Self::Tcp(stream) => Pin::new(stream).poll_write(cx, buf),
Self::Shadowsocks(stream) => Pin::new(stream.as_mut()).poll_write(cx, buf),
}
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
match self.get_mut() {
Self::Tcp(stream) => Pin::new(stream).poll_flush(cx),
Self::Shadowsocks(stream) => Pin::new(stream.as_mut()).poll_flush(cx),
}
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
match self.get_mut() {
Self::Tcp(stream) => Pin::new(stream).poll_shutdown(cx),
Self::Shadowsocks(stream) => Pin::new(stream.as_mut()).poll_shutdown(cx),
}
}
}
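// Illustrative sketch (not part of the diff): since UpstreamStream implements
// AsyncRead + AsyncWrite + Unpin, relay code can stay transport-agnostic.
// tokio::io::copy_bidirectional is used here purely as an example relay.
async fn relay_demo(
    client: &mut TcpStream,
    upstream: &mut UpstreamStream,
) -> std::io::Result<(u64, u64)> {
    // Pumps bytes both ways until either side hits EOF or errors.
    tokio::io::copy_bidirectional(client, upstream).await
}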
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum UpstreamRouteKind {
Direct,
Socks4,
Socks5,
Shadowsocks,
}
#[derive(Debug, Clone)]
@@ -194,6 +276,7 @@ pub struct UpstreamApiSummarySnapshot {
pub direct_total: usize,
pub socks4_total: usize,
pub socks5_total: usize,
pub shadowsocks_total: usize,
}
#[derive(Debug, Clone)]
@@ -253,7 +336,8 @@ impl UpstreamManager {
connect_failfast_hard_errors: bool,
stats: Arc<Stats>,
) -> Self {
let states = configs.into_iter()
let states = configs
.into_iter()
.filter(|c| c.enabled)
.map(UpstreamState::new)
.collect();
@@ -311,20 +395,13 @@ impl UpstreamManager {
summary.unhealthy_total += 1;
}
let (route_kind, address) = match &upstream.config.upstream_type {
UpstreamType::Direct { .. } => {
summary.direct_total += 1;
(UpstreamRouteKind::Direct, "direct".to_string())
}
UpstreamType::Socks4 { address, .. } => {
summary.socks4_total += 1;
(UpstreamRouteKind::Socks4, address.clone())
}
UpstreamType::Socks5 { address, .. } => {
summary.socks5_total += 1;
(UpstreamRouteKind::Socks5, address.clone())
}
};
let (route_kind, address) = Self::describe_upstream(&upstream.config.upstream_type);
match route_kind {
UpstreamRouteKind::Direct => summary.direct_total += 1,
UpstreamRouteKind::Socks4 => summary.socks4_total += 1,
UpstreamRouteKind::Socks5 => summary.socks5_total += 1,
UpstreamRouteKind::Shadowsocks => summary.shadowsocks_total += 1,
}
let mut dc = Vec::with_capacity(NUM_DCS);
for dc_idx in 0..NUM_DCS {
@@ -352,6 +429,18 @@ impl UpstreamManager {
Some(UpstreamApiSnapshot { summary, upstreams })
}
fn describe_upstream(upstream_type: &UpstreamType) -> (UpstreamRouteKind, String) {
match upstream_type {
UpstreamType::Direct { .. } => (UpstreamRouteKind::Direct, "direct".to_string()),
UpstreamType::Socks4 { address, .. } => (UpstreamRouteKind::Socks4, address.clone()),
UpstreamType::Socks5 { address, .. } => (UpstreamRouteKind::Socks5, address.clone()),
UpstreamType::Shadowsocks { url, .. } => (
UpstreamRouteKind::Shadowsocks,
sanitize_shadowsocks_url(url).unwrap_or_else(|_| "invalid".to_string()),
),
}
}
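// Illustrative check (not part of the diff): sanitize_shadowsocks_url keeps only
// host:port, so the method and key from the ss:// URL never reach the API snapshot
// (the test at the bottom of this file asserts the same value).
fn sanitized_route_demo() {
    let shown = sanitize_shadowsocks_url(
        "ss://2022-blake3-aes-256-gcm:MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDE=@127.0.0.1:8388",
    )
    .unwrap_or_else(|_| "invalid".to_string());
    assert_eq!(shown, "127.0.0.1:8388");
}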
pub fn api_policy_snapshot(&self) -> UpstreamApiPolicySnapshot {
UpstreamApiPolicySnapshot {
connect_retry_attempts: self.connect_retry_attempts,
@@ -539,44 +628,44 @@ impl UpstreamManager {
// Scope filter:
// If scope is set: only scoped and matched items
// If scope is not set: only unscoped items
let filtered_upstreams : Vec<usize> = upstreams.iter()
let filtered_upstreams: Vec<usize> = upstreams
.iter()
.enumerate()
.filter(|(_, u)| {
scope.map_or(
u.config.scopes.is_empty(),
|req_scope| {
u.config.scopes
.split(',')
.map(str::trim)
.any(|s| s == req_scope)
}
)
scope.map_or(u.config.scopes.is_empty(), |req_scope| {
u.config
.scopes
.split(',')
.map(str::trim)
.any(|s| s == req_scope)
})
})
.map(|(i, _)| i)
.collect();
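// Worked example (informational, not part of the diff) of the scope rules above:
//   scopes = ""         -> eligible only when no scope is requested
//   scopes = "tls, me"  -> eligible for Some("tls") or Some("me"), never unscoped
// The split(',').map(str::trim) pair handles the space after the comma.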
// Healthy filter
let healthy: Vec<usize> = filtered_upstreams.iter()
let healthy: Vec<usize> = filtered_upstreams
.iter()
.filter(|&&i| upstreams[i].healthy)
.copied()
.collect();
if filtered_upstreams.is_empty() {
if Self::should_emit_warn(
self.no_upstreams_warn_epoch_ms.as_ref(),
5_000,
) {
warn!(scope = scope, "No upstreams available! Using first (direct?)");
if Self::should_emit_warn(self.no_upstreams_warn_epoch_ms.as_ref(), 5_000) {
warn!(
scope = scope,
"No upstreams available! Using first (direct?)"
);
}
return None;
}
if healthy.is_empty() {
if Self::should_emit_warn(
self.no_healthy_warn_epoch_ms.as_ref(),
5_000,
) {
warn!(scope = scope, "No healthy upstreams available! Using random.");
if Self::should_emit_warn(self.no_healthy_warn_epoch_ms.as_ref(), 5_000) {
warn!(
scope = scope,
"No healthy upstreams available! Using random."
);
}
return Some(filtered_upstreams[rand::rng().gen_range(0..filtered_upstreams.len())]);
}
@@ -585,14 +674,18 @@ impl UpstreamManager {
return Some(healthy[0]);
}
let weights: Vec<(usize, f64)> = healthy.iter().map(|&i| {
let base = upstreams[i].config.weight as f64;
let latency_factor = upstreams[i].effective_latency(dc_idx)
.map(|ms| if ms > 1.0 { 1000.0 / ms } else { 1000.0 })
.unwrap_or(1.0);
let weights: Vec<(usize, f64)> = healthy
.iter()
.map(|&i| {
let base = upstreams[i].config.weight as f64;
let latency_factor = upstreams[i]
.effective_latency(dc_idx)
.map(|ms| if ms > 1.0 { 1000.0 / ms } else { 1000.0 })
.unwrap_or(1.0);
(i, base * latency_factor)
}).collect();
(i, base * latency_factor)
})
.collect();
let total: f64 = weights.iter().map(|(_, w)| w).sum();
@@ -620,8 +713,34 @@ impl UpstreamManager {
}
/// Connect to target through a selected upstream.
pub async fn connect(&self, target: SocketAddr, dc_idx: Option<i16>, scope: Option<&str>) -> Result<TcpStream> {
let (stream, _) = self.connect_with_details(target, dc_idx, scope).await?;
pub async fn connect(
&self,
target: SocketAddr,
dc_idx: Option<i16>,
scope: Option<&str>,
) -> Result<UpstreamStream> {
let idx = self
.select_upstream(dc_idx, scope)
.await
.ok_or_else(|| ProxyError::Config("No upstreams available".to_string()))?;
let mut upstream = {
let guard = self.upstreams.read().await;
guard[idx].config.clone()
};
if let Some(s) = scope {
upstream.selected_scope = s.to_string();
}
let bind_rr = {
let guard = self.upstreams.read().await;
guard.get(idx).map(|u| u.bind_rr.clone())
};
let (stream, _) = self
.connect_selected_upstream(idx, upstream, target, dc_idx, bind_rr)
.await?;
Ok(stream)
}
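// Illustrative call site (not part of the diff): `manager` and `target` are
// assumed; DC index 2 and the unscoped selection are arbitrary example inputs.
async fn select_and_connect_demo(manager: &UpstreamManager, target: SocketAddr) -> Result<()> {
    let stream = manager.connect(target, Some(2), None).await?;
    debug!(stream = ?stream, "connected via selected upstream");
    Ok(())
}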
@@ -632,7 +751,9 @@ impl UpstreamManager {
dc_idx: Option<i16>,
scope: Option<&str>,
) -> Result<(TcpStream, UpstreamEgressInfo)> {
let idx = self.select_upstream(dc_idx, scope).await
let idx = self
.select_upstream(dc_idx, scope)
.await
.ok_or_else(|| ProxyError::Config("No upstreams available".to_string()))?;
let mut upstream = {
@@ -650,6 +771,20 @@ impl UpstreamManager {
guard.get(idx).map(|u| u.bind_rr.clone())
};
let (stream, egress) = self
.connect_selected_upstream(idx, upstream, target, dc_idx, bind_rr)
.await?;
Ok((stream.into_tcp()?, egress))
}
async fn connect_selected_upstream(
&self,
idx: usize,
upstream: UpstreamConfig,
target: SocketAddr,
dc_idx: Option<i16>,
bind_rr: Option<Arc<AtomicUsize>>,
) -> Result<(UpstreamStream, UpstreamEgressInfo)> {
let connect_started_at = Instant::now();
let mut last_error: Option<ProxyError> = None;
let mut attempts_used = 0u32;
@@ -662,8 +797,8 @@ impl UpstreamManager {
break;
}
let remaining_budget = self.connect_budget.saturating_sub(elapsed);
let attempt_timeout = Duration::from_secs(DIRECT_CONNECT_TIMEOUT_SECS)
.min(remaining_budget);
let attempt_timeout =
Duration::from_secs(DIRECT_CONNECT_TIMEOUT_SECS).min(remaining_budget);
if attempt_timeout.is_zero() {
last_error = Some(ProxyError::ConnectionTimeout {
addr: target.to_string(),
@@ -786,9 +921,12 @@ impl UpstreamManager {
target: SocketAddr,
bind_rr: Option<Arc<AtomicUsize>>,
connect_timeout: Duration,
) -> Result<(TcpStream, UpstreamEgressInfo)> {
) -> Result<(UpstreamStream, UpstreamEgressInfo)> {
match &config.upstream_type {
UpstreamType::Direct { interface, bind_addresses } => {
UpstreamType::Direct {
interface,
bind_addresses,
} => {
let bind_ip = Self::resolve_bind_address(
interface,
bind_addresses,
@@ -796,9 +934,7 @@ impl UpstreamManager {
bind_rr.as_deref(),
true,
);
if bind_ip.is_none()
&& bind_addresses.as_ref().is_some_and(|v| !v.is_empty())
{
if bind_ip.is_none() && bind_addresses.as_ref().is_some_and(|v| !v.is_empty()) {
return Err(ProxyError::Config(format!(
"No valid bind_addresses for target family {target}"
)));
@@ -813,8 +949,10 @@ impl UpstreamManager {
socket.set_nonblocking(true)?;
match socket.connect(&target.into()) {
Ok(()) => {},
Err(err) if err.raw_os_error() == Some(libc::EINPROGRESS) || err.kind() == std::io::ErrorKind::WouldBlock => {},
Ok(()) => {}
Err(err)
if err.raw_os_error() == Some(libc::EINPROGRESS)
|| err.kind() == std::io::ErrorKind::WouldBlock => {}
Err(err) => return Err(ProxyError::Io(err)),
}
@@ -836,7 +974,7 @@ impl UpstreamManager {
let local_addr = stream.local_addr().ok();
Ok((
stream,
UpstreamStream::Tcp(stream),
UpstreamEgressInfo {
upstream_id,
route_kind: UpstreamRouteKind::Direct,
@@ -846,8 +984,12 @@ impl UpstreamManager {
socks_proxy_addr: None,
},
))
},
UpstreamType::Socks4 { address, interface, user_id } => {
}
UpstreamType::Socks4 {
address,
interface,
user_id,
} => {
// Try to parse as SocketAddr first (IP:port), otherwise treat as hostname:port
let mut stream = if let Ok(proxy_addr) = address.parse::<SocketAddr>() {
// IP:port format - use socket with optional interface binding
@@ -863,8 +1005,10 @@ impl UpstreamManager {
socket.set_nonblocking(true)?;
match socket.connect(&proxy_addr.into()) {
Ok(()) => {},
Err(err) if err.raw_os_error() == Some(libc::EINPROGRESS) || err.kind() == std::io::ErrorKind::WouldBlock => {},
Ok(()) => {}
Err(err)
if err.raw_os_error() == Some(libc::EINPROGRESS)
|| err.kind() == std::io::ErrorKind::WouldBlock => {}
Err(err) => return Err(ProxyError::Io(err)),
}
@@ -888,14 +1032,16 @@ impl UpstreamManager {
// Hostname:port format - use tokio DNS resolution
// Note: interface binding is not supported for hostnames
if interface.is_some() {
warn!("SOCKS4 interface binding is not supported for hostname addresses, ignoring");
warn!(
"SOCKS4 interface binding is not supported for hostname addresses, ignoring"
);
}
Self::connect_hostname_with_dns_override(address, connect_timeout).await?
};
// replace socks user_id with config.selected_scope, if set
let scope: Option<&str> = Some(config.selected_scope.as_str())
.filter(|s| !s.is_empty());
let scope: Option<&str> =
Some(config.selected_scope.as_str()).filter(|s| !s.is_empty());
let _user_id: Option<&str> = scope.or(user_id.as_deref());
let bound = match tokio::time::timeout(
@@ -915,7 +1061,7 @@ impl UpstreamManager {
let local_addr = stream.local_addr().ok();
let socks_proxy_addr = stream.peer_addr().ok();
Ok((
stream,
UpstreamStream::Tcp(stream),
UpstreamEgressInfo {
upstream_id,
route_kind: UpstreamRouteKind::Socks4,
@@ -925,8 +1071,13 @@ impl UpstreamManager {
socks_proxy_addr,
},
))
},
UpstreamType::Socks5 { address, interface, username, password } => {
}
UpstreamType::Socks5 {
address,
interface,
username,
password,
} => {
// Try to parse as SocketAddr first (IP:port), otherwise treat as hostname:port
let mut stream = if let Ok(proxy_addr) = address.parse::<SocketAddr>() {
// IP:port format - use socket with optional interface binding
@@ -942,8 +1093,10 @@ impl UpstreamManager {
socket.set_nonblocking(true)?;
match socket.connect(&proxy_addr.into()) {
Ok(()) => {},
Err(err) if err.raw_os_error() == Some(libc::EINPROGRESS) || err.kind() == std::io::ErrorKind::WouldBlock => {},
Ok(()) => {}
Err(err)
if err.raw_os_error() == Some(libc::EINPROGRESS)
|| err.kind() == std::io::ErrorKind::WouldBlock => {}
Err(err) => return Err(ProxyError::Io(err)),
}
@@ -967,15 +1120,17 @@ impl UpstreamManager {
// Hostname:port format - use tokio DNS resolution
// Note: interface binding is not supported for hostnames
if interface.is_some() {
warn!("SOCKS5 interface binding is not supported for hostname addresses, ignoring");
warn!(
"SOCKS5 interface binding is not supported for hostname addresses, ignoring"
);
}
Self::connect_hostname_with_dns_override(address, connect_timeout).await?
};
debug!(config = ?config, "Socks5 connection");
// replace socks user:pass with config.selected_scope, if set
let scope: Option<&str> = Some(config.selected_scope.as_str())
.filter(|s| !s.is_empty());
let scope: Option<&str> =
Some(config.selected_scope.as_str()).filter(|s| !s.is_empty());
let _username: Option<&str> = scope.or(username.as_deref());
let _password: Option<&str> = scope.or(password.as_deref());
@@ -996,7 +1151,7 @@ impl UpstreamManager {
let local_addr = stream.local_addr().ok();
let socks_proxy_addr = stream.peer_addr().ok();
Ok((
stream,
UpstreamStream::Tcp(stream),
UpstreamEgressInfo {
upstream_id,
route_kind: UpstreamRouteKind::Socks5,
@@ -1006,7 +1161,22 @@ impl UpstreamManager {
socks_proxy_addr,
},
))
},
}
UpstreamType::Shadowsocks { url, interface } => {
let stream = connect_shadowsocks(url, interface, target, connect_timeout).await?;
let local_addr = stream.get_ref().local_addr().ok();
Ok((
UpstreamStream::Shadowsocks(Box::new(stream)),
UpstreamEgressInfo {
upstream_id,
route_kind: UpstreamRouteKind::Shadowsocks,
local_addr,
direct_bind_ip: None,
socks_bound_addr: None,
socks_proxy_addr: None,
},
))
}
}
}
@@ -1023,7 +1193,9 @@ impl UpstreamManager {
) -> Vec<StartupPingResult> {
let upstreams: Vec<(usize, UpstreamConfig, Arc<AtomicUsize>)> = {
let guard = self.upstreams.read().await;
guard.iter().enumerate()
guard
.iter()
.enumerate()
.map(|(i, u)| (i, u.config.clone(), u.bind_rr.clone()))
.collect()
};
@@ -1051,6 +1223,11 @@ impl UpstreamManager {
}
UpstreamType::Socks4 { address, .. } => format!("socks4://{}", address),
UpstreamType::Socks5 { address, .. } => format!("socks5://{}", address),
UpstreamType::Shadowsocks { url, .. } => {
let address =
sanitize_shadowsocks_url(url).unwrap_or_else(|_| "invalid".to_string());
format!("shadowsocks://{address}")
}
};
let mut v6_results = Vec::with_capacity(NUM_DCS);
@@ -1061,8 +1238,14 @@ impl UpstreamManager {
let result = tokio::time::timeout(
Duration::from_secs(DC_PING_TIMEOUT_SECS),
self.ping_single_dc(*upstream_idx, upstream_config, Some(bind_rr.clone()), addr_v6)
).await;
self.ping_single_dc(
*upstream_idx,
upstream_config,
Some(bind_rr.clone()),
addr_v6,
),
)
.await;
let ping_result = match result {
Ok(Ok(rtt_ms)) => {
@@ -1112,8 +1295,14 @@ impl UpstreamManager {
let result = tokio::time::timeout(
Duration::from_secs(DC_PING_TIMEOUT_SECS),
self.ping_single_dc(*upstream_idx, upstream_config, Some(bind_rr.clone()), addr_v4)
).await;
self.ping_single_dc(
*upstream_idx,
upstream_config,
Some(bind_rr.clone()),
addr_v4,
),
)
.await;
let ping_result = match result {
Ok(Ok(rtt_ms)) => {
@@ -1162,7 +1351,7 @@ impl UpstreamManager {
Err(_) => {
warn!(dc = %dc_key, "Invalid dc_overrides key, skipping");
continue;
},
}
_ => continue,
};
let dc_idx = dc_num as usize;
@@ -1175,8 +1364,14 @@ impl UpstreamManager {
}
let result = tokio::time::timeout(
Duration::from_secs(DC_PING_TIMEOUT_SECS),
self.ping_single_dc(*upstream_idx, upstream_config, Some(bind_rr.clone()), addr)
).await;
self.ping_single_dc(
*upstream_idx,
upstream_config,
Some(bind_rr.clone()),
addr,
),
)
.await;
let ping_result = match result {
Ok(Ok(rtt_ms)) => DcPingResult {
@@ -1205,7 +1400,9 @@ impl UpstreamManager {
v4_results.push(ping_result);
}
}
Err(_) => warn!(dc = %dc_idx, addr = %addr_str, "Invalid dc_overrides address, skipping"),
Err(_) => {
warn!(dc = %dc_idx, addr = %addr_str, "Invalid dc_overrides address, skipping")
}
}
}
}
@@ -1381,12 +1578,8 @@ impl UpstreamManager {
ipv6_enabled: bool,
dc_overrides: HashMap<String, Vec<String>>,
) {
let groups = Self::build_health_check_groups(
prefer_ipv6,
ipv4_enabled,
ipv6_enabled,
&dc_overrides,
);
let groups =
Self::build_health_check_groups(prefer_ipv6, ipv4_enabled, ipv6_enabled, &dc_overrides);
let required_healthy_groups = Self::required_healthy_group_count(groups.len());
let mut endpoint_rotation: HashMap<(usize, i16, bool), usize> = HashMap::new();
@@ -1416,13 +1609,16 @@ impl UpstreamManager {
let mut group_ok = false;
let mut group_rtt_ms = None;
for (is_primary, endpoints) in [(true, &group.primary), (false, &group.fallback)] {
for (is_primary, endpoints) in
[(true, &group.primary), (false, &group.fallback)]
{
if endpoints.is_empty() {
continue;
}
let rotation_key = (i, group.dc_idx, is_primary);
let start_idx = *endpoint_rotation.entry(rotation_key).or_insert(0) % endpoints.len();
let start_idx =
*endpoint_rotation.entry(rotation_key).or_insert(0) % endpoints.len();
let mut next_idx = (start_idx + 1) % endpoints.len();
for step in 0..endpoints.len() {
@@ -1544,8 +1740,7 @@ impl UpstreamManager {
return None;
}
UpstreamState::dc_array_idx(dc_idx)
.map(|idx| guard[0].dc_ip_pref[idx])
UpstreamState::dc_array_idx(dc_idx).map(|idx| guard[0].dc_ip_pref[idx])
}
/// Get preferred DC address based on config preference
@@ -1566,6 +1761,12 @@ impl UpstreamManager {
#[cfg(test)]
mod tests {
use super::*;
use std::sync::Arc;
use crate::stats::Stats;
const TEST_SHADOWSOCKS_URL: &str =
"ss://2022-blake3-aes-256-gcm:MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDE=@127.0.0.1:8388";
#[test]
fn required_healthy_group_count_applies_three_group_threshold() {
@@ -1596,15 +1797,18 @@ mod tests {
assert!(dc2.primary.iter().all(|addr| addr.is_ipv6()));
assert!(dc2.fallback.iter().all(|addr| addr.is_ipv4()));
assert!(dc2
.primary
.contains(&"[2001:db8::10]:443".parse::<SocketAddr>().unwrap()));
assert!(dc2
.fallback
.contains(&"203.0.113.10:443".parse::<SocketAddr>().unwrap()));
assert!(dc2
.fallback
.contains(&"203.0.113.11:443".parse::<SocketAddr>().unwrap()));
assert!(
dc2.primary
.contains(&"[2001:db8::10]:443".parse::<SocketAddr>().unwrap())
);
assert!(
dc2.fallback
.contains(&"203.0.113.10:443".parse::<SocketAddr>().unwrap())
);
assert!(
dc2.fallback
.contains(&"203.0.113.11:443".parse::<SocketAddr>().unwrap())
);
}
#[test]
@@ -1626,12 +1830,14 @@ mod tests {
.expect("override-only dc group must be present");
assert_eq!(dc9.primary.len(), 2);
assert!(dc9
.primary
.contains(&"198.51.100.1:443".parse::<SocketAddr>().unwrap()));
assert!(dc9
.primary
.contains(&"198.51.100.2:443".parse::<SocketAddr>().unwrap()));
assert!(
dc9.primary
.contains(&"198.51.100.1:443".parse::<SocketAddr>().unwrap())
);
assert!(
dc9.primary
.contains(&"198.51.100.2:443".parse::<SocketAddr>().unwrap())
);
assert!(dc9.fallback.is_empty());
}
@@ -1678,4 +1884,36 @@ mod tests {
assert_eq!(bind, None);
}
#[test]
fn api_snapshot_reports_shadowsocks_as_sanitized_route() {
let manager = UpstreamManager::new(
vec![UpstreamConfig {
upstream_type: UpstreamType::Shadowsocks {
url: TEST_SHADOWSOCKS_URL.to_string(),
interface: None,
},
weight: 2,
enabled: true,
scopes: String::new(),
selected_scope: String::new(),
}],
1,
100,
1000,
1,
false,
Arc::new(Stats::new()),
);
let snapshot = manager.try_api_snapshot().expect("snapshot");
assert_eq!(snapshot.summary.configured_total, 1);
assert_eq!(snapshot.summary.shadowsocks_total, 1);
assert_eq!(snapshot.upstreams.len(), 1);
assert_eq!(
snapshot.upstreams[0].route_kind,
UpstreamRouteKind::Shadowsocks
);
assert_eq!(snapshot.upstreams[0].address, "127.0.0.1:8388");
}
}