Compare commits

...

57 Commits

Author SHA1 Message Date
Alexey
9de8b2f0bf Update release.yml 2026-03-22 10:36:54 +03:00
Alexey
4e5b67bae8 Update release.yml 2026-03-22 10:28:06 +03:00
Alexey
73f218b62a Update release.yml 2026-03-22 00:27:16 +03:00
Alexey
13ff3af1db Update release.yml 2026-03-22 00:18:54 +03:00
Alexey
77f717e3d1 Merge pull request #534 from telemt/workflow
Update release.yml
2026-03-22 00:16:11 +03:00
Alexey
db3e246390 Update release.yml 2026-03-22 00:15:56 +03:00
Alexey
b74ba38d40 Merge pull request #533 from telemt/workflow
Workflow
2026-03-22 00:10:38 +03:00
Alexey
269fce839f Update Dockerfile 2026-03-22 00:10:19 +03:00
Alexey
5a4072c964 Update release.yml 2026-03-22 00:08:16 +03:00
Alexey
a95678988a Merge pull request #530 from telemt/workflow
Update release.yml
2026-03-21 21:45:23 +03:00
Alexey
b17482ede3 Update release.yml 2026-03-21 21:45:01 +03:00
Alexey
e7a1d26e6e Merge pull request #526 from telemt/workflow
Update release.yml
2026-03-21 16:48:53 +03:00
Alexey
b91c6cb339 Update release.yml 2026-03-21 16:48:42 +03:00
Alexey
c4e7f54cbe Merge pull request #524 from telemt/workflow
Update release.yml
2026-03-21 16:31:15 +03:00
Alexey
f85205d48d Update release.yml 2026-03-21 16:31:05 +03:00
Alexey
d767ec02ee Update release.yml 2026-03-21 16:24:06 +03:00
Alexey
88a4c652b6 Merge pull request #523 from telemt/workflow
Update release.yml
2026-03-21 16:18:48 +03:00
Alexey
ea2d964502 Update release.yml 2026-03-21 16:18:24 +03:00
Alexey
3055637571 Merge pull request #522 from telemt/workflow
Update release.yml
2026-03-21 16:04:56 +03:00
Alexey
19b84b9d73 Update release.yml 2026-03-21 16:03:54 +03:00
Alexey
6ead8b1922 Merge pull request #521 from telemt/workflow
Update release.yml
2026-03-21 15:58:36 +03:00
Alexey
63aa1038c0 Update release.yml 2026-03-21 15:58:25 +03:00
Alexey
24594e648e Merge pull request #519 from telemt/workflow
Update release.yml
2026-03-21 15:21:47 +03:00
Alexey
e8b38ea860 Update release.yml 2026-03-21 15:21:25 +03:00
Alexey
f3598cf309 Merge pull request #514 from M1h4n1k/patch-1
docs: fix typo in ru QUICK_START
2026-03-21 10:22:52 +03:00
Michael Karpov
777b15b1da Update section title for Docker usage
Изменено название раздела с 'Запуск в Docker Compose' на 'Запуск без Docker Compose'.
2026-03-20 22:23:36 +02:00
Alexey
99ba2f7bbc Add Shadowsocks upstream support: merge pull request #430 from hunmar/feat/shadowsocks-upstream
Add Shadowsocks upstream support
2026-03-20 18:35:28 +03:00
Maxim Myalin
e14dd07220 Merge branch 'main' into feat/shadowsocks-upstream 2026-03-20 17:08:47 +03:00
Maxim Myalin
d93a4fbd53 Merge remote-tracking branch 'origin/main' into feat/shadowsocks-upstream
# Conflicts:
#	src/tls_front/fetcher.rs
2026-03-20 17:07:47 +03:00
Alexey
2798039ab8 Merge pull request #507 from dzhus/patch-2
Fix typo in systemd service metadata
2026-03-20 17:04:41 +03:00
Alexey
342b0119dd Merge pull request #509 from telemt/bump
Update Cargo.toml
2026-03-20 16:27:39 +03:00
Alexey
2605929b93 Update Cargo.toml 2026-03-20 16:26:57 +03:00
Alexey
36814b6355 ME Draining on Dual-Stack + TLS Fetcher Upstream Selection: merge pull request #508 from telemt/flow
ME Draining on Dual-Stack + TLS Fetcher Upstream Selection
2026-03-20 16:24:17 +03:00
Alexey
269ba537ad ME Draining on Dual-Stack
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-20 16:07:12 +03:00
Alexey
5c0eb6dbe8 TLS Fetcher Upstream Selection
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-20 16:05:24 +03:00
Maxim Myalin
66867d3f5b Merge branch 'main' into feat/shadowsocks-upstream
# Conflicts:
#	Cargo.lock
#	src/api/runtime_stats.rs
2026-03-20 15:22:36 +03:00
Dmitry Dzhus
db36945293 Fix typo in systemd service metadata 2026-03-20 12:00:41 +00:00
Alexey
dd07fa9453 Merge pull request #505 from telemt/flow-me
Teardown Monitoring in API and Metrics
2026-03-20 12:59:39 +03:00
Alexey
bb1a372ac4 Merge branch 'main' into flow-me 2026-03-20 12:59:32 +03:00
Alexey
6661401a34 Merge pull request #506 from telemt/about-releases
Update README.md
2026-03-20 12:59:09 +03:00
Alexey
cd65fb432b Update README.md 2026-03-20 12:58:55 +03:00
Alexey
caf0717789 Merge branch 'main' into flow-me 2026-03-20 12:57:27 +03:00
Alexey
4a610d83a3 Update Cargo.toml
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-20 12:56:13 +03:00
Alexey
aba4205dcc Teardown Monitoring in Metrics
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-20 12:46:35 +03:00
Alexey
ef9b7b1492 Teardown Monitoring in API
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-20 12:45:53 +03:00
Alexey
d112f15b90 ME Writers Anti-stuck + Quarantine fixes + ME Writers Advanced Cleanup + Authoritative Teardown + Orphan Watchdog + Force-Close Safety Policy: merge pull request #504 from telemt/flow-me
ME Writers Anti-stuck + Quarantine fixes + ME Writers Advanced Cleanup + Authoritative Teardown + Orphan Watchdog + Force-Close Safety Policy
2026-03-20 12:41:45 +03:00
Alexey
b55b264345 Merge branch 'main' into flow-me 2026-03-20 12:20:51 +03:00
Alexey
f61d25ebe0 Authoritative Teardown + Orphan Watchdog + Force-Close Safety Policy
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-20 12:11:47 +03:00
Alexey
ed4d1167dd ME Writers Advanced Cleanup
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-20 12:09:23 +03:00
Alexey
dc6948cf39 Merge pull request #502 from telemt/about-releases
Update README.md
2026-03-20 11:25:19 +03:00
Alexey
4f11aa0772 Update README.md 2026-03-20 11:25:07 +03:00
Alexey
e40361b171 Cargo.toml + Cargo.lock
Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com>
2026-03-20 00:45:04 +03:00
Alexey
1c6c73beda ME Writers Anti-stuck and Quarantine fixes
Co-Authored-By: Nook Scheel <nook@live.ru>
2026-03-20 00:41:40 +03:00
Alexey
dd8ef4d996 Merge branch 'main' into feat/shadowsocks-upstream 2026-03-19 17:19:01 +03:00
Maxim Myalin
062464175e Merge branch 'main' into feat/shadowsocks-upstream 2026-03-18 12:38:23 +03:00
Maxim Myalin
a5983c17d3 Add Docker build context ignore file 2026-03-18 12:36:48 +03:00
Maxim Myalin
def42f0baa Add Shadowsocks upstream support 2026-03-18 12:36:44 +03:00
42 changed files with 3965 additions and 617 deletions

8
.dockerignore Normal file
View File

@@ -0,0 +1,8 @@
.git
.github
target
.kilocode
cache
tlsfront
*.tar
*.tar.gz

View File

@@ -6,36 +6,34 @@ on:
- '[0-9]+.[0-9]+.[0-9]+' - '[0-9]+.[0-9]+.[0-9]+'
workflow_dispatch: workflow_dispatch:
concurrency:
group: release-${{ github.ref }}
cancel-in-progress: true
permissions: permissions:
contents: read contents: read
packages: write packages: write
env: env:
CARGO_TERM_COLOR: always CARGO_TERM_COLOR: always
BINARY_NAME: telemt
jobs: jobs:
build: # ==========================
name: Build ${{ matrix.target }} # GNU / glibc
# ==========================
build-gnu:
name: GNU ${{ matrix.target }}
runs-on: ubuntu-latest runs-on: ubuntu-latest
permissions:
contents: read
strategy: strategy:
fail-fast: false fail-fast: false
matrix: matrix:
include: include:
- target: x86_64-unknown-linux-gnu - target: x86_64-unknown-linux-gnu
artifact_name: telemt asset: telemt-x86_64-linux-gnu
asset_name: telemt-x86_64-linux-gnu
- target: aarch64-unknown-linux-gnu - target: aarch64-unknown-linux-gnu
artifact_name: telemt asset: telemt-aarch64-linux-gnu
asset_name: telemt-aarch64-linux-gnu
- target: x86_64-unknown-linux-musl
artifact_name: telemt
asset_name: telemt-x86_64-linux-musl
- target: aarch64-unknown-linux-musl
artifact_name: telemt
asset_name: telemt-aarch64-linux-musl
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
@@ -43,12 +41,20 @@ jobs:
- uses: dtolnay/rust-toolchain@v1 - uses: dtolnay/rust-toolchain@v1
with: with:
toolchain: stable toolchain: stable
targets: ${{ matrix.target }} targets: |
x86_64-unknown-linux-gnu
aarch64-unknown-linux-gnu
- name: Install cross-compilation tools - name: Install deps
run: | run: |
sudo apt-get update sudo apt-get update
sudo apt-get install -y gcc-aarch64-linux-gnu sudo apt-get install -y \
build-essential \
clang \
lld \
pkg-config \
gcc-aarch64-linux-gnu \
g++-aarch64-linux-gnu
- uses: actions/cache@v4 - uses: actions/cache@v4
with: with:
@@ -56,41 +62,183 @@ jobs:
~/.cargo/registry ~/.cargo/registry
~/.cargo/git ~/.cargo/git
target target
key: ${{ runner.os }}-${{ matrix.target }}-cargo-${{ hashFiles('**/Cargo.lock') }} key: gnu-${{ matrix.target }}-${{ hashFiles('**/Cargo.lock') }}
restore-keys: |
${{ runner.os }}-${{ matrix.target }}-cargo-
- name: Install cross - name: Build
run: cargo install cross --git https://github.com/cross-rs/cross
- name: Build Release
env:
RUSTFLAGS: ${{ contains(matrix.target, 'musl') && '-C target-feature=+crt-static' || '' }}
run: cross build --release --target ${{ matrix.target }}
- name: Package binary
run: | run: |
cd target/${{ matrix.target }}/release if [ "${{ matrix.target }}" = "aarch64-unknown-linux-gnu" ]; then
tar -czvf ${{ matrix.asset_name }}.tar.gz ${{ matrix.artifact_name }} export CC=aarch64-linux-gnu-gcc
sha256sum ${{ matrix.asset_name }}.tar.gz > ${{ matrix.asset_name }}.sha256 export CXX=aarch64-linux-gnu-g++
export CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc
export CXX_aarch64_unknown_linux_gnu=aarch64-linux-gnu-g++
export RUSTFLAGS="-C linker=aarch64-linux-gnu-gcc"
else
export CC=clang
export CXX=clang++
export CC_x86_64_unknown_linux_gnu=clang
export CXX_x86_64_unknown_linux_gnu=clang++
export RUSTFLAGS="-C linker=clang -C link-arg=-fuse-ld=lld"
fi
cargo build --release --target ${{ matrix.target }}
- name: Package
run: |
mkdir -p dist
BIN=target/${{ matrix.target }}/release/${{ env.BINARY_NAME }}
cp "$BIN" dist/${{ env.BINARY_NAME }}-${{ matrix.target }}
cd dist
tar -czf ${{ matrix.asset }}.tar.gz ${{ env.BINARY_NAME }}-${{ matrix.target }}
sha256sum ${{ matrix.asset }}.tar.gz > ${{ matrix.asset }}.sha256
- uses: actions/upload-artifact@v4 - uses: actions/upload-artifact@v4
with: with:
name: ${{ matrix.asset_name }} name: ${{ matrix.asset }}
path: | path: |
target/${{ matrix.target }}/release/${{ matrix.asset_name }}.tar.gz dist/${{ matrix.asset }}.tar.gz
target/${{ matrix.target }}/release/${{ matrix.asset_name }}.sha256 dist/${{ matrix.asset }}.sha256
build-docker-image: # ==========================
needs: build # MUSL
# ==========================
build-musl:
name: MUSL ${{ matrix.target }}
runs-on: ubuntu-latest runs-on: ubuntu-latest
permissions:
contents: read container:
packages: write image: rust:slim-bookworm
strategy:
fail-fast: false
matrix:
include:
- target: x86_64-unknown-linux-musl
asset: telemt-x86_64-linux-musl
- target: aarch64-unknown-linux-musl
asset: telemt-aarch64-linux-musl
steps: steps:
- uses: actions/checkout@v4 - uses: actions/checkout@v4
- name: Install deps
run: |
apt-get update
apt-get install -y \
musl-tools \
pkg-config \
curl
# 💾 cache toolchain
- uses: actions/cache@v4
if: matrix.target == 'aarch64-unknown-linux-musl'
with:
path: ~/.musl-aarch64
key: musl-toolchain-aarch64-v1
# 🔥 надёжная установка
- name: Install aarch64 musl toolchain
if: matrix.target == 'aarch64-unknown-linux-musl'
run: |
set -e
TOOLCHAIN_DIR="$HOME/.musl-aarch64"
ARCHIVE="aarch64-linux-musl-cross.tgz"
if [ -x "$TOOLCHAIN_DIR/bin/aarch64-linux-musl-gcc" ]; then
echo "✅ musl toolchain already installed"
else
echo "⬇️ downloading musl toolchain..."
download() {
url="$1"
echo "→ trying $url"
curl -fL \
--retry 5 \
--retry-delay 3 \
--connect-timeout 10 \
--max-time 120 \
-o "$ARCHIVE" "$url" && return 0
return 1
}
download "https://musl.cc/$ARCHIVE" || \
download "https://more.musl.cc/$ARCHIVE" || \
{ echo "❌ failed to download musl toolchain"; exit 1; }
mkdir -p "$TOOLCHAIN_DIR"
tar -xzf "$ARCHIVE" --strip-components=1 -C "$TOOLCHAIN_DIR"
fi
echo "$TOOLCHAIN_DIR/bin" >> $GITHUB_PATH
- name: Add rust target
run: rustup target add ${{ matrix.target }}
- uses: actions/cache@v4
with:
path: |
/usr/local/cargo/registry
/usr/local/cargo/git
target
key: musl-${{ matrix.target }}-${{ hashFiles('**/Cargo.lock') }}
- name: Build
run: |
if [ "${{ matrix.target }}" = "aarch64-unknown-linux-musl" ]; then
export CC=aarch64-linux-musl-gcc
export CC_aarch64_unknown_linux_musl=aarch64-linux-musl-gcc
export RUSTFLAGS="-C target-feature=+crt-static -C linker=aarch64-linux-musl-gcc"
else
export CC=musl-gcc
export CC_x86_64_unknown_linux_musl=musl-gcc
export RUSTFLAGS="-C target-feature=+crt-static"
fi
cargo build --release --target ${{ matrix.target }}
- name: Package
run: |
mkdir -p dist
BIN=target/${{ matrix.target }}/release/${{ env.BINARY_NAME }}
cp "$BIN" dist/${{ env.BINARY_NAME }}-${{ matrix.target }}
cd dist
tar -czf ${{ matrix.asset }}.tar.gz ${{ env.BINARY_NAME }}-${{ matrix.target }}
sha256sum ${{ matrix.asset }}.tar.gz > ${{ matrix.asset }}.sha256
- uses: actions/upload-artifact@v4
with:
name: ${{ matrix.asset }}
path: |
dist/${{ matrix.asset }}.tar.gz
dist/${{ matrix.asset }}.sha256
# ==========================
# Docker
# ==========================
docker:
name: Docker
runs-on: ubuntu-latest
needs: [build-gnu, build-musl]
continue-on-error: true
steps:
- uses: actions/checkout@v4
- uses: actions/download-artifact@v4
with:
path: artifacts
- name: Extract binaries
run: |
mkdir dist
find artifacts -name "*.tar.gz" -exec tar -xzf {} -C dist \;
cp dist/telemt-x86_64-unknown-linux-musl dist/telemt || true
- uses: docker/setup-qemu-action@v3 - uses: docker/setup-qemu-action@v3
- uses: docker/setup-buildx-action@v3 - uses: docker/setup-buildx-action@v3
@@ -105,35 +253,43 @@ jobs:
id: vars id: vars
run: echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT run: echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT
- name: Build and push - name: Build & Push
uses: docker/build-push-action@v6 uses: docker/build-push-action@v6
with: with:
context: . context: .
push: true push: true
platforms: linux/amd64,linux/arm64
tags: | tags: |
ghcr.io/${{ github.repository }}:${{ steps.vars.outputs.VERSION }} ghcr.io/${{ github.repository }}:${{ steps.vars.outputs.VERSION }}
ghcr.io/${{ github.repository }}:latest ghcr.io/${{ github.repository }}:latest
build-args: |
BINARY=dist/telemt
# ==========================
# Release
# ==========================
release: release:
name: Create Release name: Release
needs: build
runs-on: ubuntu-latest runs-on: ubuntu-latest
needs: [build-gnu, build-musl]
permissions: permissions:
contents: write contents: write
steps: steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- uses: actions/download-artifact@v4 - uses: actions/download-artifact@v4
with: with:
path: artifacts path: artifacts
- name: Flatten artifacts
run: |
mkdir dist
find artifacts -type f -exec cp {} dist/ \;
- name: Create Release - name: Create Release
uses: softprops/action-gh-release@v2 uses: softprops/action-gh-release@v2
with: with:
files: artifacts/**/* files: dist/*
generate_release_notes: true generate_release_notes: true
draft: false draft: false
prerelease: ${{ contains(github.ref, '-rc') || contains(github.ref, '-beta') || contains(github.ref, '-alpha') }} prerelease: ${{ contains(github.ref, '-rc') || contains(github.ref, '-beta') || contains(github.ref, '-alpha') }}

903
Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "telemt" name = "telemt"
version = "3.3.25" version = "3.3.28"
edition = "2024" edition = "2024"
[dependencies] [dependencies]
@@ -26,6 +26,7 @@ zeroize = { version = "1.8", features = ["derive"] }
# Network # Network
socket2 = { version = "0.5", features = ["all"] } socket2 = { version = "0.5", features = ["all"] }
nix = { version = "0.28", default-features = false, features = ["net"] } nix = { version = "0.28", default-features = false, features = ["net"] }
shadowsocks = { version = "1.24", features = ["aead-cipher-2022"] }
# Serialization # Serialization
serde = { version = "1.0", features = ["derive"] } serde = { version = "1.0", features = ["derive"] }
@@ -40,6 +41,7 @@ tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] } tracing-subscriber = { version = "0.3", features = ["env-filter"] }
parking_lot = "0.12" parking_lot = "0.12"
dashmap = "5.5" dashmap = "5.5"
arc-swap = "1.7"
lru = "0.16" lru = "0.16"
rand = "0.9" rand = "0.9"
chrono = { version = "0.4", features = ["serde"] } chrono = { version = "0.4", features = ["serde"] }

View File

@@ -1,3 +1,5 @@
# syntax=docker/dockerfile:1
# ========================== # ==========================
# Stage 1: Build # Stage 1: Build
# ========================== # ==========================
@@ -5,36 +7,87 @@ FROM rust:1.88-slim-bookworm AS builder
RUN apt-get update && apt-get install -y --no-install-recommends \ RUN apt-get update && apt-get install -y --no-install-recommends \
pkg-config \ pkg-config \
ca-certificates \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
WORKDIR /build WORKDIR /build
# Depcache
COPY Cargo.toml Cargo.lock* ./ COPY Cargo.toml Cargo.lock* ./
RUN mkdir src && echo 'fn main() {}' > src/main.rs && \ RUN mkdir src && echo 'fn main() {}' > src/main.rs && \
cargo build --release 2>/dev/null || true && \ cargo build --release 2>/dev/null || true && \
rm -rf src rm -rf src
# Build
COPY . . COPY . .
RUN cargo build --release && strip target/release/telemt RUN cargo build --release && strip target/release/telemt
# ========================== # ==========================
# Stage 2: Runtime # Stage 2: Compress (strip + UPX)
# ========================== # ==========================
FROM debian:bookworm-slim FROM debian:12-slim AS minimal
RUN apt-get update && apt-get install -y --no-install-recommends \
upx \
binutils \
&& rm -rf /var/lib/apt/lists/*
COPY --from=builder /build/target/release/telemt /telemt
RUN strip /telemt || true
RUN upx --best --lzma /telemt || true
# ==========================
# Stage 3: Debug base
# ==========================
FROM debian:12-slim AS debug-base
RUN apt-get update && apt-get install -y --no-install-recommends \ RUN apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \ ca-certificates \
tzdata \
curl \
iproute2 \
busybox \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
RUN useradd -r -s /usr/sbin/nologin telemt # ==========================
# Stage 4: Debug image
# ==========================
FROM debug-base AS debug
WORKDIR /app WORKDIR /app
COPY --from=builder /build/target/release/telemt /app/telemt COPY --from=minimal /telemt /app/telemt
COPY config.toml /app/config.toml COPY config.toml /app/config.toml
RUN chown -R telemt:telemt /app USER root
USER telemt
EXPOSE 443
EXPOSE 9090
EXPOSE 9091
ENTRYPOINT ["/app/telemt"]
CMD ["config.toml"]
# ==========================
# Stage 5: Production (distroless)
# ==========================
FROM gcr.io/distroless/base-debian12 AS prod
WORKDIR /app
COPY --from=minimal /telemt /app/telemt
COPY config.toml /app/config.toml
# TLS + timezone + shell
COPY --from=debug-base /etc/ssl/certs /etc/ssl/certs
COPY --from=debug-base /usr/share/zoneinfo /usr/share/zoneinfo
COPY --from=debug-base /bin/busybox /bin/busybox
RUN ["/bin/busybox", "--install", "-s", "/bin"]
# distroless user
USER nonroot:nonroot
EXPOSE 443 EXPOSE 443
EXPOSE 9090 EXPOSE 9090

View File

@@ -19,9 +19,9 @@
### 🇷🇺 RU ### 🇷🇺 RU
#### Релиз 3.3.15 Semistable #### О релизах
[3.3.15](https://github.com/telemt/telemt/releases/tag/3.3.15) по итогам работы в продакшн признан одним из самых стабильных и рекомендуется к использованию, когда cutting-edge фичи некритичны! [3.3.27](https://github.com/telemt/telemt/releases/tag/3.3.27) даёт баланс стабильности и передового функционала, а также последние исправления по безопасности и багам
Будем рады вашему фидбеку и предложениям по улучшению — особенно в части **API**, **статистики**, **UX** Будем рады вашему фидбеку и предложениям по улучшению — особенно в части **API**, **статистики**, **UX**
@@ -40,9 +40,9 @@
### 🇬🇧 EN ### 🇬🇧 EN
#### Release 3.3.15 Semistable #### About releases
[3.3.15](https://github.com/telemt/telemt/releases/tag/3.3.15) is, based on the results of its operation in production, recognized as one of the most stable releases and recommended for use when cutting-edge features are not so necessary! [3.3.27](https://github.com/telemt/telemt/releases/tag/3.3.27) provides a balance of stability and advanced functionality, as well as the latest security and bug fixes
We are looking forward to your feedback and improvement proposals — especially regarding **API**, **statistics**, **UX** We are looking forward to your feedback and improvement proposals — especially regarding **API**, **statistics**, **UX**

View File

@@ -497,13 +497,14 @@ Note: the request contract is defined, but the corresponding route currently ret
| `direct_total` | `usize` | Direct-route upstream entries. | | `direct_total` | `usize` | Direct-route upstream entries. |
| `socks4_total` | `usize` | SOCKS4 upstream entries. | | `socks4_total` | `usize` | SOCKS4 upstream entries. |
| `socks5_total` | `usize` | SOCKS5 upstream entries. | | `socks5_total` | `usize` | SOCKS5 upstream entries. |
| `shadowsocks_total` | `usize` | Shadowsocks upstream entries. |
#### `RuntimeUpstreamQualityUpstreamData` #### `RuntimeUpstreamQualityUpstreamData`
| Field | Type | Description | | Field | Type | Description |
| --- | --- | --- | | --- | --- | --- |
| `upstream_id` | `usize` | Runtime upstream index. | | `upstream_id` | `usize` | Runtime upstream index. |
| `route_kind` | `string` | `direct`, `socks4`, `socks5`. | | `route_kind` | `string` | `direct`, `socks4`, `socks5`, `shadowsocks`. |
| `address` | `string` | Upstream address (`direct` literal for direct route kind). | | `address` | `string` | Upstream address (`direct` literal for direct route kind, `host:port` only for proxied upstreams). |
| `weight` | `u16` | Selection weight. | | `weight` | `u16` | Selection weight. |
| `scopes` | `string` | Configured scope selector. | | `scopes` | `string` | Configured scope selector. |
| `healthy` | `bool` | Current health flag. | | `healthy` | `bool` | Current health flag. |
@@ -757,13 +758,14 @@ Note: the request contract is defined, but the corresponding route currently ret
| `direct_total` | `usize` | Number of direct upstream entries. | | `direct_total` | `usize` | Number of direct upstream entries. |
| `socks4_total` | `usize` | Number of SOCKS4 upstream entries. | | `socks4_total` | `usize` | Number of SOCKS4 upstream entries. |
| `socks5_total` | `usize` | Number of SOCKS5 upstream entries. | | `socks5_total` | `usize` | Number of SOCKS5 upstream entries. |
| `shadowsocks_total` | `usize` | Number of Shadowsocks upstream entries. |
#### `UpstreamStatus` #### `UpstreamStatus`
| Field | Type | Description | | Field | Type | Description |
| --- | --- | --- | | --- | --- | --- |
| `upstream_id` | `usize` | Runtime upstream index. | | `upstream_id` | `usize` | Runtime upstream index. |
| `route_kind` | `string` | Upstream route kind: `direct`, `socks4`, `socks5`. | | `route_kind` | `string` | Upstream route kind: `direct`, `socks4`, `socks5`, `shadowsocks`. |
| `address` | `string` | Upstream address (`direct` for direct route kind). Authentication fields are intentionally omitted. | | `address` | `string` | Upstream address (`direct` for direct route kind, `host:port` for Shadowsocks). Authentication fields are intentionally omitted. |
| `weight` | `u16` | Selection weight. | | `weight` | `u16` | Selection weight. |
| `scopes` | `string` | Configured scope selector string. | | `scopes` | `string` | Configured scope selector string. |
| `healthy` | `bool` | Current health flag. | | `healthy` | `bool` | Current health flag. |

View File

@@ -120,3 +120,17 @@ password = "pass" # Password for Auth on SOCKS-server
weight = 1 # Set Weight for Scenarios weight = 1 # Set Weight for Scenarios
enabled = true enabled = true
``` ```
#### Shadowsocks as Upstream
Requires `use_middle_proxy = false`.
```toml
[general]
use_middle_proxy = false
[[upstreams]]
type = "shadowsocks"
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@1.2.3.4:8388"
weight = 1
enabled = true
```

View File

@@ -121,3 +121,16 @@ weight = 1 # Set Weight for Scenarios
enabled = true enabled = true
``` ```
#### Shadowsocks как Upstream
Требует `use_middle_proxy = false`.
```toml
[general]
use_middle_proxy = false
[[upstreams]]
type = "shadowsocks"
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@1.2.3.4:8388"
weight = 1
enabled = true
```

View File

@@ -178,7 +178,7 @@ docker compose down
> - По умолчанию публикуются порты 443:443, а контейнер запускается со сброшенными привилегиями (добавлена только `NET_BIND_SERVICE`) > - По умолчанию публикуются порты 443:443, а контейнер запускается со сброшенными привилегиями (добавлена только `NET_BIND_SERVICE`)
> - Если вам действительно нужна сеть хоста (обычно это требуется только для некоторых конфигураций IPv6), раскомментируйте `network_mode: host` > - Если вам действительно нужна сеть хоста (обычно это требуется только для некоторых конфигураций IPv6), раскомментируйте `network_mode: host`
**Запуск в Docker Compose** **Запуск без Docker Compose**
```bash ```bash
docker build -t telemt:local . docker build -t telemt:local .
docker run --name telemt --restart unless-stopped \ docker run --name telemt --restart unless-stopped \

View File

@@ -82,7 +82,7 @@ Die unten angegebenen `Default`-Werte sind Code-Defaults (bei fehlendem Schlüss
| Feld | Gilt für | Typ | Pflicht | Default | Bedeutung | | Feld | Gilt für | Typ | Pflicht | Default | Bedeutung |
|---|---|---|---|---|---| |---|---|---|---|---|---|
| `[[upstreams]].type` | alle Upstreams | `"direct" \| "socks4" \| "socks5"` | ja | n/a | Upstream-Transporttyp. | | `[[upstreams]].type` | alle Upstreams | `"direct" \| "socks4" \| "socks5" \| "shadowsocks"` | ja | n/a | Upstream-Transporttyp. |
| `[[upstreams]].weight` | alle Upstreams | `u16` | nein | `1` | Basisgewicht für weighted-random Auswahl. | | `[[upstreams]].weight` | alle Upstreams | `u16` | nein | `1` | Basisgewicht für weighted-random Auswahl. |
| `[[upstreams]].enabled` | alle Upstreams | `bool` | nein | `true` | Deaktivierte Einträge werden beim Start ignoriert. | | `[[upstreams]].enabled` | alle Upstreams | `bool` | nein | `true` | Deaktivierte Einträge werden beim Start ignoriert. |
| `[[upstreams]].scopes` | alle Upstreams | `String` | nein | `""` | Komma-separierte Scope-Tags für Request-Routing. | | `[[upstreams]].scopes` | alle Upstreams | `String` | nein | `""` | Komma-separierte Scope-Tags für Request-Routing. |
@@ -95,6 +95,8 @@ Die unten angegebenen `Default`-Werte sind Code-Defaults (bei fehlendem Schlüss
| `interface` | `socks5` | `Option<String>` | nein | `null` | Wird nur genutzt, wenn `address` als `ip:port` angegeben ist. | | `interface` | `socks5` | `Option<String>` | nein | `null` | Wird nur genutzt, wenn `address` als `ip:port` angegeben ist. |
| `username` | `socks5` | `Option<String>` | nein | `null` | SOCKS5 Benutzername. | | `username` | `socks5` | `Option<String>` | nein | `null` | SOCKS5 Benutzername. |
| `password` | `socks5` | `Option<String>` | nein | `null` | SOCKS5 Passwort. | | `password` | `socks5` | `Option<String>` | nein | `null` | SOCKS5 Passwort. |
| `url` | `shadowsocks` | `String` | ja | n/a | Shadowsocks-SIP002-URL (`ss://...`). In Runtime-APIs wird nur `host:port` offengelegt. |
| `interface` | `shadowsocks` | `Option<String>` | nein | `null` | Optionales ausgehendes Bind-Interface oder lokale Literal-IP. |
### Runtime-Regeln (wichtig) ### Runtime-Regeln (wichtig)
@@ -115,6 +117,7 @@ Die unten angegebenen `Default`-Werte sind Code-Defaults (bei fehlendem Schlüss
8. Im ME-Modus wird der gewählte Upstream auch für den ME-TCP-Dial-Pfad verwendet. 8. Im ME-Modus wird der gewählte Upstream auch für den ME-TCP-Dial-Pfad verwendet.
9. Im ME-Modus ist bei `direct` mit bind/interface die STUN-Reflection bind-aware für KDF-Adressmaterial. 9. Im ME-Modus ist bei `direct` mit bind/interface die STUN-Reflection bind-aware für KDF-Adressmaterial.
10. Im ME-Modus werden bei SOCKS-Upstream `BND.ADDR/BND.PORT` für KDF verwendet, wenn gültig/öffentlich und gleiche IP-Familie. 10. Im ME-Modus werden bei SOCKS-Upstream `BND.ADDR/BND.PORT` für KDF verwendet, wenn gültig/öffentlich und gleiche IP-Familie.
11. `shadowsocks`-Upstreams erfordern `general.use_middle_proxy = false`. Mit aktiviertem ME-Modus schlägt das Laden der Config sofort fehl.
## Upstream-Konfigurationsbeispiele ## Upstream-Konfigurationsbeispiele
@@ -150,7 +153,20 @@ weight = 2
enabled = true enabled = true
``` ```
### Beispiel 4: Gemischte Upstreams mit Scopes ### Beispiel 4: Shadowsocks-Upstream
```toml
[general]
use_middle_proxy = false
[[upstreams]]
type = "shadowsocks"
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@198.51.100.50:8388"
weight = 2
enabled = true
```
### Beispiel 5: Gemischte Upstreams mit Scopes
```toml ```toml
[[upstreams]] [[upstreams]]

View File

@@ -82,7 +82,7 @@ Defaults below are code defaults (used when a key is omitted), not necessarily v
| Field | Applies to | Type | Required | Default | Meaning | | Field | Applies to | Type | Required | Default | Meaning |
|---|---|---|---|---|---| |---|---|---|---|---|---|
| `[[upstreams]].type` | all upstreams | `"direct" \| "socks4" \| "socks5"` | yes | n/a | Upstream transport type. | | `[[upstreams]].type` | all upstreams | `"direct" \| "socks4" \| "socks5" \| "shadowsocks"` | yes | n/a | Upstream transport type. |
| `[[upstreams]].weight` | all upstreams | `u16` | no | `1` | Base weight for weighted-random selection. | | `[[upstreams]].weight` | all upstreams | `u16` | no | `1` | Base weight for weighted-random selection. |
| `[[upstreams]].enabled` | all upstreams | `bool` | no | `true` | Disabled entries are ignored at startup. | | `[[upstreams]].enabled` | all upstreams | `bool` | no | `true` | Disabled entries are ignored at startup. |
| `[[upstreams]].scopes` | all upstreams | `String` | no | `""` | Comma-separated scope tags for request-level routing. | | `[[upstreams]].scopes` | all upstreams | `String` | no | `""` | Comma-separated scope tags for request-level routing. |
@@ -95,6 +95,8 @@ Defaults below are code defaults (used when a key is omitted), not necessarily v
| `interface` | `socks5` | `Option<String>` | no | `null` | Used only for SOCKS server `ip:port` dial path. | | `interface` | `socks5` | `Option<String>` | no | `null` | Used only for SOCKS server `ip:port` dial path. |
| `username` | `socks5` | `Option<String>` | no | `null` | SOCKS5 username auth. | | `username` | `socks5` | `Option<String>` | no | `null` | SOCKS5 username auth. |
| `password` | `socks5` | `Option<String>` | no | `null` | SOCKS5 password auth. | | `password` | `socks5` | `Option<String>` | no | `null` | SOCKS5 password auth. |
| `url` | `shadowsocks` | `String` | yes | n/a | Shadowsocks SIP002 URL (`ss://...`). Only `host:port` is exposed in runtime APIs. |
| `interface` | `shadowsocks` | `Option<String>` | no | `null` | Optional outgoing bind interface or literal local IP. |
### Runtime rules (important) ### Runtime rules (important)
@@ -115,6 +117,7 @@ Defaults below are code defaults (used when a key is omitted), not necessarily v
8. In ME mode, the selected upstream is also used for ME TCP dial path. 8. In ME mode, the selected upstream is also used for ME TCP dial path.
9. In ME mode for `direct` upstream with bind/interface, STUN reflection logic is bind-aware for KDF source material. 9. In ME mode for `direct` upstream with bind/interface, STUN reflection logic is bind-aware for KDF source material.
10. In ME mode for SOCKS upstream, SOCKS `BND.ADDR/BND.PORT` is used for KDF when it is valid/public for the same family. 10. In ME mode for SOCKS upstream, SOCKS `BND.ADDR/BND.PORT` is used for KDF when it is valid/public for the same family.
11. `shadowsocks` upstreams require `general.use_middle_proxy = false`. Config load fails fast if ME mode is enabled.
## Upstream Configuration Examples ## Upstream Configuration Examples
@@ -150,7 +153,20 @@ weight = 2
enabled = true enabled = true
``` ```
### Example 4: Mixed upstreams with scopes ### Example 4: Shadowsocks upstream
```toml
[general]
use_middle_proxy = false
[[upstreams]]
type = "shadowsocks"
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@198.51.100.50:8388"
weight = 2
enabled = true
```
### Example 5: Mixed upstreams with scopes
```toml ```toml
[[upstreams]] [[upstreams]]

View File

@@ -82,7 +82,7 @@
| Поле | Применимость | Тип | Обязательно | Default | Назначение | | Поле | Применимость | Тип | Обязательно | Default | Назначение |
|---|---|---|---|---|---| |---|---|---|---|---|---|
| `[[upstreams]].type` | все upstream | `"direct" \| "socks4" \| "socks5"` | да | n/a | Тип upstream транспорта. | | `[[upstreams]].type` | все upstream | `"direct" \| "socks4" \| "socks5" \| "shadowsocks"` | да | n/a | Тип upstream транспорта. |
| `[[upstreams]].weight` | все upstream | `u16` | нет | `1` | Базовый вес в weighted-random выборе. | | `[[upstreams]].weight` | все upstream | `u16` | нет | `1` | Базовый вес в weighted-random выборе. |
| `[[upstreams]].enabled` | все upstream | `bool` | нет | `true` | Выключенные записи игнорируются на старте. | | `[[upstreams]].enabled` | все upstream | `bool` | нет | `true` | Выключенные записи игнорируются на старте. |
| `[[upstreams]].scopes` | все upstream | `String` | нет | `""` | Список scope-токенов через запятую для маршрутизации. | | `[[upstreams]].scopes` | все upstream | `String` | нет | `""` | Список scope-токенов через запятую для маршрутизации. |
@@ -95,6 +95,8 @@
| `interface` | `socks5` | `Option<String>` | нет | `null` | Используется только если `address` задан как `ip:port`. | | `interface` | `socks5` | `Option<String>` | нет | `null` | Используется только если `address` задан как `ip:port`. |
| `username` | `socks5` | `Option<String>` | нет | `null` | Логин SOCKS5 auth. | | `username` | `socks5` | `Option<String>` | нет | `null` | Логин SOCKS5 auth. |
| `password` | `socks5` | `Option<String>` | нет | `null` | Пароль SOCKS5 auth. | | `password` | `socks5` | `Option<String>` | нет | `null` | Пароль SOCKS5 auth. |
| `url` | `shadowsocks` | `String` | да | n/a | Shadowsocks SIP002 URL (`ss://...`). В runtime API раскрывается только `host:port`. |
| `interface` | `shadowsocks` | `Option<String>` | нет | `null` | Необязательный исходящий bind-интерфейс или literal локальный IP. |
### Runtime-правила ### Runtime-правила
@@ -115,6 +117,7 @@
8. В ME-режиме выбранный upstream также используется для ME TCP dial path. 8. В ME-режиме выбранный upstream также используется для ME TCP dial path.
9. В ME-режиме для `direct` upstream с bind/interface STUN-рефлексия выполняется bind-aware для KDF материала. 9. В ME-режиме для `direct` upstream с bind/interface STUN-рефлексия выполняется bind-aware для KDF материала.
10. В ME-режиме для SOCKS upstream используются `BND.ADDR/BND.PORT` для KDF, если адрес валиден/публичен и соответствует IP family. 10. В ME-режиме для SOCKS upstream используются `BND.ADDR/BND.PORT` для KDF, если адрес валиден/публичен и соответствует IP family.
11. `shadowsocks` upstream требует `general.use_middle_proxy = false`. При включенном ME-режиме конфиг отклоняется при загрузке.
## Примеры конфигурации Upstreams ## Примеры конфигурации Upstreams
@@ -150,7 +153,20 @@ weight = 2
enabled = true enabled = true
``` ```
### Пример 4: смешанные upstream с scopes ### Пример 4: Shadowsocks upstream
```toml
[general]
use_middle_proxy = false
[[upstreams]]
type = "shadowsocks"
url = "ss://2022-blake3-aes-256-gcm:BASE64_KEY@198.51.100.50:8388"
weight = 2
enabled = true
```
### Пример 5: смешанные upstream с scopes
```toml ```toml
[[upstreams]] [[upstreams]]

View File

@@ -134,6 +134,7 @@ pub(super) struct UpstreamSummaryData {
pub(super) direct_total: usize, pub(super) direct_total: usize,
pub(super) socks4_total: usize, pub(super) socks4_total: usize,
pub(super) socks5_total: usize, pub(super) socks5_total: usize,
pub(super) shadowsocks_total: usize,
} }
#[derive(Serialize, Clone)] #[derive(Serialize, Clone)]
@@ -205,6 +206,16 @@ pub(super) struct ZeroPoolData {
pub(super) refill_failed_total: u64, pub(super) refill_failed_total: u64,
pub(super) writer_restored_same_endpoint_total: u64, pub(super) writer_restored_same_endpoint_total: u64,
pub(super) writer_restored_fallback_total: u64, pub(super) writer_restored_fallback_total: u64,
pub(super) teardown_attempt_total_normal: u64,
pub(super) teardown_attempt_total_hard_detach: u64,
pub(super) teardown_success_total_normal: u64,
pub(super) teardown_success_total_hard_detach: u64,
pub(super) teardown_timeout_total: u64,
pub(super) teardown_escalation_total: u64,
pub(super) teardown_noop_total: u64,
pub(super) teardown_cleanup_side_effect_failures_total: u64,
pub(super) teardown_duration_count_total: u64,
pub(super) teardown_duration_sum_seconds_total: f64,
} }
#[derive(Serialize, Clone)] #[derive(Serialize, Clone)]

View File

@@ -4,6 +4,9 @@ use std::time::{SystemTime, UNIX_EPOCH};
use serde::Serialize; use serde::Serialize;
use crate::config::ProxyConfig; use crate::config::ProxyConfig;
use crate::stats::{
MeWriterCleanupSideEffectStep, MeWriterTeardownMode, MeWriterTeardownReason, Stats,
};
use super::ApiShared; use super::ApiShared;
@@ -98,6 +101,50 @@ pub(super) struct RuntimeMeQualityCountersData {
pub(super) reconnect_success_total: u64, pub(super) reconnect_success_total: u64,
} }
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityTeardownAttemptData {
pub(super) reason: &'static str,
pub(super) mode: &'static str,
pub(super) total: u64,
}
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityTeardownSuccessData {
pub(super) mode: &'static str,
pub(super) total: u64,
}
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityTeardownSideEffectData {
pub(super) step: &'static str,
pub(super) total: u64,
}
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityTeardownDurationBucketData {
pub(super) le_seconds: &'static str,
pub(super) total: u64,
}
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityTeardownDurationData {
pub(super) mode: &'static str,
pub(super) count: u64,
pub(super) sum_seconds: f64,
pub(super) buckets: Vec<RuntimeMeQualityTeardownDurationBucketData>,
}
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityTeardownData {
pub(super) attempts: Vec<RuntimeMeQualityTeardownAttemptData>,
pub(super) success: Vec<RuntimeMeQualityTeardownSuccessData>,
pub(super) timeout_total: u64,
pub(super) escalation_total: u64,
pub(super) noop_total: u64,
pub(super) cleanup_side_effect_failures: Vec<RuntimeMeQualityTeardownSideEffectData>,
pub(super) duration: Vec<RuntimeMeQualityTeardownDurationData>,
}
#[derive(Serialize)] #[derive(Serialize)]
pub(super) struct RuntimeMeQualityRouteDropData { pub(super) struct RuntimeMeQualityRouteDropData {
pub(super) no_conn_total: u64, pub(super) no_conn_total: u64,
@@ -107,6 +154,25 @@ pub(super) struct RuntimeMeQualityRouteDropData {
pub(super) queue_full_high_total: u64, pub(super) queue_full_high_total: u64,
} }
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityFamilyStateData {
pub(super) family: &'static str,
pub(super) state: &'static str,
pub(super) state_since_epoch_secs: u64,
#[serde(skip_serializing_if = "Option::is_none")]
pub(super) suppressed_until_epoch_secs: Option<u64>,
pub(super) fail_streak: u32,
pub(super) recover_success_streak: u32,
}
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityDrainGateData {
pub(super) route_quorum_ok: bool,
pub(super) redundancy_ok: bool,
pub(super) block_reason: &'static str,
pub(super) updated_at_epoch_secs: u64,
}
#[derive(Serialize)] #[derive(Serialize)]
pub(super) struct RuntimeMeQualityDcRttData { pub(super) struct RuntimeMeQualityDcRttData {
pub(super) dc: i16, pub(super) dc: i16,
@@ -120,7 +186,10 @@ pub(super) struct RuntimeMeQualityDcRttData {
#[derive(Serialize)] #[derive(Serialize)]
pub(super) struct RuntimeMeQualityPayload { pub(super) struct RuntimeMeQualityPayload {
pub(super) counters: RuntimeMeQualityCountersData, pub(super) counters: RuntimeMeQualityCountersData,
pub(super) teardown: RuntimeMeQualityTeardownData,
pub(super) route_drops: RuntimeMeQualityRouteDropData, pub(super) route_drops: RuntimeMeQualityRouteDropData,
pub(super) family_states: Vec<RuntimeMeQualityFamilyStateData>,
pub(super) drain_gate: RuntimeMeQualityDrainGateData,
pub(super) dc_rtt: Vec<RuntimeMeQualityDcRttData>, pub(super) dc_rtt: Vec<RuntimeMeQualityDcRttData>,
} }
@@ -159,6 +228,7 @@ pub(super) struct RuntimeUpstreamQualitySummaryData {
pub(super) direct_total: usize, pub(super) direct_total: usize,
pub(super) socks4_total: usize, pub(super) socks4_total: usize,
pub(super) socks5_total: usize, pub(super) socks5_total: usize,
pub(super) shadowsocks_total: usize,
} }
#[derive(Serialize)] #[derive(Serialize)]
@@ -361,6 +431,19 @@ pub(super) async fn build_runtime_me_quality_data(shared: &ApiShared) -> Runtime
}; };
let status = pool.api_status_snapshot().await; let status = pool.api_status_snapshot().await;
let family_states = pool
.api_family_state_snapshot()
.into_iter()
.map(|entry| RuntimeMeQualityFamilyStateData {
family: entry.family,
state: entry.state,
state_since_epoch_secs: entry.state_since_epoch_secs,
suppressed_until_epoch_secs: entry.suppressed_until_epoch_secs,
fail_streak: entry.fail_streak,
recover_success_streak: entry.recover_success_streak,
})
.collect();
let drain_gate_snapshot = pool.api_drain_gate_snapshot();
RuntimeMeQualityData { RuntimeMeQualityData {
enabled: true, enabled: true,
reason: None, reason: None,
@@ -374,6 +457,7 @@ pub(super) async fn build_runtime_me_quality_data(shared: &ApiShared) -> Runtime
reconnect_attempt_total: shared.stats.get_me_reconnect_attempts(), reconnect_attempt_total: shared.stats.get_me_reconnect_attempts(),
reconnect_success_total: shared.stats.get_me_reconnect_success(), reconnect_success_total: shared.stats.get_me_reconnect_success(),
}, },
teardown: build_runtime_me_teardown_data(shared),
route_drops: RuntimeMeQualityRouteDropData { route_drops: RuntimeMeQualityRouteDropData {
no_conn_total: shared.stats.get_me_route_drop_no_conn(), no_conn_total: shared.stats.get_me_route_drop_no_conn(),
channel_closed_total: shared.stats.get_me_route_drop_channel_closed(), channel_closed_total: shared.stats.get_me_route_drop_channel_closed(),
@@ -381,6 +465,13 @@ pub(super) async fn build_runtime_me_quality_data(shared: &ApiShared) -> Runtime
queue_full_base_total: shared.stats.get_me_route_drop_queue_full_base(), queue_full_base_total: shared.stats.get_me_route_drop_queue_full_base(),
queue_full_high_total: shared.stats.get_me_route_drop_queue_full_high(), queue_full_high_total: shared.stats.get_me_route_drop_queue_full_high(),
}, },
family_states,
drain_gate: RuntimeMeQualityDrainGateData {
route_quorum_ok: drain_gate_snapshot.route_quorum_ok,
redundancy_ok: drain_gate_snapshot.redundancy_ok,
block_reason: drain_gate_snapshot.block_reason,
updated_at_epoch_secs: drain_gate_snapshot.updated_at_epoch_secs,
},
dc_rtt: status dc_rtt: status
.dcs .dcs
.into_iter() .into_iter()
@@ -397,6 +488,81 @@ pub(super) async fn build_runtime_me_quality_data(shared: &ApiShared) -> Runtime
} }
} }
fn build_runtime_me_teardown_data(shared: &ApiShared) -> RuntimeMeQualityTeardownData {
let attempts = MeWriterTeardownReason::ALL
.iter()
.copied()
.flat_map(|reason| {
MeWriterTeardownMode::ALL
.iter()
.copied()
.map(move |mode| RuntimeMeQualityTeardownAttemptData {
reason: reason.as_str(),
mode: mode.as_str(),
total: shared.stats.get_me_writer_teardown_attempt_total(reason, mode),
})
})
.collect();
let success = MeWriterTeardownMode::ALL
.iter()
.copied()
.map(|mode| RuntimeMeQualityTeardownSuccessData {
mode: mode.as_str(),
total: shared.stats.get_me_writer_teardown_success_total(mode),
})
.collect();
let cleanup_side_effect_failures = MeWriterCleanupSideEffectStep::ALL
.iter()
.copied()
.map(|step| RuntimeMeQualityTeardownSideEffectData {
step: step.as_str(),
total: shared
.stats
.get_me_writer_cleanup_side_effect_failures_total(step),
})
.collect();
let duration = MeWriterTeardownMode::ALL
.iter()
.copied()
.map(|mode| {
let count = shared.stats.get_me_writer_teardown_duration_count(mode);
let mut buckets: Vec<RuntimeMeQualityTeardownDurationBucketData> = Stats::me_writer_teardown_duration_bucket_labels()
.iter()
.enumerate()
.map(|(bucket_idx, label)| RuntimeMeQualityTeardownDurationBucketData {
le_seconds: label,
total: shared
.stats
.get_me_writer_teardown_duration_bucket_total(mode, bucket_idx),
})
.collect();
buckets.push(RuntimeMeQualityTeardownDurationBucketData {
le_seconds: "+Inf",
total: count,
});
RuntimeMeQualityTeardownDurationData {
mode: mode.as_str(),
count,
sum_seconds: shared.stats.get_me_writer_teardown_duration_sum_seconds(mode),
buckets,
}
})
.collect();
RuntimeMeQualityTeardownData {
attempts,
success,
timeout_total: shared.stats.get_me_writer_teardown_timeout_total(),
escalation_total: shared.stats.get_me_writer_teardown_escalation_total(),
noop_total: shared.stats.get_me_writer_teardown_noop_total(),
cleanup_side_effect_failures,
duration,
}
}
pub(super) async fn build_runtime_upstream_quality_data( pub(super) async fn build_runtime_upstream_quality_data(
shared: &ApiShared, shared: &ApiShared,
) -> RuntimeUpstreamQualityData { ) -> RuntimeUpstreamQualityData {
@@ -406,7 +572,9 @@ pub(super) async fn build_runtime_upstream_quality_data(
connect_attempt_total: shared.stats.get_upstream_connect_attempt_total(), connect_attempt_total: shared.stats.get_upstream_connect_attempt_total(),
connect_success_total: shared.stats.get_upstream_connect_success_total(), connect_success_total: shared.stats.get_upstream_connect_success_total(),
connect_fail_total: shared.stats.get_upstream_connect_fail_total(), connect_fail_total: shared.stats.get_upstream_connect_fail_total(),
connect_failfast_hard_error_total: shared.stats.get_upstream_connect_failfast_hard_error_total(), connect_failfast_hard_error_total: shared
.stats
.get_upstream_connect_failfast_hard_error_total(),
}; };
let Some(snapshot) = shared.upstream_manager.try_api_snapshot() else { let Some(snapshot) = shared.upstream_manager.try_api_snapshot() else {
@@ -446,6 +614,7 @@ pub(super) async fn build_runtime_upstream_quality_data(
direct_total: snapshot.summary.direct_total, direct_total: snapshot.summary.direct_total,
socks4_total: snapshot.summary.socks4_total, socks4_total: snapshot.summary.socks4_total,
socks5_total: snapshot.summary.socks5_total, socks5_total: snapshot.summary.socks5_total,
shadowsocks_total: snapshot.summary.shadowsocks_total,
}), }),
upstreams: Some( upstreams: Some(
snapshot snapshot
@@ -457,6 +626,7 @@ pub(super) async fn build_runtime_upstream_quality_data(
crate::transport::UpstreamRouteKind::Direct => "direct", crate::transport::UpstreamRouteKind::Direct => "direct",
crate::transport::UpstreamRouteKind::Socks4 => "socks4", crate::transport::UpstreamRouteKind::Socks4 => "socks4",
crate::transport::UpstreamRouteKind::Socks5 => "socks5", crate::transport::UpstreamRouteKind::Socks5 => "socks5",
crate::transport::UpstreamRouteKind::Shadowsocks => "shadowsocks",
}, },
address: upstream.address, address: upstream.address,
weight: upstream.weight, weight: upstream.weight,
@@ -476,7 +646,9 @@ pub(super) async fn build_runtime_upstream_quality_data(
crate::transport::upstream::IpPreference::PreferV6 => "prefer_v6", crate::transport::upstream::IpPreference::PreferV6 => "prefer_v6",
crate::transport::upstream::IpPreference::PreferV4 => "prefer_v4", crate::transport::upstream::IpPreference::PreferV4 => "prefer_v4",
crate::transport::upstream::IpPreference::BothWork => "both_work", crate::transport::upstream::IpPreference::BothWork => "both_work",
crate::transport::upstream::IpPreference::Unavailable => "unavailable", crate::transport::upstream::IpPreference::Unavailable => {
"unavailable"
}
}, },
}) })
.collect(), .collect(),
@@ -514,14 +686,18 @@ pub(super) async fn build_runtime_nat_stun_data(shared: &ApiShared) -> RuntimeNa
live_total: snapshot.live_servers.len(), live_total: snapshot.live_servers.len(),
}, },
reflection: RuntimeNatStunReflectionBlockData { reflection: RuntimeNatStunReflectionBlockData {
v4: snapshot.reflection_v4.map(|entry| RuntimeNatStunReflectionData { v4: snapshot
addr: entry.addr.to_string(), .reflection_v4
age_secs: entry.age_secs, .map(|entry| RuntimeNatStunReflectionData {
}), addr: entry.addr.to_string(),
v6: snapshot.reflection_v6.map(|entry| RuntimeNatStunReflectionData { age_secs: entry.age_secs,
addr: entry.addr.to_string(), }),
age_secs: entry.age_secs, v6: snapshot
}), .reflection_v6
.map(|entry| RuntimeNatStunReflectionData {
addr: entry.addr.to_string(),
age_secs: entry.age_secs,
}),
}, },
stun_backoff_remaining_ms: snapshot.stun_backoff_remaining_ms, stun_backoff_remaining_ms: snapshot.stun_backoff_remaining_ms,
}), }),

View File

@@ -1,5 +1,5 @@
use std::net::IpAddr;
use std::collections::HashMap; use std::collections::HashMap;
use std::net::IpAddr;
use std::sync::{Mutex, OnceLock}; use std::sync::{Mutex, OnceLock};
use std::time::{SystemTime, UNIX_EPOCH}; use std::time::{SystemTime, UNIX_EPOCH};
@@ -7,8 +7,8 @@ use serde::Serialize;
use crate::config::{ProxyConfig, UpstreamType}; use crate::config::{ProxyConfig, UpstreamType};
use crate::network::probe::{detect_interface_ipv4, detect_interface_ipv6, is_bogon}; use crate::network::probe::{detect_interface_ipv4, detect_interface_ipv6, is_bogon};
use crate::transport::middle_proxy::{bnd_snapshot, timeskew_snapshot, upstream_bnd_snapshots};
use crate::transport::UpstreamRouteKind; use crate::transport::UpstreamRouteKind;
use crate::transport::middle_proxy::{bnd_snapshot, timeskew_snapshot, upstream_bnd_snapshots};
use super::ApiShared; use super::ApiShared;
@@ -262,8 +262,8 @@ fn update_kdf_ewma(now_epoch_secs: u64, total_errors: u64) -> f64 {
let delta_errors = total_errors.saturating_sub(guard.last_total_errors); let delta_errors = total_errors.saturating_sub(guard.last_total_errors);
let instant_rate_per_min = (delta_errors as f64) * 60.0 / (dt_secs as f64); let instant_rate_per_min = (delta_errors as f64) * 60.0 / (dt_secs as f64);
let alpha = 1.0 - f64::exp(-(dt_secs as f64) / KDF_EWMA_TAU_SECS); let alpha = 1.0 - f64::exp(-(dt_secs as f64) / KDF_EWMA_TAU_SECS);
guard.ewma_errors_per_min = guard.ewma_errors_per_min guard.ewma_errors_per_min =
+ alpha * (instant_rate_per_min - guard.ewma_errors_per_min); guard.ewma_errors_per_min + alpha * (instant_rate_per_min - guard.ewma_errors_per_min);
guard.last_epoch_secs = now_epoch_secs; guard.last_epoch_secs = now_epoch_secs;
guard.last_total_errors = total_errors; guard.last_total_errors = total_errors;
guard.ewma_errors_per_min guard.ewma_errors_per_min
@@ -284,6 +284,7 @@ fn map_route_kind(value: UpstreamRouteKind) -> &'static str {
UpstreamRouteKind::Direct => "direct", UpstreamRouteKind::Direct => "direct",
UpstreamRouteKind::Socks4 => "socks4", UpstreamRouteKind::Socks4 => "socks4",
UpstreamRouteKind::Socks5 => "socks5", UpstreamRouteKind::Socks5 => "socks5",
UpstreamRouteKind::Shadowsocks => "shadowsocks",
} }
} }

View File

@@ -1,7 +1,7 @@
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
use crate::config::ApiConfig; use crate::config::ApiConfig;
use crate::stats::Stats; use crate::stats::{MeWriterTeardownMode, Stats};
use crate::transport::upstream::IpPreference; use crate::transport::upstream::IpPreference;
use crate::transport::UpstreamRouteKind; use crate::transport::UpstreamRouteKind;
@@ -106,6 +106,29 @@ pub(super) fn build_zero_all_data(stats: &Stats, configured_users: usize) -> Zer
refill_failed_total: stats.get_me_refill_failed_total(), refill_failed_total: stats.get_me_refill_failed_total(),
writer_restored_same_endpoint_total: stats.get_me_writer_restored_same_endpoint_total(), writer_restored_same_endpoint_total: stats.get_me_writer_restored_same_endpoint_total(),
writer_restored_fallback_total: stats.get_me_writer_restored_fallback_total(), writer_restored_fallback_total: stats.get_me_writer_restored_fallback_total(),
teardown_attempt_total_normal: stats
.get_me_writer_teardown_attempt_total_by_mode(MeWriterTeardownMode::Normal),
teardown_attempt_total_hard_detach: stats
.get_me_writer_teardown_attempt_total_by_mode(MeWriterTeardownMode::HardDetach),
teardown_success_total_normal: stats
.get_me_writer_teardown_success_total(MeWriterTeardownMode::Normal),
teardown_success_total_hard_detach: stats
.get_me_writer_teardown_success_total(MeWriterTeardownMode::HardDetach),
teardown_timeout_total: stats.get_me_writer_teardown_timeout_total(),
teardown_escalation_total: stats.get_me_writer_teardown_escalation_total(),
teardown_noop_total: stats.get_me_writer_teardown_noop_total(),
teardown_cleanup_side_effect_failures_total: stats
.get_me_writer_cleanup_side_effect_failures_total_all(),
teardown_duration_count_total: stats
.get_me_writer_teardown_duration_count(MeWriterTeardownMode::Normal)
.saturating_add(
stats.get_me_writer_teardown_duration_count(MeWriterTeardownMode::HardDetach),
),
teardown_duration_sum_seconds_total: stats
.get_me_writer_teardown_duration_sum_seconds(MeWriterTeardownMode::Normal)
+ stats.get_me_writer_teardown_duration_sum_seconds(
MeWriterTeardownMode::HardDetach,
),
}, },
desync: ZeroDesyncData { desync: ZeroDesyncData {
secure_padding_invalid_total: stats.get_secure_padding_invalid(), secure_padding_invalid_total: stats.get_secure_padding_invalid(),
@@ -138,7 +161,8 @@ fn build_zero_upstream_data(stats: &Stats) -> ZeroUpstreamData {
.get_upstream_connect_duration_success_bucket_501_1000ms(), .get_upstream_connect_duration_success_bucket_501_1000ms(),
connect_duration_success_bucket_gt_1000ms: stats connect_duration_success_bucket_gt_1000ms: stats
.get_upstream_connect_duration_success_bucket_gt_1000ms(), .get_upstream_connect_duration_success_bucket_gt_1000ms(),
connect_duration_fail_bucket_le_100ms: stats.get_upstream_connect_duration_fail_bucket_le_100ms(), connect_duration_fail_bucket_le_100ms: stats
.get_upstream_connect_duration_fail_bucket_le_100ms(),
connect_duration_fail_bucket_101_500ms: stats connect_duration_fail_bucket_101_500ms: stats
.get_upstream_connect_duration_fail_bucket_101_500ms(), .get_upstream_connect_duration_fail_bucket_101_500ms(),
connect_duration_fail_bucket_501_1000ms: stats connect_duration_fail_bucket_501_1000ms: stats
@@ -180,6 +204,7 @@ pub(super) fn build_upstreams_data(shared: &ApiShared, api_cfg: &ApiConfig) -> U
direct_total: snapshot.summary.direct_total, direct_total: snapshot.summary.direct_total,
socks4_total: snapshot.summary.socks4_total, socks4_total: snapshot.summary.socks4_total,
socks5_total: snapshot.summary.socks5_total, socks5_total: snapshot.summary.socks5_total,
shadowsocks_total: snapshot.summary.shadowsocks_total,
}; };
let upstreams = snapshot let upstreams = snapshot
.upstreams .upstreams
@@ -395,8 +420,7 @@ async fn get_minimal_payload_cached(
adaptive_floor_min_writers_multi_endpoint: runtime adaptive_floor_min_writers_multi_endpoint: runtime
.adaptive_floor_min_writers_multi_endpoint, .adaptive_floor_min_writers_multi_endpoint,
adaptive_floor_recover_grace_secs: runtime.adaptive_floor_recover_grace_secs, adaptive_floor_recover_grace_secs: runtime.adaptive_floor_recover_grace_secs,
adaptive_floor_writers_per_core_total: runtime adaptive_floor_writers_per_core_total: runtime.adaptive_floor_writers_per_core_total,
.adaptive_floor_writers_per_core_total,
adaptive_floor_cpu_cores_override: runtime.adaptive_floor_cpu_cores_override, adaptive_floor_cpu_cores_override: runtime.adaptive_floor_cpu_cores_override,
adaptive_floor_max_extra_writers_single_per_core: runtime adaptive_floor_max_extra_writers_single_per_core: runtime
.adaptive_floor_max_extra_writers_single_per_core, .adaptive_floor_max_extra_writers_single_per_core,
@@ -404,12 +428,9 @@ async fn get_minimal_payload_cached(
.adaptive_floor_max_extra_writers_multi_per_core, .adaptive_floor_max_extra_writers_multi_per_core,
adaptive_floor_max_active_writers_per_core: runtime adaptive_floor_max_active_writers_per_core: runtime
.adaptive_floor_max_active_writers_per_core, .adaptive_floor_max_active_writers_per_core,
adaptive_floor_max_warm_writers_per_core: runtime adaptive_floor_max_warm_writers_per_core: runtime.adaptive_floor_max_warm_writers_per_core,
.adaptive_floor_max_warm_writers_per_core, adaptive_floor_max_active_writers_global: runtime.adaptive_floor_max_active_writers_global,
adaptive_floor_max_active_writers_global: runtime adaptive_floor_max_warm_writers_global: runtime.adaptive_floor_max_warm_writers_global,
.adaptive_floor_max_active_writers_global,
adaptive_floor_max_warm_writers_global: runtime
.adaptive_floor_max_warm_writers_global,
adaptive_floor_cpu_cores_detected: runtime.adaptive_floor_cpu_cores_detected, adaptive_floor_cpu_cores_detected: runtime.adaptive_floor_cpu_cores_detected,
adaptive_floor_cpu_cores_effective: runtime.adaptive_floor_cpu_cores_effective, adaptive_floor_cpu_cores_effective: runtime.adaptive_floor_cpu_cores_effective,
adaptive_floor_global_cap_raw: runtime.adaptive_floor_global_cap_raw, adaptive_floor_global_cap_raw: runtime.adaptive_floor_global_cap_raw,
@@ -528,6 +549,7 @@ fn map_route_kind(value: UpstreamRouteKind) -> &'static str {
UpstreamRouteKind::Direct => "direct", UpstreamRouteKind::Direct => "direct",
UpstreamRouteKind::Socks4 => "socks4", UpstreamRouteKind::Socks4 => "socks4",
UpstreamRouteKind::Socks5 => "socks5", UpstreamRouteKind::Socks5 => "socks5",
UpstreamRouteKind::Shadowsocks => "shadowsocks",
} }
} }

View File

@@ -199,8 +199,14 @@ update_every = 43200
hardswap = false hardswap = false
me_pool_drain_ttl_secs = 90 me_pool_drain_ttl_secs = 90
me_instadrain = false me_instadrain = false
me_pool_drain_threshold = 32
me_pool_drain_soft_evict_grace_secs = 10
me_pool_drain_soft_evict_per_writer = 2
me_pool_drain_soft_evict_budget_per_core = 16
me_pool_drain_soft_evict_cooldown_ms = 1000
me_bind_stale_mode = "never"
me_pool_min_fresh_ratio = 0.8 me_pool_min_fresh_ratio = 0.8
me_reinit_drain_timeout_secs = 120 me_reinit_drain_timeout_secs = 90
[network] [network]
ipv4 = true ipv4 = true
@@ -262,7 +268,7 @@ fn generate_systemd_unit(exe_path: &Path, config_path: &Path) -> String {
format!( format!(
r#"[Unit] r#"[Unit]
Description=Telemt MTProxy Description=Telemt MTProxy
Documentation=https://github.com/nicepkg/telemt Documentation=https://github.com/telemt/telemt
After=network-online.target After=network-online.target
Wants=network-online.target Wants=network-online.target

View File

@@ -40,10 +40,10 @@ const DEFAULT_ME_ROUTE_HYBRID_MAX_WAIT_MS: u64 = 3000;
const DEFAULT_ME_ROUTE_BLOCKING_SEND_TIMEOUT_MS: u64 = 250; const DEFAULT_ME_ROUTE_BLOCKING_SEND_TIMEOUT_MS: u64 = 250;
const DEFAULT_ME_C2ME_SEND_TIMEOUT_MS: u64 = 4000; const DEFAULT_ME_C2ME_SEND_TIMEOUT_MS: u64 = 4000;
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_ENABLED: bool = true; const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_ENABLED: bool = true;
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_GRACE_SECS: u64 = 30; const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_GRACE_SECS: u64 = 10;
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_PER_WRITER: u8 = 1; const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_PER_WRITER: u8 = 2;
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_BUDGET_PER_CORE: u16 = 8; const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_BUDGET_PER_CORE: u16 = 16;
const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_COOLDOWN_MS: u64 = 5000; const DEFAULT_ME_POOL_DRAIN_SOFT_EVICT_COOLDOWN_MS: u64 = 1000;
const DEFAULT_USER_MAX_UNIQUE_IPS_WINDOW_SECS: u64 = 30; const DEFAULT_USER_MAX_UNIQUE_IPS_WINDOW_SECS: u64 = 30;
const DEFAULT_ACCEPT_PERMIT_TIMEOUT_MS: u64 = 250; const DEFAULT_ACCEPT_PERMIT_TIMEOUT_MS: u64 = 250;
const DEFAULT_UPSTREAM_CONNECT_RETRY_ATTEMPTS: u32 = 2; const DEFAULT_UPSTREAM_CONNECT_RETRY_ATTEMPTS: u32 = 2;
@@ -65,6 +65,10 @@ pub(crate) fn default_tls_domain() -> String {
"petrovich.ru".to_string() "petrovich.ru".to_string()
} }
pub(crate) fn default_tls_fetch_scope() -> String {
String::new()
}
pub(crate) fn default_mask_port() -> u16 { pub(crate) fn default_mask_port() -> u16 {
443 443
} }
@@ -606,7 +610,7 @@ pub(crate) fn default_proxy_secret_len_max() -> usize {
} }
pub(crate) fn default_me_reinit_drain_timeout_secs() -> u64 { pub(crate) fn default_me_reinit_drain_timeout_secs() -> u64 {
120 90
} }
pub(crate) fn default_me_pool_drain_ttl_secs() -> u64 { pub(crate) fn default_me_pool_drain_ttl_secs() -> u64 {
@@ -618,7 +622,7 @@ pub(crate) fn default_me_instadrain() -> bool {
} }
pub(crate) fn default_me_pool_drain_threshold() -> u64 { pub(crate) fn default_me_pool_drain_threshold() -> u64 {
128 32
} }
pub(crate) fn default_me_pool_drain_soft_evict_enabled() -> bool { pub(crate) fn default_me_pool_drain_soft_evict_enabled() -> bool {

View File

@@ -623,6 +623,7 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
} }
if old.censorship.tls_domain != new.censorship.tls_domain if old.censorship.tls_domain != new.censorship.tls_domain
|| old.censorship.tls_domains != new.censorship.tls_domains || old.censorship.tls_domains != new.censorship.tls_domains
|| old.censorship.tls_fetch_scope != new.censorship.tls_fetch_scope
|| old.censorship.mask != new.censorship.mask || old.censorship.mask != new.censorship.mask
|| old.censorship.mask_host != new.censorship.mask_host || old.censorship.mask_host != new.censorship.mask_host
|| old.censorship.mask_port != new.censorship.mask_port || old.censorship.mask_port != new.censorship.mask_port

View File

@@ -6,8 +6,9 @@ use std::net::{IpAddr, SocketAddr};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use rand::Rng; use rand::Rng;
use serde::{Deserialize, Serialize};
use shadowsocks::config::ServerConfig as ShadowsocksServerConfig;
use tracing::warn; use tracing::warn;
use serde::{Serialize, Deserialize};
use crate::error::{ProxyError, Result}; use crate::error::{ProxyError, Result};
@@ -122,13 +123,37 @@ fn sanitize_ad_tag(ad_tag: &mut Option<String>) {
}; };
if !is_valid_ad_tag(tag) { if !is_valid_ad_tag(tag) {
warn!( warn!("Invalid general.ad_tag value, expected exactly 32 hex chars; ad_tag is disabled");
"Invalid general.ad_tag value, expected exactly 32 hex chars; ad_tag is disabled"
);
*ad_tag = None; *ad_tag = None;
} }
} }
fn validate_upstreams(config: &ProxyConfig) -> Result<()> {
let has_enabled_shadowsocks = config.upstreams.iter().any(|upstream| {
upstream.enabled && matches!(upstream.upstream_type, UpstreamType::Shadowsocks { .. })
});
if has_enabled_shadowsocks && config.general.use_middle_proxy {
return Err(ProxyError::Config(
"shadowsocks upstreams require general.use_middle_proxy = false".to_string(),
));
}
for upstream in &config.upstreams {
if let UpstreamType::Shadowsocks { url, .. } = &upstream.upstream_type {
let parsed = ShadowsocksServerConfig::from_url(url)
.map_err(|error| ProxyError::Config(format!("invalid shadowsocks url: {error}")))?;
if parsed.plugin().is_some() {
return Err(ProxyError::Config(
"shadowsocks plugins are not supported".to_string(),
));
}
}
}
Ok(())
}
// ============= Main Config ============= // ============= Main Config =============
#[derive(Debug, Clone, Serialize, Deserialize, Default)] #[derive(Debug, Clone, Serialize, Deserialize, Default)]
@@ -180,7 +205,8 @@ impl ProxyConfig {
pub(crate) fn load_with_metadata<P: AsRef<Path>>(path: P) -> Result<LoadedConfig> { pub(crate) fn load_with_metadata<P: AsRef<Path>>(path: P) -> Result<LoadedConfig> {
let path = path.as_ref(); let path = path.as_ref();
let content = std::fs::read_to_string(path).map_err(|e| ProxyError::Config(e.to_string()))?; let content =
std::fs::read_to_string(path).map_err(|e| ProxyError::Config(e.to_string()))?;
let base_dir = path.parent().unwrap_or(Path::new(".")); let base_dir = path.parent().unwrap_or(Path::new("."));
let mut source_files = BTreeSet::new(); let mut source_files = BTreeSet::new();
source_files.insert(normalize_config_path(path)); source_files.insert(normalize_config_path(path));
@@ -207,15 +233,17 @@ impl ProxyConfig {
.map(|table| table.contains_key("stun_servers")) .map(|table| table.contains_key("stun_servers"))
.unwrap_or(false); .unwrap_or(false);
let mut config: ProxyConfig = let mut config: ProxyConfig = parsed_toml
parsed_toml.try_into().map_err(|e| ProxyError::Config(e.to_string()))?; .try_into()
.map_err(|e| ProxyError::Config(e.to_string()))?;
if !update_every_is_explicit && (legacy_secret_is_explicit || legacy_config_is_explicit) { if !update_every_is_explicit && (legacy_secret_is_explicit || legacy_config_is_explicit) {
config.general.update_every = None; config.general.update_every = None;
} }
let legacy_nat_stun = config.general.middle_proxy_nat_stun.take(); let legacy_nat_stun = config.general.middle_proxy_nat_stun.take();
let legacy_nat_stun_servers = std::mem::take(&mut config.general.middle_proxy_nat_stun_servers); let legacy_nat_stun_servers =
std::mem::take(&mut config.general.middle_proxy_nat_stun_servers);
let legacy_nat_stun_used = legacy_nat_stun.is_some() || !legacy_nat_stun_servers.is_empty(); let legacy_nat_stun_used = legacy_nat_stun.is_some() || !legacy_nat_stun_servers.is_empty();
if stun_servers_is_explicit { if stun_servers_is_explicit {
let mut explicit_stun_servers = Vec::new(); let mut explicit_stun_servers = Vec::new();
@@ -225,7 +253,9 @@ impl ProxyConfig {
config.network.stun_servers = explicit_stun_servers; config.network.stun_servers = explicit_stun_servers;
if legacy_nat_stun_used { if legacy_nat_stun_used {
warn!("general.middle_proxy_nat_stun and general.middle_proxy_nat_stun_servers are ignored because network.stun_servers is explicitly set"); warn!(
"general.middle_proxy_nat_stun and general.middle_proxy_nat_stun_servers are ignored because network.stun_servers is explicitly set"
);
} }
} else { } else {
// Keep the default STUN pool unless network.stun_servers is explicitly overridden. // Keep the default STUN pool unless network.stun_servers is explicitly overridden.
@@ -240,7 +270,9 @@ impl ProxyConfig {
config.network.stun_servers = unified_stun_servers; config.network.stun_servers = unified_stun_servers;
if legacy_nat_stun_used { if legacy_nat_stun_used {
warn!("general.middle_proxy_nat_stun and general.middle_proxy_nat_stun_servers are deprecated; use network.stun_servers"); warn!(
"general.middle_proxy_nat_stun and general.middle_proxy_nat_stun_servers are deprecated; use network.stun_servers"
);
} }
} }
@@ -378,13 +410,15 @@ impl ProxyConfig {
if !(4096..=1024 * 1024).contains(&config.general.direct_relay_copy_buf_c2s_bytes) { if !(4096..=1024 * 1024).contains(&config.general.direct_relay_copy_buf_c2s_bytes) {
return Err(ProxyError::Config( return Err(ProxyError::Config(
"general.direct_relay_copy_buf_c2s_bytes must be within [4096, 1048576]".to_string(), "general.direct_relay_copy_buf_c2s_bytes must be within [4096, 1048576]"
.to_string(),
)); ));
} }
if !(8192..=2 * 1024 * 1024).contains(&config.general.direct_relay_copy_buf_s2c_bytes) { if !(8192..=2 * 1024 * 1024).contains(&config.general.direct_relay_copy_buf_s2c_bytes) {
return Err(ProxyError::Config( return Err(ProxyError::Config(
"general.direct_relay_copy_buf_s2c_bytes must be within [8192, 2097152]".to_string(), "general.direct_relay_copy_buf_s2c_bytes must be within [8192, 2097152]"
.to_string(),
)); ));
} }
@@ -633,7 +667,8 @@ impl ProxyConfig {
if !(1..=100).contains(&config.general.me_route_backpressure_high_watermark_pct) { if !(1..=100).contains(&config.general.me_route_backpressure_high_watermark_pct) {
return Err(ProxyError::Config( return Err(ProxyError::Config(
"general.me_route_backpressure_high_watermark_pct must be within [1, 100]".to_string(), "general.me_route_backpressure_high_watermark_pct must be within [1, 100]"
.to_string(),
)); ));
} }
@@ -779,6 +814,9 @@ impl ProxyConfig {
config.censorship.mask_host = Some(config.censorship.tls_domain.clone()); config.censorship.mask_host = Some(config.censorship.tls_domain.clone());
} }
// Normalize optional TLS fetch scope: whitespace-only values disable scoped routing.
config.censorship.tls_fetch_scope = config.censorship.tls_fetch_scope.trim().to_string();
// Merge primary + extra TLS domains, deduplicate (primary always first). // Merge primary + extra TLS domains, deduplicate (primary always first).
if !config.censorship.tls_domains.is_empty() { if !config.censorship.tls_domains.is_empty() {
let mut all = Vec::with_capacity(1 + config.censorship.tls_domains.len()); let mut all = Vec::with_capacity(1 + config.censorship.tls_domains.len());
@@ -813,11 +851,15 @@ impl ProxyConfig {
crate::network::dns_overrides::validate_entries(&config.network.dns_overrides)?; crate::network::dns_overrides::validate_entries(&config.network.dns_overrides)?;
if config.general.use_middle_proxy && config.network.ipv6 == Some(true) { if config.general.use_middle_proxy && config.network.ipv6 == Some(true) {
warn!("IPv6 with Middle Proxy is experimental and may cause KDF address mismatch; consider disabling IPv6 or ME"); warn!(
"IPv6 with Middle Proxy is experimental and may cause KDF address mismatch; consider disabling IPv6 or ME"
);
} }
// Random fake_cert_len only when default is in use. // Random fake_cert_len only when default is in use.
if !config.censorship.tls_emulation && config.censorship.fake_cert_len == default_fake_cert_len() { if !config.censorship.tls_emulation
&& config.censorship.fake_cert_len == default_fake_cert_len()
{
config.censorship.fake_cert_len = rand::rng().gen_range(1024..4096); config.censorship.fake_cert_len = rand::rng().gen_range(1024..4096);
} }
@@ -827,8 +869,7 @@ impl ProxyConfig {
let listen_tcp = config.server.listen_tcp.unwrap_or_else(|| { let listen_tcp = config.server.listen_tcp.unwrap_or_else(|| {
if config.server.listen_unix_sock.is_some() { if config.server.listen_unix_sock.is_some() {
// Unix socket present: TCP only if user explicitly set addresses or listeners. // Unix socket present: TCP only if user explicitly set addresses or listeners.
config.server.listen_addr_ipv4.is_some() config.server.listen_addr_ipv4.is_some() || !config.server.listeners.is_empty()
|| !config.server.listeners.is_empty()
} else { } else {
true true
} }
@@ -836,7 +877,9 @@ impl ProxyConfig {
// Migration: Populate listeners if empty (skip when listen_tcp = false). // Migration: Populate listeners if empty (skip when listen_tcp = false).
if config.server.listeners.is_empty() && listen_tcp { if config.server.listeners.is_empty() && listen_tcp {
let ipv4_str = config.server.listen_addr_ipv4 let ipv4_str = config
.server
.listen_addr_ipv4
.as_deref() .as_deref()
.unwrap_or("0.0.0.0"); .unwrap_or("0.0.0.0");
if let Ok(ipv4) = ipv4_str.parse::<IpAddr>() { if let Ok(ipv4) = ipv4_str.parse::<IpAddr>() {
@@ -878,7 +921,10 @@ impl ProxyConfig {
// Migration: Populate upstreams if empty (Default Direct). // Migration: Populate upstreams if empty (Default Direct).
if config.upstreams.is_empty() { if config.upstreams.is_empty() {
config.upstreams.push(UpstreamConfig { config.upstreams.push(UpstreamConfig {
upstream_type: UpstreamType::Direct { interface: None, bind_addresses: None }, upstream_type: UpstreamType::Direct {
interface: None,
bind_addresses: None,
},
weight: 1, weight: 1,
enabled: true, enabled: true,
scopes: String::new(), scopes: String::new(),
@@ -892,6 +938,8 @@ impl ProxyConfig {
.entry("203".to_string()) .entry("203".to_string())
.or_insert_with(|| vec!["91.105.192.100:443".to_string()]); .or_insert_with(|| vec!["91.105.192.100:443".to_string()]);
validate_upstreams(&config)?;
Ok(LoadedConfig { Ok(LoadedConfig {
config, config,
source_files: source_files.into_iter().collect(), source_files: source_files.into_iter().collect(),
@@ -938,6 +986,9 @@ impl ProxyConfig {
mod tests { mod tests {
use super::*; use super::*;
const TEST_SHADOWSOCKS_URL: &str =
"ss://2022-blake3-aes-256-gcm:MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDE=@127.0.0.1:8388";
#[test] #[test]
fn serde_defaults_remain_unchanged_for_present_sections() { fn serde_defaults_remain_unchanged_for_present_sections() {
let toml = r#" let toml = r#"
@@ -967,10 +1018,7 @@ mod tests {
cfg.general.me_init_retry_attempts, cfg.general.me_init_retry_attempts,
default_me_init_retry_attempts() default_me_init_retry_attempts()
); );
assert_eq!( assert_eq!(cfg.general.me2dc_fallback, default_me2dc_fallback());
cfg.general.me2dc_fallback,
default_me2dc_fallback()
);
assert_eq!( assert_eq!(
cfg.general.proxy_config_v4_cache_path, cfg.general.proxy_config_v4_cache_path,
default_proxy_config_v4_cache_path() default_proxy_config_v4_cache_path()
@@ -1279,11 +1327,12 @@ mod tests {
let path = dir.join("telemt_dc_override_test.toml"); let path = dir.join("telemt_dc_override_test.toml");
std::fs::write(&path, toml).unwrap(); std::fs::write(&path, toml).unwrap();
let cfg = ProxyConfig::load(&path).unwrap(); let cfg = ProxyConfig::load(&path).unwrap();
assert!(cfg assert!(
.dc_overrides cfg.dc_overrides
.get("203") .get("203")
.map(|v| v.contains(&"91.105.192.100:443".to_string())) .map(|v| v.contains(&"91.105.192.100:443".to_string()))
.unwrap_or(false)); .unwrap_or(false)
);
let _ = std::fs::remove_file(path); let _ = std::fs::remove_file(path);
} }
@@ -1470,11 +1519,9 @@ mod tests {
let path = dir.join("telemt_me_adaptive_floor_min_writers_out_of_range_test.toml"); let path = dir.join("telemt_me_adaptive_floor_min_writers_out_of_range_test.toml");
std::fs::write(&path, toml).unwrap(); std::fs::write(&path, toml).unwrap();
let err = ProxyConfig::load(&path).unwrap_err().to_string(); let err = ProxyConfig::load(&path).unwrap_err().to_string();
assert!( assert!(err.contains(
err.contains( "general.me_adaptive_floor_min_writers_single_endpoint must be within [1, 32]"
"general.me_adaptive_floor_min_writers_single_endpoint must be within [1, 32]" ));
)
);
let _ = std::fs::remove_file(path); let _ = std::fs::remove_file(path);
} }
@@ -2037,6 +2084,45 @@ mod tests {
let _ = std::fs::remove_file(path); let _ = std::fs::remove_file(path);
} }
#[test]
fn force_close_default_matches_drain_ttl() {
let toml = r#"
[censorship]
tls_domain = "example.com"
[access.users]
user = "00000000000000000000000000000000"
"#;
let dir = std::env::temp_dir();
let path = dir.join("telemt_force_close_default_test.toml");
std::fs::write(&path, toml).unwrap();
let cfg = ProxyConfig::load(&path).unwrap();
assert_eq!(cfg.general.me_reinit_drain_timeout_secs, 90);
assert_eq!(cfg.general.effective_me_pool_force_close_secs(), 90);
let _ = std::fs::remove_file(path);
}
#[test]
fn force_close_zero_uses_runtime_safety_fallback() {
let toml = r#"
[general]
me_reinit_drain_timeout_secs = 0
[censorship]
tls_domain = "example.com"
[access.users]
user = "00000000000000000000000000000000"
"#;
let dir = std::env::temp_dir();
let path = dir.join("telemt_force_close_zero_fallback_test.toml");
std::fs::write(&path, toml).unwrap();
let cfg = ProxyConfig::load(&path).unwrap();
assert_eq!(cfg.general.me_reinit_drain_timeout_secs, 0);
assert_eq!(cfg.general.effective_me_pool_force_close_secs(), 300);
let _ = std::fs::remove_file(path);
}
#[test] #[test]
fn force_close_bumped_when_below_drain_ttl() { fn force_close_bumped_when_below_drain_ttl() {
let toml = r#" let toml = r#"
@@ -2058,6 +2144,59 @@ mod tests {
let _ = std::fs::remove_file(path); let _ = std::fs::remove_file(path);
} }
#[test]
fn tls_fetch_scope_default_is_empty() {
let toml = r#"
[censorship]
tls_domain = "example.com"
[access.users]
user = "00000000000000000000000000000000"
"#;
let dir = std::env::temp_dir();
let path = dir.join("telemt_tls_fetch_scope_default_test.toml");
std::fs::write(&path, toml).unwrap();
let cfg = ProxyConfig::load(&path).unwrap();
assert!(cfg.censorship.tls_fetch_scope.is_empty());
let _ = std::fs::remove_file(path);
}
#[test]
fn tls_fetch_scope_is_trimmed_during_load() {
let toml = r#"
[censorship]
tls_domain = "example.com"
tls_fetch_scope = " me "
[access.users]
user = "00000000000000000000000000000000"
"#;
let dir = std::env::temp_dir();
let path = dir.join("telemt_tls_fetch_scope_trim_test.toml");
std::fs::write(&path, toml).unwrap();
let cfg = ProxyConfig::load(&path).unwrap();
assert_eq!(cfg.censorship.tls_fetch_scope, "me");
let _ = std::fs::remove_file(path);
}
#[test]
fn tls_fetch_scope_whitespace_becomes_empty() {
let toml = r#"
[censorship]
tls_domain = "example.com"
tls_fetch_scope = " "
[access.users]
user = "00000000000000000000000000000000"
"#;
let dir = std::env::temp_dir();
let path = dir.join("telemt_tls_fetch_scope_blank_test.toml");
std::fs::write(&path, toml).unwrap();
let cfg = ProxyConfig::load(&path).unwrap();
assert!(cfg.censorship.tls_fetch_scope.is_empty());
let _ = std::fs::remove_file(path);
}
#[test] #[test]
fn invalid_ad_tag_is_disabled_during_load() { fn invalid_ad_tag_is_disabled_during_load() {
let toml = r#" let toml = r#"
@@ -2101,6 +2240,124 @@ mod tests {
let _ = std::fs::remove_file(path); let _ = std::fs::remove_file(path);
} }
#[test]
fn shadowsocks_upstream_url_loads_successfully() {
let toml = format!(
r#"
[general]
use_middle_proxy = false
[censorship]
tls_domain = "example.com"
[access.users]
user = "00000000000000000000000000000000"
[[upstreams]]
type = "shadowsocks"
url = "{url}"
interface = "127.0.0.2"
"#,
url = TEST_SHADOWSOCKS_URL,
);
let dir = std::env::temp_dir();
let path = dir.join("telemt_shadowsocks_valid_test.toml");
std::fs::write(&path, toml).unwrap();
let cfg = ProxyConfig::load(&path).unwrap();
assert!(matches!(
&cfg.upstreams[0].upstream_type,
UpstreamType::Shadowsocks { url, interface }
if url == TEST_SHADOWSOCKS_URL && interface.as_deref() == Some("127.0.0.2")
));
let _ = std::fs::remove_file(path);
}
#[test]
fn shadowsocks_requires_direct_mode() {
let toml = format!(
r#"
[general]
use_middle_proxy = true
[censorship]
tls_domain = "example.com"
[access.users]
user = "00000000000000000000000000000000"
[[upstreams]]
type = "shadowsocks"
url = "{url}"
"#,
url = TEST_SHADOWSOCKS_URL,
);
let dir = std::env::temp_dir();
let path = dir.join("telemt_shadowsocks_me_reject_test.toml");
std::fs::write(&path, toml).unwrap();
let err = ProxyConfig::load(&path).unwrap_err().to_string();
assert!(err.contains("shadowsocks upstreams require general.use_middle_proxy = false"));
let _ = std::fs::remove_file(path);
}
#[test]
fn invalid_shadowsocks_url_is_rejected() {
let toml = r#"
[general]
use_middle_proxy = false
[censorship]
tls_domain = "example.com"
[access.users]
user = "00000000000000000000000000000000"
[[upstreams]]
type = "shadowsocks"
url = "not-a-valid-ss-url"
"#;
let dir = std::env::temp_dir();
let path = dir.join("telemt_shadowsocks_invalid_url_test.toml");
std::fs::write(&path, toml).unwrap();
let err = ProxyConfig::load(&path).unwrap_err().to_string();
assert!(err.contains("invalid shadowsocks url"));
let _ = std::fs::remove_file(path);
}
#[test]
fn shadowsocks_plugins_are_rejected() {
let toml = format!(
r#"
[general]
use_middle_proxy = false
[censorship]
tls_domain = "example.com"
[access.users]
user = "00000000000000000000000000000000"
[[upstreams]]
type = "shadowsocks"
url = "{url}?plugin=obfs-local%3Bobfs%3Dhttp"
"#,
url = TEST_SHADOWSOCKS_URL,
);
let dir = std::env::temp_dir();
let path = dir.join("telemt_shadowsocks_plugin_reject_test.toml");
std::fs::write(&path, toml).unwrap();
let err = ProxyConfig::load(&path).unwrap_err().to_string();
assert!(err.contains("shadowsocks plugins are not supported"));
let _ = std::fs::remove_file(path);
}
#[test] #[test]
fn invalid_user_ad_tag_reports_access_user_ad_tags_key() { fn invalid_user_ad_tag_reports_access_user_ad_tags_key() {
let toml = r#" let toml = r#"

View File

@@ -135,8 +135,8 @@ impl MeSocksKdfPolicy {
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)]
#[serde(rename_all = "lowercase")] #[serde(rename_all = "lowercase")]
pub enum MeBindStaleMode { pub enum MeBindStaleMode {
Never,
#[default] #[default]
Never,
Ttl, Ttl,
Always, Always,
} }
@@ -855,7 +855,7 @@ pub struct GeneralConfig {
pub me_pool_min_fresh_ratio: f32, pub me_pool_min_fresh_ratio: f32,
/// Drain timeout in seconds for stale ME writers after endpoint map changes. /// Drain timeout in seconds for stale ME writers after endpoint map changes.
/// Set to 0 to keep stale writers draining indefinitely (no force-close). /// Set to 0 to use the runtime safety fallback timeout.
#[serde(default = "default_me_reinit_drain_timeout_secs")] #[serde(default = "default_me_reinit_drain_timeout_secs")]
pub me_reinit_drain_timeout_secs: u64, pub me_reinit_drain_timeout_secs: u64,
@@ -955,24 +955,38 @@ impl Default for GeneralConfig {
me_reconnect_backoff_cap_ms: default_reconnect_backoff_cap_ms(), me_reconnect_backoff_cap_ms: default_reconnect_backoff_cap_ms(),
me_reconnect_fast_retry_count: default_me_reconnect_fast_retry_count(), me_reconnect_fast_retry_count: default_me_reconnect_fast_retry_count(),
me_single_endpoint_shadow_writers: default_me_single_endpoint_shadow_writers(), me_single_endpoint_shadow_writers: default_me_single_endpoint_shadow_writers(),
me_single_endpoint_outage_mode_enabled: default_me_single_endpoint_outage_mode_enabled(), me_single_endpoint_outage_mode_enabled: default_me_single_endpoint_outage_mode_enabled(
me_single_endpoint_outage_disable_quarantine: default_me_single_endpoint_outage_disable_quarantine(), ),
me_single_endpoint_outage_backoff_min_ms: default_me_single_endpoint_outage_backoff_min_ms(), me_single_endpoint_outage_disable_quarantine:
me_single_endpoint_outage_backoff_max_ms: default_me_single_endpoint_outage_backoff_max_ms(), default_me_single_endpoint_outage_disable_quarantine(),
me_single_endpoint_shadow_rotate_every_secs: default_me_single_endpoint_shadow_rotate_every_secs(), me_single_endpoint_outage_backoff_min_ms:
default_me_single_endpoint_outage_backoff_min_ms(),
me_single_endpoint_outage_backoff_max_ms:
default_me_single_endpoint_outage_backoff_max_ms(),
me_single_endpoint_shadow_rotate_every_secs:
default_me_single_endpoint_shadow_rotate_every_secs(),
me_floor_mode: MeFloorMode::default(), me_floor_mode: MeFloorMode::default(),
me_adaptive_floor_idle_secs: default_me_adaptive_floor_idle_secs(), me_adaptive_floor_idle_secs: default_me_adaptive_floor_idle_secs(),
me_adaptive_floor_min_writers_single_endpoint: default_me_adaptive_floor_min_writers_single_endpoint(), me_adaptive_floor_min_writers_single_endpoint:
me_adaptive_floor_min_writers_multi_endpoint: default_me_adaptive_floor_min_writers_multi_endpoint(), default_me_adaptive_floor_min_writers_single_endpoint(),
me_adaptive_floor_min_writers_multi_endpoint:
default_me_adaptive_floor_min_writers_multi_endpoint(),
me_adaptive_floor_recover_grace_secs: default_me_adaptive_floor_recover_grace_secs(), me_adaptive_floor_recover_grace_secs: default_me_adaptive_floor_recover_grace_secs(),
me_adaptive_floor_writers_per_core_total: default_me_adaptive_floor_writers_per_core_total(), me_adaptive_floor_writers_per_core_total:
default_me_adaptive_floor_writers_per_core_total(),
me_adaptive_floor_cpu_cores_override: default_me_adaptive_floor_cpu_cores_override(), me_adaptive_floor_cpu_cores_override: default_me_adaptive_floor_cpu_cores_override(),
me_adaptive_floor_max_extra_writers_single_per_core: default_me_adaptive_floor_max_extra_writers_single_per_core(), me_adaptive_floor_max_extra_writers_single_per_core:
me_adaptive_floor_max_extra_writers_multi_per_core: default_me_adaptive_floor_max_extra_writers_multi_per_core(), default_me_adaptive_floor_max_extra_writers_single_per_core(),
me_adaptive_floor_max_active_writers_per_core: default_me_adaptive_floor_max_active_writers_per_core(), me_adaptive_floor_max_extra_writers_multi_per_core:
me_adaptive_floor_max_warm_writers_per_core: default_me_adaptive_floor_max_warm_writers_per_core(), default_me_adaptive_floor_max_extra_writers_multi_per_core(),
me_adaptive_floor_max_active_writers_global: default_me_adaptive_floor_max_active_writers_global(), me_adaptive_floor_max_active_writers_per_core:
me_adaptive_floor_max_warm_writers_global: default_me_adaptive_floor_max_warm_writers_global(), default_me_adaptive_floor_max_active_writers_per_core(),
me_adaptive_floor_max_warm_writers_per_core:
default_me_adaptive_floor_max_warm_writers_per_core(),
me_adaptive_floor_max_active_writers_global:
default_me_adaptive_floor_max_active_writers_global(),
me_adaptive_floor_max_warm_writers_global:
default_me_adaptive_floor_max_warm_writers_global(),
upstream_connect_retry_attempts: default_upstream_connect_retry_attempts(), upstream_connect_retry_attempts: default_upstream_connect_retry_attempts(),
upstream_connect_retry_backoff_ms: default_upstream_connect_retry_backoff_ms(), upstream_connect_retry_backoff_ms: default_upstream_connect_retry_backoff_ms(),
upstream_connect_budget_ms: default_upstream_connect_budget_ms(), upstream_connect_budget_ms: default_upstream_connect_budget_ms(),
@@ -987,7 +1001,8 @@ impl Default for GeneralConfig {
me_socks_kdf_policy: MeSocksKdfPolicy::Strict, me_socks_kdf_policy: MeSocksKdfPolicy::Strict,
me_route_backpressure_base_timeout_ms: default_me_route_backpressure_base_timeout_ms(), me_route_backpressure_base_timeout_ms: default_me_route_backpressure_base_timeout_ms(),
me_route_backpressure_high_timeout_ms: default_me_route_backpressure_high_timeout_ms(), me_route_backpressure_high_timeout_ms: default_me_route_backpressure_high_timeout_ms(),
me_route_backpressure_high_watermark_pct: default_me_route_backpressure_high_watermark_pct(), me_route_backpressure_high_watermark_pct:
default_me_route_backpressure_high_watermark_pct(),
me_health_interval_ms_unhealthy: default_me_health_interval_ms_unhealthy(), me_health_interval_ms_unhealthy: default_me_health_interval_ms_unhealthy(),
me_health_interval_ms_healthy: default_me_health_interval_ms_healthy(), me_health_interval_ms_healthy: default_me_health_interval_ms_healthy(),
me_admission_poll_ms: default_me_admission_poll_ms(), me_admission_poll_ms: default_me_admission_poll_ms(),
@@ -1013,7 +1028,8 @@ impl Default for GeneralConfig {
me_hardswap_warmup_delay_min_ms: default_me_hardswap_warmup_delay_min_ms(), me_hardswap_warmup_delay_min_ms: default_me_hardswap_warmup_delay_min_ms(),
me_hardswap_warmup_delay_max_ms: default_me_hardswap_warmup_delay_max_ms(), me_hardswap_warmup_delay_max_ms: default_me_hardswap_warmup_delay_max_ms(),
me_hardswap_warmup_extra_passes: default_me_hardswap_warmup_extra_passes(), me_hardswap_warmup_extra_passes: default_me_hardswap_warmup_extra_passes(),
me_hardswap_warmup_pass_backoff_base_ms: default_me_hardswap_warmup_pass_backoff_base_ms(), me_hardswap_warmup_pass_backoff_base_ms:
default_me_hardswap_warmup_pass_backoff_base_ms(),
me_config_stable_snapshots: default_me_config_stable_snapshots(), me_config_stable_snapshots: default_me_config_stable_snapshots(),
me_config_apply_cooldown_secs: default_me_config_apply_cooldown_secs(), me_config_apply_cooldown_secs: default_me_config_apply_cooldown_secs(),
me_snapshot_require_http_2xx: default_me_snapshot_require_http_2xx(), me_snapshot_require_http_2xx: default_me_snapshot_require_http_2xx(),
@@ -1057,8 +1073,10 @@ impl GeneralConfig {
/// Resolve the active updater interval for ME infrastructure refresh tasks. /// Resolve the active updater interval for ME infrastructure refresh tasks.
/// `update_every` has priority, otherwise legacy proxy_*_auto_reload_secs are used. /// `update_every` has priority, otherwise legacy proxy_*_auto_reload_secs are used.
pub fn effective_update_every_secs(&self) -> u64 { pub fn effective_update_every_secs(&self) -> u64 {
self.update_every self.update_every.unwrap_or_else(|| {
.unwrap_or_else(|| self.proxy_secret_auto_reload_secs.min(self.proxy_config_auto_reload_secs)) self.proxy_secret_auto_reload_secs
.min(self.proxy_config_auto_reload_secs)
})
} }
/// Resolve periodic zero-downtime reinit interval for ME writers. /// Resolve periodic zero-downtime reinit interval for ME writers.
@@ -1068,8 +1086,13 @@ impl GeneralConfig {
/// Resolve force-close timeout for stale writers. /// Resolve force-close timeout for stale writers.
/// `me_reinit_drain_timeout_secs` remains backward-compatible alias. /// `me_reinit_drain_timeout_secs` remains backward-compatible alias.
/// A configured `0` uses the runtime safety fallback (300s).
pub fn effective_me_pool_force_close_secs(&self) -> u64 { pub fn effective_me_pool_force_close_secs(&self) -> u64 {
self.me_reinit_drain_timeout_secs if self.me_reinit_drain_timeout_secs == 0 {
300
} else {
self.me_reinit_drain_timeout_secs
}
} }
} }
@@ -1303,6 +1326,11 @@ pub struct AntiCensorshipConfig {
#[serde(default)] #[serde(default)]
pub tls_domains: Vec<String>, pub tls_domains: Vec<String>,
/// Upstream scope used for TLS front metadata fetches.
/// Empty value keeps default upstream routing behavior.
#[serde(default = "default_tls_fetch_scope")]
pub tls_fetch_scope: String,
#[serde(default = "default_true")] #[serde(default = "default_true")]
pub mask: bool, pub mask: bool,
@@ -1360,6 +1388,7 @@ impl Default for AntiCensorshipConfig {
Self { Self {
tls_domain: default_tls_domain(), tls_domain: default_tls_domain(),
tls_domains: Vec::new(), tls_domains: Vec::new(),
tls_fetch_scope: default_tls_fetch_scope(),
mask: default_true(), mask: default_true(),
mask_host: None, mask_host: None,
mask_port: default_mask_port(), mask_port: default_mask_port(),
@@ -1465,6 +1494,11 @@ pub enum UpstreamType {
#[serde(default)] #[serde(default)]
password: Option<String>, password: Option<String>,
}, },
Shadowsocks {
url: String,
#[serde(default)]
interface: Option<String>,
},
} }
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -1545,7 +1579,10 @@ impl ShowLink {
} }
impl Serialize for ShowLink { impl Serialize for ShowLink {
fn serialize<S: serde::Serializer>(&self, serializer: S) -> std::result::Result<S::Ok, S::Error> { fn serialize<S: serde::Serializer>(
&self,
serializer: S,
) -> std::result::Result<S::Ok, S::Error> {
match self { match self {
ShowLink::None => Vec::<String>::new().serialize(serializer), ShowLink::None => Vec::<String>::new().serialize(serializer),
ShowLink::All => serializer.serialize_str("*"), ShowLink::All => serializer.serialize_str("*"),
@@ -1555,7 +1592,9 @@ impl Serialize for ShowLink {
} }
impl<'de> Deserialize<'de> for ShowLink { impl<'de> Deserialize<'de> for ShowLink {
fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> std::result::Result<Self, D::Error> { fn deserialize<D: serde::Deserializer<'de>>(
deserializer: D,
) -> std::result::Result<Self, D::Error> {
use serde::de; use serde::de;
struct ShowLinkVisitor; struct ShowLinkVisitor;
@@ -1571,14 +1610,14 @@ impl<'de> Deserialize<'de> for ShowLink {
if v == "*" { if v == "*" {
Ok(ShowLink::All) Ok(ShowLink::All)
} else { } else {
Err(de::Error::invalid_value( Err(de::Error::invalid_value(de::Unexpected::Str(v), &r#""*""#))
de::Unexpected::Str(v),
&r#""*""#,
))
} }
} }
fn visit_seq<A: de::SeqAccess<'de>>(self, mut seq: A) -> std::result::Result<ShowLink, A::Error> { fn visit_seq<A: de::SeqAccess<'de>>(
self,
mut seq: A,
) -> std::result::Result<ShowLink, A::Error> {
let mut names = Vec::new(); let mut names = Vec::new();
while let Some(name) = seq.next_element::<String>()? { while let Some(name) = seq.next_element::<String>()? {
names.push(name); names.push(name);

View File

@@ -332,25 +332,76 @@ pub(crate) async fn initialize_me_pool(
"Middle-End pool initialized successfully" "Middle-End pool initialized successfully"
); );
let pool_health = pool_bg.clone(); // ── Supervised background tasks ──────────────────
let rng_health = rng_bg.clone(); // Each task runs inside a nested tokio::spawn so
let min_conns = pool_size; // that a panic is caught via JoinHandle and the
tokio::spawn(async move { // outer loop restarts the task automatically.
crate::transport::middle_proxy::me_health_monitor( let pool_health = pool_bg.clone();
pool_health, let rng_health = rng_bg.clone();
rng_health, let min_conns = pool_size;
min_conns, tokio::spawn(async move {
) loop {
.await; let p = pool_health.clone();
}); let r = rng_health.clone();
let pool_drain_enforcer = pool_bg.clone(); let res = tokio::spawn(async move {
tokio::spawn(async move { crate::transport::middle_proxy::me_health_monitor(
crate::transport::middle_proxy::me_drain_timeout_enforcer( p, r, min_conns,
pool_drain_enforcer, )
) .await;
.await; })
}); .await;
break; match res {
Ok(()) => warn!("me_health_monitor exited unexpectedly, restarting"),
Err(e) => {
error!(error = %e, "me_health_monitor panicked, restarting in 1s");
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
}
});
let pool_drain_enforcer = pool_bg.clone();
tokio::spawn(async move {
loop {
let p = pool_drain_enforcer.clone();
let res = tokio::spawn(async move {
crate::transport::middle_proxy::me_drain_timeout_enforcer(p).await;
})
.await;
match res {
Ok(()) => warn!("me_drain_timeout_enforcer exited unexpectedly, restarting"),
Err(e) => {
error!(error = %e, "me_drain_timeout_enforcer panicked, restarting in 1s");
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
}
});
let pool_watchdog = pool_bg.clone();
tokio::spawn(async move {
loop {
let p = pool_watchdog.clone();
let res = tokio::spawn(async move {
crate::transport::middle_proxy::me_zombie_writer_watchdog(p).await;
})
.await;
match res {
Ok(()) => warn!("me_zombie_writer_watchdog exited unexpectedly, restarting"),
Err(e) => {
error!(error = %e, "me_zombie_writer_watchdog panicked, restarting in 1s");
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
}
});
// CRITICAL: keep the current-thread runtime
// alive. Without this, block_on() returns,
// the Runtime is dropped, and ALL spawned
// background tasks (health monitor, drain
// enforcer, zombie watchdog) are silently
// cancelled — causing the draining-writer
// leak that brought us here.
std::future::pending::<()>().await;
unreachable!();
} }
Err(e) => { Err(e) => {
startup_tracker_bg.set_me_last_error(Some(e.to_string())).await; startup_tracker_bg.set_me_last_error(Some(e.to_string())).await;
@@ -408,22 +459,64 @@ pub(crate) async fn initialize_me_pool(
"Middle-End pool initialized successfully" "Middle-End pool initialized successfully"
); );
let pool_clone = pool.clone(); // ── Supervised background tasks ──────────────────
let rng_clone = rng.clone(); let pool_clone = pool.clone();
let min_conns = pool_size; let rng_clone = rng.clone();
tokio::spawn(async move { let min_conns = pool_size;
crate::transport::middle_proxy::me_health_monitor( tokio::spawn(async move {
pool_clone, rng_clone, min_conns, loop {
) let p = pool_clone.clone();
.await; let r = rng_clone.clone();
}); let res = tokio::spawn(async move {
let pool_drain_enforcer = pool.clone(); crate::transport::middle_proxy::me_health_monitor(
tokio::spawn(async move { p, r, min_conns,
crate::transport::middle_proxy::me_drain_timeout_enforcer( )
pool_drain_enforcer, .await;
) })
.await; .await;
}); match res {
Ok(()) => warn!("me_health_monitor exited unexpectedly, restarting"),
Err(e) => {
error!(error = %e, "me_health_monitor panicked, restarting in 1s");
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
}
});
let pool_drain_enforcer = pool.clone();
tokio::spawn(async move {
loop {
let p = pool_drain_enforcer.clone();
let res = tokio::spawn(async move {
crate::transport::middle_proxy::me_drain_timeout_enforcer(p).await;
})
.await;
match res {
Ok(()) => warn!("me_drain_timeout_enforcer exited unexpectedly, restarting"),
Err(e) => {
error!(error = %e, "me_drain_timeout_enforcer panicked, restarting in 1s");
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
}
});
let pool_watchdog = pool.clone();
tokio::spawn(async move {
loop {
let p = pool_watchdog.clone();
let res = tokio::spawn(async move {
crate::transport::middle_proxy::me_zombie_writer_watchdog(p).await;
})
.await;
match res {
Ok(()) => warn!("me_zombie_writer_watchdog exited unexpectedly, restarting"),
Err(e) => {
error!(error = %e, "me_zombie_writer_watchdog panicked, restarting in 1s");
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
}
});
break Some(pool); break Some(pool);
} }

View File

@@ -38,12 +38,15 @@ pub(crate) async fn bootstrap_tls_front(
.clone() .clone()
.unwrap_or_else(|| config.censorship.tls_domain.clone()); .unwrap_or_else(|| config.censorship.tls_domain.clone());
let mask_unix_sock = config.censorship.mask_unix_sock.clone(); let mask_unix_sock = config.censorship.mask_unix_sock.clone();
let tls_fetch_scope = (!config.censorship.tls_fetch_scope.is_empty())
.then(|| config.censorship.tls_fetch_scope.clone());
let fetch_timeout = Duration::from_secs(5); let fetch_timeout = Duration::from_secs(5);
let cache_initial = cache.clone(); let cache_initial = cache.clone();
let domains_initial = tls_domains.to_vec(); let domains_initial = tls_domains.to_vec();
let host_initial = mask_host.clone(); let host_initial = mask_host.clone();
let unix_sock_initial = mask_unix_sock.clone(); let unix_sock_initial = mask_unix_sock.clone();
let scope_initial = tls_fetch_scope.clone();
let upstream_initial = upstream_manager.clone(); let upstream_initial = upstream_manager.clone();
tokio::spawn(async move { tokio::spawn(async move {
let mut join = tokio::task::JoinSet::new(); let mut join = tokio::task::JoinSet::new();
@@ -51,6 +54,7 @@ pub(crate) async fn bootstrap_tls_front(
let cache_domain = cache_initial.clone(); let cache_domain = cache_initial.clone();
let host_domain = host_initial.clone(); let host_domain = host_initial.clone();
let unix_sock_domain = unix_sock_initial.clone(); let unix_sock_domain = unix_sock_initial.clone();
let scope_domain = scope_initial.clone();
let upstream_domain = upstream_initial.clone(); let upstream_domain = upstream_initial.clone();
join.spawn(async move { join.spawn(async move {
match crate::tls_front::fetcher::fetch_real_tls( match crate::tls_front::fetcher::fetch_real_tls(
@@ -59,6 +63,7 @@ pub(crate) async fn bootstrap_tls_front(
&domain, &domain,
fetch_timeout, fetch_timeout,
Some(upstream_domain), Some(upstream_domain),
scope_domain.as_deref(),
proxy_protocol, proxy_protocol,
unix_sock_domain.as_deref(), unix_sock_domain.as_deref(),
) )
@@ -100,6 +105,7 @@ pub(crate) async fn bootstrap_tls_front(
let domains_refresh = tls_domains.to_vec(); let domains_refresh = tls_domains.to_vec();
let host_refresh = mask_host.clone(); let host_refresh = mask_host.clone();
let unix_sock_refresh = mask_unix_sock.clone(); let unix_sock_refresh = mask_unix_sock.clone();
let scope_refresh = tls_fetch_scope.clone();
let upstream_refresh = upstream_manager.clone(); let upstream_refresh = upstream_manager.clone();
tokio::spawn(async move { tokio::spawn(async move {
loop { loop {
@@ -112,6 +118,7 @@ pub(crate) async fn bootstrap_tls_front(
let cache_domain = cache_refresh.clone(); let cache_domain = cache_refresh.clone();
let host_domain = host_refresh.clone(); let host_domain = host_refresh.clone();
let unix_sock_domain = unix_sock_refresh.clone(); let unix_sock_domain = unix_sock_refresh.clone();
let scope_domain = scope_refresh.clone();
let upstream_domain = upstream_refresh.clone(); let upstream_domain = upstream_refresh.clone();
join.spawn(async move { join.spawn(async move {
match crate::tls_front::fetcher::fetch_real_tls( match crate::tls_front::fetcher::fetch_real_tls(
@@ -120,6 +127,7 @@ pub(crate) async fn bootstrap_tls_front(
&domain, &domain,
fetch_timeout, fetch_timeout,
Some(upstream_domain), Some(upstream_domain),
scope_domain.as_deref(),
proxy_protocol, proxy_protocol,
unix_sock_domain.as_deref(), unix_sock_domain.as_deref(),
) )

View File

@@ -16,7 +16,9 @@ use tracing::{info, warn, debug};
use crate::config::ProxyConfig; use crate::config::ProxyConfig;
use crate::ip_tracker::UserIpTracker; use crate::ip_tracker::UserIpTracker;
use crate::stats::beobachten::BeobachtenStore; use crate::stats::beobachten::BeobachtenStore;
use crate::stats::Stats; use crate::stats::{
MeWriterCleanupSideEffectStep, MeWriterTeardownMode, MeWriterTeardownReason, Stats,
};
use crate::transport::{ListenOptions, create_listener}; use crate::transport::{ListenOptions, create_listener};
pub async fn serve( pub async fn serve(
@@ -1770,6 +1772,169 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
} }
); );
let _ = writeln!(
out,
"# HELP telemt_me_writer_teardown_attempt_total ME writer teardown attempts by reason and mode"
);
let _ = writeln!(out, "# TYPE telemt_me_writer_teardown_attempt_total counter");
for reason in MeWriterTeardownReason::ALL {
for mode in MeWriterTeardownMode::ALL {
let _ = writeln!(
out,
"telemt_me_writer_teardown_attempt_total{{reason=\"{}\",mode=\"{}\"}} {}",
reason.as_str(),
mode.as_str(),
if me_allows_normal {
stats.get_me_writer_teardown_attempt_total(reason, mode)
} else {
0
}
);
}
}
let _ = writeln!(
out,
"# HELP telemt_me_writer_teardown_success_total ME writer teardown successes by mode"
);
let _ = writeln!(out, "# TYPE telemt_me_writer_teardown_success_total counter");
for mode in MeWriterTeardownMode::ALL {
let _ = writeln!(
out,
"telemt_me_writer_teardown_success_total{{mode=\"{}\"}} {}",
mode.as_str(),
if me_allows_normal {
stats.get_me_writer_teardown_success_total(mode)
} else {
0
}
);
}
let _ = writeln!(
out,
"# HELP telemt_me_writer_teardown_timeout_total Teardown operations that timed out"
);
let _ = writeln!(out, "# TYPE telemt_me_writer_teardown_timeout_total counter");
let _ = writeln!(
out,
"telemt_me_writer_teardown_timeout_total {}",
if me_allows_normal {
stats.get_me_writer_teardown_timeout_total()
} else {
0
}
);
let _ = writeln!(
out,
"# HELP telemt_me_writer_teardown_escalation_total Watchdog teardown escalations to hard detach"
);
let _ = writeln!(
out,
"# TYPE telemt_me_writer_teardown_escalation_total counter"
);
let _ = writeln!(
out,
"telemt_me_writer_teardown_escalation_total {}",
if me_allows_normal {
stats.get_me_writer_teardown_escalation_total()
} else {
0
}
);
let _ = writeln!(
out,
"# HELP telemt_me_writer_teardown_noop_total Teardown operations that became no-op"
);
let _ = writeln!(out, "# TYPE telemt_me_writer_teardown_noop_total counter");
let _ = writeln!(
out,
"telemt_me_writer_teardown_noop_total {}",
if me_allows_normal {
stats.get_me_writer_teardown_noop_total()
} else {
0
}
);
let _ = writeln!(
out,
"# HELP telemt_me_writer_teardown_duration_seconds ME writer teardown latency histogram by mode"
);
let _ = writeln!(
out,
"# TYPE telemt_me_writer_teardown_duration_seconds histogram"
);
let bucket_labels = Stats::me_writer_teardown_duration_bucket_labels();
for mode in MeWriterTeardownMode::ALL {
for (bucket_idx, label) in bucket_labels.iter().enumerate() {
let _ = writeln!(
out,
"telemt_me_writer_teardown_duration_seconds_bucket{{mode=\"{}\",le=\"{}\"}} {}",
mode.as_str(),
label,
if me_allows_normal {
stats.get_me_writer_teardown_duration_bucket_total(mode, bucket_idx)
} else {
0
}
);
}
let _ = writeln!(
out,
"telemt_me_writer_teardown_duration_seconds_bucket{{mode=\"{}\",le=\"+Inf\"}} {}",
mode.as_str(),
if me_allows_normal {
stats.get_me_writer_teardown_duration_count(mode)
} else {
0
}
);
let _ = writeln!(
out,
"telemt_me_writer_teardown_duration_seconds_sum{{mode=\"{}\"}} {:.6}",
mode.as_str(),
if me_allows_normal {
stats.get_me_writer_teardown_duration_sum_seconds(mode)
} else {
0.0
}
);
let _ = writeln!(
out,
"telemt_me_writer_teardown_duration_seconds_count{{mode=\"{}\"}} {}",
mode.as_str(),
if me_allows_normal {
stats.get_me_writer_teardown_duration_count(mode)
} else {
0
}
);
}
let _ = writeln!(
out,
"# HELP telemt_me_writer_cleanup_side_effect_failures_total Failed cleanup side effects by step"
);
let _ = writeln!(
out,
"# TYPE telemt_me_writer_cleanup_side_effect_failures_total counter"
);
for step in MeWriterCleanupSideEffectStep::ALL {
let _ = writeln!(
out,
"telemt_me_writer_cleanup_side_effect_failures_total{{step=\"{}\"}} {}",
step.as_str(),
if me_allows_normal {
stats.get_me_writer_cleanup_side_effect_failures_total(step)
} else {
0
}
);
}
let _ = writeln!(out, "# HELP telemt_me_refill_triggered_total Immediate ME refill runs started"); let _ = writeln!(out, "# HELP telemt_me_refill_triggered_total Immediate ME refill runs started");
let _ = writeln!(out, "# TYPE telemt_me_refill_triggered_total counter"); let _ = writeln!(out, "# TYPE telemt_me_refill_triggered_total counter");
let _ = writeln!( let _ = writeln!(
@@ -2175,6 +2340,17 @@ mod tests {
assert!(output.contains("# TYPE telemt_me_rpc_proxy_req_signal_sent_total counter")); assert!(output.contains("# TYPE telemt_me_rpc_proxy_req_signal_sent_total counter"));
assert!(output.contains("# TYPE telemt_me_idle_close_by_peer_total counter")); assert!(output.contains("# TYPE telemt_me_idle_close_by_peer_total counter"));
assert!(output.contains("# TYPE telemt_me_writer_removed_total counter")); assert!(output.contains("# TYPE telemt_me_writer_removed_total counter"));
assert!(output.contains("# TYPE telemt_me_writer_teardown_attempt_total counter"));
assert!(output.contains("# TYPE telemt_me_writer_teardown_success_total counter"));
assert!(output.contains("# TYPE telemt_me_writer_teardown_timeout_total counter"));
assert!(output.contains("# TYPE telemt_me_writer_teardown_escalation_total counter"));
assert!(output.contains("# TYPE telemt_me_writer_teardown_noop_total counter"));
assert!(output.contains(
"# TYPE telemt_me_writer_teardown_duration_seconds histogram"
));
assert!(output.contains(
"# TYPE telemt_me_writer_cleanup_side_effect_failures_total counter"
));
assert!(output.contains("# TYPE telemt_me_writer_close_signal_drop_total counter")); assert!(output.contains("# TYPE telemt_me_writer_close_signal_drop_total counter"));
assert!(output.contains( assert!(output.contains(
"# TYPE telemt_me_writer_close_signal_channel_full_total counter" "# TYPE telemt_me_writer_close_signal_channel_full_total counter"

View File

@@ -3,8 +3,7 @@ use std::io::Write;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::sync::Arc; use std::sync::Arc;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt}; use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt, ReadHalf, WriteHalf, split};
use tokio::net::TcpStream;
use tokio::sync::watch; use tokio::sync::watch;
use tracing::{debug, info, warn}; use tracing::{debug, info, warn};
@@ -15,7 +14,7 @@ use crate::protocol::constants::*;
use crate::proxy::handshake::{HandshakeSuccess, encrypt_tg_nonce_with_ciphers, generate_tg_nonce}; use crate::proxy::handshake::{HandshakeSuccess, encrypt_tg_nonce_with_ciphers, generate_tg_nonce};
use crate::proxy::relay::relay_bidirectional; use crate::proxy::relay::relay_bidirectional;
use crate::proxy::route_mode::{ use crate::proxy::route_mode::{
RelayRouteMode, RouteCutoverState, ROUTE_SWITCH_ERROR_MSG, affected_cutover_state, ROUTE_SWITCH_ERROR_MSG, RelayRouteMode, RouteCutoverState, affected_cutover_state,
cutover_stagger_delay, cutover_stagger_delay,
}; };
use crate::proxy::adaptive_buffers; use crate::proxy::adaptive_buffers;
@@ -56,7 +55,11 @@ where
); );
let tg_stream = upstream_manager let tg_stream = upstream_manager
.connect(dc_addr, Some(success.dc_idx), user.strip_prefix("scope_").filter(|s| !s.is_empty())) .connect(
dc_addr,
Some(success.dc_idx),
user.strip_prefix("scope_").filter(|s| !s.is_empty()),
)
.await?; .await?;
debug!(peer = %success.peer, dc_addr = %dc_addr, "Connected, performing TG handshake"); debug!(peer = %success.peer, dc_addr = %dc_addr, "Connected, performing TG handshake");
@@ -93,11 +96,9 @@ where
); );
tokio::pin!(relay_result); tokio::pin!(relay_result);
let relay_result = loop { let relay_result = loop {
if let Some(cutover) = affected_cutover_state( if let Some(cutover) =
&route_rx, affected_cutover_state(&route_rx, RelayRouteMode::Direct, route_snapshot.generation)
RelayRouteMode::Direct, {
route_snapshot.generation,
) {
let delay = cutover_stagger_delay(session_id, cutover.generation); let delay = cutover_stagger_delay(session_id, cutover.generation);
warn!( warn!(
user = %user, user = %user,
@@ -148,7 +149,9 @@ fn get_dc_addr_static(dc_idx: i16, config: &ProxyConfig) -> Result<SocketAddr> {
for addr_str in addrs { for addr_str in addrs {
match addr_str.parse::<SocketAddr>() { match addr_str.parse::<SocketAddr>() {
Ok(addr) => parsed.push(addr), Ok(addr) => parsed.push(addr),
Err(_) => warn!(dc_idx = dc_idx, addr_str = %addr_str, "Invalid DC override address in config, ignoring"), Err(_) => {
warn!(dc_idx = dc_idx, addr_str = %addr_str, "Invalid DC override address in config, ignoring")
}
} }
} }
@@ -170,7 +173,10 @@ fn get_dc_addr_static(dc_idx: i16, config: &ProxyConfig) -> Result<SocketAddr> {
// Unknown DC requested by client without override: log and fall back. // Unknown DC requested by client without override: log and fall back.
if !config.dc_overrides.contains_key(&dc_key) { if !config.dc_overrides.contains_key(&dc_key) {
warn!(dc_idx = dc_idx, "Requested non-standard DC with no override; falling back to default cluster"); warn!(
dc_idx = dc_idx,
"Requested non-standard DC with no override; falling back to default cluster"
);
if config.general.unknown_dc_file_log_enabled if config.general.unknown_dc_file_log_enabled
&& let Some(path) = &config.general.unknown_dc_log_path && let Some(path) = &config.general.unknown_dc_log_path
&& let Ok(handle) = tokio::runtime::Handle::try_current() && let Ok(handle) = tokio::runtime::Handle::try_current()
@@ -204,15 +210,15 @@ fn get_dc_addr_static(dc_idx: i16, config: &ProxyConfig) -> Result<SocketAddr> {
)) ))
} }
async fn do_tg_handshake_static( async fn do_tg_handshake_static<S>(
mut stream: TcpStream, mut stream: S,
success: &HandshakeSuccess, success: &HandshakeSuccess,
config: &ProxyConfig, config: &ProxyConfig,
rng: &SecureRandom, rng: &SecureRandom,
) -> Result<( ) -> Result<(CryptoReader<ReadHalf<S>>, CryptoWriter<WriteHalf<S>>)>
CryptoReader<tokio::net::tcp::OwnedReadHalf>, where
CryptoWriter<tokio::net::tcp::OwnedWriteHalf>, S: AsyncRead + AsyncWrite + Unpin,
)> { {
let (nonce, _tg_enc_key, _tg_enc_iv, _tg_dec_key, _tg_dec_iv) = generate_tg_nonce( let (nonce, _tg_enc_key, _tg_enc_iv, _tg_dec_key, _tg_dec_iv) = generate_tg_nonce(
success.proto_tag, success.proto_tag,
success.dc_idx, success.dc_idx,
@@ -235,7 +241,7 @@ async fn do_tg_handshake_static(
stream.write_all(&encrypted_nonce).await?; stream.write_all(&encrypted_nonce).await?;
stream.flush().await?; stream.flush().await?;
let (read_half, write_half) = stream.into_split(); let (read_half, write_half) = split(stream);
let max_pending = config.general.crypto_pending_buffer; let max_pending = config.general.crypto_pending_buffer;
Ok(( Ok((

View File

@@ -19,6 +19,137 @@ use tracing::debug;
use crate::config::{MeTelemetryLevel, MeWriterPickMode}; use crate::config::{MeTelemetryLevel, MeWriterPickMode};
use self::telemetry::TelemetryPolicy; use self::telemetry::TelemetryPolicy;
const ME_WRITER_TEARDOWN_MODE_COUNT: usize = 2;
const ME_WRITER_TEARDOWN_REASON_COUNT: usize = 11;
const ME_WRITER_CLEANUP_SIDE_EFFECT_STEP_COUNT: usize = 2;
const ME_WRITER_TEARDOWN_DURATION_BUCKET_COUNT: usize = 12;
const ME_WRITER_TEARDOWN_DURATION_BUCKET_BOUNDS_MICROS: [u64; ME_WRITER_TEARDOWN_DURATION_BUCKET_COUNT] = [
1_000,
5_000,
10_000,
25_000,
50_000,
100_000,
250_000,
500_000,
1_000_000,
2_500_000,
5_000_000,
10_000_000,
];
const ME_WRITER_TEARDOWN_DURATION_BUCKET_LABELS: [&str; ME_WRITER_TEARDOWN_DURATION_BUCKET_COUNT] = [
"0.001",
"0.005",
"0.01",
"0.025",
"0.05",
"0.1",
"0.25",
"0.5",
"1",
"2.5",
"5",
"10",
];
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
#[repr(u8)]
pub enum MeWriterTeardownMode {
Normal = 0,
HardDetach = 1,
}
impl MeWriterTeardownMode {
pub const ALL: [Self; ME_WRITER_TEARDOWN_MODE_COUNT] =
[Self::Normal, Self::HardDetach];
pub const fn as_str(self) -> &'static str {
match self {
Self::Normal => "normal",
Self::HardDetach => "hard_detach",
}
}
const fn idx(self) -> usize {
self as usize
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
#[repr(u8)]
pub enum MeWriterTeardownReason {
ReaderExit = 0,
WriterTaskExit = 1,
PingSendFail = 2,
SignalSendFail = 3,
RouteChannelClosed = 4,
CloseRpcChannelClosed = 5,
PruneClosedWriter = 6,
ReapTimeoutExpired = 7,
ReapThresholdForce = 8,
ReapEmpty = 9,
WatchdogStuckDraining = 10,
}
impl MeWriterTeardownReason {
pub const ALL: [Self; ME_WRITER_TEARDOWN_REASON_COUNT] = [
Self::ReaderExit,
Self::WriterTaskExit,
Self::PingSendFail,
Self::SignalSendFail,
Self::RouteChannelClosed,
Self::CloseRpcChannelClosed,
Self::PruneClosedWriter,
Self::ReapTimeoutExpired,
Self::ReapThresholdForce,
Self::ReapEmpty,
Self::WatchdogStuckDraining,
];
pub const fn as_str(self) -> &'static str {
match self {
Self::ReaderExit => "reader_exit",
Self::WriterTaskExit => "writer_task_exit",
Self::PingSendFail => "ping_send_fail",
Self::SignalSendFail => "signal_send_fail",
Self::RouteChannelClosed => "route_channel_closed",
Self::CloseRpcChannelClosed => "close_rpc_channel_closed",
Self::PruneClosedWriter => "prune_closed_writer",
Self::ReapTimeoutExpired => "reap_timeout_expired",
Self::ReapThresholdForce => "reap_threshold_force",
Self::ReapEmpty => "reap_empty",
Self::WatchdogStuckDraining => "watchdog_stuck_draining",
}
}
const fn idx(self) -> usize {
self as usize
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
#[repr(u8)]
pub enum MeWriterCleanupSideEffectStep {
CloseSignalChannelFull = 0,
CloseSignalChannelClosed = 1,
}
impl MeWriterCleanupSideEffectStep {
pub const ALL: [Self; ME_WRITER_CLEANUP_SIDE_EFFECT_STEP_COUNT] =
[Self::CloseSignalChannelFull, Self::CloseSignalChannelClosed];
pub const fn as_str(self) -> &'static str {
match self {
Self::CloseSignalChannelFull => "close_signal_channel_full",
Self::CloseSignalChannelClosed => "close_signal_channel_closed",
}
}
const fn idx(self) -> usize {
self as usize
}
}
// ============= Stats ============= // ============= Stats =============
#[derive(Default)] #[derive(Default)]
@@ -128,6 +259,18 @@ pub struct Stats {
me_draining_writers_reap_progress_total: AtomicU64, me_draining_writers_reap_progress_total: AtomicU64,
me_writer_removed_total: AtomicU64, me_writer_removed_total: AtomicU64,
me_writer_removed_unexpected_total: AtomicU64, me_writer_removed_unexpected_total: AtomicU64,
me_writer_teardown_attempt_total:
[[AtomicU64; ME_WRITER_TEARDOWN_MODE_COUNT]; ME_WRITER_TEARDOWN_REASON_COUNT],
me_writer_teardown_success_total: [AtomicU64; ME_WRITER_TEARDOWN_MODE_COUNT],
me_writer_teardown_timeout_total: AtomicU64,
me_writer_teardown_escalation_total: AtomicU64,
me_writer_teardown_noop_total: AtomicU64,
me_writer_cleanup_side_effect_failures_total:
[AtomicU64; ME_WRITER_CLEANUP_SIDE_EFFECT_STEP_COUNT],
me_writer_teardown_duration_bucket_hits:
[[AtomicU64; ME_WRITER_TEARDOWN_DURATION_BUCKET_COUNT + 1]; ME_WRITER_TEARDOWN_MODE_COUNT],
me_writer_teardown_duration_sum_micros: [AtomicU64; ME_WRITER_TEARDOWN_MODE_COUNT],
me_writer_teardown_duration_count: [AtomicU64; ME_WRITER_TEARDOWN_MODE_COUNT],
me_refill_triggered_total: AtomicU64, me_refill_triggered_total: AtomicU64,
me_refill_skipped_inflight_total: AtomicU64, me_refill_skipped_inflight_total: AtomicU64,
me_refill_failed_total: AtomicU64, me_refill_failed_total: AtomicU64,
@@ -765,6 +908,74 @@ impl Stats {
self.me_writer_removed_unexpected_total.fetch_add(1, Ordering::Relaxed); self.me_writer_removed_unexpected_total.fetch_add(1, Ordering::Relaxed);
} }
} }
pub fn increment_me_writer_teardown_attempt_total(
&self,
reason: MeWriterTeardownReason,
mode: MeWriterTeardownMode,
) {
if self.telemetry_me_allows_normal() {
self.me_writer_teardown_attempt_total[reason.idx()][mode.idx()]
.fetch_add(1, Ordering::Relaxed);
}
}
pub fn increment_me_writer_teardown_success_total(&self, mode: MeWriterTeardownMode) {
if self.telemetry_me_allows_normal() {
self.me_writer_teardown_success_total[mode.idx()].fetch_add(1, Ordering::Relaxed);
}
}
pub fn increment_me_writer_teardown_timeout_total(&self) {
if self.telemetry_me_allows_normal() {
self.me_writer_teardown_timeout_total
.fetch_add(1, Ordering::Relaxed);
}
}
pub fn increment_me_writer_teardown_escalation_total(&self) {
if self.telemetry_me_allows_normal() {
self.me_writer_teardown_escalation_total
.fetch_add(1, Ordering::Relaxed);
}
}
pub fn increment_me_writer_teardown_noop_total(&self) {
if self.telemetry_me_allows_normal() {
self.me_writer_teardown_noop_total
.fetch_add(1, Ordering::Relaxed);
}
}
pub fn increment_me_writer_cleanup_side_effect_failures_total(
&self,
step: MeWriterCleanupSideEffectStep,
) {
if self.telemetry_me_allows_normal() {
self.me_writer_cleanup_side_effect_failures_total[step.idx()]
.fetch_add(1, Ordering::Relaxed);
}
}
pub fn observe_me_writer_teardown_duration(
&self,
mode: MeWriterTeardownMode,
duration: Duration,
) {
if !self.telemetry_me_allows_normal() {
return;
}
let duration_micros = duration.as_micros().min(u64::MAX as u128) as u64;
let mut bucket_idx = ME_WRITER_TEARDOWN_DURATION_BUCKET_COUNT;
for (idx, upper_bound_micros) in ME_WRITER_TEARDOWN_DURATION_BUCKET_BOUNDS_MICROS
.iter()
.copied()
.enumerate()
{
if duration_micros <= upper_bound_micros {
bucket_idx = idx;
break;
}
}
self.me_writer_teardown_duration_bucket_hits[mode.idx()][bucket_idx]
.fetch_add(1, Ordering::Relaxed);
self.me_writer_teardown_duration_sum_micros[mode.idx()]
.fetch_add(duration_micros, Ordering::Relaxed);
self.me_writer_teardown_duration_count[mode.idx()].fetch_add(1, Ordering::Relaxed);
}
pub fn increment_me_refill_triggered_total(&self) { pub fn increment_me_refill_triggered_total(&self) {
if self.telemetry_me_allows_debug() { if self.telemetry_me_allows_debug() {
self.me_refill_triggered_total.fetch_add(1, Ordering::Relaxed); self.me_refill_triggered_total.fetch_add(1, Ordering::Relaxed);
@@ -1297,6 +1508,79 @@ impl Stats {
pub fn get_me_writer_removed_unexpected_total(&self) -> u64 { pub fn get_me_writer_removed_unexpected_total(&self) -> u64 {
self.me_writer_removed_unexpected_total.load(Ordering::Relaxed) self.me_writer_removed_unexpected_total.load(Ordering::Relaxed)
} }
pub fn get_me_writer_teardown_attempt_total(
&self,
reason: MeWriterTeardownReason,
mode: MeWriterTeardownMode,
) -> u64 {
self.me_writer_teardown_attempt_total[reason.idx()][mode.idx()]
.load(Ordering::Relaxed)
}
pub fn get_me_writer_teardown_attempt_total_by_mode(&self, mode: MeWriterTeardownMode) -> u64 {
MeWriterTeardownReason::ALL
.iter()
.copied()
.map(|reason| self.get_me_writer_teardown_attempt_total(reason, mode))
.sum()
}
pub fn get_me_writer_teardown_success_total(&self, mode: MeWriterTeardownMode) -> u64 {
self.me_writer_teardown_success_total[mode.idx()].load(Ordering::Relaxed)
}
pub fn get_me_writer_teardown_timeout_total(&self) -> u64 {
self.me_writer_teardown_timeout_total.load(Ordering::Relaxed)
}
pub fn get_me_writer_teardown_escalation_total(&self) -> u64 {
self.me_writer_teardown_escalation_total
.load(Ordering::Relaxed)
}
pub fn get_me_writer_teardown_noop_total(&self) -> u64 {
self.me_writer_teardown_noop_total.load(Ordering::Relaxed)
}
pub fn get_me_writer_cleanup_side_effect_failures_total(
&self,
step: MeWriterCleanupSideEffectStep,
) -> u64 {
self.me_writer_cleanup_side_effect_failures_total[step.idx()]
.load(Ordering::Relaxed)
}
pub fn get_me_writer_cleanup_side_effect_failures_total_all(&self) -> u64 {
MeWriterCleanupSideEffectStep::ALL
.iter()
.copied()
.map(|step| self.get_me_writer_cleanup_side_effect_failures_total(step))
.sum()
}
pub fn me_writer_teardown_duration_bucket_labels(
) -> &'static [&'static str; ME_WRITER_TEARDOWN_DURATION_BUCKET_COUNT] {
&ME_WRITER_TEARDOWN_DURATION_BUCKET_LABELS
}
pub fn get_me_writer_teardown_duration_bucket_hits(
&self,
mode: MeWriterTeardownMode,
bucket_idx: usize,
) -> u64 {
self.me_writer_teardown_duration_bucket_hits[mode.idx()][bucket_idx]
.load(Ordering::Relaxed)
}
pub fn get_me_writer_teardown_duration_bucket_total(
&self,
mode: MeWriterTeardownMode,
bucket_idx: usize,
) -> u64 {
let capped_idx = bucket_idx.min(ME_WRITER_TEARDOWN_DURATION_BUCKET_COUNT);
let mut total = 0u64;
for idx in 0..=capped_idx {
total = total.saturating_add(self.get_me_writer_teardown_duration_bucket_hits(mode, idx));
}
total
}
pub fn get_me_writer_teardown_duration_count(&self, mode: MeWriterTeardownMode) -> u64 {
self.me_writer_teardown_duration_count[mode.idx()].load(Ordering::Relaxed)
}
pub fn get_me_writer_teardown_duration_sum_seconds(&self, mode: MeWriterTeardownMode) -> f64 {
self.me_writer_teardown_duration_sum_micros[mode.idx()].load(Ordering::Relaxed) as f64
/ 1_000_000.0
}
pub fn get_me_refill_triggered_total(&self) -> u64 { pub fn get_me_refill_triggered_total(&self) -> u64 {
self.me_refill_triggered_total.load(Ordering::Relaxed) self.me_refill_triggered_total.load(Ordering::Relaxed)
} }
@@ -1801,6 +2085,79 @@ mod tests {
assert_eq!(stats.get_me_route_drop_queue_full(), 0); assert_eq!(stats.get_me_route_drop_queue_full(), 0);
} }
#[test]
fn test_teardown_counters_and_duration() {
let stats = Stats::new();
stats.increment_me_writer_teardown_attempt_total(
MeWriterTeardownReason::ReaderExit,
MeWriterTeardownMode::Normal,
);
stats.increment_me_writer_teardown_success_total(MeWriterTeardownMode::Normal);
stats.observe_me_writer_teardown_duration(
MeWriterTeardownMode::Normal,
Duration::from_millis(3),
);
stats.increment_me_writer_cleanup_side_effect_failures_total(
MeWriterCleanupSideEffectStep::CloseSignalChannelFull,
);
assert_eq!(
stats.get_me_writer_teardown_attempt_total(
MeWriterTeardownReason::ReaderExit,
MeWriterTeardownMode::Normal
),
1
);
assert_eq!(
stats.get_me_writer_teardown_success_total(MeWriterTeardownMode::Normal),
1
);
assert_eq!(
stats.get_me_writer_teardown_duration_count(MeWriterTeardownMode::Normal),
1
);
assert!(
stats.get_me_writer_teardown_duration_sum_seconds(MeWriterTeardownMode::Normal) > 0.0
);
assert_eq!(
stats.get_me_writer_cleanup_side_effect_failures_total(
MeWriterCleanupSideEffectStep::CloseSignalChannelFull
),
1
);
}
#[test]
fn test_teardown_counters_respect_me_silent() {
let stats = Stats::new();
stats.apply_telemetry_policy(TelemetryPolicy {
core_enabled: true,
user_enabled: true,
me_level: MeTelemetryLevel::Silent,
});
stats.increment_me_writer_teardown_attempt_total(
MeWriterTeardownReason::ReaderExit,
MeWriterTeardownMode::Normal,
);
stats.increment_me_writer_teardown_timeout_total();
stats.observe_me_writer_teardown_duration(
MeWriterTeardownMode::Normal,
Duration::from_millis(1),
);
assert_eq!(
stats.get_me_writer_teardown_attempt_total(
MeWriterTeardownReason::ReaderExit,
MeWriterTeardownMode::Normal
),
0
);
assert_eq!(stats.get_me_writer_teardown_timeout_total(), 0);
assert_eq!(
stats.get_me_writer_teardown_duration_count(MeWriterTeardownMode::Normal),
0
);
}
#[test] #[test]
fn test_replay_checker_basic() { fn test_replay_checker_basic() {
let checker = ReplayChecker::new(100, Duration::from_secs(60)); let checker = ReplayChecker::new(100, Duration::from_secs(60));

View File

@@ -7,33 +7,29 @@ use tokio::net::TcpStream;
#[cfg(unix)] #[cfg(unix)]
use tokio::net::UnixStream; use tokio::net::UnixStream;
use tokio::time::timeout; use tokio::time::timeout;
use tokio_rustls::client::TlsStream;
use tokio_rustls::TlsConnector; use tokio_rustls::TlsConnector;
use tokio_rustls::client::TlsStream;
use tracing::{debug, warn}; use tracing::{debug, warn};
use rustls::client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier};
use rustls::client::ClientConfig; use rustls::client::ClientConfig;
use rustls::client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier};
use rustls::pki_types::{CertificateDer, ServerName, UnixTime}; use rustls::pki_types::{CertificateDer, ServerName, UnixTime};
use rustls::{DigitallySignedStruct, Error as RustlsError}; use rustls::{DigitallySignedStruct, Error as RustlsError};
use x509_parser::prelude::FromDer;
use x509_parser::certificate::X509Certificate; use x509_parser::certificate::X509Certificate;
use x509_parser::prelude::FromDer;
use crate::crypto::SecureRandom; use crate::crypto::SecureRandom;
use crate::network::dns_overrides::resolve_socket_addr; use crate::network::dns_overrides::resolve_socket_addr;
use crate::protocol::constants::{ use crate::protocol::constants::{
TLS_RECORD_APPLICATION, TLS_RECORD_CHANGE_CIPHER, TLS_RECORD_HANDSHAKE, TLS_RECORD_APPLICATION, TLS_RECORD_CHANGE_CIPHER, TLS_RECORD_HANDSHAKE,
}; };
use crate::transport::proxy_protocol::{ProxyProtocolV1Builder, ProxyProtocolV2Builder};
use crate::tls_front::types::{ use crate::tls_front::types::{
ParsedCertificateInfo, ParsedCertificateInfo, ParsedServerHello, TlsBehaviorProfile, TlsCertPayload, TlsExtension,
ParsedServerHello, TlsFetchResult, TlsProfileSource,
TlsBehaviorProfile,
TlsCertPayload,
TlsExtension,
TlsFetchResult,
TlsProfileSource,
}; };
use crate::transport::UpstreamStream;
use crate::transport::proxy_protocol::{ProxyProtocolV1Builder, ProxyProtocolV2Builder};
/// No-op verifier: accept any certificate (we only need lengths and metadata). /// No-op verifier: accept any certificate (we only need lengths and metadata).
#[derive(Debug)] #[derive(Debug)]
@@ -144,21 +140,27 @@ fn build_client_hello(sni: &str, rng: &SecureRandom) -> Vec<u8> {
exts.extend_from_slice(&0x000au16.to_be_bytes()); exts.extend_from_slice(&0x000au16.to_be_bytes());
exts.extend_from_slice(&((2 + groups.len() * 2) as u16).to_be_bytes()); exts.extend_from_slice(&((2 + groups.len() * 2) as u16).to_be_bytes());
exts.extend_from_slice(&(groups.len() as u16 * 2).to_be_bytes()); exts.extend_from_slice(&(groups.len() as u16 * 2).to_be_bytes());
for g in groups { exts.extend_from_slice(&g.to_be_bytes()); } for g in groups {
exts.extend_from_slice(&g.to_be_bytes());
}
// signature_algorithms // signature_algorithms
let sig_algs: [u16; 4] = [0x0804, 0x0805, 0x0403, 0x0503]; // rsa_pss_rsae_sha256/384, ecdsa_secp256r1_sha256, rsa_pkcs1_sha256 let sig_algs: [u16; 4] = [0x0804, 0x0805, 0x0403, 0x0503]; // rsa_pss_rsae_sha256/384, ecdsa_secp256r1_sha256, rsa_pkcs1_sha256
exts.extend_from_slice(&0x000du16.to_be_bytes()); exts.extend_from_slice(&0x000du16.to_be_bytes());
exts.extend_from_slice(&((2 + sig_algs.len() * 2) as u16).to_be_bytes()); exts.extend_from_slice(&((2 + sig_algs.len() * 2) as u16).to_be_bytes());
exts.extend_from_slice(&(sig_algs.len() as u16 * 2).to_be_bytes()); exts.extend_from_slice(&(sig_algs.len() as u16 * 2).to_be_bytes());
for a in sig_algs { exts.extend_from_slice(&a.to_be_bytes()); } for a in sig_algs {
exts.extend_from_slice(&a.to_be_bytes());
}
// supported_versions (TLS1.3 + TLS1.2) // supported_versions (TLS1.3 + TLS1.2)
let versions: [u16; 2] = [0x0304, 0x0303]; let versions: [u16; 2] = [0x0304, 0x0303];
exts.extend_from_slice(&0x002bu16.to_be_bytes()); exts.extend_from_slice(&0x002bu16.to_be_bytes());
exts.extend_from_slice(&((1 + versions.len() * 2) as u16).to_be_bytes()); exts.extend_from_slice(&((1 + versions.len() * 2) as u16).to_be_bytes());
exts.push((versions.len() * 2) as u8); exts.push((versions.len() * 2) as u8);
for v in versions { exts.extend_from_slice(&v.to_be_bytes()); } for v in versions {
exts.extend_from_slice(&v.to_be_bytes());
}
// key_share (x25519) // key_share (x25519)
let key = gen_key_share(rng); let key = gen_key_share(rng);
@@ -273,7 +275,10 @@ fn parse_server_hello(body: &[u8]) -> Option<ParsedServerHello> {
pos += 4; pos += 4;
let data = body.get(pos..pos + elen)?.to_vec(); let data = body.get(pos..pos + elen)?.to_vec();
pos += elen; pos += elen;
extensions.push(TlsExtension { ext_type: etype, data }); extensions.push(TlsExtension {
ext_type: etype,
data,
});
} }
Some(ParsedServerHello { Some(ParsedServerHello {
@@ -394,37 +399,42 @@ async fn connect_tcp_with_upstream(
port: u16, port: u16,
connect_timeout: Duration, connect_timeout: Duration,
upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>, upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
) -> Result<TcpStream> { scope: Option<&str>,
) -> Result<UpstreamStream> {
if let Some(manager) = upstream { if let Some(manager) = upstream {
if let Some(addr) = resolve_socket_addr(host, port) { if let Some(addr) = resolve_socket_addr(host, port) {
match manager.connect(addr, None, None).await { match manager.connect(addr, None, scope).await {
Ok(stream) => return Ok(stream), Ok(stream) => return Ok(stream),
Err(e) => { Err(e) => {
warn!( warn!(
host = %host, host = %host,
port = port, port = port,
scope = ?scope,
error = %e, error = %e,
"Upstream connect failed, using direct connect" "Upstream connect failed, using direct connect"
); );
} }
} }
} else if let Ok(mut addrs) = tokio::net::lookup_host((host, port)).await { } else if let Ok(mut addrs) = tokio::net::lookup_host((host, port)).await
if let Some(addr) = addrs.find(|a| a.is_ipv4()) { && let Some(addr) = addrs.find(|a| a.is_ipv4())
match manager.connect(addr, None, None).await { {
Ok(stream) => return Ok(stream), match manager.connect(addr, None, scope).await {
Err(e) => { Ok(stream) => return Ok(stream),
warn!( Err(e) => {
host = %host, warn!(
port = port, host = %host,
error = %e, port = port,
"Upstream connect failed, using direct connect" scope = ?scope,
); error = %e,
} "Upstream connect failed, using direct connect"
);
} }
} }
} }
} }
connect_with_dns_override(host, port, connect_timeout).await Ok(UpstreamStream::Tcp(
connect_with_dns_override(host, port, connect_timeout).await?,
))
} }
fn encode_tls13_certificate_message(cert_chain_der: &[Vec<u8>]) -> Option<Vec<u8>> { fn encode_tls13_certificate_message(cert_chain_der: &[Vec<u8>]) -> Option<Vec<u8>> {
@@ -443,9 +453,7 @@ fn encode_tls13_certificate_message(cert_chain_der: &[Vec<u8>]) -> Option<Vec<u8
} }
// Certificate = context_len(1) + certificate_list_len(3) + entries // Certificate = context_len(1) + certificate_list_len(3) + entries
let body_len = 1usize let body_len = 1usize.checked_add(3)?.checked_add(certificate_list.len())?;
.checked_add(3)?
.checked_add(certificate_list.len())?;
let mut message = Vec::with_capacity(4 + body_len); let mut message = Vec::with_capacity(4 + body_len);
message.push(0x0b); // HandshakeType::certificate message.push(0x0b); // HandshakeType::certificate
@@ -537,6 +545,7 @@ async fn fetch_via_raw_tls(
sni: &str, sni: &str,
connect_timeout: Duration, connect_timeout: Duration,
upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>, upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
scope: Option<&str>,
proxy_protocol: u8, proxy_protocol: u8,
unix_sock: Option<&str>, unix_sock: Option<&str>,
) -> Result<TlsFetchResult> { ) -> Result<TlsFetchResult> {
@@ -549,7 +558,8 @@ async fn fetch_via_raw_tls(
sock = %sock_path, sock = %sock_path,
"Raw TLS fetch using mask unix socket" "Raw TLS fetch using mask unix socket"
); );
return fetch_via_raw_tls_stream(stream, sni, connect_timeout, proxy_protocol).await; return fetch_via_raw_tls_stream(stream, sni, connect_timeout, proxy_protocol)
.await;
} }
Ok(Err(e)) => { Ok(Err(e)) => {
warn!( warn!(
@@ -572,7 +582,7 @@ async fn fetch_via_raw_tls(
#[cfg(not(unix))] #[cfg(not(unix))]
let _ = unix_sock; let _ = unix_sock;
let stream = connect_tcp_with_upstream(host, port, connect_timeout, upstream).await?; let stream = connect_tcp_with_upstream(host, port, connect_timeout, upstream, scope).await?;
fetch_via_raw_tls_stream(stream, sni, connect_timeout, proxy_protocol).await fetch_via_raw_tls_stream(stream, sni, connect_timeout, proxy_protocol).await
} }
@@ -616,12 +626,13 @@ where
.map(|slice| slice.to_vec()) .map(|slice| slice.to_vec())
.unwrap_or_default(); .unwrap_or_default();
let cert_chain_der: Vec<Vec<u8>> = certs.iter().map(|c| c.as_ref().to_vec()).collect(); let cert_chain_der: Vec<Vec<u8>> = certs.iter().map(|c| c.as_ref().to_vec()).collect();
let cert_payload = encode_tls13_certificate_message(&cert_chain_der).map(|certificate_message| { let cert_payload =
TlsCertPayload { encode_tls13_certificate_message(&cert_chain_der).map(|certificate_message| {
cert_chain_der: cert_chain_der.clone(), TlsCertPayload {
certificate_message, cert_chain_der: cert_chain_der.clone(),
} certificate_message,
}); }
});
let total_cert_len = cert_payload let total_cert_len = cert_payload
.as_ref() .as_ref()
@@ -675,6 +686,7 @@ async fn fetch_via_rustls(
sni: &str, sni: &str,
connect_timeout: Duration, connect_timeout: Duration,
upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>, upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
scope: Option<&str>,
proxy_protocol: u8, proxy_protocol: u8,
unix_sock: Option<&str>, unix_sock: Option<&str>,
) -> Result<TlsFetchResult> { ) -> Result<TlsFetchResult> {
@@ -710,7 +722,7 @@ async fn fetch_via_rustls(
#[cfg(not(unix))] #[cfg(not(unix))]
let _ = unix_sock; let _ = unix_sock;
let stream = connect_tcp_with_upstream(host, port, connect_timeout, upstream).await?; let stream = connect_tcp_with_upstream(host, port, connect_timeout, upstream, scope).await?;
fetch_via_rustls_stream(stream, host, sni, proxy_protocol).await fetch_via_rustls_stream(stream, host, sni, proxy_protocol).await
} }
@@ -726,6 +738,7 @@ pub async fn fetch_real_tls(
sni: &str, sni: &str,
connect_timeout: Duration, connect_timeout: Duration,
upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>, upstream: Option<std::sync::Arc<crate::transport::UpstreamManager>>,
scope: Option<&str>,
proxy_protocol: u8, proxy_protocol: u8,
unix_sock: Option<&str>, unix_sock: Option<&str>,
) -> Result<TlsFetchResult> { ) -> Result<TlsFetchResult> {
@@ -735,6 +748,7 @@ pub async fn fetch_real_tls(
sni, sni,
connect_timeout, connect_timeout,
upstream.clone(), upstream.clone(),
scope,
proxy_protocol, proxy_protocol,
unix_sock, unix_sock,
) )
@@ -753,6 +767,7 @@ pub async fn fetch_real_tls(
sni, sni,
connect_timeout, connect_timeout,
upstream, upstream,
scope,
proxy_protocol, proxy_protocol,
unix_sock, unix_sock,
) )

View File

@@ -10,9 +10,10 @@ use tracing::{debug, info, warn};
use crate::config::MeFloorMode; use crate::config::MeFloorMode;
use crate::crypto::SecureRandom; use crate::crypto::SecureRandom;
use crate::network::IpFamily; use crate::network::IpFamily;
use crate::stats::MeWriterTeardownReason;
use super::MePool; use super::MePool;
use super::pool::MeWriter; use super::pool::{MeFamilyRuntimeState, MeWriter};
const JITTER_FRAC_NUM: u64 = 2; // jitter up to 50% of backoff const JITTER_FRAC_NUM: u64 = 2; // jitter up to 50% of backoff
#[allow(dead_code)] #[allow(dead_code)]
@@ -33,6 +34,33 @@ const HEALTH_DRAIN_SOFT_EVICT_BUDGET_MIN: usize = 8;
const HEALTH_DRAIN_SOFT_EVICT_BUDGET_MAX: usize = 256; const HEALTH_DRAIN_SOFT_EVICT_BUDGET_MAX: usize = 256;
const HEALTH_DRAIN_REAP_OPPORTUNISTIC_INTERVAL_SECS: u64 = 1; const HEALTH_DRAIN_REAP_OPPORTUNISTIC_INTERVAL_SECS: u64 = 1;
const HEALTH_DRAIN_TIMEOUT_ENFORCER_INTERVAL_SECS: u64 = 1; const HEALTH_DRAIN_TIMEOUT_ENFORCER_INTERVAL_SECS: u64 = 1;
const FAMILY_SUPPRESS_FAIL_STREAK_THRESHOLD: u32 = 6;
const FAMILY_SUPPRESS_WINDOW_SECS: u64 = 120;
const FAMILY_RECOVER_PROBE_INTERVAL_SECS: u64 = 5;
const FAMILY_RECOVER_SUCCESS_STREAK_REQUIRED: u32 = 3;
#[derive(Debug, Clone)]
struct FamilyCircuitState {
state: MeFamilyRuntimeState,
state_since_at: Instant,
suppressed_until: Option<Instant>,
next_probe_at: Instant,
fail_streak: u32,
recover_success_streak: u32,
}
impl FamilyCircuitState {
fn new(now: Instant) -> Self {
Self {
state: MeFamilyRuntimeState::Healthy,
state_since_at: now,
suppressed_until: None,
next_probe_at: now,
fail_streak: 0,
recover_success_streak: 0,
}
}
}
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
struct DcFloorPlanEntry { struct DcFloorPlanEntry {
@@ -72,6 +100,25 @@ pub async fn me_health_monitor(pool: Arc<MePool>, rng: Arc<SecureRandom>, _min_c
let mut floor_warn_next_allowed: HashMap<(i32, IpFamily), Instant> = HashMap::new(); let mut floor_warn_next_allowed: HashMap<(i32, IpFamily), Instant> = HashMap::new();
let mut drain_warn_next_allowed: HashMap<u64, Instant> = HashMap::new(); let mut drain_warn_next_allowed: HashMap<u64, Instant> = HashMap::new();
let mut drain_soft_evict_next_allowed: HashMap<u64, Instant> = HashMap::new(); let mut drain_soft_evict_next_allowed: HashMap<u64, Instant> = HashMap::new();
let mut family_v4_circuit = FamilyCircuitState::new(Instant::now());
let mut family_v6_circuit = FamilyCircuitState::new(Instant::now());
let init_epoch_secs = MePool::now_epoch_secs();
pool.set_family_runtime_state(
IpFamily::V4,
family_v4_circuit.state,
init_epoch_secs,
0,
family_v4_circuit.fail_streak,
family_v4_circuit.recover_success_streak,
);
pool.set_family_runtime_state(
IpFamily::V6,
family_v6_circuit.state,
init_epoch_secs,
0,
family_v6_circuit.fail_streak,
family_v6_circuit.recover_success_streak,
);
let mut degraded_interval = true; let mut degraded_interval = true;
loop { loop {
let interval = if degraded_interval { let interval = if degraded_interval {
@@ -87,7 +134,9 @@ pub async fn me_health_monitor(pool: Arc<MePool>, rng: Arc<SecureRandom>, _min_c
&mut drain_soft_evict_next_allowed, &mut drain_soft_evict_next_allowed,
) )
.await; .await;
let v4_degraded = check_family( let now = Instant::now();
let now_epoch_secs = MePool::now_epoch_secs();
let v4_degraded_raw = check_family(
IpFamily::V4, IpFamily::V4,
&pool, &pool,
&rng, &rng,
@@ -106,25 +155,53 @@ pub async fn me_health_monitor(pool: Arc<MePool>, rng: Arc<SecureRandom>, _min_c
&mut drain_soft_evict_next_allowed, &mut drain_soft_evict_next_allowed,
) )
.await; .await;
let v6_degraded = check_family( let v4_degraded = apply_family_circuit_result(
IpFamily::V6,
&pool, &pool,
&rng, IpFamily::V4,
&mut backoff, &mut family_v4_circuit,
&mut next_attempt, Some(v4_degraded_raw),
&mut inflight, false,
&mut outage_backoff, now,
&mut outage_next_attempt, now_epoch_secs,
&mut single_endpoint_outage, );
&mut shadow_rotate_deadline,
&mut idle_refresh_next_attempt, let v6_check_ran = should_run_family_check(&mut family_v6_circuit, now);
&mut adaptive_idle_since, let v6_degraded_raw = if v6_check_ran {
&mut adaptive_recover_until, check_family(
&mut floor_warn_next_allowed, IpFamily::V6,
&mut drain_warn_next_allowed, &pool,
&mut drain_soft_evict_next_allowed, &rng,
) &mut backoff,
.await; &mut next_attempt,
&mut inflight,
&mut outage_backoff,
&mut outage_next_attempt,
&mut single_endpoint_outage,
&mut shadow_rotate_deadline,
&mut idle_refresh_next_attempt,
&mut adaptive_idle_since,
&mut adaptive_recover_until,
&mut floor_warn_next_allowed,
&mut drain_warn_next_allowed,
&mut drain_soft_evict_next_allowed,
)
.await
} else {
false
};
let v6_degraded = apply_family_circuit_result(
&pool,
IpFamily::V6,
&mut family_v6_circuit,
if v6_check_ran {
Some(v6_degraded_raw)
} else {
None
},
true,
now,
now_epoch_secs,
);
degraded_interval = v4_degraded || v6_degraded; degraded_interval = v4_degraded || v6_degraded;
} }
} }
@@ -146,6 +223,148 @@ pub async fn me_drain_timeout_enforcer(pool: Arc<MePool>) {
} }
} }
fn should_run_family_check(circuit: &mut FamilyCircuitState, now: Instant) -> bool {
match circuit.state {
MeFamilyRuntimeState::Suppressed => {
if now < circuit.next_probe_at {
return false;
}
circuit.next_probe_at =
now + Duration::from_secs(FAMILY_RECOVER_PROBE_INTERVAL_SECS);
true
}
_ => true,
}
}
fn apply_family_circuit_result(
pool: &Arc<MePool>,
family: IpFamily,
circuit: &mut FamilyCircuitState,
degraded: Option<bool>,
allow_suppress: bool,
now: Instant,
now_epoch_secs: u64,
) -> bool {
let Some(degraded) = degraded else {
// Preserve suppression state when probe tick is intentionally skipped.
return false;
};
let previous_state = circuit.state;
match circuit.state {
MeFamilyRuntimeState::Suppressed => {
if degraded {
circuit.fail_streak = circuit.fail_streak.saturating_add(1);
circuit.recover_success_streak = 0;
let until = now + Duration::from_secs(FAMILY_SUPPRESS_WINDOW_SECS);
circuit.suppressed_until = Some(until);
circuit.state_since_at = now;
warn!(
?family,
fail_streak = circuit.fail_streak,
suppress_secs = FAMILY_SUPPRESS_WINDOW_SECS,
"ME family remains suppressed due to ongoing failures"
);
} else {
circuit.fail_streak = 0;
circuit.recover_success_streak = 1;
circuit.state = MeFamilyRuntimeState::Recovering;
}
}
MeFamilyRuntimeState::Recovering => {
if degraded {
circuit.fail_streak = circuit.fail_streak.saturating_add(1);
if allow_suppress {
circuit.state = MeFamilyRuntimeState::Suppressed;
let until = now + Duration::from_secs(FAMILY_SUPPRESS_WINDOW_SECS);
circuit.suppressed_until = Some(until);
circuit.next_probe_at =
now + Duration::from_secs(FAMILY_RECOVER_PROBE_INTERVAL_SECS);
warn!(
?family,
fail_streak = circuit.fail_streak,
suppress_secs = FAMILY_SUPPRESS_WINDOW_SECS,
"ME family temporarily suppressed after repeated degradation"
);
} else {
circuit.state = MeFamilyRuntimeState::Degraded;
}
} else {
circuit.recover_success_streak = circuit.recover_success_streak.saturating_add(1);
if circuit.recover_success_streak >= FAMILY_RECOVER_SUCCESS_STREAK_REQUIRED {
circuit.fail_streak = 0;
circuit.recover_success_streak = 0;
circuit.suppressed_until = None;
circuit.state = MeFamilyRuntimeState::Healthy;
info!(
?family,
"ME family suppression lifted after stable recovery probes"
);
}
}
}
_ => {
if degraded {
circuit.fail_streak = circuit.fail_streak.saturating_add(1);
circuit.recover_success_streak = 0;
circuit.state = MeFamilyRuntimeState::Degraded;
if allow_suppress && circuit.fail_streak >= FAMILY_SUPPRESS_FAIL_STREAK_THRESHOLD {
circuit.state = MeFamilyRuntimeState::Suppressed;
let until = now + Duration::from_secs(FAMILY_SUPPRESS_WINDOW_SECS);
circuit.suppressed_until = Some(until);
circuit.next_probe_at =
now + Duration::from_secs(FAMILY_RECOVER_PROBE_INTERVAL_SECS);
warn!(
?family,
fail_streak = circuit.fail_streak,
suppress_secs = FAMILY_SUPPRESS_WINDOW_SECS,
"ME family temporarily suppressed after repeated degradation"
);
}
} else {
circuit.fail_streak = 0;
circuit.recover_success_streak = 0;
circuit.suppressed_until = None;
circuit.state = MeFamilyRuntimeState::Healthy;
}
}
}
if previous_state != circuit.state {
circuit.state_since_at = now;
}
let suppressed_until_epoch_secs = circuit
.suppressed_until
.and_then(|until| {
if until > now {
Some(
now_epoch_secs
.saturating_add(until.saturating_duration_since(now).as_secs()),
)
} else {
None
}
})
.unwrap_or(0);
let state_since_epoch_secs = if previous_state == circuit.state {
pool.family_runtime_state_since_epoch_secs(family)
} else {
now_epoch_secs
};
pool.set_family_runtime_state(
family,
circuit.state,
state_since_epoch_secs,
suppressed_until_epoch_secs,
circuit.fail_streak,
circuit.recover_success_streak,
);
!matches!(circuit.state, MeFamilyRuntimeState::Suppressed) && degraded
}
fn draining_writer_timeout_expired( fn draining_writer_timeout_expired(
pool: &MePool, pool: &MePool,
writer: &MeWriter, writer: &MeWriter,
@@ -358,7 +577,8 @@ pub(super) async fn reap_draining_writers(
continue; continue;
} }
pool.stats.increment_pool_force_close_total(); pool.stats.increment_pool_force_close_total();
pool.remove_writer_and_close_clients(writer_id).await; pool.remove_writer_and_close_clients(writer_id, MeWriterTeardownReason::ReapTimeoutExpired)
.await;
pool.stats pool.stats
.increment_me_draining_writers_reap_progress_total(); .increment_me_draining_writers_reap_progress_total();
} }
@@ -376,7 +596,8 @@ pub(super) async fn reap_draining_writers(
continue; continue;
} }
pool.stats.increment_pool_force_close_total(); pool.stats.increment_pool_force_close_total();
pool.remove_writer_and_close_clients(writer_id).await; pool.remove_writer_and_close_clients(writer_id, MeWriterTeardownReason::ReapThresholdForce)
.await;
pool.stats pool.stats
.increment_me_draining_writers_reap_progress_total(); .increment_me_draining_writers_reap_progress_total();
closed_total = closed_total.saturating_add(1); closed_total = closed_total.saturating_add(1);
@@ -388,7 +609,8 @@ pub(super) async fn reap_draining_writers(
if !closed_writer_ids.insert(writer_id) { if !closed_writer_ids.insert(writer_id) {
continue; continue;
} }
pool.remove_writer_and_close_clients(writer_id).await; pool.remove_writer_and_close_clients(writer_id, MeWriterTeardownReason::ReapEmpty)
.await;
pool.stats pool.stats
.increment_me_draining_writers_reap_progress_total(); .increment_me_draining_writers_reap_progress_total();
closed_total = closed_total.saturating_add(1); closed_total = closed_total.saturating_add(1);
@@ -1550,6 +1772,187 @@ async fn maybe_rotate_single_endpoint_shadow(
); );
} }
/// Last-resort safety net for draining writers stuck past their deadline.
///
/// Runs every `TICK_SECS` and force-closes any draining writer whose
/// `drain_deadline_epoch_secs` has been exceeded by more than a threshold.
///
/// Two thresholds:
/// - `SOFT_THRESHOLD_SECS` (60s): writers with no bound clients
/// - `HARD_THRESHOLD_SECS` (300s): writers WITH bound clients (unconditional)
///
/// Intentionally kept trivial and independent of pool config to minimise
/// the probability of panicking itself. Uses `SystemTime` directly
/// as a fallback clock source and timeouts on every lock acquisition
/// and writer removal so one stuck writer cannot block the rest.
pub async fn me_zombie_writer_watchdog(pool: Arc<MePool>) {
use std::time::{SystemTime, UNIX_EPOCH};
const TICK_SECS: u64 = 30;
const SOFT_THRESHOLD_SECS: u64 = 60;
const HARD_THRESHOLD_SECS: u64 = 300;
const LOCK_TIMEOUT_SECS: u64 = 5;
const REMOVE_TIMEOUT_SECS: u64 = 10;
const HARD_DETACH_TIMEOUT_STREAK: u8 = 3;
let mut removal_timeout_streak = HashMap::<u64, u8>::new();
loop {
tokio::time::sleep(Duration::from_secs(TICK_SECS)).await;
let now = match SystemTime::now().duration_since(UNIX_EPOCH) {
Ok(d) => d.as_secs(),
Err(_) => continue,
};
// Phase 1: collect zombie IDs under a short read-lock with timeout.
let zombie_ids_with_meta: Vec<(u64, bool)> = {
let Ok(ws) = tokio::time::timeout(
Duration::from_secs(LOCK_TIMEOUT_SECS),
pool.writers.read(),
)
.await
else {
warn!("zombie_watchdog: writers read-lock timeout, skipping tick");
continue;
};
ws.iter()
.filter(|w| w.draining.load(std::sync::atomic::Ordering::Relaxed))
.filter_map(|w| {
let deadline = w
.drain_deadline_epoch_secs
.load(std::sync::atomic::Ordering::Relaxed);
if deadline == 0 {
return None;
}
let overdue = now.saturating_sub(deadline);
if overdue == 0 {
return None;
}
let started = w
.draining_started_at_epoch_secs
.load(std::sync::atomic::Ordering::Relaxed);
let drain_age = now.saturating_sub(started);
if drain_age > HARD_THRESHOLD_SECS {
return Some((w.id, true));
}
if overdue > SOFT_THRESHOLD_SECS {
return Some((w.id, false));
}
None
})
.collect()
};
// read lock released here
if zombie_ids_with_meta.is_empty() {
removal_timeout_streak.clear();
continue;
}
let mut active_zombie_ids = HashSet::<u64>::with_capacity(zombie_ids_with_meta.len());
for (writer_id, _) in &zombie_ids_with_meta {
active_zombie_ids.insert(*writer_id);
}
removal_timeout_streak.retain(|writer_id, _| active_zombie_ids.contains(writer_id));
warn!(
zombie_count = zombie_ids_with_meta.len(),
soft_threshold_secs = SOFT_THRESHOLD_SECS,
hard_threshold_secs = HARD_THRESHOLD_SECS,
"Zombie draining writers detected by watchdog, force-closing"
);
// Phase 2: remove each writer individually with a timeout.
// One stuck removal cannot block the rest.
for (writer_id, had_clients) in &zombie_ids_with_meta {
let result = tokio::time::timeout(
Duration::from_secs(REMOVE_TIMEOUT_SECS),
pool.remove_writer_and_close_clients(
*writer_id,
MeWriterTeardownReason::WatchdogStuckDraining,
),
)
.await;
match result {
Ok(true) => {
removal_timeout_streak.remove(writer_id);
pool.stats.increment_pool_force_close_total();
pool.stats
.increment_me_draining_writers_reap_progress_total();
info!(
writer_id,
had_clients,
"Zombie writer removed by watchdog"
);
}
Ok(false) => {
removal_timeout_streak.remove(writer_id);
debug!(
writer_id,
had_clients,
"Zombie writer watchdog removal became no-op"
);
}
Err(_) => {
pool.stats.increment_me_writer_teardown_timeout_total();
let streak = removal_timeout_streak
.entry(*writer_id)
.and_modify(|value| *value = value.saturating_add(1))
.or_insert(1);
warn!(
writer_id,
had_clients,
timeout_streak = *streak,
"Zombie writer removal timed out"
);
if *streak < HARD_DETACH_TIMEOUT_STREAK {
continue;
}
pool.stats.increment_me_writer_teardown_escalation_total();
let hard_detach = tokio::time::timeout(
Duration::from_secs(REMOVE_TIMEOUT_SECS),
pool.remove_draining_writer_hard_detach(
*writer_id,
MeWriterTeardownReason::WatchdogStuckDraining,
),
)
.await;
match hard_detach {
Ok(true) => {
removal_timeout_streak.remove(writer_id);
pool.stats.increment_pool_force_close_total();
pool.stats
.increment_me_draining_writers_reap_progress_total();
info!(
writer_id,
had_clients,
"Zombie writer hard-detached after repeated timeouts"
);
}
Ok(false) => {
removal_timeout_streak.remove(writer_id);
debug!(
writer_id,
had_clients,
"Zombie hard-detach skipped (writer already gone or no longer draining)"
);
}
Err(_) => {
pool.stats.increment_me_writer_teardown_timeout_total();
warn!(
writer_id,
had_clients,
"Zombie hard-detach timed out, will retry next tick"
);
}
}
}
}
}
}
}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use std::collections::HashMap; use std::collections::HashMap;
@@ -1561,13 +1964,19 @@ mod tests {
use tokio::sync::mpsc; use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken; use tokio_util::sync::CancellationToken;
use super::reap_draining_writers; use super::{
FamilyCircuitState, apply_family_circuit_result, reap_draining_writers,
should_run_family_check,
};
use crate::config::{GeneralConfig, MeRouteNoWriterMode, MeSocksKdfPolicy, MeWriterPickMode}; use crate::config::{GeneralConfig, MeRouteNoWriterMode, MeSocksKdfPolicy, MeWriterPickMode};
use crate::crypto::SecureRandom; use crate::crypto::SecureRandom;
use crate::network::IpFamily;
use crate::network::probe::NetworkDecision; use crate::network::probe::NetworkDecision;
use crate::stats::Stats; use crate::stats::Stats;
use crate::transport::middle_proxy::codec::WriterCommand; use crate::transport::middle_proxy::codec::WriterCommand;
use crate::transport::middle_proxy::pool::{MePool, MeWriter, WriterContour}; use crate::transport::middle_proxy::pool::{
MeFamilyRuntimeState, MePool, MeWriter, WriterContour,
};
use crate::transport::middle_proxy::registry::ConnMeta; use crate::transport::middle_proxy::registry::ConnMeta;
async fn make_pool(me_pool_drain_threshold: u64) -> Arc<MePool> { async fn make_pool(me_pool_drain_threshold: u64) -> Arc<MePool> {
@@ -1745,4 +2154,47 @@ mod tests {
assert_eq!(pool.registry.get_writer(conn_b).await.unwrap().writer_id, 20); assert_eq!(pool.registry.get_writer(conn_b).await.unwrap().writer_id, 20);
assert_eq!(pool.registry.get_writer(conn_c).await.unwrap().writer_id, 30); assert_eq!(pool.registry.get_writer(conn_c).await.unwrap().writer_id, 30);
} }
#[tokio::test]
async fn suppressed_family_probe_skip_preserves_suppressed_state() {
let pool = make_pool(0).await;
let now = Instant::now();
let now_epoch_secs = MePool::now_epoch_secs();
let suppressed_until_epoch_secs = now_epoch_secs.saturating_add(60);
pool.set_family_runtime_state(
IpFamily::V6,
MeFamilyRuntimeState::Suppressed,
now_epoch_secs,
suppressed_until_epoch_secs,
7,
0,
);
let mut circuit = FamilyCircuitState {
state: MeFamilyRuntimeState::Suppressed,
state_since_at: now,
suppressed_until: Some(now + Duration::from_secs(60)),
next_probe_at: now + Duration::from_secs(5),
fail_streak: 7,
recover_success_streak: 0,
};
assert!(!should_run_family_check(&mut circuit, now));
assert!(!apply_family_circuit_result(
&pool,
IpFamily::V6,
&mut circuit,
None,
true,
now,
now_epoch_secs,
));
assert_eq!(circuit.state, MeFamilyRuntimeState::Suppressed);
assert_eq!(circuit.fail_streak, 7);
assert_eq!(circuit.recover_success_streak, 0);
assert_eq!(
pool.family_runtime_state(IpFamily::V6),
MeFamilyRuntimeState::Suppressed,
);
}
} }

View File

@@ -316,7 +316,12 @@ async fn reap_draining_writers_maintains_warn_state_subset_property_under_bulk_c
let ids = sorted_writer_ids(&pool).await; let ids = sorted_writer_ids(&pool).await;
for writer_id in ids.into_iter().take(3) { for writer_id in ids.into_iter().take(3) {
let _ = pool.remove_writer_and_close_clients(writer_id).await; let _ = pool
.remove_writer_and_close_clients(
writer_id,
crate::stats::MeWriterTeardownReason::ReapEmpty,
)
.await;
} }
reap_draining_writers(&pool, &mut warn_next_allowed, &mut soft_evict_next_allowed).await; reap_draining_writers(&pool, &mut warn_next_allowed, &mut soft_evict_next_allowed).await;

View File

@@ -12,7 +12,9 @@ use super::codec::WriterCommand;
use super::health::{health_drain_close_budget, reap_draining_writers}; use super::health::{health_drain_close_budget, reap_draining_writers};
use super::pool::{MePool, MeWriter, WriterContour}; use super::pool::{MePool, MeWriter, WriterContour};
use super::registry::ConnMeta; use super::registry::ConnMeta;
use crate::config::{GeneralConfig, MeRouteNoWriterMode, MeSocksKdfPolicy, MeWriterPickMode}; use crate::config::{
GeneralConfig, MeBindStaleMode, MeRouteNoWriterMode, MeSocksKdfPolicy, MeWriterPickMode,
};
use crate::crypto::SecureRandom; use crate::crypto::SecureRandom;
use crate::network::probe::NetworkDecision; use crate::network::probe::NetworkDecision;
use crate::stats::Stats; use crate::stats::Stats;
@@ -195,7 +197,9 @@ async fn reap_draining_writers_drops_warn_state_for_removed_writer() {
reap_draining_writers(&pool, &mut warn_next_allowed, &mut soft_evict_next_allowed).await; reap_draining_writers(&pool, &mut warn_next_allowed, &mut soft_evict_next_allowed).await;
assert!(warn_next_allowed.contains_key(&7)); assert!(warn_next_allowed.contains_key(&7));
let _ = pool.remove_writer_and_close_clients(7).await; let _ = pool
.remove_writer_and_close_clients(7, crate::stats::MeWriterTeardownReason::ReapEmpty)
.await;
assert!(pool.registry.get_writer(conn_ids[0]).await.is_none()); assert!(pool.registry.get_writer(conn_ids[0]).await.is_none());
reap_draining_writers(&pool, &mut warn_next_allowed, &mut soft_evict_next_allowed).await; reap_draining_writers(&pool, &mut warn_next_allowed, &mut soft_evict_next_allowed).await;
@@ -525,7 +529,12 @@ async fn reap_draining_writers_warn_state_never_exceeds_live_draining_population
let existing_writer_ids = current_writer_ids(&pool).await; let existing_writer_ids = current_writer_ids(&pool).await;
for writer_id in existing_writer_ids.into_iter().take(4) { for writer_id in existing_writer_ids.into_iter().take(4) {
let _ = pool.remove_writer_and_close_clients(writer_id).await; let _ = pool
.remove_writer_and_close_clients(
writer_id,
crate::stats::MeWriterTeardownReason::ReapEmpty,
)
.await;
} }
reap_draining_writers(&pool, &mut warn_next_allowed, &mut soft_evict_next_allowed).await; reap_draining_writers(&pool, &mut warn_next_allowed, &mut soft_evict_next_allowed).await;
assert!(warn_next_allowed.len() <= pool.writers.read().await.len()); assert!(warn_next_allowed.len() <= pool.writers.read().await.len());
@@ -646,10 +655,23 @@ async fn reap_draining_writers_instadrain_removes_non_expired_writers_immediatel
#[test] #[test]
fn general_config_default_drain_threshold_remains_enabled() { fn general_config_default_drain_threshold_remains_enabled() {
assert_eq!(GeneralConfig::default().me_pool_drain_threshold, 128); assert_eq!(GeneralConfig::default().me_pool_drain_threshold, 32);
assert!(GeneralConfig::default().me_pool_drain_soft_evict_enabled); assert!(GeneralConfig::default().me_pool_drain_soft_evict_enabled);
assert_eq!( assert_eq!(
GeneralConfig::default().me_pool_drain_soft_evict_per_writer, GeneralConfig::default().me_pool_drain_soft_evict_grace_secs,
1 10
); );
assert_eq!(
GeneralConfig::default().me_pool_drain_soft_evict_per_writer,
2
);
assert_eq!(
GeneralConfig::default().me_pool_drain_soft_evict_budget_per_core,
16
);
assert_eq!(
GeneralConfig::default().me_pool_drain_soft_evict_cooldown_ms,
1000
);
assert_eq!(GeneralConfig::default().me_bind_stale_mode, MeBindStaleMode::Never);
} }

View File

@@ -30,7 +30,7 @@ mod health_adversarial_tests;
use bytes::Bytes; use bytes::Bytes;
pub use health::{me_drain_timeout_enforcer, me_health_monitor}; pub use health::{me_drain_timeout_enforcer, me_health_monitor, me_zombie_writer_watchdog};
#[allow(unused_imports)] #[allow(unused_imports)]
pub use ping::{run_me_ping, format_sample_line, format_me_route, MePingReport, MePingSample, MePingFamily}; pub use ping::{run_me_ping, format_sample_line, format_me_route, MePingReport, MePingSample, MePingFamily};
pub use pool::MePool; pub use pool::MePool;

View File

@@ -7,6 +7,7 @@ use tokio::net::UdpSocket;
use crate::config::{UpstreamConfig, UpstreamType}; use crate::config::{UpstreamConfig, UpstreamType};
use crate::crypto::SecureRandom; use crate::crypto::SecureRandom;
use crate::error::ProxyError; use crate::error::ProxyError;
use crate::transport::shadowsocks::sanitize_shadowsocks_url;
use crate::transport::{UpstreamEgressInfo, UpstreamRouteKind}; use crate::transport::{UpstreamEgressInfo, UpstreamRouteKind};
use super::MePool; use super::MePool;
@@ -40,7 +41,11 @@ pub fn format_sample_line(sample: &MePingSample) -> String {
let sign = if sample.dc >= 0 { "+" } else { "-" }; let sign = if sample.dc >= 0 { "+" } else { "-" };
let addr = format!("{}:{}", sample.addr.ip(), sample.addr.port()); let addr = format!("{}:{}", sample.addr.ip(), sample.addr.port());
match (sample.connect_ms, sample.handshake_ms.as_ref(), sample.error.as_ref()) { match (
sample.connect_ms,
sample.handshake_ms.as_ref(),
sample.error.as_ref(),
) {
(Some(conn), Some(hs), None) => format!( (Some(conn), Some(hs), None) => format!(
" {sign} {addr}\tPing: {:.0} ms / RPC: {:.0} ms / OK", " {sign} {addr}\tPing: {:.0} ms / RPC: {:.0} ms / OK",
conn, hs conn, hs
@@ -121,6 +126,7 @@ fn route_from_egress(egress: Option<UpstreamEgressInfo>) -> Option<String> {
None => route, None => route,
}) })
} }
UpstreamRouteKind::Shadowsocks => Some("shadowsocks".to_string()),
} }
} }
@@ -232,6 +238,9 @@ pub async fn format_me_route(
} }
UpstreamType::Socks4 { address, .. } => format!("socks4://{address}"), UpstreamType::Socks4 { address, .. } => format!("socks4://{address}"),
UpstreamType::Socks5 { address, .. } => format!("socks5://{address}"), UpstreamType::Socks5 { address, .. } => format!("socks5://{address}"),
UpstreamType::Shadowsocks { url, .. } => sanitize_shadowsocks_url(url)
.map(|address| format!("shadowsocks://{address}"))
.unwrap_or_else(|_| "shadowsocks://invalid".to_string()),
}; };
} }
@@ -254,6 +263,12 @@ pub async fn format_me_route(
if has_socks5 { if has_socks5 {
kinds.push("socks5"); kinds.push("socks5");
} }
if enabled_upstreams
.iter()
.any(|u| matches!(u.upstream_type, UpstreamType::Shadowsocks { .. }))
{
kinds.push("shadowsocks");
}
format!("mixed upstreams ({})", kinds.join(", ")) format!("mixed upstreams ({})", kinds.join(", "))
} }
@@ -335,7 +350,10 @@ pub async fn run_me_ping(pool: &Arc<MePool>, rng: &SecureRandom) -> Vec<MePingRe
Ok((stream, conn_rtt, upstream_egress)) => { Ok((stream, conn_rtt, upstream_egress)) => {
connect_ms = Some(conn_rtt); connect_ms = Some(conn_rtt);
route = route_from_egress(upstream_egress); route = route_from_egress(upstream_egress);
match pool.handshake_only(stream, addr, upstream_egress, rng).await { match pool
.handshake_only(stream, addr, upstream_egress, rng)
.await
{
Ok(hs) => { Ok(hs) => {
handshake_ms = Some(hs.handshake_ms); handshake_ms = Some(hs.handshake_ms);
// drop halves to close // drop halves to close

View File

@@ -18,6 +18,8 @@ use crate::transport::UpstreamManager;
use super::ConnRegistry; use super::ConnRegistry;
use super::codec::WriterCommand; use super::codec::WriterCommand;
const ME_FORCE_CLOSE_SAFETY_FALLBACK_SECS: u64 = 300;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub(super) struct RefillDcKey { pub(super) struct RefillDcKey {
pub dc: i32, pub dc: i32,
@@ -72,6 +74,64 @@ impl WriterContour {
} }
} }
/// Runtime lifecycle of one ME address family (IPv4 or IPv6).
///
/// The discriminant is fixed (`repr(u8)`) because the state is persisted in
/// an `AtomicU8` and decoded back with [`MeFamilyRuntimeState::from_u8`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub(crate) enum MeFamilyRuntimeState {
    Healthy = 0,
    Degraded = 1,
    Suppressed = 2,
    Recovering = 3,
}

impl MeFamilyRuntimeState {
    /// Decodes a raw atomic value; any out-of-range byte maps to `Healthy`.
    pub(crate) fn from_u8(raw: u8) -> Self {
        const LOOKUP: [MeFamilyRuntimeState; 4] = [
            MeFamilyRuntimeState::Healthy,
            MeFamilyRuntimeState::Degraded,
            MeFamilyRuntimeState::Suppressed,
            MeFamilyRuntimeState::Recovering,
        ];
        LOOKUP
            .get(raw as usize)
            .copied()
            .unwrap_or(MeFamilyRuntimeState::Healthy)
    }

    /// Stable lowercase label for logs and API snapshots.
    pub(crate) fn as_str(self) -> &'static str {
        match self {
            MeFamilyRuntimeState::Healthy => "healthy",
            MeFamilyRuntimeState::Degraded => "degraded",
            MeFamilyRuntimeState::Suppressed => "suppressed",
            MeFamilyRuntimeState::Recovering => "recovering",
        }
    }
}
/// Why the last stale-writer drain was (or was not) blocked.
///
/// `repr(u8)` because the value round-trips through an `AtomicU8`
/// via [`MeDrainGateReason::from_u8`].
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
#[repr(u8)]
pub(crate) enum MeDrainGateReason {
    Open = 0,
    CoverageQuorum = 1,
    Redundancy = 2,
    SuppressionActive = 3,
}

impl MeDrainGateReason {
    /// Decodes a raw atomic value; any out-of-range byte maps to `Open`.
    pub(crate) fn from_u8(raw: u8) -> Self {
        const LOOKUP: [MeDrainGateReason; 4] = [
            MeDrainGateReason::Open,
            MeDrainGateReason::CoverageQuorum,
            MeDrainGateReason::Redundancy,
            MeDrainGateReason::SuppressionActive,
        ];
        LOOKUP
            .get(raw as usize)
            .copied()
            .unwrap_or(MeDrainGateReason::Open)
    }

    /// Stable snake_case label for logs and API snapshots.
    pub(crate) fn as_str(self) -> &'static str {
        match self {
            MeDrainGateReason::Open => "open",
            MeDrainGateReason::CoverageQuorum => "coverage_quorum",
            MeDrainGateReason::Redundancy => "redundancy",
            MeDrainGateReason::SuppressionActive => "suppression_active",
        }
    }
}
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct SecretSnapshot { pub struct SecretSnapshot {
pub epoch: u64, pub epoch: u64,
@@ -201,6 +261,20 @@ pub struct MePool {
pub(super) me_health_interval_ms_unhealthy: AtomicU64, pub(super) me_health_interval_ms_unhealthy: AtomicU64,
pub(super) me_health_interval_ms_healthy: AtomicU64, pub(super) me_health_interval_ms_healthy: AtomicU64,
pub(super) me_warn_rate_limit_ms: AtomicU64, pub(super) me_warn_rate_limit_ms: AtomicU64,
pub(super) me_family_v4_runtime_state: AtomicU8,
pub(super) me_family_v6_runtime_state: AtomicU8,
pub(super) me_family_v4_state_since_epoch_secs: AtomicU64,
pub(super) me_family_v6_state_since_epoch_secs: AtomicU64,
pub(super) me_family_v4_suppressed_until_epoch_secs: AtomicU64,
pub(super) me_family_v6_suppressed_until_epoch_secs: AtomicU64,
pub(super) me_family_v4_fail_streak: AtomicU32,
pub(super) me_family_v6_fail_streak: AtomicU32,
pub(super) me_family_v4_recover_success_streak: AtomicU32,
pub(super) me_family_v6_recover_success_streak: AtomicU32,
pub(super) me_last_drain_gate_route_quorum_ok: AtomicBool,
pub(super) me_last_drain_gate_redundancy_ok: AtomicBool,
pub(super) me_last_drain_gate_block_reason: AtomicU8,
pub(super) me_last_drain_gate_updated_at_epoch_secs: AtomicU64,
pub(super) runtime_ready: AtomicBool, pub(super) runtime_ready: AtomicBool,
pool_size: usize, pool_size: usize,
pub(super) preferred_endpoints_by_dc: Arc<RwLock<HashMap<i32, Vec<SocketAddr>>>>, pub(super) preferred_endpoints_by_dc: Arc<RwLock<HashMap<i32, Vec<SocketAddr>>>>,
@@ -229,6 +303,14 @@ impl MePool {
.as_secs() .as_secs()
} }
// Maps a configured force-close timeout to an effective one. A value of
// zero is replaced by the safety fallback so the timeout is never disabled.
fn normalize_force_close_secs(force_close_secs: u64) -> u64 {
    match force_close_secs {
        0 => ME_FORCE_CLOSE_SAFETY_FALLBACK_SECS,
        secs => secs,
    }
}
pub fn new( pub fn new(
proxy_tag: Option<Vec<u8>>, proxy_tag: Option<Vec<u8>>,
proxy_secret: Vec<u8>, proxy_secret: Vec<u8>,
@@ -477,7 +559,9 @@ impl MePool {
me_pool_drain_soft_evict_cooldown_ms: AtomicU64::new( me_pool_drain_soft_evict_cooldown_ms: AtomicU64::new(
me_pool_drain_soft_evict_cooldown_ms.max(1), me_pool_drain_soft_evict_cooldown_ms.max(1),
), ),
me_pool_force_close_secs: AtomicU64::new(me_pool_force_close_secs), me_pool_force_close_secs: AtomicU64::new(Self::normalize_force_close_secs(
me_pool_force_close_secs,
)),
me_pool_min_fresh_ratio_permille: AtomicU32::new(Self::ratio_to_permille( me_pool_min_fresh_ratio_permille: AtomicU32::new(Self::ratio_to_permille(
me_pool_min_fresh_ratio, me_pool_min_fresh_ratio,
)), )),
@@ -506,6 +590,20 @@ impl MePool {
me_health_interval_ms_unhealthy: AtomicU64::new(me_health_interval_ms_unhealthy.max(1)), me_health_interval_ms_unhealthy: AtomicU64::new(me_health_interval_ms_unhealthy.max(1)),
me_health_interval_ms_healthy: AtomicU64::new(me_health_interval_ms_healthy.max(1)), me_health_interval_ms_healthy: AtomicU64::new(me_health_interval_ms_healthy.max(1)),
me_warn_rate_limit_ms: AtomicU64::new(me_warn_rate_limit_ms.max(1)), me_warn_rate_limit_ms: AtomicU64::new(me_warn_rate_limit_ms.max(1)),
me_family_v4_runtime_state: AtomicU8::new(MeFamilyRuntimeState::Healthy as u8),
me_family_v6_runtime_state: AtomicU8::new(MeFamilyRuntimeState::Healthy as u8),
me_family_v4_state_since_epoch_secs: AtomicU64::new(Self::now_epoch_secs()),
me_family_v6_state_since_epoch_secs: AtomicU64::new(Self::now_epoch_secs()),
me_family_v4_suppressed_until_epoch_secs: AtomicU64::new(0),
me_family_v6_suppressed_until_epoch_secs: AtomicU64::new(0),
me_family_v4_fail_streak: AtomicU32::new(0),
me_family_v6_fail_streak: AtomicU32::new(0),
me_family_v4_recover_success_streak: AtomicU32::new(0),
me_family_v6_recover_success_streak: AtomicU32::new(0),
me_last_drain_gate_route_quorum_ok: AtomicBool::new(false),
me_last_drain_gate_redundancy_ok: AtomicBool::new(false),
me_last_drain_gate_block_reason: AtomicU8::new(MeDrainGateReason::Open as u8),
me_last_drain_gate_updated_at_epoch_secs: AtomicU64::new(Self::now_epoch_secs()),
runtime_ready: AtomicBool::new(false), runtime_ready: AtomicBool::new(false),
preferred_endpoints_by_dc: Arc::new(RwLock::new(preferred_endpoints_by_dc)), preferred_endpoints_by_dc: Arc::new(RwLock::new(preferred_endpoints_by_dc)),
}) })
@@ -523,6 +621,153 @@ impl MePool {
self.runtime_ready.load(Ordering::Relaxed) self.runtime_ready.load(Ordering::Relaxed)
} }
/// Publishes the full runtime-state tuple for one address family.
///
/// All fields are written with `Relaxed` ordering, matching the `Relaxed`
/// loads at the corresponding accessor sites.
pub(super) fn set_family_runtime_state(
    &self,
    family: IpFamily,
    state: MeFamilyRuntimeState,
    state_since_epoch_secs: u64,
    suppressed_until_epoch_secs: u64,
    fail_streak: u32,
    recover_success_streak: u32,
) {
    // Resolve the per-family slot group once, then store each field.
    let (state_slot, since_slot, until_slot, fail_slot, recover_slot) = match family {
        IpFamily::V4 => (
            &self.me_family_v4_runtime_state,
            &self.me_family_v4_state_since_epoch_secs,
            &self.me_family_v4_suppressed_until_epoch_secs,
            &self.me_family_v4_fail_streak,
            &self.me_family_v4_recover_success_streak,
        ),
        IpFamily::V6 => (
            &self.me_family_v6_runtime_state,
            &self.me_family_v6_state_since_epoch_secs,
            &self.me_family_v6_suppressed_until_epoch_secs,
            &self.me_family_v6_fail_streak,
            &self.me_family_v6_recover_success_streak,
        ),
    };
    state_slot.store(state as u8, Ordering::Relaxed);
    since_slot.store(state_since_epoch_secs, Ordering::Relaxed);
    until_slot.store(suppressed_until_epoch_secs, Ordering::Relaxed);
    fail_slot.store(fail_streak, Ordering::Relaxed);
    recover_slot.store(recover_success_streak, Ordering::Relaxed);
}
/// Current runtime state of the given address family (decoded from the atomic).
pub(crate) fn family_runtime_state(&self, family: IpFamily) -> MeFamilyRuntimeState {
    let raw = match family {
        IpFamily::V4 => self.me_family_v4_runtime_state.load(Ordering::Relaxed),
        IpFamily::V6 => self.me_family_v6_runtime_state.load(Ordering::Relaxed),
    };
    MeFamilyRuntimeState::from_u8(raw)
}
/// Epoch seconds at which the family entered its current runtime state.
pub(crate) fn family_runtime_state_since_epoch_secs(&self, family: IpFamily) -> u64 {
    let slot = match family {
        IpFamily::V4 => &self.me_family_v4_state_since_epoch_secs,
        IpFamily::V6 => &self.me_family_v6_state_since_epoch_secs,
    };
    slot.load(Ordering::Relaxed)
}
/// Epoch seconds until which the family is suppressed (0 = not suppressed).
pub(crate) fn family_suppressed_until_epoch_secs(&self, family: IpFamily) -> u64 {
    let slot = match family {
        IpFamily::V4 => &self.me_family_v4_suppressed_until_epoch_secs,
        IpFamily::V6 => &self.me_family_v6_suppressed_until_epoch_secs,
    };
    slot.load(Ordering::Relaxed)
}
/// Current consecutive-failure count for the given family.
pub(crate) fn family_fail_streak(&self, family: IpFamily) -> u32 {
    let slot = match family {
        IpFamily::V4 => &self.me_family_v4_fail_streak,
        IpFamily::V6 => &self.me_family_v6_fail_streak,
    };
    slot.load(Ordering::Relaxed)
}
/// Current consecutive-success count while the family is recovering.
pub(crate) fn family_recover_success_streak(&self, family: IpFamily) -> u32 {
    let slot = match family {
        IpFamily::V4 => &self.me_family_v4_recover_success_streak,
        IpFamily::V6 => &self.me_family_v6_recover_success_streak,
    };
    slot.load(Ordering::Relaxed)
}
/// True while the suppression deadline for the family lies strictly in the future.
pub(crate) fn is_family_temporarily_suppressed(
    &self,
    family: IpFamily,
    now_epoch_secs: u64,
) -> bool {
    let suppressed_until = self.family_suppressed_until_epoch_secs(family);
    now_epoch_secs < suppressed_until
}
/// Whether this family counts toward drain coverage: it must be enabled in
/// the config decision and not currently under temporary suppression.
pub(super) fn family_enabled_for_drain_coverage(
    &self,
    family: IpFamily,
    now_epoch_secs: u64,
) -> bool {
    let configured = match family {
        IpFamily::V4 => self.decision.ipv4_me,
        IpFamily::V6 => self.decision.ipv6_me,
    };
    if !configured {
        return false;
    }
    !self.is_family_temporarily_suppressed(family, now_epoch_secs)
}
/// Records the outcome of the most recent drain-gate evaluation so it can be
/// exposed via the API snapshot. All stores are `Relaxed`, matching the
/// `Relaxed` loads in the corresponding accessors.
pub(super) fn set_last_drain_gate(
    &self,
    route_quorum_ok: bool,
    redundancy_ok: bool,
    block_reason: MeDrainGateReason,
    updated_at_epoch_secs: u64,
) {
    self.me_last_drain_gate_route_quorum_ok
        .store(route_quorum_ok, Ordering::Relaxed);
    self.me_last_drain_gate_redundancy_ok
        .store(redundancy_ok, Ordering::Relaxed);
    // Stored as the enum's u8 discriminant; decoded by `from_u8` on read.
    self.me_last_drain_gate_block_reason
        .store(block_reason as u8, Ordering::Relaxed);
    self.me_last_drain_gate_updated_at_epoch_secs
        .store(updated_at_epoch_secs, Ordering::Relaxed);
}
/// Route-quorum verdict of the last drain-gate evaluation.
pub(crate) fn last_drain_gate_route_quorum_ok(&self) -> bool {
    self.me_last_drain_gate_route_quorum_ok.load(Ordering::Relaxed)
}
/// Redundancy verdict of the last drain-gate evaluation.
pub(crate) fn last_drain_gate_redundancy_ok(&self) -> bool {
    self.me_last_drain_gate_redundancy_ok.load(Ordering::Relaxed)
}
/// Decoded block reason recorded by the last drain-gate evaluation.
pub(crate) fn last_drain_gate_block_reason(&self) -> MeDrainGateReason {
    let raw = self.me_last_drain_gate_block_reason.load(Ordering::Relaxed);
    MeDrainGateReason::from_u8(raw)
}
/// Epoch seconds of the last drain-gate evaluation.
pub(crate) fn last_drain_gate_updated_at_epoch_secs(&self) -> u64 {
    self.me_last_drain_gate_updated_at_epoch_secs.load(Ordering::Relaxed)
}
pub fn update_runtime_reinit_policy( pub fn update_runtime_reinit_policy(
&self, &self,
hardswap: bool, hardswap: bool,
@@ -587,8 +832,10 @@ impl MePool {
); );
self.me_pool_drain_soft_evict_cooldown_ms self.me_pool_drain_soft_evict_cooldown_ms
.store(pool_drain_soft_evict_cooldown_ms.max(1), Ordering::Relaxed); .store(pool_drain_soft_evict_cooldown_ms.max(1), Ordering::Relaxed);
self.me_pool_force_close_secs self.me_pool_force_close_secs.store(
.store(force_close_secs, Ordering::Relaxed); Self::normalize_force_close_secs(force_close_secs),
Ordering::Relaxed,
);
self.me_pool_min_fresh_ratio_permille self.me_pool_min_fresh_ratio_permille
.store(Self::ratio_to_permille(min_fresh_ratio), Ordering::Relaxed); .store(Self::ratio_to_permille(min_fresh_ratio), Ordering::Relaxed);
self.me_hardswap_warmup_delay_min_ms self.me_hardswap_warmup_delay_min_ms
@@ -733,12 +980,9 @@ impl MePool {
} }
pub(super) fn force_close_timeout(&self) -> Option<Duration> { pub(super) fn force_close_timeout(&self) -> Option<Duration> {
let secs = self.me_pool_force_close_secs.load(Ordering::Relaxed); let secs =
if secs == 0 { Self::normalize_force_close_secs(self.me_pool_force_close_secs.load(Ordering::Relaxed));
None Some(Duration::from_secs(secs))
} else {
Some(Duration::from_secs(secs))
}
} }
pub(super) fn drain_soft_evict_enabled(&self) -> bool { pub(super) fn drain_soft_evict_enabled(&self) -> bool {
@@ -1010,9 +1254,10 @@ impl MePool {
} }
pub(super) async fn active_coverage_required_total(&self) -> usize { pub(super) async fn active_coverage_required_total(&self) -> usize {
let now_epoch_secs = Self::now_epoch_secs();
let mut endpoints_by_dc = HashMap::<i32, HashSet<SocketAddr>>::new(); let mut endpoints_by_dc = HashMap::<i32, HashSet<SocketAddr>>::new();
if self.decision.ipv4_me { if self.family_enabled_for_drain_coverage(IpFamily::V4, now_epoch_secs) {
let map = self.proxy_map_v4.read().await; let map = self.proxy_map_v4.read().await;
for (dc, addrs) in map.iter() { for (dc, addrs) in map.iter() {
let entry = endpoints_by_dc.entry(*dc).or_default(); let entry = endpoints_by_dc.entry(*dc).or_default();
@@ -1022,7 +1267,7 @@ impl MePool {
} }
} }
if self.decision.ipv6_me { if self.family_enabled_for_drain_coverage(IpFamily::V6, now_epoch_secs) {
let map = self.proxy_map_v6.read().await; let map = self.proxy_map_v6.read().await;
for (dc, addrs) in map.iter() { for (dc, addrs) in map.iter() {
let entry = endpoints_by_dc.entry(*dc).or_default(); let entry = endpoints_by_dc.entry(*dc).or_default();

View File

@@ -74,9 +74,8 @@ impl MePool {
debug!( debug!(
%addr, %addr,
wait_ms = expiry.saturating_duration_since(now).as_millis(), wait_ms = expiry.saturating_duration_since(now).as_millis(),
"All ME endpoints are quarantined for the DC group; retrying earliest one" "All ME endpoints are quarantined for the DC group; waiting for quarantine expiry"
); );
return vec![addr];
} }
Vec::new() Vec::new()
@@ -165,9 +164,10 @@ impl MePool {
} }
async fn endpoints_for_dc(&self, target_dc: i32) -> Vec<SocketAddr> { async fn endpoints_for_dc(&self, target_dc: i32) -> Vec<SocketAddr> {
let now_epoch_secs = Self::now_epoch_secs();
let mut endpoints = HashSet::<SocketAddr>::new(); let mut endpoints = HashSet::<SocketAddr>::new();
if self.decision.ipv4_me { if self.family_enabled_for_drain_coverage(IpFamily::V4, now_epoch_secs) {
let map = self.proxy_map_v4.read().await; let map = self.proxy_map_v4.read().await;
if let Some(addrs) = map.get(&target_dc) { if let Some(addrs) = map.get(&target_dc) {
for (ip, port) in addrs { for (ip, port) in addrs {
@@ -176,7 +176,7 @@ impl MePool {
} }
} }
if self.decision.ipv6_me { if self.family_enabled_for_drain_coverage(IpFamily::V6, now_epoch_secs) {
let map = self.proxy_map_v6.read().await; let map = self.proxy_map_v6.read().await;
if let Some(addrs) = map.get(&target_dc) { if let Some(addrs) = map.get(&target_dc) {
for (ip, port) in addrs { for (ip, port) in addrs {

View File

@@ -11,8 +11,9 @@ use tracing::{debug, info, warn};
use std::collections::hash_map::DefaultHasher; use std::collections::hash_map::DefaultHasher;
use crate::crypto::SecureRandom; use crate::crypto::SecureRandom;
use crate::network::IpFamily;
use super::pool::{MePool, WriterContour}; use super::pool::{MeDrainGateReason, MePool, WriterContour};
const ME_HARDSWAP_PENDING_TTL_SECS: u64 = 1800; const ME_HARDSWAP_PENDING_TTL_SECS: u64 = 1800;
@@ -120,9 +121,10 @@ impl MePool {
} }
async fn desired_dc_endpoints(&self) -> HashMap<i32, HashSet<SocketAddr>> { async fn desired_dc_endpoints(&self) -> HashMap<i32, HashSet<SocketAddr>> {
let now_epoch_secs = Self::now_epoch_secs();
let mut out: HashMap<i32, HashSet<SocketAddr>> = HashMap::new(); let mut out: HashMap<i32, HashSet<SocketAddr>> = HashMap::new();
if self.decision.ipv4_me { if self.family_enabled_for_drain_coverage(IpFamily::V4, now_epoch_secs) {
let map_v4 = self.proxy_map_v4.read().await.clone(); let map_v4 = self.proxy_map_v4.read().await.clone();
for (dc, addrs) in map_v4 { for (dc, addrs) in map_v4 {
let entry = out.entry(dc).or_default(); let entry = out.entry(dc).or_default();
@@ -132,7 +134,7 @@ impl MePool {
} }
} }
if self.decision.ipv6_me { if self.family_enabled_for_drain_coverage(IpFamily::V6, now_epoch_secs) {
let map_v6 = self.proxy_map_v6.read().await.clone(); let map_v6 = self.proxy_map_v6.read().await.clone();
for (dc, addrs) in map_v6 { for (dc, addrs) in map_v6 {
let entry = out.entry(dc).or_default(); let entry = out.entry(dc).or_default();
@@ -313,13 +315,23 @@ impl MePool {
pub async fn zero_downtime_reinit_after_map_change(self: &Arc<Self>, rng: &SecureRandom) { pub async fn zero_downtime_reinit_after_map_change(self: &Arc<Self>, rng: &SecureRandom) {
let desired_by_dc = self.desired_dc_endpoints().await; let desired_by_dc = self.desired_dc_endpoints().await;
let now_epoch_secs = Self::now_epoch_secs();
let v4_suppressed = self.is_family_temporarily_suppressed(IpFamily::V4, now_epoch_secs);
let v6_suppressed = self.is_family_temporarily_suppressed(IpFamily::V6, now_epoch_secs);
if desired_by_dc.is_empty() { if desired_by_dc.is_empty() {
warn!("ME endpoint map is empty; skipping stale writer drain"); warn!("ME endpoint map is empty; skipping stale writer drain");
let reason = if (self.decision.ipv4_me && v4_suppressed)
|| (self.decision.ipv6_me && v6_suppressed)
{
MeDrainGateReason::SuppressionActive
} else {
MeDrainGateReason::CoverageQuorum
};
self.set_last_drain_gate(false, false, reason, now_epoch_secs);
return; return;
} }
let desired_map_hash = Self::desired_map_hash(&desired_by_dc); let desired_map_hash = Self::desired_map_hash(&desired_by_dc);
let now_epoch_secs = Self::now_epoch_secs();
let previous_generation = self.current_generation(); let previous_generation = self.current_generation();
let hardswap = self.hardswap.load(Ordering::Relaxed); let hardswap = self.hardswap.load(Ordering::Relaxed);
let generation = if hardswap { let generation = if hardswap {
@@ -390,7 +402,17 @@ impl MePool {
.load(Ordering::Relaxed), .load(Ordering::Relaxed),
); );
let (coverage_ratio, missing_dc) = Self::coverage_ratio(&desired_by_dc, &active_writer_addrs); let (coverage_ratio, missing_dc) = Self::coverage_ratio(&desired_by_dc, &active_writer_addrs);
let mut route_quorum_ok = coverage_ratio >= min_ratio;
let mut redundancy_ok = missing_dc.is_empty();
let mut redundancy_missing_dc = missing_dc.clone();
let mut gate_coverage_ratio = coverage_ratio;
if !hardswap && coverage_ratio < min_ratio { if !hardswap && coverage_ratio < min_ratio {
self.set_last_drain_gate(
false,
redundancy_ok,
MeDrainGateReason::CoverageQuorum,
now_epoch_secs,
);
warn!( warn!(
previous_generation, previous_generation,
generation, generation,
@@ -411,7 +433,17 @@ impl MePool {
.collect(); .collect();
let (fresh_coverage_ratio, fresh_missing_dc) = let (fresh_coverage_ratio, fresh_missing_dc) =
Self::coverage_ratio(&desired_by_dc, &fresh_writer_addrs); Self::coverage_ratio(&desired_by_dc, &fresh_writer_addrs);
if !fresh_missing_dc.is_empty() { route_quorum_ok = fresh_coverage_ratio >= min_ratio;
redundancy_ok = fresh_missing_dc.is_empty();
redundancy_missing_dc = fresh_missing_dc.clone();
gate_coverage_ratio = fresh_coverage_ratio;
if fresh_coverage_ratio < min_ratio {
self.set_last_drain_gate(
false,
redundancy_ok,
MeDrainGateReason::CoverageQuorum,
now_epoch_secs,
);
warn!( warn!(
previous_generation, previous_generation,
generation, generation,
@@ -421,13 +453,16 @@ impl MePool {
); );
return; return;
} }
} else if !missing_dc.is_empty() { }
self.set_last_drain_gate(route_quorum_ok, redundancy_ok, MeDrainGateReason::Open, now_epoch_secs);
if !redundancy_ok {
warn!( warn!(
missing_dc = ?missing_dc, missing_dc = ?redundancy_missing_dc,
// Keep stale writers alive when fresh coverage is incomplete. coverage_ratio = format_args!("{gate_coverage_ratio:.3}"),
"ME reinit coverage incomplete; keeping stale writers" min_ratio = format_args!("{min_ratio:.3}"),
"ME reinit proceeds with weighted quorum while some DC groups remain uncovered"
); );
return;
} }
if hardswap { if hardswap {

View File

@@ -1,7 +1,7 @@
use std::collections::HashMap; use std::collections::HashMap;
use std::time::Instant; use std::time::Instant;
use super::pool::{MePool, RefillDcKey}; use super::pool::{MeDrainGateReason, MePool, RefillDcKey};
use crate::network::IpFamily; use crate::network::IpFamily;
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
@@ -36,6 +36,24 @@ pub(crate) struct MeApiNatStunSnapshot {
pub stun_backoff_remaining_ms: Option<u64>, pub stun_backoff_remaining_ms: Option<u64>,
} }
/// API-facing snapshot of one address family's runtime state.
#[derive(Clone, Debug)]
pub(crate) struct MeApiFamilyStateSnapshot {
    /// Family label; the builder emits "v4" or "v6".
    pub family: &'static str,
    /// Lowercase state label from `MeFamilyRuntimeState::as_str`.
    pub state: &'static str,
    /// Epoch seconds at which the current state was entered.
    pub state_since_epoch_secs: u64,
    /// Suppression deadline; `None` when the stored value is 0 (not suppressed).
    pub suppressed_until_epoch_secs: Option<u64>,
    /// Consecutive failure count.
    pub fail_streak: u32,
    /// Consecutive success count during recovery.
    pub recover_success_streak: u32,
}
/// API-facing snapshot of the last drain-gate evaluation.
#[derive(Clone, Debug)]
pub(crate) struct MeApiDrainGateSnapshot {
    /// Weighted route-quorum verdict.
    pub route_quorum_ok: bool,
    /// Per-DC redundancy verdict.
    pub redundancy_ok: bool,
    /// Snake_case reason label from `MeDrainGateReason::as_str`.
    pub block_reason: &'static str,
    /// Epoch seconds when the gate was last updated.
    pub updated_at_epoch_secs: u64,
}
impl MePool { impl MePool {
pub(crate) async fn api_refill_snapshot(&self) -> MeApiRefillSnapshot { pub(crate) async fn api_refill_snapshot(&self) -> MeApiRefillSnapshot {
let inflight_endpoints_total = self.refill_inflight.lock().await.len(); let inflight_endpoints_total = self.refill_inflight.lock().await.len();
@@ -125,4 +143,35 @@ impl MePool {
stun_backoff_remaining_ms, stun_backoff_remaining_ms,
} }
} }
/// Builds the per-family runtime-state snapshots (always v4 then v6) for the API.
pub(crate) fn api_family_state_snapshot(&self) -> Vec<MeApiFamilyStateSnapshot> {
    let mut snapshots = Vec::with_capacity(2);
    for family in [IpFamily::V4, IpFamily::V6] {
        let label = match family {
            IpFamily::V4 => "v4",
            IpFamily::V6 => "v6",
        };
        // A stored deadline of 0 means "never suppressed"; surface it as None.
        let suppressed_until = self.family_suppressed_until_epoch_secs(family);
        let suppressed_until_epoch_secs = if suppressed_until == 0 {
            None
        } else {
            Some(suppressed_until)
        };
        snapshots.push(MeApiFamilyStateSnapshot {
            family: label,
            state: self.family_runtime_state(family).as_str(),
            state_since_epoch_secs: self.family_runtime_state_since_epoch_secs(family),
            suppressed_until_epoch_secs,
            fail_streak: self.family_fail_streak(family),
            recover_success_streak: self.family_recover_success_streak(family),
        });
    }
    snapshots
}
/// Builds the drain-gate snapshot for the API from the last recorded gate verdict.
pub(crate) fn api_drain_gate_snapshot(&self) -> MeApiDrainGateSnapshot {
    MeApiDrainGateSnapshot {
        route_quorum_ok: self.last_drain_gate_route_quorum_ok(),
        redundancy_ok: self.last_drain_gate_redundancy_ok(),
        block_reason: self.last_drain_gate_block_reason().as_str(),
        updated_at_epoch_secs: self.last_drain_gate_updated_at_epoch_secs(),
    }
}
} }

View File

@@ -16,11 +16,13 @@ use crate::config::MeBindStaleMode;
use crate::crypto::SecureRandom; use crate::crypto::SecureRandom;
use crate::error::{ProxyError, Result}; use crate::error::{ProxyError, Result};
use crate::protocol::constants::{RPC_CLOSE_EXT_U32, RPC_PING_U32}; use crate::protocol::constants::{RPC_CLOSE_EXT_U32, RPC_PING_U32};
use crate::stats::{
MeWriterCleanupSideEffectStep, MeWriterTeardownMode, MeWriterTeardownReason,
};
use super::codec::{RpcWriter, WriterCommand}; use super::codec::{RpcWriter, WriterCommand};
use super::pool::{MePool, MeWriter, WriterContour}; use super::pool::{MePool, MeWriter, WriterContour};
use super::reader::reader_loop; use super::reader::reader_loop;
use super::registry::BoundConn;
use super::wire::build_proxy_req_payload; use super::wire::build_proxy_req_payload;
const ME_ACTIVE_PING_SECS: u64 = 25; const ME_ACTIVE_PING_SECS: u64 = 25;
@@ -28,6 +30,12 @@ const ME_ACTIVE_PING_JITTER_SECS: i64 = 5;
const ME_IDLE_KEEPALIVE_MAX_SECS: u64 = 5; const ME_IDLE_KEEPALIVE_MAX_SECS: u64 = 5;
const ME_RPC_PROXY_REQ_RESPONSE_WAIT_MS: u64 = 700; const ME_RPC_PROXY_REQ_RESPONSE_WAIT_MS: u64 = 700;
/// Guard applied when removing a writer from the pool.
#[derive(Clone, Copy)]
enum WriterRemoveGuardMode {
    /// Remove the writer unconditionally.
    Any,
    /// Remove the writer only if its `draining` flag is set; otherwise no-op.
    DrainingOnly,
}
fn is_me_peer_closed_error(error: &ProxyError) -> bool { fn is_me_peer_closed_error(error: &ProxyError) -> bool {
matches!(error, ProxyError::Io(ioe) if ioe.kind() == ErrorKind::UnexpectedEof) matches!(error, ProxyError::Io(ioe) if ioe.kind() == ErrorKind::UnexpectedEof)
} }
@@ -44,9 +52,16 @@ impl MePool {
for writer_id in closed_writer_ids { for writer_id in closed_writer_ids {
if self.registry.is_writer_empty(writer_id).await { if self.registry.is_writer_empty(writer_id).await {
let _ = self.remove_writer_only(writer_id).await; let _ = self
.remove_writer_only(writer_id, MeWriterTeardownReason::PruneClosedWriter)
.await;
} else { } else {
let _ = self.remove_writer_and_close_clients(writer_id).await; let _ = self
.remove_writer_and_close_clients(
writer_id,
MeWriterTeardownReason::PruneClosedWriter,
)
.await;
} }
} }
} }
@@ -143,6 +158,9 @@ impl MePool {
crc_mode: hs.crc_mode, crc_mode: hs.crc_mode,
}; };
let cancel_wr = cancel.clone(); let cancel_wr = cancel.clone();
let cleanup_done = Arc::new(AtomicBool::new(false));
let cleanup_for_writer = cleanup_done.clone();
let pool_writer_task = Arc::downgrade(self);
tokio::spawn(async move { tokio::spawn(async move {
loop { loop {
tokio::select! { tokio::select! {
@@ -160,6 +178,20 @@ impl MePool {
_ = cancel_wr.cancelled() => break, _ = cancel_wr.cancelled() => break,
} }
} }
if cleanup_for_writer
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed)
.is_ok()
{
if let Some(pool) = pool_writer_task.upgrade() {
pool.remove_writer_and_close_clients(
writer_id,
MeWriterTeardownReason::WriterTaskExit,
)
.await;
} else {
cancel_wr.cancel();
}
}
}); });
let writer = MeWriter { let writer = MeWriter {
id: writer_id, id: writer_id,
@@ -196,7 +228,6 @@ impl MePool {
let cancel_ping = cancel.clone(); let cancel_ping = cancel.clone();
let tx_ping = tx.clone(); let tx_ping = tx.clone();
let ping_tracker_ping = ping_tracker.clone(); let ping_tracker_ping = ping_tracker.clone();
let cleanup_done = Arc::new(AtomicBool::new(false));
let cleanup_for_reader = cleanup_done.clone(); let cleanup_for_reader = cleanup_done.clone();
let cleanup_for_ping = cleanup_done.clone(); let cleanup_for_ping = cleanup_done.clone();
let keepalive_enabled = self.me_keepalive_enabled; let keepalive_enabled = self.me_keepalive_enabled;
@@ -242,21 +273,29 @@ impl MePool {
stats_reader_close.increment_me_idle_close_by_peer_total(); stats_reader_close.increment_me_idle_close_by_peer_total();
info!(writer_id, "ME socket closed by peer on idle writer"); info!(writer_id, "ME socket closed by peer on idle writer");
} }
if let Some(pool) = pool.upgrade() if cleanup_for_reader
&& cleanup_for_reader .compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed)
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed) .is_ok()
.is_ok()
{ {
pool.remove_writer_and_close_clients(writer_id).await; if let Some(pool) = pool.upgrade() {
pool.remove_writer_and_close_clients(
writer_id,
MeWriterTeardownReason::ReaderExit,
)
.await;
} else {
// Fallback for shutdown races: make writer task exit quickly so stale
// channels are observable by periodic prune.
cancel_reader_token.cancel();
}
} }
if let Err(e) = res { if let Err(e) = res {
if !idle_close_by_peer { if !idle_close_by_peer {
warn!(error = %e, "ME reader ended"); warn!(error = %e, "ME reader ended");
} }
} }
let mut ws = writers_arc.write().await; let remaining = writers_arc.read().await.len();
ws.retain(|w| w.id != writer_id); debug!(writer_id, remaining, "ME reader task finished");
info!(remaining = ws.len(), "Dead ME writer removed from pool");
}); });
let pool_ping = Arc::downgrade(self); let pool_ping = Arc::downgrade(self);
@@ -351,7 +390,11 @@ impl MePool {
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed) .compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed)
.is_ok() .is_ok()
{ {
pool.remove_writer_and_close_clients(writer_id).await; pool.remove_writer_and_close_clients(
writer_id,
MeWriterTeardownReason::PingSendFail,
)
.await;
} }
break; break;
} }
@@ -444,7 +487,11 @@ impl MePool {
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed) .compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed)
.is_ok() .is_ok()
{ {
pool.remove_writer_and_close_clients(writer_id).await; pool.remove_writer_and_close_clients(
writer_id,
MeWriterTeardownReason::SignalSendFail,
)
.await;
} }
break; break;
} }
@@ -478,7 +525,11 @@ impl MePool {
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed) .compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed)
.is_ok() .is_ok()
{ {
pool.remove_writer_and_close_clients(writer_id).await; pool.remove_writer_and_close_clients(
writer_id,
MeWriterTeardownReason::SignalSendFail,
)
.await;
} }
break; break;
} }
@@ -491,21 +542,83 @@ impl MePool {
Ok(()) Ok(())
} }
pub(crate) async fn remove_writer_and_close_clients(self: &Arc<Self>, writer_id: u64) { pub(crate) async fn remove_writer_and_close_clients(
self: &Arc<Self>,
writer_id: u64,
reason: MeWriterTeardownReason,
) -> bool {
// Full client cleanup now happens inside `registry.writer_lost` to keep // Full client cleanup now happens inside `registry.writer_lost` to keep
// writer reap/remove paths strictly non-blocking per connection. // writer reap/remove paths strictly non-blocking per connection.
let _ = self.remove_writer_only(writer_id).await; self.remove_writer_with_mode(
writer_id,
reason,
MeWriterTeardownMode::Normal,
WriterRemoveGuardMode::Any,
)
.await
} }
async fn remove_writer_only(self: &Arc<Self>, writer_id: u64) -> Vec<BoundConn> { pub(super) async fn remove_draining_writer_hard_detach(
self: &Arc<Self>,
writer_id: u64,
reason: MeWriterTeardownReason,
) -> bool {
self.remove_writer_with_mode(
writer_id,
reason,
MeWriterTeardownMode::HardDetach,
WriterRemoveGuardMode::DrainingOnly,
)
.await
}
async fn remove_writer_only(
self: &Arc<Self>,
writer_id: u64,
reason: MeWriterTeardownReason,
) -> bool {
self.remove_writer_with_mode(
writer_id,
reason,
MeWriterTeardownMode::Normal,
WriterRemoveGuardMode::Any,
)
.await
}
// Authoritative teardown primitive shared by normal cleanup and watchdog path.
// Lock-order invariant:
// 1) mutate `writers` under pool write lock,
// 2) release pool lock,
// 3) run registry/metrics/refill side effects.
// `registry.writer_lost` must never run while `writers` lock is held.
async fn remove_writer_with_mode(
self: &Arc<Self>,
writer_id: u64,
reason: MeWriterTeardownReason,
mode: MeWriterTeardownMode,
guard_mode: WriterRemoveGuardMode,
) -> bool {
let started_at = Instant::now();
self.stats
.increment_me_writer_teardown_attempt_total(reason, mode);
let mut close_tx: Option<mpsc::Sender<WriterCommand>> = None; let mut close_tx: Option<mpsc::Sender<WriterCommand>> = None;
let mut removed_addr: Option<SocketAddr> = None; let mut removed_addr: Option<SocketAddr> = None;
let mut removed_dc: Option<i32> = None; let mut removed_dc: Option<i32> = None;
let mut removed_uptime: Option<Duration> = None; let mut removed_uptime: Option<Duration> = None;
let mut trigger_refill = false; let mut trigger_refill = false;
let mut removed = false;
{ {
let mut ws = self.writers.write().await; let mut ws = self.writers.write().await;
if let Some(pos) = ws.iter().position(|w| w.id == writer_id) { if let Some(pos) = ws.iter().position(|w| w.id == writer_id) {
if matches!(guard_mode, WriterRemoveGuardMode::DrainingOnly)
&& !ws[pos].draining.load(Ordering::Relaxed)
{
self.stats.increment_me_writer_teardown_noop_total();
self.stats
.observe_me_writer_teardown_duration(mode, started_at.elapsed());
return false;
}
let w = ws.remove(pos); let w = ws.remove(pos);
let was_draining = w.draining.load(Ordering::Relaxed); let was_draining = w.draining.load(Ordering::Relaxed);
if was_draining { if was_draining {
@@ -522,6 +635,7 @@ impl MePool {
} }
close_tx = Some(w.tx.clone()); close_tx = Some(w.tx.clone());
self.conn_count.fetch_sub(1, Ordering::Relaxed); self.conn_count.fetch_sub(1, Ordering::Relaxed);
removed = true;
} }
} }
// State invariant: // State invariant:
@@ -529,7 +643,7 @@ impl MePool {
// - writer is removed from registry routing/binding maps via `writer_lost`. // - writer is removed from registry routing/binding maps via `writer_lost`.
// The close command below is only a best-effort accelerator for task shutdown. // The close command below is only a best-effort accelerator for task shutdown.
// Cleanup progress must never depend on command-channel availability. // Cleanup progress must never depend on command-channel availability.
let conns = self.registry.writer_lost(writer_id).await; let _ = self.registry.writer_lost(writer_id).await;
{ {
let mut tracker = self.ping_tracker.lock().await; let mut tracker = self.ping_tracker.lock().await;
tracker.retain(|_, (_, wid)| *wid != writer_id); tracker.retain(|_, (_, wid)| *wid != writer_id);
@@ -542,6 +656,9 @@ impl MePool {
self.stats.increment_me_writer_close_signal_drop_total(); self.stats.increment_me_writer_close_signal_drop_total();
self.stats self.stats
.increment_me_writer_close_signal_channel_full_total(); .increment_me_writer_close_signal_channel_full_total();
self.stats.increment_me_writer_cleanup_side_effect_failures_total(
MeWriterCleanupSideEffectStep::CloseSignalChannelFull,
);
debug!( debug!(
writer_id, writer_id,
"Skipping close signal for removed writer: command channel is full" "Skipping close signal for removed writer: command channel is full"
@@ -549,6 +666,9 @@ impl MePool {
} }
Err(TrySendError::Closed(_)) => { Err(TrySendError::Closed(_)) => {
self.stats.increment_me_writer_close_signal_drop_total(); self.stats.increment_me_writer_close_signal_drop_total();
self.stats.increment_me_writer_cleanup_side_effect_failures_total(
MeWriterCleanupSideEffectStep::CloseSignalChannelClosed,
);
debug!( debug!(
writer_id, writer_id,
"Skipping close signal for removed writer: command channel is closed" "Skipping close signal for removed writer: command channel is closed"
@@ -556,16 +676,24 @@ impl MePool {
} }
} }
} }
if trigger_refill if let Some(addr) = removed_addr {
&& let Some(addr) = removed_addr
&& let Some(writer_dc) = removed_dc
{
if let Some(uptime) = removed_uptime { if let Some(uptime) = removed_uptime {
self.maybe_quarantine_flapping_endpoint(addr, uptime).await; self.maybe_quarantine_flapping_endpoint(addr, uptime).await;
} }
self.trigger_immediate_refill_for_dc(addr, writer_dc); if trigger_refill
&& let Some(writer_dc) = removed_dc
{
self.trigger_immediate_refill_for_dc(addr, writer_dc);
}
} }
conns if removed {
self.stats.increment_me_writer_teardown_success_total(mode);
} else {
self.stats.increment_me_writer_teardown_noop_total();
}
self.stats
.observe_me_writer_teardown_duration(mode, started_at.elapsed());
removed
} }
pub(crate) async fn mark_writer_draining_with_timeout( pub(crate) async fn mark_writer_draining_with_timeout(

View File

@@ -14,6 +14,7 @@ use crate::config::{MeRouteNoWriterMode, MeWriterPickMode};
use crate::error::{ProxyError, Result}; use crate::error::{ProxyError, Result};
use crate::network::IpFamily; use crate::network::IpFamily;
use crate::protocol::constants::{RPC_CLOSE_CONN_U32, RPC_CLOSE_EXT_U32}; use crate::protocol::constants::{RPC_CLOSE_CONN_U32, RPC_CLOSE_EXT_U32};
use crate::stats::MeWriterTeardownReason;
use super::MePool; use super::MePool;
use super::codec::WriterCommand; use super::codec::WriterCommand;
@@ -134,7 +135,11 @@ impl MePool {
Ok(()) => return Ok(()), Ok(()) => return Ok(()),
Err(TimedSendError::Closed(_)) => { Err(TimedSendError::Closed(_)) => {
warn!(writer_id = current.writer_id, "ME writer channel closed"); warn!(writer_id = current.writer_id, "ME writer channel closed");
self.remove_writer_and_close_clients(current.writer_id).await; self.remove_writer_and_close_clients(
current.writer_id,
MeWriterTeardownReason::RouteChannelClosed,
)
.await;
continue; continue;
} }
Err(TimedSendError::Timeout(_)) => { Err(TimedSendError::Timeout(_)) => {
@@ -151,7 +156,11 @@ impl MePool {
} }
Err(TrySendError::Closed(_)) => { Err(TrySendError::Closed(_)) => {
warn!(writer_id = current.writer_id, "ME writer channel closed"); warn!(writer_id = current.writer_id, "ME writer channel closed");
self.remove_writer_and_close_clients(current.writer_id).await; self.remove_writer_and_close_clients(
current.writer_id,
MeWriterTeardownReason::RouteChannelClosed,
)
.await;
continue; continue;
} }
} }
@@ -458,7 +467,11 @@ impl MePool {
Err(TrySendError::Closed(_)) => { Err(TrySendError::Closed(_)) => {
self.stats.increment_me_writer_pick_closed_total(pick_mode); self.stats.increment_me_writer_pick_closed_total(pick_mode);
warn!(writer_id = w.id, "ME writer channel closed"); warn!(writer_id = w.id, "ME writer channel closed");
self.remove_writer_and_close_clients(w.id).await; self.remove_writer_and_close_clients(
w.id,
MeWriterTeardownReason::RouteChannelClosed,
)
.await;
continue; continue;
} }
} }
@@ -503,7 +516,11 @@ impl MePool {
Err(TimedSendError::Closed(_)) => { Err(TimedSendError::Closed(_)) => {
self.stats.increment_me_writer_pick_closed_total(pick_mode); self.stats.increment_me_writer_pick_closed_total(pick_mode);
warn!(writer_id = w.id, "ME writer channel closed (blocking)"); warn!(writer_id = w.id, "ME writer channel closed (blocking)");
self.remove_writer_and_close_clients(w.id).await; self.remove_writer_and_close_clients(
w.id,
MeWriterTeardownReason::RouteChannelClosed,
)
.await;
} }
Err(TimedSendError::Timeout(_)) => { Err(TimedSendError::Timeout(_)) => {
self.stats.increment_me_writer_pick_full_total(pick_mode); self.stats.increment_me_writer_pick_full_total(pick_mode);
@@ -654,7 +671,11 @@ impl MePool {
} }
Err(TrySendError::Closed(_)) => { Err(TrySendError::Closed(_)) => {
debug!("ME close write failed"); debug!("ME close write failed");
self.remove_writer_and_close_clients(w.writer_id).await; self.remove_writer_and_close_clients(
w.writer_id,
MeWriterTeardownReason::CloseRpcChannelClosed,
)
.await;
} }
} }
} else { } else {

View File

@@ -2,6 +2,7 @@
pub mod pool; pub mod pool;
pub mod proxy_protocol; pub mod proxy_protocol;
pub mod shadowsocks;
pub mod socket; pub mod socket;
pub mod socks; pub mod socks;
pub mod upstream; pub mod upstream;
@@ -14,5 +15,8 @@ pub use socket::*;
#[allow(unused_imports)] #[allow(unused_imports)]
pub use socks::*; pub use socks::*;
#[allow(unused_imports)] #[allow(unused_imports)]
pub use upstream::{DcPingResult, StartupPingResult, UpstreamEgressInfo, UpstreamManager, UpstreamRouteKind}; pub use upstream::{
DcPingResult, StartupPingResult, UpstreamEgressInfo, UpstreamManager, UpstreamRouteKind,
UpstreamStream,
};
pub mod middle_proxy; pub mod middle_proxy;

View File

@@ -0,0 +1,60 @@
use std::net::{IpAddr, SocketAddr};
use std::time::Duration;
use shadowsocks::{
ProxyClientStream,
config::{ServerConfig, ServerType},
context::Context,
net::ConnectOpts,
};
use crate::error::{ProxyError, Result};
pub(crate) type ShadowsocksStream = ProxyClientStream<shadowsocks::net::TcpStream>;
fn parse_server_config(url: &str, connect_timeout: Duration) -> Result<ServerConfig> {
let mut config = ServerConfig::from_url(url)
.map_err(|error| ProxyError::Config(format!("invalid shadowsocks url: {error}")))?;
if config.plugin().is_some() {
return Err(ProxyError::Config(
"shadowsocks plugins are not supported".to_string(),
));
}
config.set_timeout(connect_timeout);
Ok(config)
}
pub(crate) fn sanitize_shadowsocks_url(url: &str) -> Result<String> {
Ok(parse_server_config(url, Duration::from_secs(1))?
.addr()
.to_string())
}
fn connect_opts_for_interface(interface: &Option<String>) -> ConnectOpts {
let mut opts = ConnectOpts::default();
if let Some(interface) = interface {
if let Ok(ip) = interface.parse::<IpAddr>() {
opts.bind_local_addr = Some(SocketAddr::new(ip, 0));
} else {
opts.bind_interface = Some(interface.clone());
}
}
opts
}
pub(crate) async fn connect_shadowsocks(
url: &str,
interface: &Option<String>,
target: SocketAddr,
connect_timeout: Duration,
) -> Result<ShadowsocksStream> {
let config = parse_server_config(url, connect_timeout)?;
let context = Context::new_shared(ServerType::Local);
let opts = connect_opts_for_interface(interface);
ProxyClientStream::connect_with_opts(context, &config, target, &opts)
.await
.map_err(ProxyError::Io)
}

View File

@@ -4,22 +4,28 @@
#![allow(deprecated)] #![allow(deprecated)]
use rand::Rng;
use std::collections::{BTreeSet, HashMap}; use std::collections::{BTreeSet, HashMap};
use std::net::{SocketAddr, IpAddr}; use std::net::{IpAddr, SocketAddr};
use std::pin::Pin;
use std::sync::Arc; use std::sync::Arc;
use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering}; use std::sync::atomic::{AtomicU64, AtomicUsize, Ordering};
use std::task::{Context, Poll};
use std::time::Duration; use std::time::Duration;
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::net::TcpStream; use tokio::net::TcpStream;
use tokio::sync::RwLock; use tokio::sync::RwLock;
use tokio::time::Instant; use tokio::time::Instant;
use rand::Rng; use tracing::{debug, info, trace, warn};
use tracing::{debug, warn, info, trace};
use crate::config::{UpstreamConfig, UpstreamType}; use crate::config::{UpstreamConfig, UpstreamType};
use crate::error::{Result, ProxyError}; use crate::error::{ProxyError, Result};
use crate::network::dns_overrides::{resolve_socket_addr, split_host_port}; use crate::network::dns_overrides::{resolve_socket_addr, split_host_port};
use crate::protocol::constants::{TG_DATACENTERS_V4, TG_DATACENTERS_V6, TG_DATACENTER_PORT}; use crate::protocol::constants::{TG_DATACENTER_PORT, TG_DATACENTERS_V4, TG_DATACENTERS_V6};
use crate::stats::Stats; use crate::stats::Stats;
use crate::transport::shadowsocks::{
ShadowsocksStream, connect_shadowsocks, sanitize_shadowsocks_url,
};
use crate::transport::socket::{create_outgoing_socket_bound, resolve_interface_ip}; use crate::transport::socket::{create_outgoing_socket_bound, resolve_interface_ip};
use crate::transport::socks::{connect_socks4, connect_socks5}; use crate::transport::socks::{connect_socks4, connect_socks5};
@@ -47,7 +53,10 @@ struct LatencyEma {
impl LatencyEma { impl LatencyEma {
const fn new(alpha: f64) -> Self { const fn new(alpha: f64) -> Self {
Self { value_ms: None, alpha } Self {
value_ms: None,
alpha,
}
} }
fn update(&mut self, sample_ms: f64) { fn update(&mut self, sample_ms: f64) {
@@ -131,11 +140,17 @@ impl UpstreamState {
return Some(ms); return Some(ms);
} }
let (sum, count) = self.dc_latency.iter() let (sum, count) = self
.dc_latency
.iter()
.filter_map(|l| l.get()) .filter_map(|l| l.get())
.fold((0.0, 0u32), |(s, c), v| (s + v, c + 1)); .fold((0.0, 0u32), |(s, c), v| (s + v, c + 1));
if count > 0 { Some(sum / count as f64) } else { None } if count > 0 {
Some(sum / count as f64)
} else {
None
}
} }
} }
@@ -158,11 +173,78 @@ pub struct StartupPingResult {
pub both_available: bool, pub both_available: bool,
} }
pub enum UpstreamStream {
Tcp(TcpStream),
Shadowsocks(Box<ShadowsocksStream>),
}
impl std::fmt::Debug for UpstreamStream {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Self::Tcp(_) => f.write_str("UpstreamStream::Tcp(..)"),
Self::Shadowsocks(_) => f.write_str("UpstreamStream::Shadowsocks(..)"),
}
}
}
impl UpstreamStream {
pub fn into_tcp(self) -> Result<TcpStream> {
match self {
Self::Tcp(stream) => Ok(stream),
Self::Shadowsocks(_) => Err(ProxyError::Config(
"shadowsocks upstreams are not supported when general.use_middle_proxy = true"
.to_string(),
)),
}
}
}
impl AsyncRead for UpstreamStream {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<std::io::Result<()>> {
match self.get_mut() {
Self::Tcp(stream) => Pin::new(stream).poll_read(cx, buf),
Self::Shadowsocks(stream) => Pin::new(stream.as_mut()).poll_read(cx, buf),
}
}
}
impl AsyncWrite for UpstreamStream {
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<std::io::Result<usize>> {
match self.get_mut() {
Self::Tcp(stream) => Pin::new(stream).poll_write(cx, buf),
Self::Shadowsocks(stream) => Pin::new(stream.as_mut()).poll_write(cx, buf),
}
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
match self.get_mut() {
Self::Tcp(stream) => Pin::new(stream).poll_flush(cx),
Self::Shadowsocks(stream) => Pin::new(stream.as_mut()).poll_flush(cx),
}
}
fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
match self.get_mut() {
Self::Tcp(stream) => Pin::new(stream).poll_shutdown(cx),
Self::Shadowsocks(stream) => Pin::new(stream.as_mut()).poll_shutdown(cx),
}
}
}
#[derive(Debug, Clone, Copy, PartialEq, Eq)] #[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum UpstreamRouteKind { pub enum UpstreamRouteKind {
Direct, Direct,
Socks4, Socks4,
Socks5, Socks5,
Shadowsocks,
} }
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
@@ -194,6 +276,7 @@ pub struct UpstreamApiSummarySnapshot {
pub direct_total: usize, pub direct_total: usize,
pub socks4_total: usize, pub socks4_total: usize,
pub socks5_total: usize, pub socks5_total: usize,
pub shadowsocks_total: usize,
} }
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
@@ -253,7 +336,8 @@ impl UpstreamManager {
connect_failfast_hard_errors: bool, connect_failfast_hard_errors: bool,
stats: Arc<Stats>, stats: Arc<Stats>,
) -> Self { ) -> Self {
let states = configs.into_iter() let states = configs
.into_iter()
.filter(|c| c.enabled) .filter(|c| c.enabled)
.map(UpstreamState::new) .map(UpstreamState::new)
.collect(); .collect();
@@ -311,20 +395,13 @@ impl UpstreamManager {
summary.unhealthy_total += 1; summary.unhealthy_total += 1;
} }
let (route_kind, address) = match &upstream.config.upstream_type { let (route_kind, address) = Self::describe_upstream(&upstream.config.upstream_type);
UpstreamType::Direct { .. } => { match route_kind {
summary.direct_total += 1; UpstreamRouteKind::Direct => summary.direct_total += 1,
(UpstreamRouteKind::Direct, "direct".to_string()) UpstreamRouteKind::Socks4 => summary.socks4_total += 1,
} UpstreamRouteKind::Socks5 => summary.socks5_total += 1,
UpstreamType::Socks4 { address, .. } => { UpstreamRouteKind::Shadowsocks => summary.shadowsocks_total += 1,
summary.socks4_total += 1; }
(UpstreamRouteKind::Socks4, address.clone())
}
UpstreamType::Socks5 { address, .. } => {
summary.socks5_total += 1;
(UpstreamRouteKind::Socks5, address.clone())
}
};
let mut dc = Vec::with_capacity(NUM_DCS); let mut dc = Vec::with_capacity(NUM_DCS);
for dc_idx in 0..NUM_DCS { for dc_idx in 0..NUM_DCS {
@@ -352,6 +429,18 @@ impl UpstreamManager {
Some(UpstreamApiSnapshot { summary, upstreams }) Some(UpstreamApiSnapshot { summary, upstreams })
} }
fn describe_upstream(upstream_type: &UpstreamType) -> (UpstreamRouteKind, String) {
match upstream_type {
UpstreamType::Direct { .. } => (UpstreamRouteKind::Direct, "direct".to_string()),
UpstreamType::Socks4 { address, .. } => (UpstreamRouteKind::Socks4, address.clone()),
UpstreamType::Socks5 { address, .. } => (UpstreamRouteKind::Socks5, address.clone()),
UpstreamType::Shadowsocks { url, .. } => (
UpstreamRouteKind::Shadowsocks,
sanitize_shadowsocks_url(url).unwrap_or_else(|_| "invalid".to_string()),
),
}
}
pub fn api_policy_snapshot(&self) -> UpstreamApiPolicySnapshot { pub fn api_policy_snapshot(&self) -> UpstreamApiPolicySnapshot {
UpstreamApiPolicySnapshot { UpstreamApiPolicySnapshot {
connect_retry_attempts: self.connect_retry_attempts, connect_retry_attempts: self.connect_retry_attempts,
@@ -539,44 +628,44 @@ impl UpstreamManager {
// Scope filter: // Scope filter:
// If scope is set: only scoped and matched items // If scope is set: only scoped and matched items
// If scope is not set: only unscoped items // If scope is not set: only unscoped items
let filtered_upstreams : Vec<usize> = upstreams.iter() let filtered_upstreams: Vec<usize> = upstreams
.iter()
.enumerate() .enumerate()
.filter(|(_, u)| { .filter(|(_, u)| {
scope.map_or( scope.map_or(u.config.scopes.is_empty(), |req_scope| {
u.config.scopes.is_empty(), u.config
|req_scope| { .scopes
u.config.scopes .split(',')
.split(',') .map(str::trim)
.map(str::trim) .any(|s| s == req_scope)
.any(|s| s == req_scope) })
}
)
}) })
.map(|(i, _)| i) .map(|(i, _)| i)
.collect(); .collect();
// Healthy filter // Healthy filter
let healthy: Vec<usize> = filtered_upstreams.iter() let healthy: Vec<usize> = filtered_upstreams
.iter()
.filter(|&&i| upstreams[i].healthy) .filter(|&&i| upstreams[i].healthy)
.copied() .copied()
.collect(); .collect();
if filtered_upstreams.is_empty() { if filtered_upstreams.is_empty() {
if Self::should_emit_warn( if Self::should_emit_warn(self.no_upstreams_warn_epoch_ms.as_ref(), 5_000) {
self.no_upstreams_warn_epoch_ms.as_ref(), warn!(
5_000, scope = scope,
) { "No upstreams available! Using first (direct?)"
warn!(scope = scope, "No upstreams available! Using first (direct?)"); );
} }
return None; return None;
} }
if healthy.is_empty() { if healthy.is_empty() {
if Self::should_emit_warn( if Self::should_emit_warn(self.no_healthy_warn_epoch_ms.as_ref(), 5_000) {
self.no_healthy_warn_epoch_ms.as_ref(), warn!(
5_000, scope = scope,
) { "No healthy upstreams available! Using random."
warn!(scope = scope, "No healthy upstreams available! Using random."); );
} }
return Some(filtered_upstreams[rand::rng().gen_range(0..filtered_upstreams.len())]); return Some(filtered_upstreams[rand::rng().gen_range(0..filtered_upstreams.len())]);
} }
@@ -585,14 +674,18 @@ impl UpstreamManager {
return Some(healthy[0]); return Some(healthy[0]);
} }
let weights: Vec<(usize, f64)> = healthy.iter().map(|&i| { let weights: Vec<(usize, f64)> = healthy
let base = upstreams[i].config.weight as f64; .iter()
let latency_factor = upstreams[i].effective_latency(dc_idx) .map(|&i| {
.map(|ms| if ms > 1.0 { 1000.0 / ms } else { 1000.0 }) let base = upstreams[i].config.weight as f64;
.unwrap_or(1.0); let latency_factor = upstreams[i]
.effective_latency(dc_idx)
.map(|ms| if ms > 1.0 { 1000.0 / ms } else { 1000.0 })
.unwrap_or(1.0);
(i, base * latency_factor) (i, base * latency_factor)
}).collect(); })
.collect();
let total: f64 = weights.iter().map(|(_, w)| w).sum(); let total: f64 = weights.iter().map(|(_, w)| w).sum();
@@ -620,8 +713,34 @@ impl UpstreamManager {
} }
/// Connect to target through a selected upstream. /// Connect to target through a selected upstream.
pub async fn connect(&self, target: SocketAddr, dc_idx: Option<i16>, scope: Option<&str>) -> Result<TcpStream> { pub async fn connect(
let (stream, _) = self.connect_with_details(target, dc_idx, scope).await?; &self,
target: SocketAddr,
dc_idx: Option<i16>,
scope: Option<&str>,
) -> Result<UpstreamStream> {
let idx = self
.select_upstream(dc_idx, scope)
.await
.ok_or_else(|| ProxyError::Config("No upstreams available".to_string()))?;
let mut upstream = {
let guard = self.upstreams.read().await;
guard[idx].config.clone()
};
if let Some(s) = scope {
upstream.selected_scope = s.to_string();
}
let bind_rr = {
let guard = self.upstreams.read().await;
guard.get(idx).map(|u| u.bind_rr.clone())
};
let (stream, _) = self
.connect_selected_upstream(idx, upstream, target, dc_idx, bind_rr)
.await?;
Ok(stream) Ok(stream)
} }
@@ -632,7 +751,9 @@ impl UpstreamManager {
dc_idx: Option<i16>, dc_idx: Option<i16>,
scope: Option<&str>, scope: Option<&str>,
) -> Result<(TcpStream, UpstreamEgressInfo)> { ) -> Result<(TcpStream, UpstreamEgressInfo)> {
let idx = self.select_upstream(dc_idx, scope).await let idx = self
.select_upstream(dc_idx, scope)
.await
.ok_or_else(|| ProxyError::Config("No upstreams available".to_string()))?; .ok_or_else(|| ProxyError::Config("No upstreams available".to_string()))?;
let mut upstream = { let mut upstream = {
@@ -650,6 +771,20 @@ impl UpstreamManager {
guard.get(idx).map(|u| u.bind_rr.clone()) guard.get(idx).map(|u| u.bind_rr.clone())
}; };
let (stream, egress) = self
.connect_selected_upstream(idx, upstream, target, dc_idx, bind_rr)
.await?;
Ok((stream.into_tcp()?, egress))
}
async fn connect_selected_upstream(
&self,
idx: usize,
upstream: UpstreamConfig,
target: SocketAddr,
dc_idx: Option<i16>,
bind_rr: Option<Arc<AtomicUsize>>,
) -> Result<(UpstreamStream, UpstreamEgressInfo)> {
let connect_started_at = Instant::now(); let connect_started_at = Instant::now();
let mut last_error: Option<ProxyError> = None; let mut last_error: Option<ProxyError> = None;
let mut attempts_used = 0u32; let mut attempts_used = 0u32;
@@ -662,8 +797,8 @@ impl UpstreamManager {
break; break;
} }
let remaining_budget = self.connect_budget.saturating_sub(elapsed); let remaining_budget = self.connect_budget.saturating_sub(elapsed);
let attempt_timeout = Duration::from_secs(DIRECT_CONNECT_TIMEOUT_SECS) let attempt_timeout =
.min(remaining_budget); Duration::from_secs(DIRECT_CONNECT_TIMEOUT_SECS).min(remaining_budget);
if attempt_timeout.is_zero() { if attempt_timeout.is_zero() {
last_error = Some(ProxyError::ConnectionTimeout { last_error = Some(ProxyError::ConnectionTimeout {
addr: target.to_string(), addr: target.to_string(),
@@ -786,9 +921,12 @@ impl UpstreamManager {
target: SocketAddr, target: SocketAddr,
bind_rr: Option<Arc<AtomicUsize>>, bind_rr: Option<Arc<AtomicUsize>>,
connect_timeout: Duration, connect_timeout: Duration,
) -> Result<(TcpStream, UpstreamEgressInfo)> { ) -> Result<(UpstreamStream, UpstreamEgressInfo)> {
match &config.upstream_type { match &config.upstream_type {
UpstreamType::Direct { interface, bind_addresses } => { UpstreamType::Direct {
interface,
bind_addresses,
} => {
let bind_ip = Self::resolve_bind_address( let bind_ip = Self::resolve_bind_address(
interface, interface,
bind_addresses, bind_addresses,
@@ -796,9 +934,7 @@ impl UpstreamManager {
bind_rr.as_deref(), bind_rr.as_deref(),
true, true,
); );
if bind_ip.is_none() if bind_ip.is_none() && bind_addresses.as_ref().is_some_and(|v| !v.is_empty()) {
&& bind_addresses.as_ref().is_some_and(|v| !v.is_empty())
{
return Err(ProxyError::Config(format!( return Err(ProxyError::Config(format!(
"No valid bind_addresses for target family {target}" "No valid bind_addresses for target family {target}"
))); )));
@@ -813,8 +949,10 @@ impl UpstreamManager {
socket.set_nonblocking(true)?; socket.set_nonblocking(true)?;
match socket.connect(&target.into()) { match socket.connect(&target.into()) {
Ok(()) => {}, Ok(()) => {}
Err(err) if err.raw_os_error() == Some(libc::EINPROGRESS) || err.kind() == std::io::ErrorKind::WouldBlock => {}, Err(err)
if err.raw_os_error() == Some(libc::EINPROGRESS)
|| err.kind() == std::io::ErrorKind::WouldBlock => {}
Err(err) => return Err(ProxyError::Io(err)), Err(err) => return Err(ProxyError::Io(err)),
} }
@@ -836,7 +974,7 @@ impl UpstreamManager {
let local_addr = stream.local_addr().ok(); let local_addr = stream.local_addr().ok();
Ok(( Ok((
stream, UpstreamStream::Tcp(stream),
UpstreamEgressInfo { UpstreamEgressInfo {
upstream_id, upstream_id,
route_kind: UpstreamRouteKind::Direct, route_kind: UpstreamRouteKind::Direct,
@@ -846,8 +984,12 @@ impl UpstreamManager {
socks_proxy_addr: None, socks_proxy_addr: None,
}, },
)) ))
}, }
UpstreamType::Socks4 { address, interface, user_id } => { UpstreamType::Socks4 {
address,
interface,
user_id,
} => {
// Try to parse as SocketAddr first (IP:port), otherwise treat as hostname:port // Try to parse as SocketAddr first (IP:port), otherwise treat as hostname:port
let mut stream = if let Ok(proxy_addr) = address.parse::<SocketAddr>() { let mut stream = if let Ok(proxy_addr) = address.parse::<SocketAddr>() {
// IP:port format - use socket with optional interface binding // IP:port format - use socket with optional interface binding
@@ -863,8 +1005,10 @@ impl UpstreamManager {
socket.set_nonblocking(true)?; socket.set_nonblocking(true)?;
match socket.connect(&proxy_addr.into()) { match socket.connect(&proxy_addr.into()) {
Ok(()) => {}, Ok(()) => {}
Err(err) if err.raw_os_error() == Some(libc::EINPROGRESS) || err.kind() == std::io::ErrorKind::WouldBlock => {}, Err(err)
if err.raw_os_error() == Some(libc::EINPROGRESS)
|| err.kind() == std::io::ErrorKind::WouldBlock => {}
Err(err) => return Err(ProxyError::Io(err)), Err(err) => return Err(ProxyError::Io(err)),
} }
@@ -888,14 +1032,16 @@ impl UpstreamManager {
// Hostname:port format - use tokio DNS resolution // Hostname:port format - use tokio DNS resolution
// Note: interface binding is not supported for hostnames // Note: interface binding is not supported for hostnames
if interface.is_some() { if interface.is_some() {
warn!("SOCKS4 interface binding is not supported for hostname addresses, ignoring"); warn!(
"SOCKS4 interface binding is not supported for hostname addresses, ignoring"
);
} }
Self::connect_hostname_with_dns_override(address, connect_timeout).await? Self::connect_hostname_with_dns_override(address, connect_timeout).await?
}; };
// replace socks user_id with config.selected_scope, if set // replace socks user_id with config.selected_scope, if set
let scope: Option<&str> = Some(config.selected_scope.as_str()) let scope: Option<&str> =
.filter(|s| !s.is_empty()); Some(config.selected_scope.as_str()).filter(|s| !s.is_empty());
let _user_id: Option<&str> = scope.or(user_id.as_deref()); let _user_id: Option<&str> = scope.or(user_id.as_deref());
let bound = match tokio::time::timeout( let bound = match tokio::time::timeout(
@@ -915,7 +1061,7 @@ impl UpstreamManager {
let local_addr = stream.local_addr().ok(); let local_addr = stream.local_addr().ok();
let socks_proxy_addr = stream.peer_addr().ok(); let socks_proxy_addr = stream.peer_addr().ok();
Ok(( Ok((
stream, UpstreamStream::Tcp(stream),
UpstreamEgressInfo { UpstreamEgressInfo {
upstream_id, upstream_id,
route_kind: UpstreamRouteKind::Socks4, route_kind: UpstreamRouteKind::Socks4,
@@ -925,8 +1071,13 @@ impl UpstreamManager {
socks_proxy_addr, socks_proxy_addr,
}, },
)) ))
}, }
UpstreamType::Socks5 { address, interface, username, password } => { UpstreamType::Socks5 {
address,
interface,
username,
password,
} => {
// Try to parse as SocketAddr first (IP:port), otherwise treat as hostname:port // Try to parse as SocketAddr first (IP:port), otherwise treat as hostname:port
let mut stream = if let Ok(proxy_addr) = address.parse::<SocketAddr>() { let mut stream = if let Ok(proxy_addr) = address.parse::<SocketAddr>() {
// IP:port format - use socket with optional interface binding // IP:port format - use socket with optional interface binding
@@ -942,8 +1093,10 @@ impl UpstreamManager {
socket.set_nonblocking(true)?; socket.set_nonblocking(true)?;
match socket.connect(&proxy_addr.into()) { match socket.connect(&proxy_addr.into()) {
Ok(()) => {}, Ok(()) => {}
Err(err) if err.raw_os_error() == Some(libc::EINPROGRESS) || err.kind() == std::io::ErrorKind::WouldBlock => {}, Err(err)
if err.raw_os_error() == Some(libc::EINPROGRESS)
|| err.kind() == std::io::ErrorKind::WouldBlock => {}
Err(err) => return Err(ProxyError::Io(err)), Err(err) => return Err(ProxyError::Io(err)),
} }
@@ -967,15 +1120,17 @@ impl UpstreamManager {
// Hostname:port format - use tokio DNS resolution // Hostname:port format - use tokio DNS resolution
// Note: interface binding is not supported for hostnames // Note: interface binding is not supported for hostnames
if interface.is_some() { if interface.is_some() {
warn!("SOCKS5 interface binding is not supported for hostname addresses, ignoring"); warn!(
"SOCKS5 interface binding is not supported for hostname addresses, ignoring"
);
} }
Self::connect_hostname_with_dns_override(address, connect_timeout).await? Self::connect_hostname_with_dns_override(address, connect_timeout).await?
}; };
debug!(config = ?config, "Socks5 connection"); debug!(config = ?config, "Socks5 connection");
// replace socks user:pass with config.selected_scope, if set // replace socks user:pass with config.selected_scope, if set
let scope: Option<&str> = Some(config.selected_scope.as_str()) let scope: Option<&str> =
.filter(|s| !s.is_empty()); Some(config.selected_scope.as_str()).filter(|s| !s.is_empty());
let _username: Option<&str> = scope.or(username.as_deref()); let _username: Option<&str> = scope.or(username.as_deref());
let _password: Option<&str> = scope.or(password.as_deref()); let _password: Option<&str> = scope.or(password.as_deref());
@@ -996,7 +1151,7 @@ impl UpstreamManager {
let local_addr = stream.local_addr().ok(); let local_addr = stream.local_addr().ok();
let socks_proxy_addr = stream.peer_addr().ok(); let socks_proxy_addr = stream.peer_addr().ok();
Ok(( Ok((
stream, UpstreamStream::Tcp(stream),
UpstreamEgressInfo { UpstreamEgressInfo {
upstream_id, upstream_id,
route_kind: UpstreamRouteKind::Socks5, route_kind: UpstreamRouteKind::Socks5,
@@ -1006,7 +1161,22 @@ impl UpstreamManager {
socks_proxy_addr, socks_proxy_addr,
}, },
)) ))
}, }
UpstreamType::Shadowsocks { url, interface } => {
let stream = connect_shadowsocks(url, interface, target, connect_timeout).await?;
let local_addr = stream.get_ref().local_addr().ok();
Ok((
UpstreamStream::Shadowsocks(Box::new(stream)),
UpstreamEgressInfo {
upstream_id,
route_kind: UpstreamRouteKind::Shadowsocks,
local_addr,
direct_bind_ip: None,
socks_bound_addr: None,
socks_proxy_addr: None,
},
))
}
} }
} }
@@ -1023,7 +1193,9 @@ impl UpstreamManager {
) -> Vec<StartupPingResult> { ) -> Vec<StartupPingResult> {
let upstreams: Vec<(usize, UpstreamConfig, Arc<AtomicUsize>)> = { let upstreams: Vec<(usize, UpstreamConfig, Arc<AtomicUsize>)> = {
let guard = self.upstreams.read().await; let guard = self.upstreams.read().await;
guard.iter().enumerate() guard
.iter()
.enumerate()
.map(|(i, u)| (i, u.config.clone(), u.bind_rr.clone())) .map(|(i, u)| (i, u.config.clone(), u.bind_rr.clone()))
.collect() .collect()
}; };
@@ -1051,6 +1223,11 @@ impl UpstreamManager {
} }
UpstreamType::Socks4 { address, .. } => format!("socks4://{}", address), UpstreamType::Socks4 { address, .. } => format!("socks4://{}", address),
UpstreamType::Socks5 { address, .. } => format!("socks5://{}", address), UpstreamType::Socks5 { address, .. } => format!("socks5://{}", address),
UpstreamType::Shadowsocks { url, .. } => {
let address =
sanitize_shadowsocks_url(url).unwrap_or_else(|_| "invalid".to_string());
format!("shadowsocks://{address}")
}
}; };
let mut v6_results = Vec::with_capacity(NUM_DCS); let mut v6_results = Vec::with_capacity(NUM_DCS);
@@ -1061,8 +1238,14 @@ impl UpstreamManager {
let result = tokio::time::timeout( let result = tokio::time::timeout(
Duration::from_secs(DC_PING_TIMEOUT_SECS), Duration::from_secs(DC_PING_TIMEOUT_SECS),
self.ping_single_dc(*upstream_idx, upstream_config, Some(bind_rr.clone()), addr_v6) self.ping_single_dc(
).await; *upstream_idx,
upstream_config,
Some(bind_rr.clone()),
addr_v6,
),
)
.await;
let ping_result = match result { let ping_result = match result {
Ok(Ok(rtt_ms)) => { Ok(Ok(rtt_ms)) => {
@@ -1112,8 +1295,14 @@ impl UpstreamManager {
let result = tokio::time::timeout( let result = tokio::time::timeout(
Duration::from_secs(DC_PING_TIMEOUT_SECS), Duration::from_secs(DC_PING_TIMEOUT_SECS),
self.ping_single_dc(*upstream_idx, upstream_config, Some(bind_rr.clone()), addr_v4) self.ping_single_dc(
).await; *upstream_idx,
upstream_config,
Some(bind_rr.clone()),
addr_v4,
),
)
.await;
let ping_result = match result { let ping_result = match result {
Ok(Ok(rtt_ms)) => { Ok(Ok(rtt_ms)) => {
@@ -1162,7 +1351,7 @@ impl UpstreamManager {
Err(_) => { Err(_) => {
warn!(dc = %dc_key, "Invalid dc_overrides key, skipping"); warn!(dc = %dc_key, "Invalid dc_overrides key, skipping");
continue; continue;
}, }
_ => continue, _ => continue,
}; };
let dc_idx = dc_num as usize; let dc_idx = dc_num as usize;
@@ -1175,8 +1364,14 @@ impl UpstreamManager {
} }
let result = tokio::time::timeout( let result = tokio::time::timeout(
Duration::from_secs(DC_PING_TIMEOUT_SECS), Duration::from_secs(DC_PING_TIMEOUT_SECS),
self.ping_single_dc(*upstream_idx, upstream_config, Some(bind_rr.clone()), addr) self.ping_single_dc(
).await; *upstream_idx,
upstream_config,
Some(bind_rr.clone()),
addr,
),
)
.await;
let ping_result = match result { let ping_result = match result {
Ok(Ok(rtt_ms)) => DcPingResult { Ok(Ok(rtt_ms)) => DcPingResult {
@@ -1205,7 +1400,9 @@ impl UpstreamManager {
v4_results.push(ping_result); v4_results.push(ping_result);
} }
} }
Err(_) => warn!(dc = %dc_idx, addr = %addr_str, "Invalid dc_overrides address, skipping"), Err(_) => {
warn!(dc = %dc_idx, addr = %addr_str, "Invalid dc_overrides address, skipping")
}
} }
} }
} }
@@ -1381,12 +1578,8 @@ impl UpstreamManager {
ipv6_enabled: bool, ipv6_enabled: bool,
dc_overrides: HashMap<String, Vec<String>>, dc_overrides: HashMap<String, Vec<String>>,
) { ) {
let groups = Self::build_health_check_groups( let groups =
prefer_ipv6, Self::build_health_check_groups(prefer_ipv6, ipv4_enabled, ipv6_enabled, &dc_overrides);
ipv4_enabled,
ipv6_enabled,
&dc_overrides,
);
let required_healthy_groups = Self::required_healthy_group_count(groups.len()); let required_healthy_groups = Self::required_healthy_group_count(groups.len());
let mut endpoint_rotation: HashMap<(usize, i16, bool), usize> = HashMap::new(); let mut endpoint_rotation: HashMap<(usize, i16, bool), usize> = HashMap::new();
@@ -1416,13 +1609,16 @@ impl UpstreamManager {
let mut group_ok = false; let mut group_ok = false;
let mut group_rtt_ms = None; let mut group_rtt_ms = None;
for (is_primary, endpoints) in [(true, &group.primary), (false, &group.fallback)] { for (is_primary, endpoints) in
[(true, &group.primary), (false, &group.fallback)]
{
if endpoints.is_empty() { if endpoints.is_empty() {
continue; continue;
} }
let rotation_key = (i, group.dc_idx, is_primary); let rotation_key = (i, group.dc_idx, is_primary);
let start_idx = *endpoint_rotation.entry(rotation_key).or_insert(0) % endpoints.len(); let start_idx =
*endpoint_rotation.entry(rotation_key).or_insert(0) % endpoints.len();
let mut next_idx = (start_idx + 1) % endpoints.len(); let mut next_idx = (start_idx + 1) % endpoints.len();
for step in 0..endpoints.len() { for step in 0..endpoints.len() {
@@ -1544,8 +1740,7 @@ impl UpstreamManager {
return None; return None;
} }
UpstreamState::dc_array_idx(dc_idx) UpstreamState::dc_array_idx(dc_idx).map(|idx| guard[0].dc_ip_pref[idx])
.map(|idx| guard[0].dc_ip_pref[idx])
} }
/// Get preferred DC address based on config preference /// Get preferred DC address based on config preference
@@ -1566,6 +1761,12 @@ impl UpstreamManager {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use std::sync::Arc;
use crate::stats::Stats;
const TEST_SHADOWSOCKS_URL: &str =
"ss://2022-blake3-aes-256-gcm:MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTIzNDU2Nzg5MDE=@127.0.0.1:8388";
#[test] #[test]
fn required_healthy_group_count_applies_three_group_threshold() { fn required_healthy_group_count_applies_three_group_threshold() {
@@ -1596,15 +1797,18 @@ mod tests {
assert!(dc2.primary.iter().all(|addr| addr.is_ipv6())); assert!(dc2.primary.iter().all(|addr| addr.is_ipv6()));
assert!(dc2.fallback.iter().all(|addr| addr.is_ipv4())); assert!(dc2.fallback.iter().all(|addr| addr.is_ipv4()));
assert!(dc2 assert!(
.primary dc2.primary
.contains(&"[2001:db8::10]:443".parse::<SocketAddr>().unwrap())); .contains(&"[2001:db8::10]:443".parse::<SocketAddr>().unwrap())
assert!(dc2 );
.fallback assert!(
.contains(&"203.0.113.10:443".parse::<SocketAddr>().unwrap())); dc2.fallback
assert!(dc2 .contains(&"203.0.113.10:443".parse::<SocketAddr>().unwrap())
.fallback );
.contains(&"203.0.113.11:443".parse::<SocketAddr>().unwrap())); assert!(
dc2.fallback
.contains(&"203.0.113.11:443".parse::<SocketAddr>().unwrap())
);
} }
#[test] #[test]
@@ -1626,12 +1830,14 @@ mod tests {
.expect("override-only dc group must be present"); .expect("override-only dc group must be present");
assert_eq!(dc9.primary.len(), 2); assert_eq!(dc9.primary.len(), 2);
assert!(dc9 assert!(
.primary dc9.primary
.contains(&"198.51.100.1:443".parse::<SocketAddr>().unwrap())); .contains(&"198.51.100.1:443".parse::<SocketAddr>().unwrap())
assert!(dc9 );
.primary assert!(
.contains(&"198.51.100.2:443".parse::<SocketAddr>().unwrap())); dc9.primary
.contains(&"198.51.100.2:443".parse::<SocketAddr>().unwrap())
);
assert!(dc9.fallback.is_empty()); assert!(dc9.fallback.is_empty());
} }
@@ -1678,4 +1884,36 @@ mod tests {
assert_eq!(bind, None); assert_eq!(bind, None);
} }
#[test]
fn api_snapshot_reports_shadowsocks_as_sanitized_route() {
let manager = UpstreamManager::new(
vec![UpstreamConfig {
upstream_type: UpstreamType::Shadowsocks {
url: TEST_SHADOWSOCKS_URL.to_string(),
interface: None,
},
weight: 2,
enabled: true,
scopes: String::new(),
selected_scope: String::new(),
}],
1,
100,
1000,
1,
false,
Arc::new(Stats::new()),
);
let snapshot = manager.try_api_snapshot().expect("snapshot");
assert_eq!(snapshot.summary.configured_total, 1);
assert_eq!(snapshot.summary.shadowsocks_total, 1);
assert_eq!(snapshot.upstreams.len(), 1);
assert_eq!(
snapshot.upstreams[0].route_kind,
UpstreamRouteKind::Shadowsocks
);
assert_eq!(snapshot.upstreams[0].address, "127.0.0.1:8388");
}
} }