mirror of
https://github.com/telemt/telemt.git
synced 2026-04-16 18:14:10 +03:00
Compare commits
66 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
5be81952f3 | ||
|
|
7ce2e33bae | ||
|
|
9e2f0af5be | ||
|
|
4d72cb1680 | ||
|
|
79eebeb9ef | ||
|
|
1045289539 | ||
|
|
3d0b32edf5 | ||
|
|
41601a40fc | ||
|
|
a2cc503e81 | ||
|
|
5ee4556cea | ||
|
|
487aa8fbce | ||
|
|
32a9405002 | ||
|
|
708bedc95e | ||
|
|
ce64bf1cee | ||
|
|
f4b79f2f79 | ||
|
|
9a907a2470 | ||
|
|
e6839adc17 | ||
|
|
5e98b35fb7 | ||
|
|
af35ad3923 | ||
|
|
8f47fa6dd8 | ||
|
|
453fb477db | ||
|
|
42ae148e78 | ||
|
|
a7e840c19b | ||
|
|
1593fc4e53 | ||
|
|
fc8010a861 | ||
|
|
7293b8eb32 | ||
|
|
6934faaf93 | ||
|
|
66fdc3a34d | ||
|
|
0c4d9301ec | ||
|
|
f7a7fb94d4 | ||
|
|
85fff5e30a | ||
|
|
fc28c1ad88 | ||
|
|
bb87a37686 | ||
|
|
bf2da8f5d8 | ||
|
|
2926b9f5c8 | ||
|
|
820ed8d346 | ||
|
|
e340b716b2 | ||
|
|
9edbbb692e | ||
|
|
356d64371a | ||
|
|
4be4670668 | ||
|
|
0768fee06a | ||
|
|
35ae455e2b | ||
|
|
433e6c9a20 | ||
|
|
34f5289fc3 | ||
|
|
97804d47ff | ||
|
|
b68e9d642e | ||
|
|
f31d9d42fe | ||
|
|
d941873cce | ||
|
|
b11a767741 | ||
|
|
301f829c3c | ||
|
|
76a02610d8 | ||
|
|
76bf5337e8 | ||
|
|
e76b388a05 | ||
|
|
f37e6cbe29 | ||
|
|
e54dce5366 | ||
|
|
c7464d53e1 | ||
|
|
03a6493147 | ||
|
|
36ef2f722d | ||
|
|
b9fda9e2c2 | ||
|
|
c5b590062c | ||
|
|
c0357b2890 | ||
|
|
4f7f7d6880 | ||
|
|
efba10f839 | ||
|
|
6ba12f35d0 | ||
|
|
6a57c23700 | ||
|
|
94b85afbc5 |
52
.github/workflows/release.yml
vendored
52
.github/workflows/release.yml
vendored
@@ -25,10 +25,16 @@ jobs:
|
|||||||
include:
|
include:
|
||||||
- target: x86_64-unknown-linux-gnu
|
- target: x86_64-unknown-linux-gnu
|
||||||
artifact_name: telemt
|
artifact_name: telemt
|
||||||
asset_name: telemt-x86_64-linux
|
asset_name: telemt-x86_64-linux-gnu
|
||||||
- target: aarch64-unknown-linux-gnu
|
- target: aarch64-unknown-linux-gnu
|
||||||
artifact_name: telemt
|
artifact_name: telemt
|
||||||
asset_name: telemt-aarch64-linux
|
asset_name: telemt-aarch64-linux-gnu
|
||||||
|
- target: x86_64-unknown-linux-musl
|
||||||
|
artifact_name: telemt
|
||||||
|
asset_name: telemt-x86_64-linux-musl
|
||||||
|
- target: aarch64-unknown-linux-musl
|
||||||
|
artifact_name: telemt
|
||||||
|
asset_name: telemt-aarch64-linux-musl
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout repository
|
- name: Checkout repository
|
||||||
@@ -56,12 +62,13 @@ jobs:
|
|||||||
restore-keys: |
|
restore-keys: |
|
||||||
${{ runner.os }}-${{ matrix.target }}-cargo-
|
${{ runner.os }}-${{ matrix.target }}-cargo-
|
||||||
|
|
||||||
|
- name: Install cross
|
||||||
|
run: cargo install cross --git https://github.com/cross-rs/cross
|
||||||
|
|
||||||
- name: Build Release
|
- name: Build Release
|
||||||
uses: actions-rs/cargo@ae10961054e4aa8bff448f48a500763b90d5c550 # v1.0.1
|
env:
|
||||||
with:
|
RUSTFLAGS: ${{ contains(matrix.target, 'musl') && '-C target-feature=+crt-static' || '' }}
|
||||||
use-cross: true
|
run: cross build --release --target ${{ matrix.target }}
|
||||||
command: build
|
|
||||||
args: --release --target ${{ matrix.target }}
|
|
||||||
|
|
||||||
- name: Package binary
|
- name: Package binary
|
||||||
run: |
|
run: |
|
||||||
@@ -85,13 +92,42 @@ jobs:
|
|||||||
contents: write
|
contents: write
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
|
||||||
|
with:
|
||||||
|
fetch-depth: 0
|
||||||
|
token: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Download all artifacts
|
- name: Download all artifacts
|
||||||
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
|
uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
|
||||||
with:
|
with:
|
||||||
path: artifacts
|
path: artifacts
|
||||||
|
|
||||||
|
- name: Update version in Cargo.toml and Cargo.lock
|
||||||
|
run: |
|
||||||
|
# Extract version from tag (remove 'v' prefix if present)
|
||||||
|
VERSION="${GITHUB_REF#refs/tags/}"
|
||||||
|
VERSION="${VERSION#v}"
|
||||||
|
|
||||||
|
# Install cargo-edit for version bumping
|
||||||
|
cargo install cargo-edit
|
||||||
|
|
||||||
|
# Update Cargo.toml version
|
||||||
|
cargo set-version "$VERSION"
|
||||||
|
|
||||||
|
# Configure git
|
||||||
|
git config user.name "github-actions[bot]"
|
||||||
|
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||||
|
|
||||||
|
# Commit and push changes
|
||||||
|
#git add Cargo.toml Cargo.lock
|
||||||
|
#git commit -m "chore: bump version to $VERSION" || echo "No changes to commit"
|
||||||
|
#git push origin HEAD:main
|
||||||
|
env:
|
||||||
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Create Release
|
- name: Create Release
|
||||||
uses: softprops/action-gh-release@c95fe1489396fe360a41fb53f90de6ddce8c4c8a # v2.2.1
|
uses: softprops/action-gh-release@v2
|
||||||
with:
|
with:
|
||||||
files: artifacts/**/*
|
files: artifacts/**/*
|
||||||
generate_release_notes: true
|
generate_release_notes: true
|
||||||
|
|||||||
13
CONTRIBUTING.md
Normal file
13
CONTRIBUTING.md
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
## Pull Requests - Rules
|
||||||
|
- ONLY signed and verified commits
|
||||||
|
- ONLY from your name
|
||||||
|
- DO NOT commit with `codex` or `claude` as author/committer
|
||||||
|
- PREFER `flow` branch for development, not `main`
|
||||||
|
|
||||||
|
### AI
|
||||||
|
We are not against modern tools, like AI, where you act as a principal or architect, but we consider it important:
|
||||||
|
|
||||||
|
- you really understand what you're doing
|
||||||
|
- you understand the relationships and dependencies of the components being modified
|
||||||
|
- you understand the architecture of Telegram MTProto, MTProxy, Middle-End KDF at least generically
|
||||||
|
- you DO NOT commit for the sake of commits, but to help the community, core-developers and ordinary users
|
||||||
@@ -1,6 +1,6 @@
|
|||||||
[package]
|
[package]
|
||||||
name = "telemt"
|
name = "telemt"
|
||||||
version = "3.0.3"
|
version = "3.0.5"
|
||||||
edition = "2024"
|
edition = "2024"
|
||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
@@ -24,6 +24,7 @@ zeroize = { version = "1.8", features = ["derive"] }
|
|||||||
|
|
||||||
# Network
|
# Network
|
||||||
socket2 = { version = "0.5", features = ["all"] }
|
socket2 = { version = "0.5", features = ["all"] }
|
||||||
|
nix = { version = "0.28", default-features = false, features = ["net"] }
|
||||||
|
|
||||||
# Serialization
|
# Serialization
|
||||||
serde = { version = "1.0", features = ["derive"] }
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
@@ -47,6 +48,7 @@ regex = "1.11"
|
|||||||
crossbeam-queue = "0.3"
|
crossbeam-queue = "0.3"
|
||||||
num-bigint = "0.4"
|
num-bigint = "0.4"
|
||||||
num-traits = "0.2"
|
num-traits = "0.2"
|
||||||
|
anyhow = "1.0"
|
||||||
|
|
||||||
# HTTP
|
# HTTP
|
||||||
reqwest = { version = "0.12", features = ["rustls-tls"], default-features = false }
|
reqwest = { version = "0.12", features = ["rustls-tls"], default-features = false }
|
||||||
@@ -54,6 +56,9 @@ hyper = { version = "1", features = ["server", "http1"] }
|
|||||||
hyper-util = { version = "0.1", features = ["tokio", "server-auto"] }
|
hyper-util = { version = "0.1", features = ["tokio", "server-auto"] }
|
||||||
http-body-util = "0.1"
|
http-body-util = "0.1"
|
||||||
httpdate = "1.0"
|
httpdate = "1.0"
|
||||||
|
tokio-rustls = { version = "0.26", default-features = false, features = ["tls12"] }
|
||||||
|
rustls = { version = "0.23", default-features = false, features = ["std", "tls12", "ring"] }
|
||||||
|
webpki-roots = "0.26"
|
||||||
|
|
||||||
[dev-dependencies]
|
[dev-dependencies]
|
||||||
tokio-test = "0.4"
|
tokio-test = "0.4"
|
||||||
|
|||||||
17
LICENSING.md
Normal file
17
LICENSING.md
Normal file
@@ -0,0 +1,17 @@
|
|||||||
|
# LICENSING
|
||||||
|
## Licenses for Versions
|
||||||
|
| Version | License |
|
||||||
|
|---------|---------------|
|
||||||
|
| 1.0 | NO LICENSE |
|
||||||
|
| 1.1 | NO LICENSE |
|
||||||
|
| 1.2 | NO LICENSE |
|
||||||
|
| 2.0 | NO LICENSE |
|
||||||
|
| 3.0 | TELEMT UL 1 |
|
||||||
|
|
||||||
|
### License Types
|
||||||
|
- **NO LICENSE** = ***ALL RIGHTS RESERVED***
|
||||||
|
- **TELEMT UL 1** - work-in-progress license for the source code of `telemt`, which encourages:
|
||||||
|
- fair use,
|
||||||
|
- contributions,
|
||||||
|
- distribution,
|
||||||
|
- but prohibits NOT mentioning the authors
|
||||||
180
README.md
180
README.md
@@ -10,74 +10,40 @@
|
|||||||
|
|
||||||
### 🇷🇺 RU
|
### 🇷🇺 RU
|
||||||
|
|
||||||
15 февраля мы опубликовали `telemt 3` с поддержкой Middle-End Proxy, а значит:
|
18 февраля мы опубликовали `telemt 3.0.3`, он имеет:
|
||||||
|
|
||||||
- с функциональными медиа, в том числе с CDN/DC=203
|
- улучшенный механизм Middle-End Health Check
|
||||||
- с Ad-tag — показывайте спонсорский канал и собирайте статистику через официального бота
|
- высокоскоростное восстановление инициализации Middle-End
|
||||||
- с новым подходом к безопасности и асинхронности
|
- меньше задержек на hot-path
|
||||||
- с высокоточной диагностикой криптографии через `ME_DIAG`
|
- более корректную работу в Dualstack, а именно - IPv6 Middle-End
|
||||||
|
- аккуратное переподключение клиента без дрифта сессий между Middle-End
|
||||||
|
- автоматическая деградация на Direct-DC при массовой (>2 ME-DC-групп) недоступности Middle-End
|
||||||
|
- автодетект IP за NAT, при возможности - будет выполнен хендшейк с ME, при неудаче - автодеградация
|
||||||
|
- единственный известный специальный DC=203 уже добавлен в код: медиа загружаются с CDN в Direct-DC режиме
|
||||||
|
|
||||||
Для использования нужно:
|
[Здесь вы можете найти релиз](https://github.com/telemt/telemt/releases/tag/3.0.3)
|
||||||
|
|
||||||
1. Версия `telemt` ≥3.0.0
|
Если у вас есть компетенции в асинхронных сетевых приложениях, анализе трафика, реверс-инжиниринге или сетевых расследованиях - мы открыты к идеям и pull requests!
|
||||||
2. Выполнение любого из наборов условий:
|
|
||||||
- публичный IP для исходящих соединений установлен на интерфейса инстанса с `telemt`
|
|
||||||
- ЛИБО
|
|
||||||
- вы используете NAT 1:1 + включили STUN-пробинг
|
|
||||||
3. В конфиге, в секции `[general]` указать:
|
|
||||||
```toml
|
|
||||||
use_middle_proxy = true
|
|
||||||
```
|
|
||||||
|
|
||||||
Если условия из пункта 1 не выполняются:
|
|
||||||
1. Выключите ME-режим:
|
|
||||||
- установите `use_middle_proxy = false`
|
|
||||||
- ЛИБО
|
|
||||||
- Middle-End Proxy будет выключен автоматически по таймауту, но это займёт больше времени при запуске
|
|
||||||
2. В конфиге, добавьте в конец:
|
|
||||||
```toml
|
|
||||||
[dc_overrides]
|
|
||||||
"203" = "91.105.192.100:443"
|
|
||||||
```
|
|
||||||
|
|
||||||
Если у вас есть компетенции в асинхронных сетевых приложениях, анализе трафика, реверс-инжиниринге или сетевых расследованиях — мы открыты к идеям и pull requests.
|
|
||||||
|
|
||||||
</td>
|
</td>
|
||||||
<td width="50%" valign="top">
|
<td width="50%" valign="top">
|
||||||
|
|
||||||
### 🇬🇧 EN
|
### 🇬🇧 EN
|
||||||
|
|
||||||
On February 15, we released `telemt 3` with support for Middle-End Proxy, which means:
|
On February 18, we released `telemt 3.0.3`. This version introduces:
|
||||||
|
|
||||||
- functional media, including CDN/DC=203
|
- improved Middle-End Health Check method
|
||||||
- Ad-tag support – promote a sponsored channel and collect statistics via Telegram bot
|
- high-speed recovery of Middle-End init
|
||||||
- new approach to security and asynchronicity
|
- reduced latency on the hot path
|
||||||
- high-precision cryptography diagnostics via `ME_DIAG`
|
- correct Dualstack support: proper handling of IPv6 Middle-End
|
||||||
|
- *clean* client reconnection without session "drift" between Middle-End
|
||||||
|
- automatic degradation to Direct-DC mode in case of large-scale (>2 ME-DC groups) Middle-End unavailability
|
||||||
|
- automatic public IP detection behind NAT; first - Middle-End handshake is performed, otherwise automatic degradation is applied
|
||||||
|
- known special DC=203 is now handled natively: media is delivered from the CDN via Direct-DC mode
|
||||||
|
|
||||||
To use this feature, the following requirements must be met:
|
[Release is available here](https://github.com/telemt/telemt/releases/tag/3.0.3)
|
||||||
1. `telemt` version ≥ 3.0.0
|
|
||||||
2. One of the following conditions satisfied:
|
|
||||||
- the instance running `telemt` has a public IP address assigned to its network interface for outbound connections
|
|
||||||
- OR
|
|
||||||
- you are using 1:1 NAT and have STUN probing enabled
|
|
||||||
3. In the config file, under the `[general]` section, specify:
|
|
||||||
```toml
|
|
||||||
use_middle_proxy = true
|
|
||||||
```
|
|
||||||
|
|
||||||
If the conditions from step 1 are not satisfied:
|
If you have expertise in asynchronous network applications, traffic analysis, reverse engineering, or network forensics - we welcome ideas and pull requests!
|
||||||
1. Disable Middle-End mode:
|
|
||||||
- set `use_middle_proxy = false`
|
|
||||||
- OR
|
|
||||||
- Middle-End Proxy will be disabled automatically after a timeout, but this will increase startup time
|
|
||||||
|
|
||||||
2. In the config file, add the following at the end:
|
|
||||||
```toml
|
|
||||||
[dc_overrides]
|
|
||||||
"203" = "91.105.192.100:443"
|
|
||||||
```
|
|
||||||
|
|
||||||
If you have expertise in asynchronous network applications, traffic analysis, reverse engineering, or network forensics — we welcome ideas, suggestions, and pull requests.
|
|
||||||
|
|
||||||
</td>
|
</td>
|
||||||
</tr>
|
</tr>
|
||||||
@@ -86,7 +52,9 @@ If you have expertise in asynchronous network applications, traffic analysis, re
|
|||||||
# Features
|
# Features
|
||||||
💥 The configuration structure has changed since version 1.1.0.0. change it in your environment!
|
💥 The configuration structure has changed since version 1.1.0.0. change it in your environment!
|
||||||
|
|
||||||
⚓ Our implementation of **TLS-fronting** is one of the most deeply debugged, focused, advanced and *almost* **"behaviorally consistent to real"**: we are confident we have it right - [see evidence on our validation and traces](#recognizability-for-dpi-and-crawler)
|
⚓ Our implementation of **TLS-fronting** is one of the most deeply debugged, focused, advanced and *almost* **"behaviorally consistent to real"**: we are confident we have it right - [see evidence on our validation and traces](#recognizability-for-dpi-and-crawler)
|
||||||
|
|
||||||
|
⚓ Our ***Middle-End Pool*** is fastest by design in standard scenarios, compared to other implementations of connecting to the Middle-End Proxy: non dramatically, but usual
|
||||||
|
|
||||||
# GOTO
|
# GOTO
|
||||||
- [Features](#features)
|
- [Features](#features)
|
||||||
@@ -127,7 +95,7 @@ If you have expertise in asynchronous network applications, traffic analysis, re
|
|||||||
**This software is designed for Debian-based OS: in addition to Debian, these are Ubuntu, Mint, Kali, MX and many other Linux**
|
**This software is designed for Debian-based OS: in addition to Debian, these are Ubuntu, Mint, Kali, MX and many other Linux**
|
||||||
1. Download release
|
1. Download release
|
||||||
```bash
|
```bash
|
||||||
wget https://github.com/telemt/telemt/releases/latest/download/telemt
|
wget -qO- "https://github.com/telemt/telemt/releases/latest/download/telemt-$(uname -m)-linux-$(ldd --version 2>&1 | grep -iq musl && echo musl || echo gnu).tar.gz" | tar -xz
|
||||||
```
|
```
|
||||||
2. Move to Bin Folder
|
2. Move to Bin Folder
|
||||||
```bash
|
```bash
|
||||||
@@ -210,53 +178,97 @@ then Ctrl+X -> Y -> Enter to save
|
|||||||
```toml
|
```toml
|
||||||
# === General Settings ===
|
# === General Settings ===
|
||||||
[general]
|
[general]
|
||||||
# prefer_ipv6 is deprecated; use [network].prefer
|
|
||||||
prefer_ipv6 = false
|
|
||||||
fast_mode = true
|
fast_mode = true
|
||||||
use_middle_proxy = false
|
use_middle_proxy = true
|
||||||
# ad_tag = "..."
|
# ad_tag = "00000000000000000000000000000000"
|
||||||
|
# Path to proxy-secret binary (auto-downloaded if missing).
|
||||||
|
proxy_secret_path = "proxy-secret"
|
||||||
|
# disable_colors = false # Disable colored output in logs (useful for files/systemd)
|
||||||
|
|
||||||
[network]
|
# === Log Level ===
|
||||||
ipv4 = true
|
# Log level: debug | verbose | normal | silent
|
||||||
ipv6 = true # set false to disable, omit for auto
|
# Can be overridden with --silent or --log-level CLI flags
|
||||||
prefer = 4 # 4 or 6
|
# RUST_LOG env var takes absolute priority over all of these
|
||||||
multipath = false
|
log_level = "normal"
|
||||||
|
|
||||||
|
# === Middle Proxy - ME ===
|
||||||
|
# Public IP override for ME KDF when behind NAT; leave unset to auto-detect.
|
||||||
|
# middle_proxy_nat_ip = "203.0.113.10"
|
||||||
|
# Enable STUN probing to discover public IP:port for ME.
|
||||||
|
middle_proxy_nat_probe = true
|
||||||
|
# Primary STUN server (host:port); defaults to Telegram STUN when empty.
|
||||||
|
middle_proxy_nat_stun = "stun.l.google.com:19302"
|
||||||
|
# Optional fallback STUN servers list.
|
||||||
|
middle_proxy_nat_stun_servers = ["stun1.l.google.com:19302", "stun2.l.google.com:19302"]
|
||||||
|
# Desired number of concurrent ME writers in pool.
|
||||||
|
middle_proxy_pool_size = 16
|
||||||
|
# Pre-initialized warm-standby ME connections kept idle.
|
||||||
|
middle_proxy_warm_standby = 8
|
||||||
|
# Ignore STUN/interface mismatch and keep ME enabled even if IP differs.
|
||||||
|
stun_iface_mismatch_ignore = false
|
||||||
|
# Keepalive padding frames - fl==4
|
||||||
|
me_keepalive_enabled = true
|
||||||
|
me_keepalive_interval_secs = 25 # Period between keepalives
|
||||||
|
me_keepalive_jitter_secs = 5 # Jitter added to interval
|
||||||
|
me_keepalive_payload_random = true # Randomize 4-byte payload (vs zeros)
|
||||||
|
# Stagger extra ME connections on warmup to de-phase lifecycles.
|
||||||
|
me_warmup_stagger_enabled = true
|
||||||
|
me_warmup_step_delay_ms = 500 # Base delay between extra connects
|
||||||
|
me_warmup_step_jitter_ms = 300 # Jitter for warmup delay
|
||||||
|
# Reconnect policy knobs.
|
||||||
|
me_reconnect_max_concurrent_per_dc = 1 # Parallel reconnects per DC - EXPERIMENTAL! UNSTABLE!
|
||||||
|
me_reconnect_backoff_base_ms = 500 # Backoff start
|
||||||
|
me_reconnect_backoff_cap_ms = 30000 # Backoff cap
|
||||||
|
me_reconnect_fast_retry_count = 11 # Quick retries before backoff
|
||||||
|
|
||||||
[general.modes]
|
[general.modes]
|
||||||
classic = false
|
classic = false
|
||||||
secure = false
|
secure = false
|
||||||
tls = true
|
tls = true
|
||||||
|
|
||||||
|
[general.links]
|
||||||
|
show = "*"
|
||||||
|
# show = ["alice", "bob"] # Only show links for alice and bob
|
||||||
|
# show = "*" # Show links for all users
|
||||||
|
# public_host = "proxy.example.com" # Host (IP or domain) for tg:// links
|
||||||
|
# public_port = 443 # Port for tg:// links (default: server.port)
|
||||||
|
|
||||||
|
# === Network Parameters ===
|
||||||
|
[network]
|
||||||
|
# Enable/disable families: true/false/auto(None)
|
||||||
|
ipv4 = true
|
||||||
|
ipv6 = false # UNSTABLE WITH ME
|
||||||
|
# prefer = 4 or 6
|
||||||
|
prefer = 4
|
||||||
|
multipath = false # EXPERIMENTAL!
|
||||||
|
|
||||||
# === Server Binding ===
|
# === Server Binding ===
|
||||||
[server]
|
[server]
|
||||||
port = 443
|
port = 443
|
||||||
listen_addr_ipv4 = "0.0.0.0"
|
listen_addr_ipv4 = "0.0.0.0"
|
||||||
listen_addr_ipv6 = "::"
|
listen_addr_ipv6 = "::"
|
||||||
|
# listen_unix_sock = "/var/run/telemt.sock" # Unix socket
|
||||||
|
# listen_unix_sock_perm = "0666" # Socket file permissions
|
||||||
# metrics_port = 9090
|
# metrics_port = 9090
|
||||||
# metrics_whitelist = ["127.0.0.1", "::1"]
|
# metrics_whitelist = ["127.0.0.1", "::1"]
|
||||||
|
|
||||||
# Listen on multiple interfaces/IPs (overrides listen_addr_*)
|
# Listen on multiple interfaces/IPs - IPv4
|
||||||
[[server.listeners]]
|
[[server.listeners]]
|
||||||
ip = "0.0.0.0"
|
ip = "0.0.0.0"
|
||||||
# announce = "my.hostname.tld" # Optional: hostname for tg:// links
|
|
||||||
# OR
|
|
||||||
# announce = "1.2.3.4" # Optional: Public IP for tg:// links
|
|
||||||
|
|
||||||
|
# Listen on multiple interfaces/IPs - IPv6
|
||||||
[[server.listeners]]
|
[[server.listeners]]
|
||||||
ip = "::"
|
ip = "::"
|
||||||
|
|
||||||
# Users to show in the startup log (tg:// links)
|
|
||||||
[general.links]
|
|
||||||
show = ["hello"] # Users to show in the startup log (tg:// links)
|
|
||||||
# public_host = "proxy.example.com" # Host (IP or domain) for tg:// links
|
|
||||||
# public_port = 443 # Port for tg:// links (default: server.port)
|
|
||||||
|
|
||||||
# === Timeouts (in seconds) ===
|
# === Timeouts (in seconds) ===
|
||||||
[timeouts]
|
[timeouts]
|
||||||
client_handshake = 15
|
client_handshake = 30
|
||||||
tg_connect = 10
|
tg_connect = 10
|
||||||
client_keepalive = 60
|
client_keepalive = 60
|
||||||
client_ack = 300
|
client_ack = 300
|
||||||
|
# Quick ME reconnects for single-address DCs (count and per-attempt timeout, ms).
|
||||||
|
me_one_retry = 12
|
||||||
|
me_one_timeout_ms = 1200
|
||||||
|
|
||||||
# === Anti-Censorship & Masking ===
|
# === Anti-Censorship & Masking ===
|
||||||
[censorship]
|
[censorship]
|
||||||
@@ -268,9 +280,9 @@ mask_port = 443
|
|||||||
fake_cert_len = 2048
|
fake_cert_len = 2048
|
||||||
|
|
||||||
# === Access Control & Users ===
|
# === Access Control & Users ===
|
||||||
# username "hello" is used for example
|
|
||||||
[access]
|
[access]
|
||||||
replay_check_len = 65536
|
replay_check_len = 65536
|
||||||
|
replay_window_secs = 1800
|
||||||
ignore_time_skew = false
|
ignore_time_skew = false
|
||||||
|
|
||||||
[access.users]
|
[access.users]
|
||||||
@@ -280,28 +292,28 @@ hello = "00000000000000000000000000000000"
|
|||||||
# [access.user_max_tcp_conns]
|
# [access.user_max_tcp_conns]
|
||||||
# hello = 50
|
# hello = 50
|
||||||
|
|
||||||
|
# [access.user_max_unique_ips]
|
||||||
|
# hello = 5
|
||||||
|
|
||||||
# [access.user_data_quota]
|
# [access.user_data_quota]
|
||||||
# hello = 1073741824 # 1 GB
|
# hello = 1073741824 # 1 GB
|
||||||
|
|
||||||
# === Upstreams & Routing ===
|
# === Upstreams & Routing ===
|
||||||
# By default, direct connection is used, but you can add SOCKS proxy
|
|
||||||
|
|
||||||
# Direct - Default
|
|
||||||
[[upstreams]]
|
[[upstreams]]
|
||||||
type = "direct"
|
type = "direct"
|
||||||
enabled = true
|
enabled = true
|
||||||
weight = 10
|
weight = 10
|
||||||
|
|
||||||
# SOCKS5
|
|
||||||
# [[upstreams]]
|
# [[upstreams]]
|
||||||
# type = "socks5"
|
# type = "socks5"
|
||||||
# address = "127.0.0.1:9050"
|
# address = "127.0.0.1:1080"
|
||||||
# enabled = false
|
# enabled = false
|
||||||
# weight = 1
|
# weight = 1
|
||||||
|
|
||||||
# === DC Address Overrides ===
|
# === DC Address Overrides ===
|
||||||
# [dc_overrides]
|
# [dc_overrides]
|
||||||
# "203" = "91.105.192.100:443"
|
# "203" = "91.105.192.100:443"
|
||||||
|
|
||||||
```
|
```
|
||||||
### Advanced
|
### Advanced
|
||||||
#### Adtag
|
#### Adtag
|
||||||
|
|||||||
83
config.toml
83
config.toml
@@ -1,29 +1,69 @@
|
|||||||
# === General Settings ===
|
# === General Settings ===
|
||||||
[general]
|
[general]
|
||||||
# prefer_ipv6 is deprecated; use [network].prefer instead
|
|
||||||
prefer_ipv6 = false
|
|
||||||
fast_mode = true
|
fast_mode = true
|
||||||
use_middle_proxy = true
|
use_middle_proxy = true
|
||||||
#ad_tag = "00000000000000000000000000000000"
|
# ad_tag = "00000000000000000000000000000000"
|
||||||
|
# Path to proxy-secret binary (auto-downloaded if missing).
|
||||||
[network]
|
proxy_secret_path = "proxy-secret"
|
||||||
# Enable/disable families; ipv6 = true/false/auto(None)
|
# disable_colors = false # Disable colored output in logs (useful for files/systemd)
|
||||||
ipv4 = true
|
|
||||||
ipv6 = true
|
|
||||||
# prefer = 4 or 6
|
|
||||||
prefer = 4
|
|
||||||
multipath = false
|
|
||||||
|
|
||||||
|
# === Log Level ===
|
||||||
# Log level: debug | verbose | normal | silent
|
# Log level: debug | verbose | normal | silent
|
||||||
# Can be overridden with --silent or --log-level CLI flags
|
# Can be overridden with --silent or --log-level CLI flags
|
||||||
# RUST_LOG env var takes absolute priority over all of these
|
# RUST_LOG env var takes absolute priority over all of these
|
||||||
log_level = "normal"
|
log_level = "normal"
|
||||||
|
|
||||||
|
# === Middle Proxy - ME ===
|
||||||
|
# Public IP override for ME KDF when behind NAT; leave unset to auto-detect.
|
||||||
|
# middle_proxy_nat_ip = "203.0.113.10"
|
||||||
|
# Enable STUN probing to discover public IP:port for ME.
|
||||||
|
middle_proxy_nat_probe = true
|
||||||
|
# Primary STUN server (host:port); defaults to Telegram STUN when empty.
|
||||||
|
middle_proxy_nat_stun = "stun.l.google.com:19302"
|
||||||
|
# Optional fallback STUN servers list.
|
||||||
|
middle_proxy_nat_stun_servers = ["stun1.l.google.com:19302", "stun2.l.google.com:19302"]
|
||||||
|
# Desired number of concurrent ME writers in pool.
|
||||||
|
middle_proxy_pool_size = 16
|
||||||
|
# Pre-initialized warm-standby ME connections kept idle.
|
||||||
|
middle_proxy_warm_standby = 8
|
||||||
|
# Ignore STUN/interface mismatch and keep ME enabled even if IP differs.
|
||||||
|
stun_iface_mismatch_ignore = false
|
||||||
|
# Keepalive padding frames - fl==4
|
||||||
|
me_keepalive_enabled = true
|
||||||
|
me_keepalive_interval_secs = 25 # Period between keepalives
|
||||||
|
me_keepalive_jitter_secs = 5 # Jitter added to interval
|
||||||
|
me_keepalive_payload_random = true # Randomize 4-byte payload (vs zeros)
|
||||||
|
# Stagger extra ME connections on warmup to de-phase lifecycles.
|
||||||
|
me_warmup_stagger_enabled = true
|
||||||
|
me_warmup_step_delay_ms = 500 # Base delay between extra connects
|
||||||
|
me_warmup_step_jitter_ms = 300 # Jitter for warmup delay
|
||||||
|
# Reconnect policy knobs.
|
||||||
|
me_reconnect_max_concurrent_per_dc = 1 # Parallel reconnects per DC - EXPERIMENTAL! UNSTABLE!
|
||||||
|
me_reconnect_backoff_base_ms = 500 # Backoff start
|
||||||
|
me_reconnect_backoff_cap_ms = 30000 # Backoff cap
|
||||||
|
me_reconnect_fast_retry_count = 11 # Quick retries before backoff
|
||||||
|
|
||||||
[general.modes]
|
[general.modes]
|
||||||
classic = false
|
classic = false
|
||||||
secure = false
|
secure = false
|
||||||
tls = true
|
tls = true
|
||||||
|
|
||||||
|
[general.links]
|
||||||
|
show = "*"
|
||||||
|
# show = ["alice", "bob"] # Only show links for alice and bob
|
||||||
|
# show = "*" # Show links for all users
|
||||||
|
# public_host = "proxy.example.com" # Host (IP or domain) for tg:// links
|
||||||
|
# public_port = 443 # Port for tg:// links (default: server.port)
|
||||||
|
|
||||||
|
# === Network Parameters ===
|
||||||
|
[network]
|
||||||
|
# Enable/disable families: true/false/auto(None)
|
||||||
|
ipv4 = true
|
||||||
|
ipv6 = false # UNSTABLE WITH ME
|
||||||
|
# prefer = 4 or 6
|
||||||
|
prefer = 4
|
||||||
|
multipath = false # EXPERIMENTAL!
|
||||||
|
|
||||||
# === Server Binding ===
|
# === Server Binding ===
|
||||||
[server]
|
[server]
|
||||||
port = 443
|
port = 443
|
||||||
@@ -31,38 +71,39 @@ listen_addr_ipv4 = "0.0.0.0"
|
|||||||
listen_addr_ipv6 = "::"
|
listen_addr_ipv6 = "::"
|
||||||
# listen_unix_sock = "/var/run/telemt.sock" # Unix socket
|
# listen_unix_sock = "/var/run/telemt.sock" # Unix socket
|
||||||
# listen_unix_sock_perm = "0666" # Socket file permissions
|
# listen_unix_sock_perm = "0666" # Socket file permissions
|
||||||
|
# proxy_protocol = false # Enable if behind HAProxy/nginx with PROXY protocol
|
||||||
# metrics_port = 9090
|
# metrics_port = 9090
|
||||||
# metrics_whitelist = ["127.0.0.1", "::1"]
|
# metrics_whitelist = ["127.0.0.1", "::1"]
|
||||||
|
|
||||||
# Listen on multiple interfaces/IPs (overrides listen_addr_*)
|
# Listen on multiple interfaces/IPs - IPv4
|
||||||
[[server.listeners]]
|
[[server.listeners]]
|
||||||
ip = "0.0.0.0"
|
ip = "0.0.0.0"
|
||||||
# announce_ip = "1.2.3.4" # Optional: Public IP for tg:// links
|
|
||||||
|
|
||||||
|
# Listen on multiple interfaces/IPs - IPv6
|
||||||
[[server.listeners]]
|
[[server.listeners]]
|
||||||
ip = "::"
|
ip = "::"
|
||||||
|
|
||||||
# Users to show in the startup log (tg:// links)
|
|
||||||
[general.links]
|
|
||||||
show = ["hello"] # Users to show in the startup log (tg:// links)
|
|
||||||
# public_host = "proxy.example.com" # Host (IP or domain) for tg:// links
|
|
||||||
# public_port = 443 # Port for tg:// links (default: server.port)
|
|
||||||
|
|
||||||
# === Timeouts (in seconds) ===
|
# === Timeouts (in seconds) ===
|
||||||
[timeouts]
|
[timeouts]
|
||||||
client_handshake = 15
|
client_handshake = 30
|
||||||
tg_connect = 10
|
tg_connect = 10
|
||||||
client_keepalive = 60
|
client_keepalive = 60
|
||||||
client_ack = 300
|
client_ack = 300
|
||||||
|
# Quick ME reconnects for single-address DCs (count and per-attempt timeout, ms).
|
||||||
|
me_one_retry = 12
|
||||||
|
me_one_timeout_ms = 1200
|
||||||
|
|
||||||
# === Anti-Censorship & Masking ===
|
# === Anti-Censorship & Masking ===
|
||||||
[censorship]
|
[censorship]
|
||||||
tls_domain = "petrovich.ru"
|
tls_domain = "petrovich.ru"
|
||||||
|
# tls_domains = ["example.com", "cdn.example.net"] # Additional domains for EE links
|
||||||
mask = true
|
mask = true
|
||||||
mask_port = 443
|
mask_port = 443
|
||||||
# mask_host = "petrovich.ru" # Defaults to tls_domain if not set
|
# mask_host = "petrovich.ru" # Defaults to tls_domain if not set
|
||||||
# mask_unix_sock = "/var/run/nginx.sock" # Unix socket (mutually exclusive with mask_host)
|
# mask_unix_sock = "/var/run/nginx.sock" # Unix socket (mutually exclusive with mask_host)
|
||||||
fake_cert_len = 2048
|
fake_cert_len = 2048
|
||||||
|
# tls_emulation = false # Fetch real cert lengths and emulate TLS records
|
||||||
|
# tls_front_dir = "tlsfront" # Cache directory for TLS emulation
|
||||||
|
|
||||||
# === Access Control & Users ===
|
# === Access Control & Users ===
|
||||||
[access]
|
[access]
|
||||||
@@ -88,6 +129,8 @@ hello = "00000000000000000000000000000000"
|
|||||||
type = "direct"
|
type = "direct"
|
||||||
enabled = true
|
enabled = true
|
||||||
weight = 10
|
weight = 10
|
||||||
|
# interface = "192.168.1.100" # Bind outgoing to specific IP or iface name
|
||||||
|
# bind_addresses = ["192.168.1.100"] # List for round-robin binding (family must match target)
|
||||||
|
|
||||||
# [[upstreams]]
|
# [[upstreams]]
|
||||||
# type = "socks5"
|
# type = "socks5"
|
||||||
|
|||||||
@@ -23,6 +23,10 @@ pub(crate) fn default_fake_cert_len() -> usize {
|
|||||||
2048
|
2048
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_tls_front_dir() -> String {
|
||||||
|
"tlsfront".to_string()
|
||||||
|
}
|
||||||
|
|
||||||
pub(crate) fn default_replay_check_len() -> usize {
|
pub(crate) fn default_replay_check_len() -> usize {
|
||||||
65_536
|
65_536
|
||||||
}
|
}
|
||||||
@@ -74,6 +78,34 @@ pub(crate) fn default_unknown_dc_log_path() -> Option<String> {
|
|||||||
Some("unknown-dc.txt".to_string())
|
Some("unknown-dc.txt".to_string())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_pool_size() -> usize {
|
||||||
|
2
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_keepalive_interval() -> u64 {
|
||||||
|
25
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_keepalive_jitter() -> u64 {
|
||||||
|
5
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_warmup_step_delay_ms() -> u64 {
|
||||||
|
500
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_warmup_step_jitter_ms() -> u64 {
|
||||||
|
300
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_reconnect_backoff_base_ms() -> u64 {
|
||||||
|
500
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) fn default_reconnect_backoff_cap_ms() -> u64 {
|
||||||
|
30_000
|
||||||
|
}
|
||||||
|
|
||||||
// Custom deserializer helpers
|
// Custom deserializer helpers
|
||||||
|
|
||||||
#[derive(Deserialize)]
|
#[derive(Deserialize)]
|
||||||
|
|||||||
@@ -11,6 +11,32 @@ use crate::error::{ProxyError, Result};
|
|||||||
use super::defaults::*;
|
use super::defaults::*;
|
||||||
use super::types::*;
|
use super::types::*;
|
||||||
|
|
||||||
|
fn preprocess_includes(content: &str, base_dir: &Path, depth: u8) -> Result<String> {
|
||||||
|
if depth > 10 {
|
||||||
|
return Err(ProxyError::Config("Include depth > 10".into()));
|
||||||
|
}
|
||||||
|
let mut output = String::with_capacity(content.len());
|
||||||
|
for line in content.lines() {
|
||||||
|
let trimmed = line.trim();
|
||||||
|
if let Some(rest) = trimmed.strip_prefix("include") {
|
||||||
|
let rest = rest.trim();
|
||||||
|
if let Some(rest) = rest.strip_prefix('=') {
|
||||||
|
let path_str = rest.trim().trim_matches('"');
|
||||||
|
let resolved = base_dir.join(path_str);
|
||||||
|
let included = std::fs::read_to_string(&resolved)
|
||||||
|
.map_err(|e| ProxyError::Config(e.to_string()))?;
|
||||||
|
let included_dir = resolved.parent().unwrap_or(base_dir);
|
||||||
|
output.push_str(&preprocess_includes(&included, included_dir, depth + 1)?);
|
||||||
|
output.push('\n');
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
output.push_str(line);
|
||||||
|
output.push('\n');
|
||||||
|
}
|
||||||
|
Ok(output)
|
||||||
|
}
|
||||||
|
|
||||||
fn validate_network_cfg(net: &mut NetworkConfig) -> Result<()> {
|
fn validate_network_cfg(net: &mut NetworkConfig) -> Result<()> {
|
||||||
if !net.ipv4 && matches!(net.ipv6, Some(false)) {
|
if !net.ipv4 && matches!(net.ipv6, Some(false)) {
|
||||||
return Err(ProxyError::Config(
|
return Err(ProxyError::Config(
|
||||||
@@ -84,10 +110,12 @@ pub struct ProxyConfig {
|
|||||||
impl ProxyConfig {
|
impl ProxyConfig {
|
||||||
pub fn load<P: AsRef<Path>>(path: P) -> Result<Self> {
|
pub fn load<P: AsRef<Path>>(path: P) -> Result<Self> {
|
||||||
let content =
|
let content =
|
||||||
std::fs::read_to_string(path).map_err(|e| ProxyError::Config(e.to_string()))?;
|
std::fs::read_to_string(&path).map_err(|e| ProxyError::Config(e.to_string()))?;
|
||||||
|
let base_dir = path.as_ref().parent().unwrap_or(Path::new("."));
|
||||||
|
let processed = preprocess_includes(&content, base_dir, 0)?;
|
||||||
|
|
||||||
let mut config: ProxyConfig =
|
let mut config: ProxyConfig =
|
||||||
toml::from_str(&content).map_err(|e| ProxyError::Config(e.to_string()))?;
|
toml::from_str(&processed).map_err(|e| ProxyError::Config(e.to_string()))?;
|
||||||
|
|
||||||
// Validate secrets.
|
// Validate secrets.
|
||||||
for (user, secret) in &config.access.users {
|
for (user, secret) in &config.access.users {
|
||||||
@@ -135,6 +163,21 @@ impl ProxyConfig {
|
|||||||
config.censorship.mask_host = Some(config.censorship.tls_domain.clone());
|
config.censorship.mask_host = Some(config.censorship.tls_domain.clone());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Merge primary + extra TLS domains, deduplicate (primary always first).
|
||||||
|
if !config.censorship.tls_domains.is_empty() {
|
||||||
|
let mut all = Vec::with_capacity(1 + config.censorship.tls_domains.len());
|
||||||
|
all.push(config.censorship.tls_domain.clone());
|
||||||
|
for d in std::mem::take(&mut config.censorship.tls_domains) {
|
||||||
|
if !d.is_empty() && !all.contains(&d) {
|
||||||
|
all.push(d);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// keep primary as tls_domain; store remaining back to tls_domains
|
||||||
|
if all.len() > 1 {
|
||||||
|
config.censorship.tls_domains = all[1..].to_vec();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Migration: prefer_ipv6 -> network.prefer.
|
// Migration: prefer_ipv6 -> network.prefer.
|
||||||
if config.general.prefer_ipv6 {
|
if config.general.prefer_ipv6 {
|
||||||
if config.network.prefer == 4 {
|
if config.network.prefer == 4 {
|
||||||
@@ -151,8 +194,10 @@ impl ProxyConfig {
|
|||||||
|
|
||||||
validate_network_cfg(&mut config.network)?;
|
validate_network_cfg(&mut config.network)?;
|
||||||
|
|
||||||
// Random fake_cert_len.
|
// Random fake_cert_len only when default is in use.
|
||||||
config.censorship.fake_cert_len = rand::rng().gen_range(1024..4096);
|
if !config.censorship.tls_emulation && config.censorship.fake_cert_len == default_fake_cert_len() {
|
||||||
|
config.censorship.fake_cert_len = rand::rng().gen_range(1024..4096);
|
||||||
|
}
|
||||||
|
|
||||||
// Resolve listen_tcp: explicit value wins, otherwise auto-detect.
|
// Resolve listen_tcp: explicit value wins, otherwise auto-detect.
|
||||||
// If unix socket is set → TCP only when listen_addr_ipv4 or listeners are explicitly provided.
|
// If unix socket is set → TCP only when listen_addr_ipv4 or listeners are explicitly provided.
|
||||||
@@ -205,9 +250,11 @@ impl ProxyConfig {
|
|||||||
// Migration: Populate upstreams if empty (Default Direct).
|
// Migration: Populate upstreams if empty (Default Direct).
|
||||||
if config.upstreams.is_empty() {
|
if config.upstreams.is_empty() {
|
||||||
config.upstreams.push(UpstreamConfig {
|
config.upstreams.push(UpstreamConfig {
|
||||||
upstream_type: UpstreamType::Direct { interface: None },
|
upstream_type: UpstreamType::Direct { interface: None, bind_addresses: None },
|
||||||
weight: 1,
|
weight: 1,
|
||||||
enabled: true,
|
enabled: true,
|
||||||
|
scopes: String::new(),
|
||||||
|
selected_scope: String::new(),
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -143,6 +143,62 @@ pub struct GeneralConfig {
|
|||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub middle_proxy_nat_stun: Option<String>,
|
pub middle_proxy_nat_stun: Option<String>,
|
||||||
|
|
||||||
|
/// Optional list of STUN servers for NAT probing fallback.
|
||||||
|
#[serde(default)]
|
||||||
|
pub middle_proxy_nat_stun_servers: Vec<String>,
|
||||||
|
|
||||||
|
/// Desired size of active Middle-Proxy writer pool.
|
||||||
|
#[serde(default = "default_pool_size")]
|
||||||
|
pub middle_proxy_pool_size: usize,
|
||||||
|
|
||||||
|
/// Number of warm standby ME connections kept pre-initialized.
|
||||||
|
#[serde(default)]
|
||||||
|
pub middle_proxy_warm_standby: usize,
|
||||||
|
|
||||||
|
/// Enable ME keepalive padding frames.
|
||||||
|
#[serde(default = "default_true")]
|
||||||
|
pub me_keepalive_enabled: bool,
|
||||||
|
|
||||||
|
/// Keepalive interval in seconds.
|
||||||
|
#[serde(default = "default_keepalive_interval")]
|
||||||
|
pub me_keepalive_interval_secs: u64,
|
||||||
|
|
||||||
|
/// Keepalive jitter in seconds.
|
||||||
|
#[serde(default = "default_keepalive_jitter")]
|
||||||
|
pub me_keepalive_jitter_secs: u64,
|
||||||
|
|
||||||
|
/// Keepalive payload randomized (4 bytes); otherwise zeros.
|
||||||
|
#[serde(default = "default_true")]
|
||||||
|
pub me_keepalive_payload_random: bool,
|
||||||
|
|
||||||
|
/// Enable staggered warmup of extra ME writers.
|
||||||
|
#[serde(default = "default_true")]
|
||||||
|
pub me_warmup_stagger_enabled: bool,
|
||||||
|
|
||||||
|
/// Base delay between warmup connections in ms.
|
||||||
|
#[serde(default = "default_warmup_step_delay_ms")]
|
||||||
|
pub me_warmup_step_delay_ms: u64,
|
||||||
|
|
||||||
|
/// Jitter for warmup delay in ms.
|
||||||
|
#[serde(default = "default_warmup_step_jitter_ms")]
|
||||||
|
pub me_warmup_step_jitter_ms: u64,
|
||||||
|
|
||||||
|
/// Max concurrent reconnect attempts per DC.
|
||||||
|
#[serde(default)]
|
||||||
|
pub me_reconnect_max_concurrent_per_dc: u32,
|
||||||
|
|
||||||
|
/// Base backoff in ms for reconnect.
|
||||||
|
#[serde(default = "default_reconnect_backoff_base_ms")]
|
||||||
|
pub me_reconnect_backoff_base_ms: u64,
|
||||||
|
|
||||||
|
/// Cap backoff in ms for reconnect.
|
||||||
|
#[serde(default = "default_reconnect_backoff_cap_ms")]
|
||||||
|
pub me_reconnect_backoff_cap_ms: u64,
|
||||||
|
|
||||||
|
/// Fast retry attempts before backoff.
|
||||||
|
#[serde(default)]
|
||||||
|
pub me_reconnect_fast_retry_count: u32,
|
||||||
|
|
||||||
/// Ignore STUN/interface IP mismatch (keep using Middle Proxy even if NAT detected).
|
/// Ignore STUN/interface IP mismatch (keep using Middle Proxy even if NAT detected).
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub stun_iface_mismatch_ignore: bool,
|
pub stun_iface_mismatch_ignore: bool,
|
||||||
@@ -175,6 +231,20 @@ impl Default for GeneralConfig {
|
|||||||
middle_proxy_nat_ip: None,
|
middle_proxy_nat_ip: None,
|
||||||
middle_proxy_nat_probe: false,
|
middle_proxy_nat_probe: false,
|
||||||
middle_proxy_nat_stun: None,
|
middle_proxy_nat_stun: None,
|
||||||
|
middle_proxy_nat_stun_servers: Vec::new(),
|
||||||
|
middle_proxy_pool_size: default_pool_size(),
|
||||||
|
middle_proxy_warm_standby: 0,
|
||||||
|
me_keepalive_enabled: true,
|
||||||
|
me_keepalive_interval_secs: default_keepalive_interval(),
|
||||||
|
me_keepalive_jitter_secs: default_keepalive_jitter(),
|
||||||
|
me_keepalive_payload_random: true,
|
||||||
|
me_warmup_stagger_enabled: true,
|
||||||
|
me_warmup_step_delay_ms: default_warmup_step_delay_ms(),
|
||||||
|
me_warmup_step_jitter_ms: default_warmup_step_jitter_ms(),
|
||||||
|
me_reconnect_max_concurrent_per_dc: 1,
|
||||||
|
me_reconnect_backoff_base_ms: default_reconnect_backoff_base_ms(),
|
||||||
|
me_reconnect_backoff_cap_ms: default_reconnect_backoff_cap_ms(),
|
||||||
|
me_reconnect_fast_retry_count: 1,
|
||||||
stun_iface_mismatch_ignore: false,
|
stun_iface_mismatch_ignore: false,
|
||||||
unknown_dc_log_path: default_unknown_dc_log_path(),
|
unknown_dc_log_path: default_unknown_dc_log_path(),
|
||||||
log_level: LogLevel::Normal,
|
log_level: LogLevel::Normal,
|
||||||
@@ -225,6 +295,11 @@ pub struct ServerConfig {
|
|||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub listen_tcp: Option<bool>,
|
pub listen_tcp: Option<bool>,
|
||||||
|
|
||||||
|
/// Accept HAProxy PROXY protocol headers on incoming connections.
|
||||||
|
/// When enabled, real client IPs are extracted from PROXY v1/v2 headers.
|
||||||
|
#[serde(default)]
|
||||||
|
pub proxy_protocol: bool,
|
||||||
|
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
pub metrics_port: Option<u16>,
|
pub metrics_port: Option<u16>,
|
||||||
|
|
||||||
@@ -244,6 +319,7 @@ impl Default for ServerConfig {
|
|||||||
listen_unix_sock: None,
|
listen_unix_sock: None,
|
||||||
listen_unix_sock_perm: None,
|
listen_unix_sock_perm: None,
|
||||||
listen_tcp: None,
|
listen_tcp: None,
|
||||||
|
proxy_protocol: false,
|
||||||
metrics_port: None,
|
metrics_port: None,
|
||||||
metrics_whitelist: default_metrics_whitelist(),
|
metrics_whitelist: default_metrics_whitelist(),
|
||||||
listeners: Vec::new(),
|
listeners: Vec::new(),
|
||||||
@@ -292,6 +368,10 @@ pub struct AntiCensorshipConfig {
|
|||||||
#[serde(default = "default_tls_domain")]
|
#[serde(default = "default_tls_domain")]
|
||||||
pub tls_domain: String,
|
pub tls_domain: String,
|
||||||
|
|
||||||
|
/// Additional TLS domains for generating multiple proxy links.
|
||||||
|
#[serde(default)]
|
||||||
|
pub tls_domains: Vec<String>,
|
||||||
|
|
||||||
#[serde(default = "default_true")]
|
#[serde(default = "default_true")]
|
||||||
pub mask: bool,
|
pub mask: bool,
|
||||||
|
|
||||||
@@ -306,17 +386,28 @@ pub struct AntiCensorshipConfig {
|
|||||||
|
|
||||||
#[serde(default = "default_fake_cert_len")]
|
#[serde(default = "default_fake_cert_len")]
|
||||||
pub fake_cert_len: usize,
|
pub fake_cert_len: usize,
|
||||||
|
|
||||||
|
/// Enable TLS certificate emulation using cached real certificates.
|
||||||
|
#[serde(default)]
|
||||||
|
pub tls_emulation: bool,
|
||||||
|
|
||||||
|
/// Directory to store TLS front cache (on disk).
|
||||||
|
#[serde(default = "default_tls_front_dir")]
|
||||||
|
pub tls_front_dir: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl Default for AntiCensorshipConfig {
|
impl Default for AntiCensorshipConfig {
|
||||||
fn default() -> Self {
|
fn default() -> Self {
|
||||||
Self {
|
Self {
|
||||||
tls_domain: default_tls_domain(),
|
tls_domain: default_tls_domain(),
|
||||||
|
tls_domains: Vec::new(),
|
||||||
mask: true,
|
mask: true,
|
||||||
mask_host: None,
|
mask_host: None,
|
||||||
mask_port: default_mask_port(),
|
mask_port: default_mask_port(),
|
||||||
mask_unix_sock: None,
|
mask_unix_sock: None,
|
||||||
fake_cert_len: default_fake_cert_len(),
|
fake_cert_len: default_fake_cert_len(),
|
||||||
|
tls_emulation: false,
|
||||||
|
tls_front_dir: default_tls_front_dir(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -376,6 +467,8 @@ pub enum UpstreamType {
|
|||||||
Direct {
|
Direct {
|
||||||
#[serde(default)]
|
#[serde(default)]
|
||||||
interface: Option<String>,
|
interface: Option<String>,
|
||||||
|
#[serde(default)]
|
||||||
|
bind_addresses: Option<Vec<String>>,
|
||||||
},
|
},
|
||||||
Socks4 {
|
Socks4 {
|
||||||
address: String,
|
address: String,
|
||||||
@@ -403,6 +496,10 @@ pub struct UpstreamConfig {
|
|||||||
pub weight: u16,
|
pub weight: u16,
|
||||||
#[serde(default = "default_true")]
|
#[serde(default = "default_true")]
|
||||||
pub enabled: bool,
|
pub enabled: bool,
|
||||||
|
#[serde(default)]
|
||||||
|
pub scopes: String,
|
||||||
|
#[serde(skip)]
|
||||||
|
pub selected_scope: String,
|
||||||
}
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||||
|
|||||||
@@ -11,6 +11,9 @@ pub struct SecureRandom {
|
|||||||
inner: Mutex<SecureRandomInner>,
|
inner: Mutex<SecureRandomInner>,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
unsafe impl Send for SecureRandom {}
|
||||||
|
unsafe impl Sync for SecureRandom {}
|
||||||
|
|
||||||
struct SecureRandomInner {
|
struct SecureRandomInner {
|
||||||
rng: StdRng,
|
rng: StdRng,
|
||||||
cipher: AesCtr,
|
cipher: AesCtr,
|
||||||
@@ -211,4 +214,4 @@ mod tests {
|
|||||||
|
|
||||||
assert_ne!(shuffled, original);
|
assert_ne!(shuffled, original);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
107
src/main.rs
107
src/main.rs
@@ -23,6 +23,7 @@ mod proxy;
|
|||||||
mod stats;
|
mod stats;
|
||||||
mod stream;
|
mod stream;
|
||||||
mod transport;
|
mod transport;
|
||||||
|
mod tls_front;
|
||||||
mod util;
|
mod util;
|
||||||
|
|
||||||
use crate::config::{LogLevel, ProxyConfig};
|
use crate::config::{LogLevel, ProxyConfig};
|
||||||
@@ -36,6 +37,7 @@ use crate::transport::middle_proxy::{
|
|||||||
MePool, fetch_proxy_config, run_me_ping, MePingFamily, MePingSample, format_sample_line,
|
MePool, fetch_proxy_config, run_me_ping, MePingFamily, MePingSample, format_sample_line,
|
||||||
};
|
};
|
||||||
use crate::transport::{ListenOptions, UpstreamManager, create_listener};
|
use crate::transport::{ListenOptions, UpstreamManager, create_listener};
|
||||||
|
use crate::tls_front::TlsFrontCache;
|
||||||
|
|
||||||
fn parse_cli() -> (String, bool, Option<String>) {
|
fn parse_cli() -> (String, bool, Option<String>) {
|
||||||
let mut config_path = "config.toml".to_string();
|
let mut config_path = "config.toml".to_string();
|
||||||
@@ -92,6 +94,10 @@ fn parse_cli() -> (String, bool, Option<String>) {
|
|||||||
eprintln!(" --no-start Don't start the service after install");
|
eprintln!(" --no-start Don't start the service after install");
|
||||||
std::process::exit(0);
|
std::process::exit(0);
|
||||||
}
|
}
|
||||||
|
"--version" | "-V" => {
|
||||||
|
println!("telemt {}", env!("CARGO_PKG_VERSION"));
|
||||||
|
std::process::exit(0);
|
||||||
|
}
|
||||||
s if !s.starts_with('-') => {
|
s if !s.starts_with('-') => {
|
||||||
config_path = s.to_string();
|
config_path = s.to_string();
|
||||||
}
|
}
|
||||||
@@ -106,34 +112,47 @@ fn parse_cli() -> (String, bool, Option<String>) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fn print_proxy_links(host: &str, port: u16, config: &ProxyConfig) {
|
fn print_proxy_links(host: &str, port: u16, config: &ProxyConfig) {
|
||||||
info!("--- Proxy Links ({}) ---", host);
|
info!(target: "telemt::links", "--- Proxy Links ({}) ---", host);
|
||||||
for user_name in config.general.links.show.resolve_users(&config.access.users) {
|
for user_name in config.general.links.show.resolve_users(&config.access.users) {
|
||||||
if let Some(secret) = config.access.users.get(user_name) {
|
if let Some(secret) = config.access.users.get(user_name) {
|
||||||
info!("User: {}", user_name);
|
info!(target: "telemt::links", "User: {}", user_name);
|
||||||
if config.general.modes.classic {
|
if config.general.modes.classic {
|
||||||
info!(
|
info!(
|
||||||
|
target: "telemt::links",
|
||||||
" Classic: tg://proxy?server={}&port={}&secret={}",
|
" Classic: tg://proxy?server={}&port={}&secret={}",
|
||||||
host, port, secret
|
host, port, secret
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
if config.general.modes.secure {
|
if config.general.modes.secure {
|
||||||
info!(
|
info!(
|
||||||
|
target: "telemt::links",
|
||||||
" DD: tg://proxy?server={}&port={}&secret=dd{}",
|
" DD: tg://proxy?server={}&port={}&secret=dd{}",
|
||||||
host, port, secret
|
host, port, secret
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
if config.general.modes.tls {
|
if config.general.modes.tls {
|
||||||
let domain_hex = hex::encode(&config.censorship.tls_domain);
|
let mut domains = Vec::with_capacity(1 + config.censorship.tls_domains.len());
|
||||||
info!(
|
domains.push(config.censorship.tls_domain.clone());
|
||||||
" EE-TLS: tg://proxy?server={}&port={}&secret=ee{}{}",
|
for d in &config.censorship.tls_domains {
|
||||||
host, port, secret, domain_hex
|
if !domains.contains(d) {
|
||||||
);
|
domains.push(d.clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for domain in domains {
|
||||||
|
let domain_hex = hex::encode(&domain);
|
||||||
|
info!(
|
||||||
|
target: "telemt::links",
|
||||||
|
" EE-TLS: tg://proxy?server={}&port={}&secret=ee{}{}",
|
||||||
|
host, port, secret, domain_hex
|
||||||
|
);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
warn!("User '{}' in show_link not found", user_name);
|
warn!(target: "telemt::links", "User '{}' in show_link not found", user_name);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
info!("------------------------");
|
info!(target: "telemt::links", "------------------------");
|
||||||
}
|
}
|
||||||
|
|
||||||
#[tokio::main]
|
#[tokio::main]
|
||||||
@@ -240,6 +259,46 @@ async fn main() -> std::result::Result<(), Box<dyn std::error::Error>> {
|
|||||||
info!("IP limits configured for {} users", config.access.user_max_unique_ips.len());
|
info!("IP limits configured for {} users", config.access.user_max_unique_ips.len());
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// TLS front cache (optional emulation)
|
||||||
|
let mut tls_domains = Vec::with_capacity(1 + config.censorship.tls_domains.len());
|
||||||
|
tls_domains.push(config.censorship.tls_domain.clone());
|
||||||
|
for d in &config.censorship.tls_domains {
|
||||||
|
if !tls_domains.contains(d) {
|
||||||
|
tls_domains.push(d.clone());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let tls_cache: Option<Arc<TlsFrontCache>> = if config.censorship.tls_emulation {
|
||||||
|
let cache = Arc::new(TlsFrontCache::new(
|
||||||
|
&tls_domains,
|
||||||
|
config.censorship.fake_cert_len,
|
||||||
|
&config.censorship.tls_front_dir,
|
||||||
|
));
|
||||||
|
|
||||||
|
let cache_clone = cache.clone();
|
||||||
|
let domains = tls_domains.clone();
|
||||||
|
let port = config.censorship.mask_port;
|
||||||
|
tokio::spawn(async move {
|
||||||
|
for domain in domains {
|
||||||
|
match crate::tls_front::fetcher::fetch_real_tls(
|
||||||
|
&domain,
|
||||||
|
port,
|
||||||
|
&domain,
|
||||||
|
Duration::from_secs(5),
|
||||||
|
)
|
||||||
|
.await
|
||||||
|
{
|
||||||
|
Ok(res) => cache_clone.update_from_fetch(&domain, res).await,
|
||||||
|
Err(e) => warn!(domain = %domain, error = %e, "TLS emulation fetch failed"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
|
||||||
|
Some(cache)
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
// Connection concurrency limit
|
// Connection concurrency limit
|
||||||
let _max_connections = Arc::new(Semaphore::new(10_000));
|
let _max_connections = Arc::new(Semaphore::new(10_000));
|
||||||
|
|
||||||
@@ -317,6 +376,7 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).awai
|
|||||||
config.general.middle_proxy_nat_ip,
|
config.general.middle_proxy_nat_ip,
|
||||||
config.general.middle_proxy_nat_probe,
|
config.general.middle_proxy_nat_probe,
|
||||||
config.general.middle_proxy_nat_stun.clone(),
|
config.general.middle_proxy_nat_stun.clone(),
|
||||||
|
config.general.middle_proxy_nat_stun_servers.clone(),
|
||||||
probe.detected_ipv6,
|
probe.detected_ipv6,
|
||||||
config.timeouts.me_one_retry,
|
config.timeouts.me_one_retry,
|
||||||
config.timeouts.me_one_timeout_ms,
|
config.timeouts.me_one_timeout_ms,
|
||||||
@@ -325,18 +385,32 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).awai
|
|||||||
cfg_v4.default_dc.or(cfg_v6.default_dc),
|
cfg_v4.default_dc.or(cfg_v6.default_dc),
|
||||||
decision.clone(),
|
decision.clone(),
|
||||||
rng.clone(),
|
rng.clone(),
|
||||||
|
stats.clone(),
|
||||||
|
config.general.me_keepalive_enabled,
|
||||||
|
config.general.me_keepalive_interval_secs,
|
||||||
|
config.general.me_keepalive_jitter_secs,
|
||||||
|
config.general.me_keepalive_payload_random,
|
||||||
|
config.general.me_warmup_stagger_enabled,
|
||||||
|
config.general.me_warmup_step_delay_ms,
|
||||||
|
config.general.me_warmup_step_jitter_ms,
|
||||||
|
config.general.me_reconnect_max_concurrent_per_dc,
|
||||||
|
config.general.me_reconnect_backoff_base_ms,
|
||||||
|
config.general.me_reconnect_backoff_cap_ms,
|
||||||
|
config.general.me_reconnect_fast_retry_count,
|
||||||
);
|
);
|
||||||
|
|
||||||
match pool.init(2, &rng).await {
|
let pool_size = config.general.middle_proxy_pool_size.max(1);
|
||||||
|
match pool.init(pool_size, &rng).await {
|
||||||
Ok(()) => {
|
Ok(()) => {
|
||||||
info!("Middle-End pool initialized successfully");
|
info!("Middle-End pool initialized successfully");
|
||||||
|
|
||||||
// Phase 4: Start health monitor
|
// Phase 4: Start health monitor
|
||||||
let pool_clone = pool.clone();
|
let pool_clone = pool.clone();
|
||||||
let rng_clone = rng.clone();
|
let rng_clone = rng.clone();
|
||||||
|
let min_conns = pool_size;
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
crate::transport::middle_proxy::me_health_monitor(
|
crate::transport::middle_proxy::me_health_monitor(
|
||||||
pool_clone, rng_clone, 2,
|
pool_clone, rng_clone, min_conns,
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
});
|
});
|
||||||
@@ -693,6 +767,7 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).awai
|
|||||||
let buffer_pool = buffer_pool.clone();
|
let buffer_pool = buffer_pool.clone();
|
||||||
let rng = rng.clone();
|
let rng = rng.clone();
|
||||||
let me_pool = me_pool.clone();
|
let me_pool = me_pool.clone();
|
||||||
|
let tls_cache = tls_cache.clone();
|
||||||
let ip_tracker = ip_tracker.clone();
|
let ip_tracker = ip_tracker.clone();
|
||||||
|
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
@@ -711,13 +786,14 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).awai
|
|||||||
let buffer_pool = buffer_pool.clone();
|
let buffer_pool = buffer_pool.clone();
|
||||||
let rng = rng.clone();
|
let rng = rng.clone();
|
||||||
let me_pool = me_pool.clone();
|
let me_pool = me_pool.clone();
|
||||||
|
let tls_cache = tls_cache.clone();
|
||||||
let ip_tracker = ip_tracker.clone();
|
let ip_tracker = ip_tracker.clone();
|
||||||
|
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
if let Err(e) = crate::proxy::client::handle_client_stream(
|
if let Err(e) = crate::proxy::client::handle_client_stream(
|
||||||
stream, fake_peer, config, stats,
|
stream, fake_peer, config, stats,
|
||||||
upstream_manager, replay_checker, buffer_pool, rng,
|
upstream_manager, replay_checker, buffer_pool, rng,
|
||||||
me_pool, ip_tracker,
|
me_pool, tls_cache, ip_tracker,
|
||||||
).await {
|
).await {
|
||||||
debug!(error = %e, "Unix socket connection error");
|
debug!(error = %e, "Unix socket connection error");
|
||||||
}
|
}
|
||||||
@@ -740,6 +816,8 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).awai
|
|||||||
// Switch to user-configured log level after startup
|
// Switch to user-configured log level after startup
|
||||||
let runtime_filter = if has_rust_log {
|
let runtime_filter = if has_rust_log {
|
||||||
EnvFilter::from_default_env()
|
EnvFilter::from_default_env()
|
||||||
|
} else if matches!(effective_log_level, LogLevel::Silent) {
|
||||||
|
EnvFilter::new("warn,telemt::links=info")
|
||||||
} else {
|
} else {
|
||||||
EnvFilter::new(effective_log_level.to_filter_str())
|
EnvFilter::new(effective_log_level.to_filter_str())
|
||||||
};
|
};
|
||||||
@@ -763,6 +841,7 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).awai
|
|||||||
let buffer_pool = buffer_pool.clone();
|
let buffer_pool = buffer_pool.clone();
|
||||||
let rng = rng.clone();
|
let rng = rng.clone();
|
||||||
let me_pool = me_pool.clone();
|
let me_pool = me_pool.clone();
|
||||||
|
let tls_cache = tls_cache.clone();
|
||||||
let ip_tracker = ip_tracker.clone();
|
let ip_tracker = ip_tracker.clone();
|
||||||
|
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
@@ -776,6 +855,7 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).awai
|
|||||||
let buffer_pool = buffer_pool.clone();
|
let buffer_pool = buffer_pool.clone();
|
||||||
let rng = rng.clone();
|
let rng = rng.clone();
|
||||||
let me_pool = me_pool.clone();
|
let me_pool = me_pool.clone();
|
||||||
|
let tls_cache = tls_cache.clone();
|
||||||
let ip_tracker = ip_tracker.clone();
|
let ip_tracker = ip_tracker.clone();
|
||||||
|
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
@@ -789,12 +869,13 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).awai
|
|||||||
buffer_pool,
|
buffer_pool,
|
||||||
rng,
|
rng,
|
||||||
me_pool,
|
me_pool,
|
||||||
|
tls_cache,
|
||||||
ip_tracker,
|
ip_tracker,
|
||||||
)
|
)
|
||||||
.run()
|
.run()
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
debug!(peer = %peer_addr, error = %e, "Connection error");
|
warn!(peer = %peer_addr, error = %e, "Connection closed with error");
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -91,6 +91,22 @@ fn render_metrics(stats: &Stats) -> String {
|
|||||||
let _ = writeln!(out, "# TYPE telemt_handshake_timeouts_total counter");
|
let _ = writeln!(out, "# TYPE telemt_handshake_timeouts_total counter");
|
||||||
let _ = writeln!(out, "telemt_handshake_timeouts_total {}", stats.get_handshake_timeouts());
|
let _ = writeln!(out, "telemt_handshake_timeouts_total {}", stats.get_handshake_timeouts());
|
||||||
|
|
||||||
|
let _ = writeln!(out, "# HELP telemt_me_keepalive_sent_total ME keepalive frames sent");
|
||||||
|
let _ = writeln!(out, "# TYPE telemt_me_keepalive_sent_total counter");
|
||||||
|
let _ = writeln!(out, "telemt_me_keepalive_sent_total {}", stats.get_me_keepalive_sent());
|
||||||
|
|
||||||
|
let _ = writeln!(out, "# HELP telemt_me_keepalive_failed_total ME keepalive send failures");
|
||||||
|
let _ = writeln!(out, "# TYPE telemt_me_keepalive_failed_total counter");
|
||||||
|
let _ = writeln!(out, "telemt_me_keepalive_failed_total {}", stats.get_me_keepalive_failed());
|
||||||
|
|
||||||
|
let _ = writeln!(out, "# HELP telemt_me_reconnect_attempts_total ME reconnect attempts");
|
||||||
|
let _ = writeln!(out, "# TYPE telemt_me_reconnect_attempts_total counter");
|
||||||
|
let _ = writeln!(out, "telemt_me_reconnect_attempts_total {}", stats.get_me_reconnect_attempts());
|
||||||
|
|
||||||
|
let _ = writeln!(out, "# HELP telemt_me_reconnect_success_total ME reconnect successes");
|
||||||
|
let _ = writeln!(out, "# TYPE telemt_me_reconnect_success_total counter");
|
||||||
|
let _ = writeln!(out, "telemt_me_reconnect_success_total {}", stats.get_me_reconnect_success());
|
||||||
|
|
||||||
let _ = writeln!(out, "# HELP telemt_user_connections_total Per-user total connections");
|
let _ = writeln!(out, "# HELP telemt_user_connections_total Per-user total connections");
|
||||||
let _ = writeln!(out, "# TYPE telemt_user_connections_total counter");
|
let _ = writeln!(out, "# TYPE telemt_user_connections_total counter");
|
||||||
let _ = writeln!(out, "# HELP telemt_user_connections_current Per-user active connections");
|
let _ = writeln!(out, "# HELP telemt_user_connections_current Per-user active connections");
|
||||||
|
|||||||
@@ -50,10 +50,17 @@ pub async fn stun_probe_family(stun_addr: &str, family: IpFamily) -> Result<Opti
|
|||||||
|
|
||||||
let target_addr = resolve_stun_addr(stun_addr, family).await?;
|
let target_addr = resolve_stun_addr(stun_addr, family).await?;
|
||||||
if let Some(addr) = target_addr {
|
if let Some(addr) = target_addr {
|
||||||
socket
|
match socket.connect(addr).await {
|
||||||
.connect(addr)
|
Ok(()) => {}
|
||||||
.await
|
Err(e) if family == IpFamily::V6 && matches!(
|
||||||
.map_err(|e| ProxyError::Proxy(format!("STUN connect failed: {e}")))?;
|
e.kind(),
|
||||||
|
std::io::ErrorKind::NetworkUnreachable
|
||||||
|
| std::io::ErrorKind::HostUnreachable
|
||||||
|
| std::io::ErrorKind::Unsupported
|
||||||
|
| std::io::ErrorKind::NetworkDown
|
||||||
|
) => return Ok(None),
|
||||||
|
Err(e) => return Err(ProxyError::Proxy(format!("STUN connect failed: {e}"))),
|
||||||
|
}
|
||||||
} else {
|
} else {
|
||||||
return Ok(None);
|
return Ok(None);
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -397,6 +397,84 @@ pub fn build_server_hello(
|
|||||||
response
|
response
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Extract SNI (server_name) from a TLS ClientHello.
|
||||||
|
pub fn extract_sni_from_client_hello(handshake: &[u8]) -> Option<String> {
|
||||||
|
if handshake.len() < 43 || handshake[0] != TLS_RECORD_HANDSHAKE {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut pos = 5; // after record header
|
||||||
|
if handshake.get(pos).copied()? != 0x01 {
|
||||||
|
return None; // not ClientHello
|
||||||
|
}
|
||||||
|
|
||||||
|
// Handshake length bytes
|
||||||
|
pos += 4; // type + len (3)
|
||||||
|
|
||||||
|
// version (2) + random (32)
|
||||||
|
pos += 2 + 32;
|
||||||
|
if pos + 1 > handshake.len() {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let session_id_len = *handshake.get(pos)? as usize;
|
||||||
|
pos += 1 + session_id_len;
|
||||||
|
if pos + 2 > handshake.len() {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let cipher_suites_len = u16::from_be_bytes([handshake[pos], handshake[pos + 1]]) as usize;
|
||||||
|
pos += 2 + cipher_suites_len;
|
||||||
|
if pos + 1 > handshake.len() {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let comp_len = *handshake.get(pos)? as usize;
|
||||||
|
pos += 1 + comp_len;
|
||||||
|
if pos + 2 > handshake.len() {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let ext_len = u16::from_be_bytes([handshake[pos], handshake[pos + 1]]) as usize;
|
||||||
|
pos += 2;
|
||||||
|
let ext_end = pos + ext_len;
|
||||||
|
if ext_end > handshake.len() {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
while pos + 4 <= ext_end {
|
||||||
|
let etype = u16::from_be_bytes([handshake[pos], handshake[pos + 1]]);
|
||||||
|
let elen = u16::from_be_bytes([handshake[pos + 2], handshake[pos + 3]]) as usize;
|
||||||
|
pos += 4;
|
||||||
|
if pos + elen > ext_end {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
if etype == 0x0000 && elen >= 5 {
|
||||||
|
// server_name extension
|
||||||
|
let list_len = u16::from_be_bytes([handshake[pos], handshake[pos + 1]]) as usize;
|
||||||
|
let mut sn_pos = pos + 2;
|
||||||
|
let sn_end = std::cmp::min(sn_pos + list_len, pos + elen);
|
||||||
|
while sn_pos + 3 <= sn_end {
|
||||||
|
let name_type = handshake[sn_pos];
|
||||||
|
let name_len = u16::from_be_bytes([handshake[sn_pos + 1], handshake[sn_pos + 2]]) as usize;
|
||||||
|
sn_pos += 3;
|
||||||
|
if sn_pos + name_len > sn_end {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
if name_type == 0 && name_len > 0 {
|
||||||
|
if let Ok(host) = std::str::from_utf8(&handshake[sn_pos..sn_pos + name_len]) {
|
||||||
|
return Some(host.to_string());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
sn_pos += name_len;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pos += elen;
|
||||||
|
}
|
||||||
|
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
/// Check if bytes look like a TLS ClientHello
|
/// Check if bytes look like a TLS ClientHello
|
||||||
pub fn is_tls_handshake(first_bytes: &[u8]) -> bool {
|
pub fn is_tls_handshake(first_bytes: &[u8]) -> bool {
|
||||||
if first_bytes.len() < 3 {
|
if first_bytes.len() < 3 {
|
||||||
|
|||||||
@@ -30,7 +30,8 @@ use crate::protocol::tls;
|
|||||||
use crate::stats::{ReplayChecker, Stats};
|
use crate::stats::{ReplayChecker, Stats};
|
||||||
use crate::stream::{BufferPool, CryptoReader, CryptoWriter};
|
use crate::stream::{BufferPool, CryptoReader, CryptoWriter};
|
||||||
use crate::transport::middle_proxy::MePool;
|
use crate::transport::middle_proxy::MePool;
|
||||||
use crate::transport::{UpstreamManager, configure_client_socket};
|
use crate::transport::{UpstreamManager, configure_client_socket, parse_proxy_protocol};
|
||||||
|
use crate::tls_front::TlsFrontCache;
|
||||||
|
|
||||||
use crate::proxy::direct_relay::handle_via_direct;
|
use crate::proxy::direct_relay::handle_via_direct;
|
||||||
use crate::proxy::handshake::{HandshakeSuccess, handle_mtproto_handshake, handle_tls_handshake};
|
use crate::proxy::handshake::{HandshakeSuccess, handle_mtproto_handshake, handle_tls_handshake};
|
||||||
@@ -47,13 +48,35 @@ pub async fn handle_client_stream<S>(
|
|||||||
buffer_pool: Arc<BufferPool>,
|
buffer_pool: Arc<BufferPool>,
|
||||||
rng: Arc<SecureRandom>,
|
rng: Arc<SecureRandom>,
|
||||||
me_pool: Option<Arc<MePool>>,
|
me_pool: Option<Arc<MePool>>,
|
||||||
|
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||||
ip_tracker: Arc<UserIpTracker>,
|
ip_tracker: Arc<UserIpTracker>,
|
||||||
) -> Result<()>
|
) -> Result<()>
|
||||||
where
|
where
|
||||||
S: AsyncRead + AsyncWrite + Unpin + Send + 'static,
|
S: AsyncRead + AsyncWrite + Unpin + Send + 'static,
|
||||||
{
|
{
|
||||||
stats.increment_connects_all();
|
stats.increment_connects_all();
|
||||||
debug!(peer = %peer, "New connection (generic stream)");
|
let mut real_peer = peer;
|
||||||
|
|
||||||
|
if config.server.proxy_protocol {
|
||||||
|
match parse_proxy_protocol(&mut stream, peer).await {
|
||||||
|
Ok(info) => {
|
||||||
|
debug!(
|
||||||
|
peer = %peer,
|
||||||
|
client = %info.src_addr,
|
||||||
|
version = info.version,
|
||||||
|
"PROXY protocol header parsed"
|
||||||
|
);
|
||||||
|
real_peer = info.src_addr;
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
stats.increment_connects_bad();
|
||||||
|
warn!(peer = %peer, error = %e, "Invalid PROXY protocol header");
|
||||||
|
return Err(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
debug!(peer = %real_peer, "New connection (generic stream)");
|
||||||
|
|
||||||
let handshake_timeout = Duration::from_secs(config.timeouts.client_handshake);
|
let handshake_timeout = Duration::from_secs(config.timeouts.client_handshake);
|
||||||
let stats_for_timeout = stats.clone();
|
let stats_for_timeout = stats.clone();
|
||||||
@@ -69,13 +92,13 @@ where
|
|||||||
stream.read_exact(&mut first_bytes).await?;
|
stream.read_exact(&mut first_bytes).await?;
|
||||||
|
|
||||||
let is_tls = tls::is_tls_handshake(&first_bytes[..3]);
|
let is_tls = tls::is_tls_handshake(&first_bytes[..3]);
|
||||||
debug!(peer = %peer, is_tls = is_tls, "Handshake type detected");
|
debug!(peer = %real_peer, is_tls = is_tls, "Handshake type detected");
|
||||||
|
|
||||||
if is_tls {
|
if is_tls {
|
||||||
let tls_len = u16::from_be_bytes([first_bytes[3], first_bytes[4]]) as usize;
|
let tls_len = u16::from_be_bytes([first_bytes[3], first_bytes[4]]) as usize;
|
||||||
|
|
||||||
if tls_len < 512 {
|
if tls_len < 512 {
|
||||||
debug!(peer = %peer, tls_len = tls_len, "TLS handshake too short");
|
debug!(peer = %real_peer, tls_len = tls_len, "TLS handshake too short");
|
||||||
stats.increment_connects_bad();
|
stats.increment_connects_bad();
|
||||||
let (reader, writer) = tokio::io::split(stream);
|
let (reader, writer) = tokio::io::split(stream);
|
||||||
handle_bad_client(reader, writer, &first_bytes, &config).await;
|
handle_bad_client(reader, writer, &first_bytes, &config).await;
|
||||||
@@ -89,8 +112,8 @@ where
|
|||||||
let (read_half, write_half) = tokio::io::split(stream);
|
let (read_half, write_half) = tokio::io::split(stream);
|
||||||
|
|
||||||
let (mut tls_reader, tls_writer, _tls_user) = match handle_tls_handshake(
|
let (mut tls_reader, tls_writer, _tls_user) = match handle_tls_handshake(
|
||||||
&handshake, read_half, write_half, peer,
|
&handshake, read_half, write_half, real_peer,
|
||||||
&config, &replay_checker, &rng,
|
&config, &replay_checker, &rng, tls_cache.clone(),
|
||||||
).await {
|
).await {
|
||||||
HandshakeResult::Success(result) => result,
|
HandshakeResult::Success(result) => result,
|
||||||
HandshakeResult::BadClient { reader, writer } => {
|
HandshakeResult::BadClient { reader, writer } => {
|
||||||
@@ -107,7 +130,7 @@ where
|
|||||||
.map_err(|_| ProxyError::InvalidHandshake("Short MTProto handshake".into()))?;
|
.map_err(|_| ProxyError::InvalidHandshake("Short MTProto handshake".into()))?;
|
||||||
|
|
||||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
||||||
&mtproto_handshake, tls_reader, tls_writer, peer,
|
&mtproto_handshake, tls_reader, tls_writer, real_peer,
|
||||||
&config, &replay_checker, true,
|
&config, &replay_checker, true,
|
||||||
).await {
|
).await {
|
||||||
HandshakeResult::Success(result) => result,
|
HandshakeResult::Success(result) => result,
|
||||||
@@ -123,12 +146,12 @@ where
|
|||||||
RunningClientHandler::handle_authenticated_static(
|
RunningClientHandler::handle_authenticated_static(
|
||||||
crypto_reader, crypto_writer, success,
|
crypto_reader, crypto_writer, success,
|
||||||
upstream_manager, stats, config, buffer_pool, rng, me_pool,
|
upstream_manager, stats, config, buffer_pool, rng, me_pool,
|
||||||
local_addr, peer, ip_tracker.clone(),
|
local_addr, real_peer, ip_tracker.clone(),
|
||||||
),
|
),
|
||||||
)))
|
)))
|
||||||
} else {
|
} else {
|
||||||
if !config.general.modes.classic && !config.general.modes.secure {
|
if !config.general.modes.classic && !config.general.modes.secure {
|
||||||
debug!(peer = %peer, "Non-TLS modes disabled");
|
debug!(peer = %real_peer, "Non-TLS modes disabled");
|
||||||
stats.increment_connects_bad();
|
stats.increment_connects_bad();
|
||||||
let (reader, writer) = tokio::io::split(stream);
|
let (reader, writer) = tokio::io::split(stream);
|
||||||
handle_bad_client(reader, writer, &first_bytes, &config).await;
|
handle_bad_client(reader, writer, &first_bytes, &config).await;
|
||||||
@@ -142,7 +165,7 @@ where
|
|||||||
let (read_half, write_half) = tokio::io::split(stream);
|
let (read_half, write_half) = tokio::io::split(stream);
|
||||||
|
|
||||||
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake(
|
||||||
&handshake, read_half, write_half, peer,
|
&handshake, read_half, write_half, real_peer,
|
||||||
&config, &replay_checker, false,
|
&config, &replay_checker, false,
|
||||||
).await {
|
).await {
|
||||||
HandshakeResult::Success(result) => result,
|
HandshakeResult::Success(result) => result,
|
||||||
@@ -166,7 +189,7 @@ where
|
|||||||
rng,
|
rng,
|
||||||
me_pool,
|
me_pool,
|
||||||
local_addr,
|
local_addr,
|
||||||
peer,
|
real_peer,
|
||||||
ip_tracker.clone(),
|
ip_tracker.clone(),
|
||||||
)
|
)
|
||||||
)))
|
)))
|
||||||
@@ -203,6 +226,7 @@ pub struct RunningClientHandler {
|
|||||||
buffer_pool: Arc<BufferPool>,
|
buffer_pool: Arc<BufferPool>,
|
||||||
rng: Arc<SecureRandom>,
|
rng: Arc<SecureRandom>,
|
||||||
me_pool: Option<Arc<MePool>>,
|
me_pool: Option<Arc<MePool>>,
|
||||||
|
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||||
ip_tracker: Arc<UserIpTracker>,
|
ip_tracker: Arc<UserIpTracker>,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -217,6 +241,7 @@ impl ClientHandler {
|
|||||||
buffer_pool: Arc<BufferPool>,
|
buffer_pool: Arc<BufferPool>,
|
||||||
rng: Arc<SecureRandom>,
|
rng: Arc<SecureRandom>,
|
||||||
me_pool: Option<Arc<MePool>>,
|
me_pool: Option<Arc<MePool>>,
|
||||||
|
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||||
ip_tracker: Arc<UserIpTracker>,
|
ip_tracker: Arc<UserIpTracker>,
|
||||||
) -> RunningClientHandler {
|
) -> RunningClientHandler {
|
||||||
RunningClientHandler {
|
RunningClientHandler {
|
||||||
@@ -229,6 +254,7 @@ impl ClientHandler {
|
|||||||
buffer_pool,
|
buffer_pool,
|
||||||
rng,
|
rng,
|
||||||
me_pool,
|
me_pool,
|
||||||
|
tls_cache,
|
||||||
ip_tracker,
|
ip_tracker,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -275,6 +301,25 @@ impl RunningClientHandler {
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn do_handshake(mut self) -> Result<HandshakeOutcome> {
|
async fn do_handshake(mut self) -> Result<HandshakeOutcome> {
|
||||||
|
if self.config.server.proxy_protocol {
|
||||||
|
match parse_proxy_protocol(&mut self.stream, self.peer).await {
|
||||||
|
Ok(info) => {
|
||||||
|
debug!(
|
||||||
|
peer = %self.peer,
|
||||||
|
client = %info.src_addr,
|
||||||
|
version = info.version,
|
||||||
|
"PROXY protocol header parsed"
|
||||||
|
);
|
||||||
|
self.peer = info.src_addr;
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
self.stats.increment_connects_bad();
|
||||||
|
warn!(peer = %self.peer, error = %e, "Invalid PROXY protocol header");
|
||||||
|
return Err(e);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
let mut first_bytes = [0u8; 5];
|
let mut first_bytes = [0u8; 5];
|
||||||
self.stream.read_exact(&mut first_bytes).await?;
|
self.stream.read_exact(&mut first_bytes).await?;
|
||||||
|
|
||||||
@@ -327,6 +372,7 @@ impl RunningClientHandler {
|
|||||||
&config,
|
&config,
|
||||||
&replay_checker,
|
&replay_checker,
|
||||||
&self.rng,
|
&self.rng,
|
||||||
|
self.tls_cache.clone(),
|
||||||
)
|
)
|
||||||
.await
|
.await
|
||||||
{
|
{
|
||||||
|
|||||||
@@ -45,7 +45,7 @@ where
|
|||||||
);
|
);
|
||||||
|
|
||||||
let tg_stream = upstream_manager
|
let tg_stream = upstream_manager
|
||||||
.connect(dc_addr, Some(success.dc_idx))
|
.connect(dc_addr, Some(success.dc_idx), user.strip_prefix("scope_").filter(|s| !s.is_empty()))
|
||||||
.await?;
|
.await?;
|
||||||
|
|
||||||
debug!(peer = %success.peer, dc_addr = %dc_addr, "Connected, performing TG handshake");
|
debug!(peer = %success.peer, dc_addr = %dc_addr, "Connected, performing TG handshake");
|
||||||
|
|||||||
@@ -1,6 +1,7 @@
|
|||||||
//! MTProto Handshake
|
//! MTProto Handshake
|
||||||
|
|
||||||
use std::net::SocketAddr;
|
use std::net::SocketAddr;
|
||||||
|
use std::sync::Arc;
|
||||||
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
|
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
|
||||||
use tracing::{debug, warn, trace, info};
|
use tracing::{debug, warn, trace, info};
|
||||||
use zeroize::Zeroize;
|
use zeroize::Zeroize;
|
||||||
@@ -12,6 +13,7 @@ use crate::stream::{FakeTlsReader, FakeTlsWriter, CryptoReader, CryptoWriter};
|
|||||||
use crate::error::{ProxyError, HandshakeResult};
|
use crate::error::{ProxyError, HandshakeResult};
|
||||||
use crate::stats::ReplayChecker;
|
use crate::stats::ReplayChecker;
|
||||||
use crate::config::ProxyConfig;
|
use crate::config::ProxyConfig;
|
||||||
|
use crate::tls_front::{TlsFrontCache, emulator};
|
||||||
|
|
||||||
/// Result of successful handshake
|
/// Result of successful handshake
|
||||||
///
|
///
|
||||||
@@ -55,6 +57,7 @@ pub async fn handle_tls_handshake<R, W>(
|
|||||||
config: &ProxyConfig,
|
config: &ProxyConfig,
|
||||||
replay_checker: &ReplayChecker,
|
replay_checker: &ReplayChecker,
|
||||||
rng: &SecureRandom,
|
rng: &SecureRandom,
|
||||||
|
tls_cache: Option<Arc<TlsFrontCache>>,
|
||||||
) -> HandshakeResult<(FakeTlsReader<R>, FakeTlsWriter<W>, String), R, W>
|
) -> HandshakeResult<(FakeTlsReader<R>, FakeTlsWriter<W>, String), R, W>
|
||||||
where
|
where
|
||||||
R: AsyncRead + Unpin,
|
R: AsyncRead + Unpin,
|
||||||
@@ -102,13 +105,37 @@ where
|
|||||||
None => return HandshakeResult::BadClient { reader, writer },
|
None => return HandshakeResult::BadClient { reader, writer },
|
||||||
};
|
};
|
||||||
|
|
||||||
let response = tls::build_server_hello(
|
let cached = if config.censorship.tls_emulation {
|
||||||
secret,
|
if let Some(cache) = tls_cache.as_ref() {
|
||||||
&validation.digest,
|
if let Some(sni) = tls::extract_sni_from_client_hello(handshake) {
|
||||||
&validation.session_id,
|
Some(cache.get(&sni).await)
|
||||||
config.censorship.fake_cert_len,
|
} else {
|
||||||
rng,
|
Some(cache.get(&config.censorship.tls_domain).await)
|
||||||
);
|
}
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
None
|
||||||
|
};
|
||||||
|
|
||||||
|
let response = if let Some(cached_entry) = cached {
|
||||||
|
emulator::build_emulated_server_hello(
|
||||||
|
secret,
|
||||||
|
&validation.digest,
|
||||||
|
&validation.session_id,
|
||||||
|
&cached_entry,
|
||||||
|
rng,
|
||||||
|
)
|
||||||
|
} else {
|
||||||
|
tls::build_server_hello(
|
||||||
|
secret,
|
||||||
|
&validation.digest,
|
||||||
|
&validation.session_id,
|
||||||
|
config.censorship.fake_cert_len,
|
||||||
|
rng,
|
||||||
|
)
|
||||||
|
};
|
||||||
|
|
||||||
debug!(peer = %peer, response_len = response.len(), "Sending TLS ServerHello");
|
debug!(peer = %peer, response_len = response.len(), "Sending TLS ServerHello");
|
||||||
|
|
||||||
|
|||||||
@@ -19,6 +19,10 @@ pub struct Stats {
|
|||||||
connects_all: AtomicU64,
|
connects_all: AtomicU64,
|
||||||
connects_bad: AtomicU64,
|
connects_bad: AtomicU64,
|
||||||
handshake_timeouts: AtomicU64,
|
handshake_timeouts: AtomicU64,
|
||||||
|
me_keepalive_sent: AtomicU64,
|
||||||
|
me_keepalive_failed: AtomicU64,
|
||||||
|
me_reconnect_attempts: AtomicU64,
|
||||||
|
me_reconnect_success: AtomicU64,
|
||||||
user_stats: DashMap<String, UserStats>,
|
user_stats: DashMap<String, UserStats>,
|
||||||
start_time: parking_lot::RwLock<Option<Instant>>,
|
start_time: parking_lot::RwLock<Option<Instant>>,
|
||||||
}
|
}
|
||||||
@@ -43,8 +47,16 @@ impl Stats {
|
|||||||
pub fn increment_connects_all(&self) { self.connects_all.fetch_add(1, Ordering::Relaxed); }
|
pub fn increment_connects_all(&self) { self.connects_all.fetch_add(1, Ordering::Relaxed); }
|
||||||
pub fn increment_connects_bad(&self) { self.connects_bad.fetch_add(1, Ordering::Relaxed); }
|
pub fn increment_connects_bad(&self) { self.connects_bad.fetch_add(1, Ordering::Relaxed); }
|
||||||
pub fn increment_handshake_timeouts(&self) { self.handshake_timeouts.fetch_add(1, Ordering::Relaxed); }
|
pub fn increment_handshake_timeouts(&self) { self.handshake_timeouts.fetch_add(1, Ordering::Relaxed); }
|
||||||
|
pub fn increment_me_keepalive_sent(&self) { self.me_keepalive_sent.fetch_add(1, Ordering::Relaxed); }
|
||||||
|
pub fn increment_me_keepalive_failed(&self) { self.me_keepalive_failed.fetch_add(1, Ordering::Relaxed); }
|
||||||
|
pub fn increment_me_reconnect_attempt(&self) { self.me_reconnect_attempts.fetch_add(1, Ordering::Relaxed); }
|
||||||
|
pub fn increment_me_reconnect_success(&self) { self.me_reconnect_success.fetch_add(1, Ordering::Relaxed); }
|
||||||
pub fn get_connects_all(&self) -> u64 { self.connects_all.load(Ordering::Relaxed) }
|
pub fn get_connects_all(&self) -> u64 { self.connects_all.load(Ordering::Relaxed) }
|
||||||
pub fn get_connects_bad(&self) -> u64 { self.connects_bad.load(Ordering::Relaxed) }
|
pub fn get_connects_bad(&self) -> u64 { self.connects_bad.load(Ordering::Relaxed) }
|
||||||
|
pub fn get_me_keepalive_sent(&self) -> u64 { self.me_keepalive_sent.load(Ordering::Relaxed) }
|
||||||
|
pub fn get_me_keepalive_failed(&self) -> u64 { self.me_keepalive_failed.load(Ordering::Relaxed) }
|
||||||
|
pub fn get_me_reconnect_attempts(&self) -> u64 { self.me_reconnect_attempts.load(Ordering::Relaxed) }
|
||||||
|
pub fn get_me_reconnect_success(&self) -> u64 { self.me_reconnect_success.load(Ordering::Relaxed) }
|
||||||
|
|
||||||
pub fn increment_user_connects(&self, user: &str) {
|
pub fn increment_user_connects(&self, user: &str) {
|
||||||
self.user_stats.entry(user.to_string()).or_default()
|
self.user_stats.entry(user.to_string()).or_default()
|
||||||
|
|||||||
103
src/tls_front/cache.rs
Normal file
103
src/tls_front/cache.rs
Normal file
@@ -0,0 +1,103 @@
|
|||||||
|
use std::collections::HashMap;
|
||||||
|
use std::path::{Path, PathBuf};
|
||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::{SystemTime, Duration};
|
||||||
|
|
||||||
|
use tokio::sync::RwLock;
|
||||||
|
use tokio::time::sleep;
|
||||||
|
use tracing::{debug, warn, info};
|
||||||
|
|
||||||
|
use crate::tls_front::types::{CachedTlsData, ParsedServerHello, TlsFetchResult};
|
||||||
|
|
||||||
|
/// Lightweight in-memory + optional on-disk cache for TLS fronting data.
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct TlsFrontCache {
|
||||||
|
memory: RwLock<HashMap<String, Arc<CachedTlsData>>>,
|
||||||
|
default: Arc<CachedTlsData>,
|
||||||
|
disk_path: PathBuf,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl TlsFrontCache {
|
||||||
|
pub fn new(domains: &[String], default_len: usize, disk_path: impl AsRef<Path>) -> Self {
|
||||||
|
let default_template = ParsedServerHello {
|
||||||
|
version: [0x03, 0x03],
|
||||||
|
random: [0u8; 32],
|
||||||
|
session_id: Vec::new(),
|
||||||
|
cipher_suite: [0x13, 0x01],
|
||||||
|
compression: 0,
|
||||||
|
extensions: Vec::new(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let default = Arc::new(CachedTlsData {
|
||||||
|
server_hello_template: default_template,
|
||||||
|
cert_info: None,
|
||||||
|
app_data_records_sizes: vec![default_len],
|
||||||
|
total_app_data_len: default_len,
|
||||||
|
fetched_at: SystemTime::now(),
|
||||||
|
domain: "default".to_string(),
|
||||||
|
});
|
||||||
|
|
||||||
|
let mut map = HashMap::new();
|
||||||
|
for d in domains {
|
||||||
|
map.insert(d.clone(), default.clone());
|
||||||
|
}
|
||||||
|
|
||||||
|
Self {
|
||||||
|
memory: RwLock::new(map),
|
||||||
|
default,
|
||||||
|
disk_path: disk_path.as_ref().to_path_buf(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn get(&self, sni: &str) -> Arc<CachedTlsData> {
|
||||||
|
let guard = self.memory.read().await;
|
||||||
|
guard.get(sni).cloned().unwrap_or_else(|| self.default.clone())
|
||||||
|
}
|
||||||
|
|
||||||
|
pub async fn set(&self, domain: &str, data: CachedTlsData) {
|
||||||
|
let mut guard = self.memory.write().await;
|
||||||
|
guard.insert(domain.to_string(), Arc::new(data));
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Spawn background updater that periodically refreshes cached domains using provided fetcher.
|
||||||
|
pub fn spawn_updater<F>(
|
||||||
|
self: Arc<Self>,
|
||||||
|
domains: Vec<String>,
|
||||||
|
interval: Duration,
|
||||||
|
fetcher: F,
|
||||||
|
) where
|
||||||
|
F: Fn(String) -> tokio::task::JoinHandle<()> + Send + Sync + 'static,
|
||||||
|
{
|
||||||
|
tokio::spawn(async move {
|
||||||
|
loop {
|
||||||
|
for domain in &domains {
|
||||||
|
fetcher(domain.clone()).await;
|
||||||
|
}
|
||||||
|
sleep(interval).await;
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Replace cached entry from a fetch result.
|
||||||
|
pub async fn update_from_fetch(&self, domain: &str, fetched: TlsFetchResult) {
|
||||||
|
let data = CachedTlsData {
|
||||||
|
server_hello_template: fetched.server_hello_parsed,
|
||||||
|
cert_info: None,
|
||||||
|
app_data_records_sizes: fetched.app_data_records_sizes.clone(),
|
||||||
|
total_app_data_len: fetched.total_app_data_len,
|
||||||
|
fetched_at: SystemTime::now(),
|
||||||
|
domain: domain.to_string(),
|
||||||
|
};
|
||||||
|
|
||||||
|
self.set(domain, data).await;
|
||||||
|
debug!(domain = %domain, len = fetched.total_app_data_len, "TLS cache updated");
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn default_entry(&self) -> Arc<CachedTlsData> {
|
||||||
|
self.default.clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
pub fn disk_path(&self) -> &Path {
|
||||||
|
&self.disk_path
|
||||||
|
}
|
||||||
|
}
|
||||||
104
src/tls_front/emulator.rs
Normal file
104
src/tls_front/emulator.rs
Normal file
@@ -0,0 +1,104 @@
|
|||||||
|
use crate::crypto::{sha256_hmac, SecureRandom};
|
||||||
|
use crate::protocol::constants::{
|
||||||
|
TLS_RECORD_APPLICATION, TLS_RECORD_CHANGE_CIPHER, TLS_RECORD_HANDSHAKE, TLS_VERSION,
|
||||||
|
};
|
||||||
|
use crate::protocol::tls::{TLS_DIGEST_LEN, TLS_DIGEST_POS, gen_fake_x25519_key};
|
||||||
|
use crate::tls_front::types::CachedTlsData;
|
||||||
|
|
||||||
|
/// Build a ServerHello + CCS + ApplicationData sequence using cached TLS metadata.
|
||||||
|
pub fn build_emulated_server_hello(
|
||||||
|
secret: &[u8],
|
||||||
|
client_digest: &[u8; TLS_DIGEST_LEN],
|
||||||
|
session_id: &[u8],
|
||||||
|
cached: &CachedTlsData,
|
||||||
|
rng: &SecureRandom,
|
||||||
|
) -> Vec<u8> {
|
||||||
|
// --- ServerHello ---
|
||||||
|
let mut extensions = Vec::new();
|
||||||
|
// KeyShare (x25519)
|
||||||
|
let key = gen_fake_x25519_key(rng);
|
||||||
|
extensions.extend_from_slice(&0x0033u16.to_be_bytes()); // key_share
|
||||||
|
extensions.extend_from_slice(&(2 + 2 + 32u16).to_be_bytes()); // len
|
||||||
|
extensions.extend_from_slice(&0x001du16.to_be_bytes()); // X25519
|
||||||
|
extensions.extend_from_slice(&(32u16).to_be_bytes());
|
||||||
|
extensions.extend_from_slice(&key);
|
||||||
|
// supported_versions (TLS1.3)
|
||||||
|
extensions.extend_from_slice(&0x002bu16.to_be_bytes());
|
||||||
|
extensions.extend_from_slice(&(2u16).to_be_bytes());
|
||||||
|
extensions.extend_from_slice(&0x0304u16.to_be_bytes());
|
||||||
|
|
||||||
|
let extensions_len = extensions.len() as u16;
|
||||||
|
|
||||||
|
let body_len = 2 + // version
|
||||||
|
32 + // random
|
||||||
|
1 + session_id.len() + // session id
|
||||||
|
2 + // cipher
|
||||||
|
1 + // compression
|
||||||
|
2 + extensions.len(); // extensions
|
||||||
|
|
||||||
|
let mut message = Vec::with_capacity(4 + body_len);
|
||||||
|
message.push(0x02); // ServerHello
|
||||||
|
let len_bytes = (body_len as u32).to_be_bytes();
|
||||||
|
message.extend_from_slice(&len_bytes[1..4]);
|
||||||
|
message.extend_from_slice(&cached.server_hello_template.version); // 0x0303
|
||||||
|
message.extend_from_slice(&[0u8; 32]); // random placeholder
|
||||||
|
message.push(session_id.len() as u8);
|
||||||
|
message.extend_from_slice(session_id);
|
||||||
|
let cipher = if cached.server_hello_template.cipher_suite == [0, 0] {
|
||||||
|
[0x13, 0x01]
|
||||||
|
} else {
|
||||||
|
cached.server_hello_template.cipher_suite
|
||||||
|
};
|
||||||
|
message.extend_from_slice(&cipher);
|
||||||
|
message.push(cached.server_hello_template.compression);
|
||||||
|
message.extend_from_slice(&extensions_len.to_be_bytes());
|
||||||
|
message.extend_from_slice(&extensions);
|
||||||
|
|
||||||
|
let mut server_hello = Vec::with_capacity(5 + message.len());
|
||||||
|
server_hello.push(TLS_RECORD_HANDSHAKE);
|
||||||
|
server_hello.extend_from_slice(&TLS_VERSION);
|
||||||
|
server_hello.extend_from_slice(&(message.len() as u16).to_be_bytes());
|
||||||
|
server_hello.extend_from_slice(&message);
|
||||||
|
|
||||||
|
// --- ChangeCipherSpec ---
|
||||||
|
let change_cipher_spec = [
|
||||||
|
TLS_RECORD_CHANGE_CIPHER,
|
||||||
|
TLS_VERSION[0],
|
||||||
|
TLS_VERSION[1],
|
||||||
|
0x00,
|
||||||
|
0x01,
|
||||||
|
0x01,
|
||||||
|
];
|
||||||
|
|
||||||
|
// --- ApplicationData (fake encrypted records) ---
|
||||||
|
// Use the same number and sizes of ApplicationData records as the cached server.
|
||||||
|
let mut sizes = cached.app_data_records_sizes.clone();
|
||||||
|
if sizes.is_empty() {
|
||||||
|
sizes.push(cached.total_app_data_len.max(1024));
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut app_data = Vec::new();
|
||||||
|
for size in sizes {
|
||||||
|
let mut rec = Vec::with_capacity(5 + size);
|
||||||
|
rec.push(TLS_RECORD_APPLICATION);
|
||||||
|
rec.extend_from_slice(&TLS_VERSION);
|
||||||
|
rec.extend_from_slice(&(size as u16).to_be_bytes());
|
||||||
|
rec.extend_from_slice(&rng.bytes(size));
|
||||||
|
app_data.extend_from_slice(&rec);
|
||||||
|
}
|
||||||
|
|
||||||
|
// --- Combine ---
|
||||||
|
let mut response = Vec::with_capacity(server_hello.len() + change_cipher_spec.len() + app_data.len());
|
||||||
|
response.extend_from_slice(&server_hello);
|
||||||
|
response.extend_from_slice(&change_cipher_spec);
|
||||||
|
response.extend_from_slice(&app_data);
|
||||||
|
|
||||||
|
// --- HMAC ---
|
||||||
|
let mut hmac_input = Vec::with_capacity(TLS_DIGEST_LEN + response.len());
|
||||||
|
hmac_input.extend_from_slice(client_digest);
|
||||||
|
hmac_input.extend_from_slice(&response);
|
||||||
|
let digest = sha256_hmac(secret, &hmac_input);
|
||||||
|
response[TLS_DIGEST_POS..TLS_DIGEST_POS + TLS_DIGEST_LEN].copy_from_slice(&digest);
|
||||||
|
|
||||||
|
response
|
||||||
|
}
|
||||||
391
src/tls_front/fetcher.rs
Normal file
391
src/tls_front/fetcher.rs
Normal file
@@ -0,0 +1,391 @@
|
|||||||
|
use std::sync::Arc;
|
||||||
|
use std::time::Duration;
|
||||||
|
|
||||||
|
use anyhow::{Context, Result, anyhow};
|
||||||
|
use tokio::io::{AsyncReadExt, AsyncWriteExt};
|
||||||
|
use tokio::net::TcpStream;
|
||||||
|
use tokio::time::timeout;
|
||||||
|
use tokio_rustls::client::TlsStream;
|
||||||
|
use tokio_rustls::TlsConnector;
|
||||||
|
use tracing::{debug, warn};
|
||||||
|
|
||||||
|
use rustls::client::danger::{HandshakeSignatureValid, ServerCertVerified, ServerCertVerifier};
|
||||||
|
use rustls::client::ClientConfig;
|
||||||
|
use rustls::pki_types::{CertificateDer, ServerName, UnixTime};
|
||||||
|
use rustls::{DigitallySignedStruct, Error as RustlsError};
|
||||||
|
|
||||||
|
use crate::crypto::SecureRandom;
|
||||||
|
use crate::protocol::constants::{TLS_RECORD_APPLICATION, TLS_RECORD_HANDSHAKE, TLS_VERSION};
|
||||||
|
use crate::tls_front::types::{ParsedServerHello, TlsExtension, TlsFetchResult};
|
||||||
|
|
||||||
|
/// No-op verifier: accept any certificate (we only need lengths and metadata).
|
||||||
|
#[derive(Debug)]
|
||||||
|
struct NoVerify;
|
||||||
|
|
||||||
|
impl ServerCertVerifier for NoVerify {
|
||||||
|
fn verify_server_cert(
|
||||||
|
&self,
|
||||||
|
_end_entity: &CertificateDer<'_>,
|
||||||
|
_intermediates: &[CertificateDer<'_>],
|
||||||
|
_server_name: &ServerName<'_>,
|
||||||
|
_ocsp: &[u8],
|
||||||
|
_now: UnixTime,
|
||||||
|
) -> Result<ServerCertVerified, RustlsError> {
|
||||||
|
Ok(ServerCertVerified::assertion())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn verify_tls12_signature(
|
||||||
|
&self,
|
||||||
|
_message: &[u8],
|
||||||
|
_cert: &CertificateDer<'_>,
|
||||||
|
_dss: &DigitallySignedStruct,
|
||||||
|
) -> Result<HandshakeSignatureValid, RustlsError> {
|
||||||
|
Ok(HandshakeSignatureValid::assertion())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn verify_tls13_signature(
|
||||||
|
&self,
|
||||||
|
_message: &[u8],
|
||||||
|
_cert: &CertificateDer<'_>,
|
||||||
|
_dss: &DigitallySignedStruct,
|
||||||
|
) -> Result<HandshakeSignatureValid, RustlsError> {
|
||||||
|
Ok(HandshakeSignatureValid::assertion())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> {
|
||||||
|
use rustls::SignatureScheme::*;
|
||||||
|
vec![
|
||||||
|
RSA_PKCS1_SHA256,
|
||||||
|
RSA_PSS_SHA256,
|
||||||
|
ECDSA_NISTP256_SHA256,
|
||||||
|
ECDSA_NISTP384_SHA384,
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn build_client_config() -> Arc<ClientConfig> {
|
||||||
|
let root = rustls::RootCertStore::empty();
|
||||||
|
|
||||||
|
let provider = rustls::crypto::ring::default_provider();
|
||||||
|
let mut config = ClientConfig::builder_with_provider(Arc::new(provider))
|
||||||
|
.with_protocol_versions(&[&rustls::version::TLS13, &rustls::version::TLS12])
|
||||||
|
.expect("protocol versions")
|
||||||
|
.with_root_certificates(root)
|
||||||
|
.with_no_client_auth();
|
||||||
|
|
||||||
|
config
|
||||||
|
.dangerous()
|
||||||
|
.set_certificate_verifier(Arc::new(NoVerify));
|
||||||
|
|
||||||
|
Arc::new(config)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn build_client_hello(sni: &str, rng: &SecureRandom) -> Vec<u8> {
|
||||||
|
// === ClientHello body ===
|
||||||
|
let mut body = Vec::new();
|
||||||
|
|
||||||
|
// Legacy version (TLS 1.0) as in real ClientHello headers
|
||||||
|
body.extend_from_slice(&[0x03, 0x03]);
|
||||||
|
|
||||||
|
// Random
|
||||||
|
body.extend_from_slice(&rng.bytes(32));
|
||||||
|
|
||||||
|
// Session ID: empty
|
||||||
|
body.push(0);
|
||||||
|
|
||||||
|
// Cipher suites (common minimal set, TLS1.3 + a few 1.2 fallbacks)
|
||||||
|
let cipher_suites: [u8; 10] = [
|
||||||
|
0x13, 0x01, // TLS_AES_128_GCM_SHA256
|
||||||
|
0x13, 0x02, // TLS_AES_256_GCM_SHA384
|
||||||
|
0x13, 0x03, // TLS_CHACHA20_POLY1305_SHA256
|
||||||
|
0x00, 0x2f, // TLS_RSA_WITH_AES_128_CBC_SHA (legacy)
|
||||||
|
0x00, 0xff, // RENEGOTIATION_INFO_SCSV
|
||||||
|
];
|
||||||
|
body.extend_from_slice(&(cipher_suites.len() as u16).to_be_bytes());
|
||||||
|
body.extend_from_slice(&cipher_suites);
|
||||||
|
|
||||||
|
// Compression methods: null only
|
||||||
|
body.push(1);
|
||||||
|
body.push(0);
|
||||||
|
|
||||||
|
// === Extensions ===
|
||||||
|
let mut exts = Vec::new();
|
||||||
|
|
||||||
|
// server_name (SNI)
|
||||||
|
let sni_bytes = sni.as_bytes();
|
||||||
|
let mut sni_ext = Vec::with_capacity(5 + sni_bytes.len());
|
||||||
|
sni_ext.extend_from_slice(&(sni_bytes.len() as u16 + 3).to_be_bytes());
|
||||||
|
sni_ext.push(0); // host_name
|
||||||
|
sni_ext.extend_from_slice(&(sni_bytes.len() as u16).to_be_bytes());
|
||||||
|
sni_ext.extend_from_slice(sni_bytes);
|
||||||
|
exts.extend_from_slice(&0x0000u16.to_be_bytes());
|
||||||
|
exts.extend_from_slice(&(sni_ext.len() as u16).to_be_bytes());
|
||||||
|
exts.extend_from_slice(&sni_ext);
|
||||||
|
|
||||||
|
// supported_groups
|
||||||
|
let groups: [u16; 2] = [0x001d, 0x0017]; // x25519, secp256r1
|
||||||
|
exts.extend_from_slice(&0x000au16.to_be_bytes());
|
||||||
|
exts.extend_from_slice(&((2 + groups.len() * 2) as u16).to_be_bytes());
|
||||||
|
exts.extend_from_slice(&(groups.len() as u16 * 2).to_be_bytes());
|
||||||
|
for g in groups { exts.extend_from_slice(&g.to_be_bytes()); }
|
||||||
|
|
||||||
|
// signature_algorithms
|
||||||
|
let sig_algs: [u16; 4] = [0x0804, 0x0805, 0x0403, 0x0503]; // rsa_pss_rsae_sha256/384, ecdsa_secp256r1_sha256, rsa_pkcs1_sha256
|
||||||
|
exts.extend_from_slice(&0x000du16.to_be_bytes());
|
||||||
|
exts.extend_from_slice(&((2 + sig_algs.len() * 2) as u16).to_be_bytes());
|
||||||
|
exts.extend_from_slice(&(sig_algs.len() as u16 * 2).to_be_bytes());
|
||||||
|
for a in sig_algs { exts.extend_from_slice(&a.to_be_bytes()); }
|
||||||
|
|
||||||
|
// supported_versions (TLS1.3 + TLS1.2)
|
||||||
|
let versions: [u16; 2] = [0x0304, 0x0303];
|
||||||
|
exts.extend_from_slice(&0x002bu16.to_be_bytes());
|
||||||
|
exts.extend_from_slice(&((1 + versions.len() * 2) as u16).to_be_bytes());
|
||||||
|
exts.push((versions.len() * 2) as u8);
|
||||||
|
for v in versions { exts.extend_from_slice(&v.to_be_bytes()); }
|
||||||
|
|
||||||
|
// key_share (x25519)
|
||||||
|
let key = gen_key_share(rng);
|
||||||
|
let mut keyshare = Vec::with_capacity(4 + key.len());
|
||||||
|
keyshare.extend_from_slice(&0x001du16.to_be_bytes()); // group
|
||||||
|
keyshare.extend_from_slice(&(key.len() as u16).to_be_bytes());
|
||||||
|
keyshare.extend_from_slice(&key);
|
||||||
|
exts.extend_from_slice(&0x0033u16.to_be_bytes());
|
||||||
|
exts.extend_from_slice(&((2 + keyshare.len()) as u16).to_be_bytes());
|
||||||
|
exts.extend_from_slice(&(keyshare.len() as u16).to_be_bytes());
|
||||||
|
exts.extend_from_slice(&keyshare);
|
||||||
|
|
||||||
|
// ALPN (http/1.1)
|
||||||
|
let alpn_proto = b"http/1.1";
|
||||||
|
exts.extend_from_slice(&0x0010u16.to_be_bytes());
|
||||||
|
exts.extend_from_slice(&((2 + 1 + alpn_proto.len()) as u16).to_be_bytes());
|
||||||
|
exts.extend_from_slice(&((1 + alpn_proto.len()) as u16).to_be_bytes());
|
||||||
|
exts.push(alpn_proto.len() as u8);
|
||||||
|
exts.extend_from_slice(alpn_proto);
|
||||||
|
|
||||||
|
// padding to reduce recognizability and keep length ~500 bytes
|
||||||
|
if exts.len() < 180 {
|
||||||
|
let pad_len = 180 - exts.len();
|
||||||
|
exts.extend_from_slice(&0x0015u16.to_be_bytes()); // padding extension
|
||||||
|
exts.extend_from_slice(&(pad_len as u16 + 2).to_be_bytes());
|
||||||
|
exts.extend_from_slice(&(pad_len as u16).to_be_bytes());
|
||||||
|
exts.resize(exts.len() + pad_len, 0);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Extensions length prefix
|
||||||
|
body.extend_from_slice(&(exts.len() as u16).to_be_bytes());
|
||||||
|
body.extend_from_slice(&exts);
|
||||||
|
|
||||||
|
// === Handshake wrapper ===
|
||||||
|
let mut handshake = Vec::new();
|
||||||
|
handshake.push(0x01); // ClientHello
|
||||||
|
let len_bytes = (body.len() as u32).to_be_bytes();
|
||||||
|
handshake.extend_from_slice(&len_bytes[1..4]);
|
||||||
|
handshake.extend_from_slice(&body);
|
||||||
|
|
||||||
|
// === Record ===
|
||||||
|
let mut record = Vec::new();
|
||||||
|
record.push(TLS_RECORD_HANDSHAKE);
|
||||||
|
record.extend_from_slice(&[0x03, 0x01]); // legacy record version
|
||||||
|
record.extend_from_slice(&(handshake.len() as u16).to_be_bytes());
|
||||||
|
record.extend_from_slice(&handshake);
|
||||||
|
|
||||||
|
record
|
||||||
|
}
|
||||||
|
|
||||||
|
fn gen_key_share(rng: &SecureRandom) -> [u8; 32] {
|
||||||
|
let mut key = [0u8; 32];
|
||||||
|
key.copy_from_slice(&rng.bytes(32));
|
||||||
|
key
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn read_tls_record(stream: &mut TcpStream) -> Result<(u8, Vec<u8>)> {
|
||||||
|
let mut header = [0u8; 5];
|
||||||
|
stream.read_exact(&mut header).await?;
|
||||||
|
let len = u16::from_be_bytes([header[3], header[4]]) as usize;
|
||||||
|
let mut body = vec![0u8; len];
|
||||||
|
stream.read_exact(&mut body).await?;
|
||||||
|
Ok((header[0], body))
|
||||||
|
}
|
||||||
|
|
||||||
|
fn parse_server_hello(body: &[u8]) -> Option<ParsedServerHello> {
|
||||||
|
if body.len() < 4 || body[0] != 0x02 {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let msg_len = u32::from_be_bytes([0, body[1], body[2], body[3]]) as usize;
|
||||||
|
if msg_len + 4 > body.len() {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut pos = 4;
|
||||||
|
let version = [*body.get(pos)?, *body.get(pos + 1)?];
|
||||||
|
pos += 2;
|
||||||
|
|
||||||
|
let mut random = [0u8; 32];
|
||||||
|
random.copy_from_slice(body.get(pos..pos + 32)?);
|
||||||
|
pos += 32;
|
||||||
|
|
||||||
|
let session_len = *body.get(pos)? as usize;
|
||||||
|
pos += 1;
|
||||||
|
let session_id = body.get(pos..pos + session_len)?.to_vec();
|
||||||
|
pos += session_len;
|
||||||
|
|
||||||
|
let cipher_suite = [*body.get(pos)?, *body.get(pos + 1)?];
|
||||||
|
pos += 2;
|
||||||
|
|
||||||
|
let compression = *body.get(pos)?;
|
||||||
|
pos += 1;
|
||||||
|
|
||||||
|
let ext_len = u16::from_be_bytes([*body.get(pos)?, *body.get(pos + 1)?]) as usize;
|
||||||
|
pos += 2;
|
||||||
|
let ext_end = pos.checked_add(ext_len)?;
|
||||||
|
if ext_end > body.len() {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut extensions = Vec::new();
|
||||||
|
while pos + 4 <= ext_end {
|
||||||
|
let etype = u16::from_be_bytes([body[pos], body[pos + 1]]);
|
||||||
|
let elen = u16::from_be_bytes([body[pos + 2], body[pos + 3]]) as usize;
|
||||||
|
pos += 4;
|
||||||
|
let data = body.get(pos..pos + elen)?.to_vec();
|
||||||
|
pos += elen;
|
||||||
|
extensions.push(TlsExtension { ext_type: etype, data });
|
||||||
|
}
|
||||||
|
|
||||||
|
Some(ParsedServerHello {
|
||||||
|
version,
|
||||||
|
random,
|
||||||
|
session_id,
|
||||||
|
cipher_suite,
|
||||||
|
compression,
|
||||||
|
extensions,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
async fn fetch_via_raw_tls(
|
||||||
|
host: &str,
|
||||||
|
port: u16,
|
||||||
|
sni: &str,
|
||||||
|
connect_timeout: Duration,
|
||||||
|
) -> Result<TlsFetchResult> {
|
||||||
|
let addr = format!("{host}:{port}");
|
||||||
|
let mut stream = timeout(connect_timeout, TcpStream::connect(addr)).await??;
|
||||||
|
|
||||||
|
let rng = SecureRandom::new();
|
||||||
|
let client_hello = build_client_hello(sni, &rng);
|
||||||
|
timeout(connect_timeout, async {
|
||||||
|
stream.write_all(&client_hello).await?;
|
||||||
|
stream.flush().await?;
|
||||||
|
Ok::<(), std::io::Error>(())
|
||||||
|
})
|
||||||
|
.await??;
|
||||||
|
|
||||||
|
let mut records = Vec::new();
|
||||||
|
// Read up to 4 records: ServerHello, CCS, and up to two ApplicationData.
|
||||||
|
for _ in 0..4 {
|
||||||
|
match timeout(connect_timeout, read_tls_record(&mut stream)).await {
|
||||||
|
Ok(Ok(rec)) => records.push(rec),
|
||||||
|
Ok(Err(e)) => return Err(e.into()),
|
||||||
|
Err(_) => break,
|
||||||
|
}
|
||||||
|
if records.len() >= 3 && records.iter().any(|(t, _)| *t == TLS_RECORD_APPLICATION) {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut app_sizes = Vec::new();
|
||||||
|
let mut server_hello = None;
|
||||||
|
for (t, body) in &records {
|
||||||
|
if *t == TLS_RECORD_HANDSHAKE && server_hello.is_none() {
|
||||||
|
server_hello = parse_server_hello(body);
|
||||||
|
} else if *t == TLS_RECORD_APPLICATION {
|
||||||
|
app_sizes.push(body.len());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let parsed = server_hello.ok_or_else(|| anyhow!("ServerHello not received"))?;
|
||||||
|
let total_app_data_len = app_sizes.iter().sum::<usize>().max(1024);
|
||||||
|
|
||||||
|
Ok(TlsFetchResult {
|
||||||
|
server_hello_parsed: parsed,
|
||||||
|
app_data_records_sizes: if app_sizes.is_empty() {
|
||||||
|
vec![total_app_data_len]
|
||||||
|
} else {
|
||||||
|
app_sizes
|
||||||
|
},
|
||||||
|
total_app_data_len,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Fetch real TLS metadata for the given SNI: negotiated cipher and cert lengths.
|
||||||
|
pub async fn fetch_real_tls(
|
||||||
|
host: &str,
|
||||||
|
port: u16,
|
||||||
|
sni: &str,
|
||||||
|
connect_timeout: Duration,
|
||||||
|
) -> Result<TlsFetchResult> {
|
||||||
|
// Preferred path: raw TLS probe for accurate record sizing
|
||||||
|
match fetch_via_raw_tls(host, port, sni, connect_timeout).await {
|
||||||
|
Ok(res) => return Ok(res),
|
||||||
|
Err(e) => {
|
||||||
|
warn!(sni = %sni, error = %e, "Raw TLS fetch failed, falling back to rustls");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fallback: rustls handshake to at least get certificate sizes
|
||||||
|
let addr = format!("{host}:{port}");
|
||||||
|
let stream = timeout(connect_timeout, TcpStream::connect(addr)).await??;
|
||||||
|
|
||||||
|
let config = build_client_config();
|
||||||
|
let connector = TlsConnector::from(config);
|
||||||
|
|
||||||
|
let server_name = ServerName::try_from(sni.to_owned())
|
||||||
|
.or_else(|_| ServerName::try_from(host.to_owned()))
|
||||||
|
.map_err(|_| RustlsError::General("invalid SNI".into()))?;
|
||||||
|
|
||||||
|
let tls_stream: TlsStream<TcpStream> = connector.connect(server_name, stream).await?;
|
||||||
|
|
||||||
|
// Extract negotiated parameters and certificates
|
||||||
|
let (_io, session) = tls_stream.get_ref();
|
||||||
|
let cipher_suite = session
|
||||||
|
.negotiated_cipher_suite()
|
||||||
|
.map(|s| u16::from(s.suite()).to_be_bytes())
|
||||||
|
.unwrap_or([0x13, 0x01]);
|
||||||
|
|
||||||
|
let certs: Vec<CertificateDer<'static>> = session
|
||||||
|
.peer_certificates()
|
||||||
|
.map(|slice| slice.to_vec())
|
||||||
|
.unwrap_or_default();
|
||||||
|
|
||||||
|
let total_cert_len: usize = certs.iter().map(|c| c.len()).sum::<usize>().max(1024);
|
||||||
|
|
||||||
|
// Heuristic: split across two records if large to mimic real servers a bit.
|
||||||
|
let app_data_records_sizes = if total_cert_len > 3000 {
|
||||||
|
vec![total_cert_len / 2, total_cert_len - total_cert_len / 2]
|
||||||
|
} else {
|
||||||
|
vec![total_cert_len]
|
||||||
|
};
|
||||||
|
|
||||||
|
let parsed = ParsedServerHello {
|
||||||
|
version: [0x03, 0x03],
|
||||||
|
random: [0u8; 32],
|
||||||
|
session_id: Vec::new(),
|
||||||
|
cipher_suite,
|
||||||
|
compression: 0,
|
||||||
|
extensions: Vec::new(),
|
||||||
|
};
|
||||||
|
|
||||||
|
debug!(
|
||||||
|
sni = %sni,
|
||||||
|
len = total_cert_len,
|
||||||
|
cipher = format!("0x{:04x}", u16::from_be_bytes(cipher_suite)),
|
||||||
|
"Fetched TLS metadata via rustls"
|
||||||
|
);
|
||||||
|
|
||||||
|
Ok(TlsFetchResult {
|
||||||
|
server_hello_parsed: parsed,
|
||||||
|
app_data_records_sizes: app_data_records_sizes.clone(),
|
||||||
|
total_app_data_len: app_data_records_sizes.iter().sum(),
|
||||||
|
})
|
||||||
|
}
|
||||||
7
src/tls_front/mod.rs
Normal file
7
src/tls_front/mod.rs
Normal file
@@ -0,0 +1,7 @@
|
|||||||
|
pub mod types;
|
||||||
|
pub mod cache;
|
||||||
|
pub mod fetcher;
|
||||||
|
pub mod emulator;
|
||||||
|
|
||||||
|
pub use cache::TlsFrontCache;
|
||||||
|
pub use types::{CachedTlsData, TlsFetchResult};
|
||||||
48
src/tls_front/types.rs
Normal file
48
src/tls_front/types.rs
Normal file
@@ -0,0 +1,48 @@
|
|||||||
|
use std::time::SystemTime;
|
||||||
|
|
||||||
|
/// Parsed representation of an unencrypted TLS ServerHello.
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct ParsedServerHello {
|
||||||
|
pub version: [u8; 2],
|
||||||
|
pub random: [u8; 32],
|
||||||
|
pub session_id: Vec<u8>,
|
||||||
|
pub cipher_suite: [u8; 2],
|
||||||
|
pub compression: u8,
|
||||||
|
pub extensions: Vec<TlsExtension>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Generic TLS extension container.
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct TlsExtension {
|
||||||
|
pub ext_type: u16,
|
||||||
|
pub data: Vec<u8>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Basic certificate metadata (optional, informative).
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct ParsedCertificateInfo {
|
||||||
|
pub not_after_unix: Option<i64>,
|
||||||
|
pub not_before_unix: Option<i64>,
|
||||||
|
pub issuer_cn: Option<String>,
|
||||||
|
pub subject_cn: Option<String>,
|
||||||
|
pub san_names: Vec<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Cached data per SNI used by the emulator.
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct CachedTlsData {
|
||||||
|
pub server_hello_template: ParsedServerHello,
|
||||||
|
pub cert_info: Option<ParsedCertificateInfo>,
|
||||||
|
pub app_data_records_sizes: Vec<usize>,
|
||||||
|
pub total_app_data_len: usize,
|
||||||
|
pub fetched_at: SystemTime,
|
||||||
|
pub domain: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Result of attempting to fetch real TLS artifacts.
|
||||||
|
#[derive(Debug, Clone)]
|
||||||
|
pub struct TlsFetchResult {
|
||||||
|
pub server_hello_parsed: ParsedServerHello,
|
||||||
|
pub app_data_records_sizes: Vec<usize>,
|
||||||
|
pub total_app_data_len: usize,
|
||||||
|
}
|
||||||
@@ -4,6 +4,14 @@ use crate::crypto::{AesCbc, crc32};
|
|||||||
use crate::error::{ProxyError, Result};
|
use crate::error::{ProxyError, Result};
|
||||||
use crate::protocol::constants::*;
|
use crate::protocol::constants::*;
|
||||||
|
|
||||||
|
/// Commands sent to dedicated writer tasks to avoid mutex contention on TCP writes.
|
||||||
|
pub(crate) enum WriterCommand {
|
||||||
|
Data(Vec<u8>),
|
||||||
|
DataAndFlush(Vec<u8>),
|
||||||
|
Keepalive,
|
||||||
|
Close,
|
||||||
|
}
|
||||||
|
|
||||||
pub(crate) fn build_rpc_frame(seq_no: i32, payload: &[u8]) -> Vec<u8> {
|
pub(crate) fn build_rpc_frame(seq_no: i32, payload: &[u8]) -> Vec<u8> {
|
||||||
let total_len = (4 + 4 + payload.len() + 4) as u32;
|
let total_len = (4 + 4 + payload.len() + 4) as u32;
|
||||||
let mut frame = Vec::with_capacity(total_len as usize);
|
let mut frame = Vec::with_capacity(total_len as usize);
|
||||||
@@ -181,4 +189,12 @@ impl RpcWriter {
|
|||||||
self.send(payload).await?;
|
self.send(payload).await?;
|
||||||
self.writer.flush().await.map_err(ProxyError::Io)
|
self.writer.flush().await.map_err(ProxyError::Io)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub(crate) async fn send_keepalive(&mut self, payload: [u8; 4]) -> Result<()> {
|
||||||
|
// Keepalive is a frame with fl == 4 and 4 bytes payload.
|
||||||
|
let mut frame = Vec::with_capacity(8);
|
||||||
|
frame.extend_from_slice(&4u32.to_le_bytes());
|
||||||
|
frame.extend_from_slice(&payload);
|
||||||
|
self.send(&frame).await
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -13,6 +13,24 @@ use super::secret::download_proxy_secret;
|
|||||||
use crate::crypto::SecureRandom;
|
use crate::crypto::SecureRandom;
|
||||||
use std::time::SystemTime;
|
use std::time::SystemTime;
|
||||||
|
|
||||||
|
async fn retry_fetch(url: &str) -> Option<ProxyConfigData> {
|
||||||
|
let delays = [1u64, 5, 15];
|
||||||
|
for (i, d) in delays.iter().enumerate() {
|
||||||
|
match fetch_proxy_config(url).await {
|
||||||
|
Ok(cfg) => return Some(cfg),
|
||||||
|
Err(e) => {
|
||||||
|
if i == delays.len() - 1 {
|
||||||
|
warn!(error = %e, url, "fetch_proxy_config failed");
|
||||||
|
} else {
|
||||||
|
debug!(error = %e, url, "fetch_proxy_config retrying");
|
||||||
|
tokio::time::sleep(Duration::from_secs(*d)).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
#[derive(Debug, Clone, Default)]
|
#[derive(Debug, Clone, Default)]
|
||||||
pub struct ProxyConfigData {
|
pub struct ProxyConfigData {
|
||||||
pub map: HashMap<i32, Vec<(IpAddr, u16)>>,
|
pub map: HashMap<i32, Vec<(IpAddr, u16)>>,
|
||||||
@@ -118,7 +136,8 @@ pub async fn me_config_updater(pool: Arc<MePool>, rng: Arc<SecureRandom>, interv
|
|||||||
tick.tick().await;
|
tick.tick().await;
|
||||||
|
|
||||||
// Update proxy config v4
|
// Update proxy config v4
|
||||||
if let Ok(cfg) = fetch_proxy_config("https://core.telegram.org/getProxyConfig").await {
|
let cfg_v4 = retry_fetch("https://core.telegram.org/getProxyConfig").await;
|
||||||
|
if let Some(cfg) = cfg_v4 {
|
||||||
let changed = pool.update_proxy_maps(cfg.map.clone(), None).await;
|
let changed = pool.update_proxy_maps(cfg.map.clone(), None).await;
|
||||||
if let Some(dc) = cfg.default_dc {
|
if let Some(dc) = cfg.default_dc {
|
||||||
pool.default_dc.store(dc, std::sync::atomic::Ordering::Relaxed);
|
pool.default_dc.store(dc, std::sync::atomic::Ordering::Relaxed);
|
||||||
@@ -129,14 +148,20 @@ pub async fn me_config_updater(pool: Arc<MePool>, rng: Arc<SecureRandom>, interv
|
|||||||
} else {
|
} else {
|
||||||
debug!("ME config v4 unchanged");
|
debug!("ME config v4 unchanged");
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
warn!("getProxyConfig update failed");
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update proxy config v6 (optional)
|
// Update proxy config v6 (optional)
|
||||||
if let Ok(cfg_v6) = fetch_proxy_config("https://core.telegram.org/getProxyConfigV6").await {
|
let cfg_v6 = retry_fetch("https://core.telegram.org/getProxyConfigV6").await;
|
||||||
let _ = pool.update_proxy_maps(HashMap::new(), Some(cfg_v6.map)).await;
|
if let Some(cfg_v6) = cfg_v6 {
|
||||||
|
let changed = pool.update_proxy_maps(HashMap::new(), Some(cfg_v6.map)).await;
|
||||||
|
if changed {
|
||||||
|
info!("ME config updated (v6), reconciling connections");
|
||||||
|
pool.reconcile_connections(&rng).await;
|
||||||
|
} else {
|
||||||
|
debug!("ME config v6 unchanged");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
pool.reset_stun_state();
|
||||||
|
|
||||||
// Update proxy-secret
|
// Update proxy-secret
|
||||||
match download_proxy_secret().await {
|
match download_proxy_secret().await {
|
||||||
|
|||||||
@@ -5,25 +5,30 @@ use std::time::{Duration, Instant};
|
|||||||
|
|
||||||
use tracing::{debug, info, warn};
|
use tracing::{debug, info, warn};
|
||||||
use rand::seq::SliceRandom;
|
use rand::seq::SliceRandom;
|
||||||
|
use rand::Rng;
|
||||||
|
|
||||||
use crate::crypto::SecureRandom;
|
use crate::crypto::SecureRandom;
|
||||||
use crate::network::IpFamily;
|
use crate::network::IpFamily;
|
||||||
|
|
||||||
use super::MePool;
|
use super::MePool;
|
||||||
|
|
||||||
|
const HEALTH_INTERVAL_SECS: u64 = 1;
|
||||||
|
const JITTER_FRAC_NUM: u64 = 2; // jitter up to 50% of backoff
|
||||||
|
const MAX_CONCURRENT_PER_DC_DEFAULT: usize = 1;
|
||||||
|
|
||||||
pub async fn me_health_monitor(pool: Arc<MePool>, rng: Arc<SecureRandom>, _min_connections: usize) {
|
pub async fn me_health_monitor(pool: Arc<MePool>, rng: Arc<SecureRandom>, _min_connections: usize) {
|
||||||
let mut backoff: HashMap<(i32, IpFamily), u64> = HashMap::new();
|
let mut backoff: HashMap<(i32, IpFamily), u64> = HashMap::new();
|
||||||
let mut last_attempt: HashMap<(i32, IpFamily), Instant> = HashMap::new();
|
let mut next_attempt: HashMap<(i32, IpFamily), Instant> = HashMap::new();
|
||||||
let mut inflight_single: HashSet<(i32, IpFamily)> = HashSet::new();
|
let mut inflight: HashMap<(i32, IpFamily), usize> = HashMap::new();
|
||||||
loop {
|
loop {
|
||||||
tokio::time::sleep(Duration::from_secs(30)).await;
|
tokio::time::sleep(Duration::from_secs(HEALTH_INTERVAL_SECS)).await;
|
||||||
check_family(
|
check_family(
|
||||||
IpFamily::V4,
|
IpFamily::V4,
|
||||||
&pool,
|
&pool,
|
||||||
&rng,
|
&rng,
|
||||||
&mut backoff,
|
&mut backoff,
|
||||||
&mut last_attempt,
|
&mut next_attempt,
|
||||||
&mut inflight_single,
|
&mut inflight,
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
check_family(
|
check_family(
|
||||||
@@ -31,8 +36,8 @@ pub async fn me_health_monitor(pool: Arc<MePool>, rng: Arc<SecureRandom>, _min_c
|
|||||||
&pool,
|
&pool,
|
||||||
&rng,
|
&rng,
|
||||||
&mut backoff,
|
&mut backoff,
|
||||||
&mut last_attempt,
|
&mut next_attempt,
|
||||||
&mut inflight_single,
|
&mut inflight,
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
}
|
}
|
||||||
@@ -43,8 +48,8 @@ async fn check_family(
|
|||||||
pool: &Arc<MePool>,
|
pool: &Arc<MePool>,
|
||||||
rng: &Arc<SecureRandom>,
|
rng: &Arc<SecureRandom>,
|
||||||
backoff: &mut HashMap<(i32, IpFamily), u64>,
|
backoff: &mut HashMap<(i32, IpFamily), u64>,
|
||||||
last_attempt: &mut HashMap<(i32, IpFamily), Instant>,
|
next_attempt: &mut HashMap<(i32, IpFamily), Instant>,
|
||||||
inflight_single: &mut HashSet<(i32, IpFamily)>,
|
inflight: &mut HashMap<(i32, IpFamily), usize>,
|
||||||
) {
|
) {
|
||||||
let enabled = match family {
|
let enabled = match family {
|
||||||
IpFamily::V4 => pool.decision.ipv4_me,
|
IpFamily::V4 => pool.decision.ipv4_me,
|
||||||
@@ -80,95 +85,60 @@ async fn check_family(
|
|||||||
for (dc, dc_addrs) in entries {
|
for (dc, dc_addrs) in entries {
|
||||||
let has_coverage = dc_addrs.iter().any(|a| writer_addrs.contains(a));
|
let has_coverage = dc_addrs.iter().any(|a| writer_addrs.contains(a));
|
||||||
if has_coverage {
|
if has_coverage {
|
||||||
inflight_single.remove(&(dc, family));
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
let key = (dc, family);
|
|
||||||
let delay = *backoff.get(&key).unwrap_or(&30);
|
|
||||||
let now = Instant::now();
|
|
||||||
if let Some(last) = last_attempt.get(&key) {
|
|
||||||
if now.duration_since(*last).as_secs() < delay {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if dc_addrs.len() == 1 {
|
|
||||||
// Single ME address: fast retries then slower background retries.
|
|
||||||
if inflight_single.contains(&key) {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
inflight_single.insert(key);
|
|
||||||
let addr = dc_addrs[0];
|
|
||||||
let dc_id = dc;
|
|
||||||
let pool_clone = pool.clone();
|
|
||||||
let rng_clone = rng.clone();
|
|
||||||
let timeout = pool.me_one_timeout;
|
|
||||||
let quick_attempts = pool.me_one_retry.max(1);
|
|
||||||
tokio::spawn(async move {
|
|
||||||
let mut success = false;
|
|
||||||
for _ in 0..quick_attempts {
|
|
||||||
let res = tokio::time::timeout(timeout, pool_clone.connect_one(addr, rng_clone.as_ref())).await;
|
|
||||||
match res {
|
|
||||||
Ok(Ok(())) => {
|
|
||||||
info!(%addr, dc = %dc_id, ?family, "ME reconnected for DC coverage");
|
|
||||||
success = true;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
Ok(Err(e)) => debug!(%addr, dc = %dc_id, error = %e, ?family, "ME reconnect failed"),
|
|
||||||
Err(_) => debug!(%addr, dc = %dc_id, ?family, "ME reconnect timed out"),
|
|
||||||
}
|
|
||||||
tokio::time::sleep(Duration::from_millis(1000)).await;
|
|
||||||
}
|
|
||||||
if success {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
let timeout_ms = timeout.as_millis();
|
|
||||||
warn!(
|
|
||||||
dc = %dc_id,
|
|
||||||
?family,
|
|
||||||
attempts = quick_attempts,
|
|
||||||
timeout_ms,
|
|
||||||
"DC={} has no ME coverage: {} tries * {} ms... retry in 5 seconds...",
|
|
||||||
dc_id,
|
|
||||||
quick_attempts,
|
|
||||||
timeout_ms
|
|
||||||
);
|
|
||||||
loop {
|
|
||||||
tokio::time::sleep(Duration::from_secs(5)).await;
|
|
||||||
let res = tokio::time::timeout(timeout, pool_clone.connect_one(addr, rng_clone.as_ref())).await;
|
|
||||||
match res {
|
|
||||||
Ok(Ok(())) => {
|
|
||||||
info!(%addr, dc = %dc_id, ?family, "ME reconnected for DC coverage");
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
Ok(Err(e)) => debug!(%addr, dc = %dc_id, error = %e, ?family, "ME reconnect failed"),
|
|
||||||
Err(_) => debug!(%addr, dc = %dc_id, ?family, "ME reconnect timed out"),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// will drop inflight flag in outer loop when coverage detected
|
|
||||||
});
|
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
warn!(dc = %dc, delay, ?family, "DC has no ME coverage, reconnecting...");
|
let key = (dc, family);
|
||||||
let mut shuffled = dc_addrs.clone();
|
let now = Instant::now();
|
||||||
shuffled.shuffle(&mut rand::rng());
|
if let Some(ts) = next_attempt.get(&key) {
|
||||||
let mut reconnected = false;
|
if now < *ts {
|
||||||
for addr in shuffled {
|
continue;
|
||||||
match pool.connect_one(addr, rng.as_ref()).await {
|
|
||||||
Ok(()) => {
|
|
||||||
info!(%addr, dc = %dc, ?family, "ME reconnected for DC coverage");
|
|
||||||
backoff.insert(key, 30);
|
|
||||||
last_attempt.insert(key, now);
|
|
||||||
reconnected = true;
|
|
||||||
break;
|
|
||||||
}
|
|
||||||
Err(e) => debug!(%addr, dc = %dc, error = %e, ?family, "ME reconnect failed"),
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !reconnected {
|
|
||||||
let next = (*backoff.get(&key).unwrap_or(&30)).saturating_mul(2).min(300);
|
let max_concurrent = pool.me_reconnect_max_concurrent_per_dc.max(1) as usize;
|
||||||
backoff.insert(key, next);
|
if *inflight.get(&key).unwrap_or(&0) >= max_concurrent {
|
||||||
last_attempt.insert(key, now);
|
return;
|
||||||
|
}
|
||||||
|
*inflight.entry(key).or_insert(0) += 1;
|
||||||
|
|
||||||
|
let mut shuffled = dc_addrs.clone();
|
||||||
|
shuffled.shuffle(&mut rand::rng());
|
||||||
|
let mut success = false;
|
||||||
|
for addr in shuffled {
|
||||||
|
let res = tokio::time::timeout(pool.me_one_timeout, pool.connect_one(addr, rng.as_ref())).await;
|
||||||
|
match res {
|
||||||
|
Ok(Ok(())) => {
|
||||||
|
info!(%addr, dc = %dc, ?family, "ME reconnected for DC coverage");
|
||||||
|
pool.stats.increment_me_reconnect_success();
|
||||||
|
backoff.insert(key, pool.me_reconnect_backoff_base.as_millis() as u64);
|
||||||
|
let jitter = pool.me_reconnect_backoff_base.as_millis() as u64 / JITTER_FRAC_NUM;
|
||||||
|
let wait = pool.me_reconnect_backoff_base
|
||||||
|
+ Duration::from_millis(rand::rng().random_range(0..=jitter.max(1)));
|
||||||
|
next_attempt.insert(key, now + wait);
|
||||||
|
success = true;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
Ok(Err(e)) => {
|
||||||
|
pool.stats.increment_me_reconnect_attempt();
|
||||||
|
debug!(%addr, dc = %dc, error = %e, ?family, "ME reconnect failed")
|
||||||
|
}
|
||||||
|
Err(_) => debug!(%addr, dc = %dc, ?family, "ME reconnect timed out"),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if !success {
|
||||||
|
pool.stats.increment_me_reconnect_attempt();
|
||||||
|
let curr = *backoff.get(&key).unwrap_or(&(pool.me_reconnect_backoff_base.as_millis() as u64));
|
||||||
|
let next_ms = (curr.saturating_mul(2)).min(pool.me_reconnect_backoff_cap.as_millis() as u64);
|
||||||
|
backoff.insert(key, next_ms);
|
||||||
|
let jitter = next_ms / JITTER_FRAC_NUM;
|
||||||
|
let wait = Duration::from_millis(next_ms)
|
||||||
|
+ Duration::from_millis(rand::rng().random_range(0..=jitter.max(1)));
|
||||||
|
next_attempt.insert(key, now + wait);
|
||||||
|
warn!(dc = %dc, backoff_ms = next_ms, ?family, "DC has no ME coverage, scheduled reconnect");
|
||||||
|
}
|
||||||
|
if let Some(v) = inflight.get_mut(&key) {
|
||||||
|
*v = v.saturating_sub(1);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,14 +1,14 @@
|
|||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::net::{IpAddr, Ipv6Addr, SocketAddr};
|
use std::net::{IpAddr, Ipv6Addr, SocketAddr};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
use std::sync::atomic::{AtomicBool, AtomicI32, AtomicU64, Ordering};
|
use std::sync::atomic::{AtomicBool, AtomicI32, AtomicU64, AtomicUsize, Ordering};
|
||||||
use bytes::BytesMut;
|
use bytes::BytesMut;
|
||||||
use rand::Rng;
|
use rand::Rng;
|
||||||
use rand::seq::SliceRandom;
|
use rand::seq::SliceRandom;
|
||||||
use tokio::sync::{Mutex, RwLock};
|
use tokio::sync::{Mutex, RwLock, mpsc, Notify};
|
||||||
use tokio_util::sync::CancellationToken;
|
use tokio_util::sync::CancellationToken;
|
||||||
use tracing::{debug, info, warn};
|
use tracing::{debug, info, warn};
|
||||||
use std::time::Duration;
|
use std::time::{Duration, Instant};
|
||||||
|
|
||||||
use crate::crypto::SecureRandom;
|
use crate::crypto::SecureRandom;
|
||||||
use crate::error::{ProxyError, Result};
|
use crate::error::{ProxyError, Result};
|
||||||
@@ -18,18 +18,18 @@ use crate::protocol::constants::*;
|
|||||||
|
|
||||||
use super::ConnRegistry;
|
use super::ConnRegistry;
|
||||||
use super::registry::{BoundConn, ConnMeta};
|
use super::registry::{BoundConn, ConnMeta};
|
||||||
use super::codec::RpcWriter;
|
use super::codec::{RpcWriter, WriterCommand};
|
||||||
use super::reader::reader_loop;
|
use super::reader::reader_loop;
|
||||||
use super::MeResponse;
|
use super::MeResponse;
|
||||||
|
|
||||||
const ME_ACTIVE_PING_SECS: u64 = 25;
|
const ME_ACTIVE_PING_SECS: u64 = 25;
|
||||||
const ME_ACTIVE_PING_JITTER_SECS: i64 = 5;
|
const ME_ACTIVE_PING_JITTER_SECS: i64 = 5;
|
||||||
|
const ME_KEEPALIVE_PAYLOAD_LEN: usize = 4;
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct MeWriter {
|
pub struct MeWriter {
|
||||||
pub id: u64,
|
pub id: u64,
|
||||||
pub addr: SocketAddr,
|
pub addr: SocketAddr,
|
||||||
pub writer: Arc<Mutex<RpcWriter>>,
|
pub tx: mpsc::Sender<WriterCommand>,
|
||||||
pub cancel: CancellationToken,
|
pub cancel: CancellationToken,
|
||||||
pub degraded: Arc<AtomicBool>,
|
pub degraded: Arc<AtomicBool>,
|
||||||
pub draining: Arc<AtomicBool>,
|
pub draining: Arc<AtomicBool>,
|
||||||
@@ -47,11 +47,24 @@ pub struct MePool {
|
|||||||
pub(super) nat_ip_detected: Arc<RwLock<Option<IpAddr>>>,
|
pub(super) nat_ip_detected: Arc<RwLock<Option<IpAddr>>>,
|
||||||
pub(super) nat_probe: bool,
|
pub(super) nat_probe: bool,
|
||||||
pub(super) nat_stun: Option<String>,
|
pub(super) nat_stun: Option<String>,
|
||||||
|
pub(super) nat_stun_servers: Vec<String>,
|
||||||
pub(super) detected_ipv6: Option<Ipv6Addr>,
|
pub(super) detected_ipv6: Option<Ipv6Addr>,
|
||||||
pub(super) nat_probe_attempts: std::sync::atomic::AtomicU8,
|
pub(super) nat_probe_attempts: std::sync::atomic::AtomicU8,
|
||||||
pub(super) nat_probe_disabled: std::sync::atomic::AtomicBool,
|
pub(super) nat_probe_disabled: std::sync::atomic::AtomicBool,
|
||||||
|
pub(super) stun_backoff_until: Arc<RwLock<Option<Instant>>>,
|
||||||
pub(super) me_one_retry: u8,
|
pub(super) me_one_retry: u8,
|
||||||
pub(super) me_one_timeout: Duration,
|
pub(super) me_one_timeout: Duration,
|
||||||
|
pub(super) me_keepalive_enabled: bool,
|
||||||
|
pub(super) me_keepalive_interval: Duration,
|
||||||
|
pub(super) me_keepalive_jitter: Duration,
|
||||||
|
pub(super) me_keepalive_payload_random: bool,
|
||||||
|
pub(super) me_warmup_stagger_enabled: bool,
|
||||||
|
pub(super) me_warmup_step_delay: Duration,
|
||||||
|
pub(super) me_warmup_step_jitter: Duration,
|
||||||
|
pub(super) me_reconnect_max_concurrent_per_dc: u32,
|
||||||
|
pub(super) me_reconnect_backoff_base: Duration,
|
||||||
|
pub(super) me_reconnect_backoff_cap: Duration,
|
||||||
|
pub(super) me_reconnect_fast_retry_count: u32,
|
||||||
pub(super) proxy_map_v4: Arc<RwLock<HashMap<i32, Vec<(IpAddr, u16)>>>>,
|
pub(super) proxy_map_v4: Arc<RwLock<HashMap<i32, Vec<(IpAddr, u16)>>>>,
|
||||||
pub(super) proxy_map_v6: Arc<RwLock<HashMap<i32, Vec<(IpAddr, u16)>>>>,
|
pub(super) proxy_map_v6: Arc<RwLock<HashMap<i32, Vec<(IpAddr, u16)>>>>,
|
||||||
pub(super) default_dc: AtomicI32,
|
pub(super) default_dc: AtomicI32,
|
||||||
@@ -59,6 +72,9 @@ pub struct MePool {
|
|||||||
pub(super) ping_tracker: Arc<Mutex<HashMap<i64, (std::time::Instant, u64)>>>,
|
pub(super) ping_tracker: Arc<Mutex<HashMap<i64, (std::time::Instant, u64)>>>,
|
||||||
pub(super) rtt_stats: Arc<Mutex<HashMap<u64, (f64, f64)>>>,
|
pub(super) rtt_stats: Arc<Mutex<HashMap<u64, (f64, f64)>>>,
|
||||||
pub(super) nat_reflection_cache: Arc<Mutex<NatReflectionCache>>,
|
pub(super) nat_reflection_cache: Arc<Mutex<NatReflectionCache>>,
|
||||||
|
pub(super) writer_available: Arc<Notify>,
|
||||||
|
pub(super) conn_count: AtomicUsize,
|
||||||
|
pub(super) stats: Arc<crate::stats::Stats>,
|
||||||
pool_size: usize,
|
pool_size: usize,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -75,6 +91,7 @@ impl MePool {
|
|||||||
nat_ip: Option<IpAddr>,
|
nat_ip: Option<IpAddr>,
|
||||||
nat_probe: bool,
|
nat_probe: bool,
|
||||||
nat_stun: Option<String>,
|
nat_stun: Option<String>,
|
||||||
|
nat_stun_servers: Vec<String>,
|
||||||
detected_ipv6: Option<Ipv6Addr>,
|
detected_ipv6: Option<Ipv6Addr>,
|
||||||
me_one_retry: u8,
|
me_one_retry: u8,
|
||||||
me_one_timeout_ms: u64,
|
me_one_timeout_ms: u64,
|
||||||
@@ -83,6 +100,18 @@ impl MePool {
|
|||||||
default_dc: Option<i32>,
|
default_dc: Option<i32>,
|
||||||
decision: NetworkDecision,
|
decision: NetworkDecision,
|
||||||
rng: Arc<SecureRandom>,
|
rng: Arc<SecureRandom>,
|
||||||
|
stats: Arc<crate::stats::Stats>,
|
||||||
|
me_keepalive_enabled: bool,
|
||||||
|
me_keepalive_interval_secs: u64,
|
||||||
|
me_keepalive_jitter_secs: u64,
|
||||||
|
me_keepalive_payload_random: bool,
|
||||||
|
me_warmup_stagger_enabled: bool,
|
||||||
|
me_warmup_step_delay_ms: u64,
|
||||||
|
me_warmup_step_jitter_ms: u64,
|
||||||
|
me_reconnect_max_concurrent_per_dc: u32,
|
||||||
|
me_reconnect_backoff_base_ms: u64,
|
||||||
|
me_reconnect_backoff_cap_ms: u64,
|
||||||
|
me_reconnect_fast_retry_count: u32,
|
||||||
) -> Arc<Self> {
|
) -> Arc<Self> {
|
||||||
Arc::new(Self {
|
Arc::new(Self {
|
||||||
registry: Arc::new(ConnRegistry::new()),
|
registry: Arc::new(ConnRegistry::new()),
|
||||||
@@ -96,11 +125,25 @@ impl MePool {
|
|||||||
nat_ip_detected: Arc::new(RwLock::new(None)),
|
nat_ip_detected: Arc::new(RwLock::new(None)),
|
||||||
nat_probe,
|
nat_probe,
|
||||||
nat_stun,
|
nat_stun,
|
||||||
|
nat_stun_servers,
|
||||||
detected_ipv6,
|
detected_ipv6,
|
||||||
nat_probe_attempts: std::sync::atomic::AtomicU8::new(0),
|
nat_probe_attempts: std::sync::atomic::AtomicU8::new(0),
|
||||||
nat_probe_disabled: std::sync::atomic::AtomicBool::new(false),
|
nat_probe_disabled: std::sync::atomic::AtomicBool::new(false),
|
||||||
|
stun_backoff_until: Arc::new(RwLock::new(None)),
|
||||||
me_one_retry,
|
me_one_retry,
|
||||||
me_one_timeout: Duration::from_millis(me_one_timeout_ms),
|
me_one_timeout: Duration::from_millis(me_one_timeout_ms),
|
||||||
|
stats,
|
||||||
|
me_keepalive_enabled,
|
||||||
|
me_keepalive_interval: Duration::from_secs(me_keepalive_interval_secs),
|
||||||
|
me_keepalive_jitter: Duration::from_secs(me_keepalive_jitter_secs),
|
||||||
|
me_keepalive_payload_random,
|
||||||
|
me_warmup_stagger_enabled,
|
||||||
|
me_warmup_step_delay: Duration::from_millis(me_warmup_step_delay_ms),
|
||||||
|
me_warmup_step_jitter: Duration::from_millis(me_warmup_step_jitter_ms),
|
||||||
|
me_reconnect_max_concurrent_per_dc,
|
||||||
|
me_reconnect_backoff_base: Duration::from_millis(me_reconnect_backoff_base_ms),
|
||||||
|
me_reconnect_backoff_cap: Duration::from_millis(me_reconnect_backoff_cap_ms),
|
||||||
|
me_reconnect_fast_retry_count,
|
||||||
pool_size: 2,
|
pool_size: 2,
|
||||||
proxy_map_v4: Arc::new(RwLock::new(proxy_map_v4)),
|
proxy_map_v4: Arc::new(RwLock::new(proxy_map_v4)),
|
||||||
proxy_map_v6: Arc::new(RwLock::new(proxy_map_v6)),
|
proxy_map_v6: Arc::new(RwLock::new(proxy_map_v6)),
|
||||||
@@ -109,6 +152,8 @@ impl MePool {
|
|||||||
ping_tracker: Arc::new(Mutex::new(HashMap::new())),
|
ping_tracker: Arc::new(Mutex::new(HashMap::new())),
|
||||||
rtt_stats: Arc::new(Mutex::new(HashMap::new())),
|
rtt_stats: Arc::new(Mutex::new(HashMap::new())),
|
||||||
nat_reflection_cache: Arc::new(Mutex::new(NatReflectionCache::default())),
|
nat_reflection_cache: Arc::new(Mutex::new(NatReflectionCache::default())),
|
||||||
|
writer_available: Arc::new(Notify::new()),
|
||||||
|
conn_count: AtomicUsize::new(0),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -116,6 +161,11 @@ impl MePool {
|
|||||||
self.proxy_tag.is_some()
|
self.proxy_tag.is_some()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
pub fn reset_stun_state(&self) {
|
||||||
|
self.nat_probe_attempts.store(0, Ordering::Relaxed);
|
||||||
|
self.nat_probe_disabled.store(false, Ordering::Relaxed);
|
||||||
|
}
|
||||||
|
|
||||||
pub fn translate_our_addr(&self, addr: SocketAddr) -> SocketAddr {
|
pub fn translate_our_addr(&self, addr: SocketAddr) -> SocketAddr {
|
||||||
let ip = self.translate_ip_for_nat(addr.ip());
|
let ip = self.translate_ip_for_nat(addr.ip());
|
||||||
SocketAddr::new(ip, addr.port())
|
SocketAddr::new(ip, addr.port())
|
||||||
@@ -132,7 +182,11 @@ impl MePool {
|
|||||||
pub async fn reconcile_connections(self: &Arc<Self>, rng: &SecureRandom) {
|
pub async fn reconcile_connections(self: &Arc<Self>, rng: &SecureRandom) {
|
||||||
use std::collections::HashSet;
|
use std::collections::HashSet;
|
||||||
let writers = self.writers.read().await;
|
let writers = self.writers.read().await;
|
||||||
let current: HashSet<SocketAddr> = writers.iter().map(|w| w.addr).collect();
|
let current: HashSet<SocketAddr> = writers
|
||||||
|
.iter()
|
||||||
|
.filter(|w| !w.draining.load(Ordering::Relaxed))
|
||||||
|
.map(|w| w.addr)
|
||||||
|
.collect();
|
||||||
drop(writers);
|
drop(writers);
|
||||||
|
|
||||||
for family in self.family_order() {
|
for family in self.family_order() {
|
||||||
@@ -175,12 +229,36 @@ impl MePool {
|
|||||||
let mut guard = self.proxy_map_v6.write().await;
|
let mut guard = self.proxy_map_v6.write().await;
|
||||||
if !v6.is_empty() && *guard != v6 {
|
if !v6.is_empty() && *guard != v6 {
|
||||||
*guard = v6;
|
*guard = v6;
|
||||||
|
changed = true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
// Ensure negative DC entries mirror positives when absent (Telegram convention).
|
||||||
|
{
|
||||||
|
let mut guard = self.proxy_map_v4.write().await;
|
||||||
|
let keys: Vec<i32> = guard.keys().cloned().collect();
|
||||||
|
for k in keys.iter().cloned().filter(|k| *k > 0) {
|
||||||
|
if !guard.contains_key(&-k) {
|
||||||
|
if let Some(addrs) = guard.get(&k).cloned() {
|
||||||
|
guard.insert(-k, addrs);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
{
|
||||||
|
let mut guard = self.proxy_map_v6.write().await;
|
||||||
|
let keys: Vec<i32> = guard.keys().cloned().collect();
|
||||||
|
for k in keys.iter().cloned().filter(|k| *k > 0) {
|
||||||
|
if !guard.contains_key(&-k) {
|
||||||
|
if let Some(addrs) = guard.get(&k).cloned() {
|
||||||
|
guard.insert(-k, addrs);
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
changed
|
changed
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn update_secret(&self, new_secret: Vec<u8>) -> bool {
|
pub async fn update_secret(self: &Arc<Self>, new_secret: Vec<u8>) -> bool {
|
||||||
if new_secret.len() < 32 {
|
if new_secret.len() < 32 {
|
||||||
warn!(len = new_secret.len(), "proxy-secret update ignored (too short)");
|
warn!(len = new_secret.len(), "proxy-secret update ignored (too short)");
|
||||||
return false;
|
return false;
|
||||||
@@ -195,10 +273,14 @@ impl MePool {
|
|||||||
false
|
false
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn reconnect_all(&self) {
|
pub async fn reconnect_all(self: &Arc<Self>) {
|
||||||
// Graceful: do not drop all at once. New connections will use updated secret.
|
let ws = self.writers.read().await.clone();
|
||||||
// Existing writers remain until health monitor replaces them.
|
for w in ws {
|
||||||
// No-op here to avoid total outage.
|
if let Ok(()) = self.connect_one(w.addr, self.rng.as_ref()).await {
|
||||||
|
self.mark_writer_draining(w.id).await;
|
||||||
|
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(super) async fn key_selector(&self) -> u32 {
|
pub(super) async fn key_selector(&self) -> u32 {
|
||||||
@@ -277,7 +359,24 @@ impl MePool {
|
|||||||
return Err(ProxyError::Proxy("Too many ME DC init failures, falling back to direct".into()));
|
return Err(ProxyError::Proxy("Too many ME DC init failures, falling back to direct".into()));
|
||||||
}
|
}
|
||||||
|
|
||||||
// Additional connections up to pool_size total (round-robin across DCs)
|
// Additional connections up to pool_size total (round-robin across DCs), staggered to de-phase lifecycles.
|
||||||
|
if self.me_warmup_stagger_enabled {
|
||||||
|
let mut delay_ms = 0u64;
|
||||||
|
for (dc, addrs) in dc_addrs.iter() {
|
||||||
|
for (ip, port) in addrs {
|
||||||
|
if self.connection_count() >= pool_size {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
let addr = SocketAddr::new(*ip, *port);
|
||||||
|
let jitter = rand::rng().random_range(0..=self.me_warmup_step_jitter.as_millis() as u64);
|
||||||
|
delay_ms = delay_ms.saturating_add(self.me_warmup_step_delay.as_millis() as u64 + jitter);
|
||||||
|
tokio::time::sleep(Duration::from_millis(delay_ms)).await;
|
||||||
|
if let Err(e) = self.connect_one(addr, rng.as_ref()).await {
|
||||||
|
debug!(%addr, dc = %dc, error = %e, "Extra ME connect failed (staggered)");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
for (dc, addrs) in dc_addrs.iter() {
|
for (dc, addrs) in dc_addrs.iter() {
|
||||||
for (ip, port) in addrs {
|
for (ip, port) in addrs {
|
||||||
if self.connection_count() >= pool_size {
|
if self.connection_count() >= pool_size {
|
||||||
@@ -292,6 +391,7 @@ impl MePool {
|
|||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
if !self.decision.effective_multipath && self.connection_count() > 0 {
|
if !self.decision.effective_multipath && self.connection_count() > 0 {
|
||||||
break;
|
break;
|
||||||
@@ -317,21 +417,61 @@ impl MePool {
|
|||||||
let cancel = CancellationToken::new();
|
let cancel = CancellationToken::new();
|
||||||
let degraded = Arc::new(AtomicBool::new(false));
|
let degraded = Arc::new(AtomicBool::new(false));
|
||||||
let draining = Arc::new(AtomicBool::new(false));
|
let draining = Arc::new(AtomicBool::new(false));
|
||||||
let rpc_w = Arc::new(Mutex::new(RpcWriter {
|
let (tx, mut rx) = mpsc::channel::<WriterCommand>(4096);
|
||||||
|
let tx_for_keepalive = tx.clone();
|
||||||
|
let keepalive_random = self.me_keepalive_payload_random;
|
||||||
|
let stats = self.stats.clone();
|
||||||
|
let mut rpc_writer = RpcWriter {
|
||||||
writer: hs.wr,
|
writer: hs.wr,
|
||||||
key: hs.write_key,
|
key: hs.write_key,
|
||||||
iv: hs.write_iv,
|
iv: hs.write_iv,
|
||||||
seq_no: 0,
|
seq_no: 0,
|
||||||
}));
|
};
|
||||||
|
let cancel_wr = cancel.clone();
|
||||||
|
tokio::spawn(async move {
|
||||||
|
loop {
|
||||||
|
tokio::select! {
|
||||||
|
cmd = rx.recv() => {
|
||||||
|
match cmd {
|
||||||
|
Some(WriterCommand::Data(payload)) => {
|
||||||
|
if rpc_writer.send(&payload).await.is_err() { break; }
|
||||||
|
}
|
||||||
|
Some(WriterCommand::DataAndFlush(payload)) => {
|
||||||
|
if rpc_writer.send_and_flush(&payload).await.is_err() { break; }
|
||||||
|
}
|
||||||
|
Some(WriterCommand::Keepalive) => {
|
||||||
|
let mut payload = [0u8; ME_KEEPALIVE_PAYLOAD_LEN];
|
||||||
|
if keepalive_random {
|
||||||
|
rand::rng().fill(&mut payload);
|
||||||
|
}
|
||||||
|
match rpc_writer.send_keepalive(payload).await {
|
||||||
|
Ok(()) => {
|
||||||
|
stats.increment_me_keepalive_sent();
|
||||||
|
}
|
||||||
|
Err(_) => {
|
||||||
|
stats.increment_me_keepalive_failed();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Some(WriterCommand::Close) | None => break,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
_ = cancel_wr.cancelled() => break,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
let writer = MeWriter {
|
let writer = MeWriter {
|
||||||
id: writer_id,
|
id: writer_id,
|
||||||
addr,
|
addr,
|
||||||
writer: rpc_w.clone(),
|
tx: tx.clone(),
|
||||||
cancel: cancel.clone(),
|
cancel: cancel.clone(),
|
||||||
degraded: degraded.clone(),
|
degraded: degraded.clone(),
|
||||||
draining: draining.clone(),
|
draining: draining.clone(),
|
||||||
};
|
};
|
||||||
self.writers.write().await.push(writer.clone());
|
self.writers.write().await.push(writer.clone());
|
||||||
|
self.conn_count.fetch_add(1, Ordering::Relaxed);
|
||||||
|
self.writer_available.notify_waiters();
|
||||||
|
|
||||||
let reg = self.registry.clone();
|
let reg = self.registry.clone();
|
||||||
let writers_arc = self.writers_arc();
|
let writers_arc = self.writers_arc();
|
||||||
@@ -339,11 +479,19 @@ impl MePool {
|
|||||||
let rtt_stats = self.rtt_stats.clone();
|
let rtt_stats = self.rtt_stats.clone();
|
||||||
let pool = Arc::downgrade(self);
|
let pool = Arc::downgrade(self);
|
||||||
let cancel_ping = cancel.clone();
|
let cancel_ping = cancel.clone();
|
||||||
let rpc_w_ping = rpc_w.clone();
|
let tx_ping = tx.clone();
|
||||||
let ping_tracker_ping = ping_tracker.clone();
|
let ping_tracker_ping = ping_tracker.clone();
|
||||||
|
let cleanup_done = Arc::new(AtomicBool::new(false));
|
||||||
|
let cleanup_for_reader = cleanup_done.clone();
|
||||||
|
let cleanup_for_ping = cleanup_done.clone();
|
||||||
|
let keepalive_enabled = self.me_keepalive_enabled;
|
||||||
|
let keepalive_interval = self.me_keepalive_interval;
|
||||||
|
let keepalive_jitter = self.me_keepalive_jitter;
|
||||||
|
let cancel_reader_token = cancel.clone();
|
||||||
|
let cancel_ping_token = cancel_ping.clone();
|
||||||
|
let cancel_keepalive_token = cancel.clone();
|
||||||
|
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
let cancel_reader = cancel.clone();
|
|
||||||
let res = reader_loop(
|
let res = reader_loop(
|
||||||
hs.rd,
|
hs.rd,
|
||||||
hs.read_key,
|
hs.read_key,
|
||||||
@@ -351,16 +499,21 @@ impl MePool {
|
|||||||
reg.clone(),
|
reg.clone(),
|
||||||
BytesMut::new(),
|
BytesMut::new(),
|
||||||
BytesMut::new(),
|
BytesMut::new(),
|
||||||
rpc_w.clone(),
|
tx.clone(),
|
||||||
ping_tracker.clone(),
|
ping_tracker.clone(),
|
||||||
rtt_stats.clone(),
|
rtt_stats.clone(),
|
||||||
writer_id,
|
writer_id,
|
||||||
degraded.clone(),
|
degraded.clone(),
|
||||||
cancel_reader.clone(),
|
cancel_reader_token.clone(),
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
if let Some(pool) = pool.upgrade() {
|
if let Some(pool) = pool.upgrade() {
|
||||||
pool.remove_writer_and_close_clients(writer_id).await;
|
if cleanup_for_reader
|
||||||
|
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed)
|
||||||
|
.is_ok()
|
||||||
|
{
|
||||||
|
pool.remove_writer_and_close_clients(writer_id).await;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if let Err(e) = res {
|
if let Err(e) = res {
|
||||||
warn!(error = %e, "ME reader ended");
|
warn!(error = %e, "ME reader ended");
|
||||||
@@ -378,7 +531,7 @@ impl MePool {
|
|||||||
.random_range(-ME_ACTIVE_PING_JITTER_SECS..=ME_ACTIVE_PING_JITTER_SECS);
|
.random_range(-ME_ACTIVE_PING_JITTER_SECS..=ME_ACTIVE_PING_JITTER_SECS);
|
||||||
let wait = (ME_ACTIVE_PING_SECS as i64 + jitter).max(5) as u64;
|
let wait = (ME_ACTIVE_PING_SECS as i64 + jitter).max(5) as u64;
|
||||||
tokio::select! {
|
tokio::select! {
|
||||||
_ = cancel_ping.cancelled() => {
|
_ = cancel_ping_token.cancelled() => {
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
_ = tokio::time::sleep(Duration::from_secs(wait)) => {}
|
_ = tokio::time::sleep(Duration::from_secs(wait)) => {}
|
||||||
@@ -389,20 +542,45 @@ impl MePool {
|
|||||||
p.extend_from_slice(&sent_id.to_le_bytes());
|
p.extend_from_slice(&sent_id.to_le_bytes());
|
||||||
{
|
{
|
||||||
let mut tracker = ping_tracker_ping.lock().await;
|
let mut tracker = ping_tracker_ping.lock().await;
|
||||||
|
tracker.retain(|_, (ts, _)| ts.elapsed() < Duration::from_secs(120));
|
||||||
tracker.insert(sent_id, (std::time::Instant::now(), writer_id));
|
tracker.insert(sent_id, (std::time::Instant::now(), writer_id));
|
||||||
}
|
}
|
||||||
ping_id = ping_id.wrapping_add(1);
|
ping_id = ping_id.wrapping_add(1);
|
||||||
if let Err(e) = rpc_w_ping.lock().await.send_and_flush(&p).await {
|
if tx_ping.send(WriterCommand::DataAndFlush(p)).await.is_err() {
|
||||||
debug!(error = %e, "Active ME ping failed, removing dead writer");
|
debug!("Active ME ping failed, removing dead writer");
|
||||||
cancel_ping.cancel();
|
cancel_ping.cancel();
|
||||||
if let Some(pool) = pool_ping.upgrade() {
|
if let Some(pool) = pool_ping.upgrade() {
|
||||||
pool.remove_writer_and_close_clients(writer_id).await;
|
if cleanup_for_ping
|
||||||
|
.compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed)
|
||||||
|
.is_ok()
|
||||||
|
{
|
||||||
|
pool.remove_writer_and_close_clients(writer_id).await;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
|
|
||||||
|
if keepalive_enabled {
|
||||||
|
let tx_keepalive = tx_for_keepalive;
|
||||||
|
let cancel_keepalive = cancel_keepalive_token;
|
||||||
|
tokio::spawn(async move {
|
||||||
|
// Per-writer jittered start to avoid phase sync.
|
||||||
|
let initial_jitter_ms = rand::rng().random_range(0..=keepalive_jitter.as_millis().max(1) as u64);
|
||||||
|
tokio::time::sleep(Duration::from_millis(initial_jitter_ms)).await;
|
||||||
|
loop {
|
||||||
|
tokio::select! {
|
||||||
|
_ = cancel_keepalive.cancelled() => break,
|
||||||
|
_ = tokio::time::sleep(keepalive_interval + Duration::from_millis(rand::rng().random_range(0..=keepalive_jitter.as_millis() as u64))) => {}
|
||||||
|
}
|
||||||
|
if tx_keepalive.send(WriterCommand::Keepalive).await.is_err() {
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -430,7 +608,7 @@ impl MePool {
|
|||||||
false
|
false
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(crate) async fn remove_writer_and_close_clients(&self, writer_id: u64) {
|
pub(crate) async fn remove_writer_and_close_clients(self: &Arc<Self>, writer_id: u64) {
|
||||||
let conns = self.remove_writer_only(writer_id).await;
|
let conns = self.remove_writer_only(writer_id).await;
|
||||||
for bound in conns {
|
for bound in conns {
|
||||||
let _ = self.registry.route(bound.conn_id, super::MeResponse::Close).await;
|
let _ = self.registry.route(bound.conn_id, super::MeResponse::Close).await;
|
||||||
@@ -444,8 +622,11 @@ impl MePool {
|
|||||||
if let Some(pos) = ws.iter().position(|w| w.id == writer_id) {
|
if let Some(pos) = ws.iter().position(|w| w.id == writer_id) {
|
||||||
let w = ws.remove(pos);
|
let w = ws.remove(pos);
|
||||||
w.cancel.cancel();
|
w.cancel.cancel();
|
||||||
|
let _ = w.tx.send(WriterCommand::Close).await;
|
||||||
|
self.conn_count.fetch_sub(1, Ordering::Relaxed);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
self.rtt_stats.lock().await.remove(&writer_id);
|
||||||
self.registry.writer_lost(writer_id).await
|
self.registry.writer_lost(writer_id).await
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -459,8 +640,14 @@ impl MePool {
|
|||||||
|
|
||||||
let pool = Arc::downgrade(self);
|
let pool = Arc::downgrade(self);
|
||||||
tokio::spawn(async move {
|
tokio::spawn(async move {
|
||||||
|
let deadline = Instant::now() + Duration::from_secs(300);
|
||||||
loop {
|
loop {
|
||||||
if let Some(p) = pool.upgrade() {
|
if let Some(p) = pool.upgrade() {
|
||||||
|
if Instant::now() >= deadline {
|
||||||
|
warn!(writer_id, "Drain timeout, force-closing");
|
||||||
|
let _ = p.remove_writer_and_close_clients(writer_id).await;
|
||||||
|
break;
|
||||||
|
}
|
||||||
if p.registry.is_writer_empty(writer_id).await {
|
if p.registry.is_writer_empty(writer_id).await {
|
||||||
let _ = p.remove_writer_only(writer_id).await;
|
let _ = p.remove_writer_only(writer_id).await;
|
||||||
break;
|
break;
|
||||||
|
|||||||
@@ -98,16 +98,18 @@ impl MePool {
|
|||||||
family: IpFamily,
|
family: IpFamily,
|
||||||
) -> Option<std::net::SocketAddr> {
|
) -> Option<std::net::SocketAddr> {
|
||||||
const STUN_CACHE_TTL: Duration = Duration::from_secs(600);
|
const STUN_CACHE_TTL: Duration = Duration::from_secs(600);
|
||||||
// If STUN probing was disabled after attempts, reuse cached (even stale) or skip.
|
// Backoff window
|
||||||
if self.nat_probe_disabled.load(std::sync::atomic::Ordering::Relaxed) {
|
if let Some(until) = *self.stun_backoff_until.read().await {
|
||||||
if let Ok(cache) = self.nat_reflection_cache.try_lock() {
|
if Instant::now() < until {
|
||||||
let slot = match family {
|
if let Ok(cache) = self.nat_reflection_cache.try_lock() {
|
||||||
IpFamily::V4 => cache.v4,
|
let slot = match family {
|
||||||
IpFamily::V6 => cache.v6,
|
IpFamily::V4 => cache.v4,
|
||||||
};
|
IpFamily::V6 => cache.v6,
|
||||||
return slot.map(|(_, addr)| addr);
|
};
|
||||||
|
return slot.map(|(_, addr)| addr);
|
||||||
|
}
|
||||||
|
return None;
|
||||||
}
|
}
|
||||||
return None;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if let Ok(mut cache) = self.nat_reflection_cache.try_lock() {
|
if let Ok(mut cache) = self.nat_reflection_cache.try_lock() {
|
||||||
@@ -123,48 +125,42 @@ impl MePool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
let attempt = self.nat_probe_attempts.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
let attempt = self.nat_probe_attempts.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||||
if attempt >= 2 {
|
let servers = if !self.nat_stun_servers.is_empty() {
|
||||||
self.nat_probe_disabled.store(true, std::sync::atomic::Ordering::Relaxed);
|
self.nat_stun_servers.clone()
|
||||||
return None;
|
} else if let Some(s) = &self.nat_stun {
|
||||||
}
|
vec![s.clone()]
|
||||||
|
} else {
|
||||||
|
vec!["stun.l.google.com:19302".to_string()]
|
||||||
|
};
|
||||||
|
|
||||||
let stun_addr = self
|
for stun_addr in servers {
|
||||||
.nat_stun
|
match stun_probe_dual(&stun_addr).await {
|
||||||
.clone()
|
Ok(res) => {
|
||||||
.unwrap_or_else(|| "stun.l.google.com:19302".to_string());
|
let picked: Option<StunProbeResult> = match family {
|
||||||
match stun_probe_dual(&stun_addr).await {
|
IpFamily::V4 => res.v4,
|
||||||
Ok(res) => {
|
IpFamily::V6 => res.v6,
|
||||||
let picked: Option<StunProbeResult> = match family {
|
};
|
||||||
IpFamily::V4 => res.v4,
|
if let Some(result) = picked {
|
||||||
IpFamily::V6 => res.v6,
|
info!(local = %result.local_addr, reflected = %result.reflected_addr, family = ?family, stun = %stun_addr, "NAT probe: reflected address");
|
||||||
};
|
self.nat_probe_attempts.store(0, std::sync::atomic::Ordering::Relaxed);
|
||||||
if let Some(result) = picked {
|
if let Ok(mut cache) = self.nat_reflection_cache.try_lock() {
|
||||||
info!(local = %result.local_addr, reflected = %result.reflected_addr, family = ?family, "NAT probe: reflected address");
|
let slot = match family {
|
||||||
if let Ok(mut cache) = self.nat_reflection_cache.try_lock() {
|
IpFamily::V4 => &mut cache.v4,
|
||||||
let slot = match family {
|
IpFamily::V6 => &mut cache.v6,
|
||||||
IpFamily::V4 => &mut cache.v4,
|
};
|
||||||
IpFamily::V6 => &mut cache.v6,
|
*slot = Some((Instant::now(), result.reflected_addr));
|
||||||
};
|
}
|
||||||
*slot = Some((Instant::now(), result.reflected_addr));
|
return Some(result.reflected_addr);
|
||||||
}
|
}
|
||||||
Some(result.reflected_addr)
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
}
|
||||||
}
|
Err(e) => {
|
||||||
Err(e) => {
|
warn!(error = %e, stun = %stun_addr, attempt = attempt + 1, "NAT probe failed, trying next server");
|
||||||
let attempts = attempt + 1;
|
|
||||||
if attempts <= 2 {
|
|
||||||
warn!(error = %e, attempt = attempts, "NAT probe failed");
|
|
||||||
} else {
|
|
||||||
debug!(error = %e, attempt = attempts, "NAT probe suppressed after max attempts");
|
|
||||||
}
|
}
|
||||||
if attempts >= 2 {
|
|
||||||
self.nat_probe_disabled.store(true, std::sync::atomic::Ordering::Relaxed);
|
|
||||||
}
|
|
||||||
None
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
let backoff = Duration::from_secs(60 * 2u64.pow((attempt as u32).min(6)));
|
||||||
|
*self.stun_backoff_until.write().await = Some(Instant::now() + backoff);
|
||||||
|
None
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -6,7 +6,7 @@ use std::time::Instant;
|
|||||||
use bytes::{Bytes, BytesMut};
|
use bytes::{Bytes, BytesMut};
|
||||||
use tokio::io::AsyncReadExt;
|
use tokio::io::AsyncReadExt;
|
||||||
use tokio::net::TcpStream;
|
use tokio::net::TcpStream;
|
||||||
use tokio::sync::Mutex;
|
use tokio::sync::{Mutex, mpsc};
|
||||||
use tokio_util::sync::CancellationToken;
|
use tokio_util::sync::CancellationToken;
|
||||||
use tracing::{debug, trace, warn};
|
use tracing::{debug, trace, warn};
|
||||||
|
|
||||||
@@ -14,7 +14,7 @@ use crate::crypto::{AesCbc, crc32};
|
|||||||
use crate::error::{ProxyError, Result};
|
use crate::error::{ProxyError, Result};
|
||||||
use crate::protocol::constants::*;
|
use crate::protocol::constants::*;
|
||||||
|
|
||||||
use super::codec::RpcWriter;
|
use super::codec::WriterCommand;
|
||||||
use super::{ConnRegistry, MeResponse};
|
use super::{ConnRegistry, MeResponse};
|
||||||
|
|
||||||
pub(crate) async fn reader_loop(
|
pub(crate) async fn reader_loop(
|
||||||
@@ -24,7 +24,7 @@ pub(crate) async fn reader_loop(
|
|||||||
reg: Arc<ConnRegistry>,
|
reg: Arc<ConnRegistry>,
|
||||||
enc_leftover: BytesMut,
|
enc_leftover: BytesMut,
|
||||||
mut dec: BytesMut,
|
mut dec: BytesMut,
|
||||||
writer: Arc<Mutex<RpcWriter>>,
|
tx: mpsc::Sender<WriterCommand>,
|
||||||
ping_tracker: Arc<Mutex<HashMap<i64, (Instant, u64)>>>,
|
ping_tracker: Arc<Mutex<HashMap<i64, (Instant, u64)>>>,
|
||||||
rtt_stats: Arc<Mutex<HashMap<u64, (f64, f64)>>>,
|
rtt_stats: Arc<Mutex<HashMap<u64, (f64, f64)>>>,
|
||||||
_writer_id: u64,
|
_writer_id: u64,
|
||||||
@@ -33,6 +33,8 @@ pub(crate) async fn reader_loop(
|
|||||||
) -> Result<()> {
|
) -> Result<()> {
|
||||||
let mut raw = enc_leftover;
|
let mut raw = enc_leftover;
|
||||||
let mut expected_seq: i32 = 0;
|
let mut expected_seq: i32 = 0;
|
||||||
|
let mut crc_errors = 0u32;
|
||||||
|
let mut seq_mismatch = 0u32;
|
||||||
|
|
||||||
loop {
|
loop {
|
||||||
let mut tmp = [0u8; 16_384];
|
let mut tmp = [0u8; 16_384];
|
||||||
@@ -80,12 +82,20 @@ pub(crate) async fn reader_loop(
|
|||||||
let ec = u32::from_le_bytes(frame[pe..pe + 4].try_into().unwrap());
|
let ec = u32::from_le_bytes(frame[pe..pe + 4].try_into().unwrap());
|
||||||
if crc32(&frame[..pe]) != ec {
|
if crc32(&frame[..pe]) != ec {
|
||||||
warn!("CRC mismatch in data frame");
|
warn!("CRC mismatch in data frame");
|
||||||
|
crc_errors += 1;
|
||||||
|
if crc_errors > 3 {
|
||||||
|
return Err(ProxyError::Proxy("Too many CRC mismatches".into()));
|
||||||
|
}
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
|
|
||||||
let seq_no = i32::from_le_bytes(frame[4..8].try_into().unwrap());
|
let seq_no = i32::from_le_bytes(frame[4..8].try_into().unwrap());
|
||||||
if seq_no != expected_seq {
|
if seq_no != expected_seq {
|
||||||
warn!(seq_no, expected = expected_seq, "ME RPC seq mismatch");
|
warn!(seq_no, expected = expected_seq, "ME RPC seq mismatch");
|
||||||
|
seq_mismatch += 1;
|
||||||
|
if seq_mismatch > 10 {
|
||||||
|
return Err(ProxyError::Proxy("Too many seq mismatches".into()));
|
||||||
|
}
|
||||||
expected_seq = seq_no.wrapping_add(1);
|
expected_seq = seq_no.wrapping_add(1);
|
||||||
} else {
|
} else {
|
||||||
expected_seq = expected_seq.wrapping_add(1);
|
expected_seq = expected_seq.wrapping_add(1);
|
||||||
@@ -108,7 +118,7 @@ pub(crate) async fn reader_loop(
|
|||||||
let routed = reg.route(cid, MeResponse::Data { flags, data }).await;
|
let routed = reg.route(cid, MeResponse::Data { flags, data }).await;
|
||||||
if !routed {
|
if !routed {
|
||||||
reg.unregister(cid).await;
|
reg.unregister(cid).await;
|
||||||
send_close_conn(&writer, cid).await;
|
send_close_conn(&tx, cid).await;
|
||||||
}
|
}
|
||||||
} else if pt == RPC_SIMPLE_ACK_U32 && body.len() >= 12 {
|
} else if pt == RPC_SIMPLE_ACK_U32 && body.len() >= 12 {
|
||||||
let cid = u64::from_le_bytes(body[0..8].try_into().unwrap());
|
let cid = u64::from_le_bytes(body[0..8].try_into().unwrap());
|
||||||
@@ -118,7 +128,7 @@ pub(crate) async fn reader_loop(
|
|||||||
let routed = reg.route(cid, MeResponse::Ack(cfm)).await;
|
let routed = reg.route(cid, MeResponse::Ack(cfm)).await;
|
||||||
if !routed {
|
if !routed {
|
||||||
reg.unregister(cid).await;
|
reg.unregister(cid).await;
|
||||||
send_close_conn(&writer, cid).await;
|
send_close_conn(&tx, cid).await;
|
||||||
}
|
}
|
||||||
} else if pt == RPC_CLOSE_EXT_U32 && body.len() >= 8 {
|
} else if pt == RPC_CLOSE_EXT_U32 && body.len() >= 8 {
|
||||||
let cid = u64::from_le_bytes(body[0..8].try_into().unwrap());
|
let cid = u64::from_le_bytes(body[0..8].try_into().unwrap());
|
||||||
@@ -136,8 +146,8 @@ pub(crate) async fn reader_loop(
|
|||||||
let mut pong = Vec::with_capacity(12);
|
let mut pong = Vec::with_capacity(12);
|
||||||
pong.extend_from_slice(&RPC_PONG_U32.to_le_bytes());
|
pong.extend_from_slice(&RPC_PONG_U32.to_le_bytes());
|
||||||
pong.extend_from_slice(&ping_id.to_le_bytes());
|
pong.extend_from_slice(&ping_id.to_le_bytes());
|
||||||
if let Err(e) = writer.lock().await.send_and_flush(&pong).await {
|
if tx.send(WriterCommand::DataAndFlush(pong)).await.is_err() {
|
||||||
warn!(error = %e, "PONG send failed");
|
warn!("PONG send failed");
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
} else if pt == RPC_PONG_U32 && body.len() >= 8 {
|
} else if pt == RPC_PONG_U32 && body.len() >= 8 {
|
||||||
@@ -171,12 +181,10 @@ pub(crate) async fn reader_loop(
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn send_close_conn(writer: &Arc<Mutex<RpcWriter>>, conn_id: u64) {
|
async fn send_close_conn(tx: &mpsc::Sender<WriterCommand>, conn_id: u64) {
|
||||||
let mut p = Vec::with_capacity(12);
|
let mut p = Vec::with_capacity(12);
|
||||||
p.extend_from_slice(&RPC_CLOSE_CONN_U32.to_le_bytes());
|
p.extend_from_slice(&RPC_CLOSE_CONN_U32.to_le_bytes());
|
||||||
p.extend_from_slice(&conn_id.to_le_bytes());
|
p.extend_from_slice(&conn_id.to_le_bytes());
|
||||||
|
|
||||||
if let Err(e) = writer.lock().await.send_and_flush(&p).await {
|
let _ = tx.send(WriterCommand::DataAndFlush(p)).await;
|
||||||
debug!(conn_id, error = %e, "Failed to send RPC_CLOSE_CONN");
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -5,7 +5,7 @@ use std::sync::Arc;
|
|||||||
|
|
||||||
use tokio::sync::{mpsc, Mutex, RwLock};
|
use tokio::sync::{mpsc, Mutex, RwLock};
|
||||||
|
|
||||||
use super::codec::RpcWriter;
|
use super::codec::WriterCommand;
|
||||||
use super::MeResponse;
|
use super::MeResponse;
|
||||||
|
|
||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
@@ -25,12 +25,12 @@ pub struct BoundConn {
|
|||||||
#[derive(Clone)]
|
#[derive(Clone)]
|
||||||
pub struct ConnWriter {
|
pub struct ConnWriter {
|
||||||
pub writer_id: u64,
|
pub writer_id: u64,
|
||||||
pub writer: Arc<Mutex<RpcWriter>>,
|
pub tx: mpsc::Sender<WriterCommand>,
|
||||||
}
|
}
|
||||||
|
|
||||||
struct RegistryInner {
|
struct RegistryInner {
|
||||||
map: HashMap<u64, mpsc::Sender<MeResponse>>,
|
map: HashMap<u64, mpsc::Sender<MeResponse>>,
|
||||||
writers: HashMap<u64, Arc<Mutex<RpcWriter>>>,
|
writers: HashMap<u64, mpsc::Sender<WriterCommand>>,
|
||||||
writer_for_conn: HashMap<u64, u64>,
|
writer_for_conn: HashMap<u64, u64>,
|
||||||
conns_for_writer: HashMap<u64, HashSet<u64>>,
|
conns_for_writer: HashMap<u64, HashSet<u64>>,
|
||||||
meta: HashMap<u64, ConnMeta>,
|
meta: HashMap<u64, ConnMeta>,
|
||||||
@@ -96,13 +96,13 @@ impl ConnRegistry {
|
|||||||
&self,
|
&self,
|
||||||
conn_id: u64,
|
conn_id: u64,
|
||||||
writer_id: u64,
|
writer_id: u64,
|
||||||
writer: Arc<Mutex<RpcWriter>>,
|
tx: mpsc::Sender<WriterCommand>,
|
||||||
meta: ConnMeta,
|
meta: ConnMeta,
|
||||||
) {
|
) {
|
||||||
let mut inner = self.inner.write().await;
|
let mut inner = self.inner.write().await;
|
||||||
inner.meta.entry(conn_id).or_insert(meta);
|
inner.meta.entry(conn_id).or_insert(meta);
|
||||||
inner.writer_for_conn.insert(conn_id, writer_id);
|
inner.writer_for_conn.insert(conn_id, writer_id);
|
||||||
inner.writers.entry(writer_id).or_insert_with(|| writer.clone());
|
inner.writers.entry(writer_id).or_insert_with(|| tx.clone());
|
||||||
inner
|
inner
|
||||||
.conns_for_writer
|
.conns_for_writer
|
||||||
.entry(writer_id)
|
.entry(writer_id)
|
||||||
@@ -114,7 +114,7 @@ impl ConnRegistry {
|
|||||||
let inner = self.inner.read().await;
|
let inner = self.inner.read().await;
|
||||||
let writer_id = inner.writer_for_conn.get(&conn_id).cloned()?;
|
let writer_id = inner.writer_for_conn.get(&conn_id).cloned()?;
|
||||||
let writer = inner.writers.get(&writer_id).cloned()?;
|
let writer = inner.writers.get(&writer_id).cloned()?;
|
||||||
Some(ConnWriter { writer_id, writer })
|
Some(ConnWriter { writer_id, tx: writer })
|
||||||
}
|
}
|
||||||
|
|
||||||
pub async fn writer_lost(&self, writer_id: u64) -> Vec<BoundConn> {
|
pub async fn writer_lost(&self, writer_id: u64) -> Vec<BoundConn> {
|
||||||
|
|||||||
@@ -31,8 +31,17 @@ pub async fn me_rotation_task(pool: Arc<MePool>, rng: Arc<SecureRandom>, interva
|
|||||||
info!(addr = %w.addr, writer_id = w.id, "Rotating ME connection");
|
info!(addr = %w.addr, writer_id = w.id, "Rotating ME connection");
|
||||||
match pool.connect_one(w.addr, rng.as_ref()).await {
|
match pool.connect_one(w.addr, rng.as_ref()).await {
|
||||||
Ok(()) => {
|
Ok(()) => {
|
||||||
// Mark old writer for graceful drain; removal happens when sessions finish.
|
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||||
pool.mark_writer_draining(w.id).await;
|
let ws = pool.writers.read().await;
|
||||||
|
let new_alive = ws.iter().any(|nw|
|
||||||
|
nw.id != w.id && nw.addr == w.addr && !nw.degraded.load(Ordering::Relaxed) && !nw.draining.load(Ordering::Relaxed)
|
||||||
|
);
|
||||||
|
drop(ws);
|
||||||
|
if new_alive {
|
||||||
|
pool.mark_writer_draining(w.id).await;
|
||||||
|
} else {
|
||||||
|
warn!(addr = %w.addr, writer_id = w.id, "New writer died, keeping old");
|
||||||
|
}
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
warn!(addr = %w.addr, writer_id = w.id, error = %e, "ME rotation connect failed");
|
warn!(addr = %w.addr, writer_id = w.id, error = %e, "ME rotation connect failed");
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ use crate::network::IpFamily;
|
|||||||
use crate::protocol::constants::RPC_CLOSE_EXT_U32;
|
use crate::protocol::constants::RPC_CLOSE_EXT_U32;
|
||||||
|
|
||||||
use super::MePool;
|
use super::MePool;
|
||||||
|
use super::codec::WriterCommand;
|
||||||
use super::wire::build_proxy_req_payload;
|
use super::wire::build_proxy_req_payload;
|
||||||
use rand::seq::SliceRandom;
|
use rand::seq::SliceRandom;
|
||||||
use super::registry::ConnMeta;
|
use super::registry::ConnMeta;
|
||||||
@@ -43,18 +44,15 @@ impl MePool {
|
|||||||
loop {
|
loop {
|
||||||
if let Some(current) = self.registry.get_writer(conn_id).await {
|
if let Some(current) = self.registry.get_writer(conn_id).await {
|
||||||
let send_res = {
|
let send_res = {
|
||||||
if let Ok(mut guard) = current.writer.try_lock() {
|
current
|
||||||
let r = guard.send(&payload).await;
|
.tx
|
||||||
drop(guard);
|
.send(WriterCommand::Data(payload.clone()))
|
||||||
r
|
.await
|
||||||
} else {
|
|
||||||
current.writer.lock().await.send(&payload).await
|
|
||||||
}
|
|
||||||
};
|
};
|
||||||
match send_res {
|
match send_res {
|
||||||
Ok(()) => return Ok(()),
|
Ok(()) => return Ok(()),
|
||||||
Err(e) => {
|
Err(_) => {
|
||||||
warn!(error = %e, writer_id = current.writer_id, "ME write failed");
|
warn!(writer_id = current.writer_id, "ME writer channel closed");
|
||||||
self.remove_writer_and_close_clients(current.writer_id).await;
|
self.remove_writer_and_close_clients(current.writer_id).await;
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
@@ -64,7 +62,26 @@ impl MePool {
|
|||||||
let mut writers_snapshot = {
|
let mut writers_snapshot = {
|
||||||
let ws = self.writers.read().await;
|
let ws = self.writers.read().await;
|
||||||
if ws.is_empty() {
|
if ws.is_empty() {
|
||||||
return Err(ProxyError::Proxy("All ME connections dead".into()));
|
drop(ws);
|
||||||
|
for family in self.family_order() {
|
||||||
|
let map = match family {
|
||||||
|
IpFamily::V4 => self.proxy_map_v4.read().await.clone(),
|
||||||
|
IpFamily::V6 => self.proxy_map_v6.read().await.clone(),
|
||||||
|
};
|
||||||
|
for (_dc, addrs) in map.iter() {
|
||||||
|
for (ip, port) in addrs {
|
||||||
|
let addr = SocketAddr::new(*ip, *port);
|
||||||
|
if self.connect_one(addr, self.rng.as_ref()).await.is_ok() {
|
||||||
|
self.writer_available.notify_waiters();
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if tokio::time::timeout(Duration::from_secs(3), self.writer_available.notified()).await.is_err() {
|
||||||
|
return Err(ProxyError::Proxy("All ME connections dead (waited 3s)".into()));
|
||||||
|
}
|
||||||
|
continue;
|
||||||
}
|
}
|
||||||
ws.clone()
|
ws.clone()
|
||||||
};
|
};
|
||||||
@@ -96,9 +113,10 @@ impl MePool {
|
|||||||
writers_snapshot = ws2.clone();
|
writers_snapshot = ws2.clone();
|
||||||
drop(ws2);
|
drop(ws2);
|
||||||
candidate_indices = self.candidate_indices_for_dc(&writers_snapshot, target_dc).await;
|
candidate_indices = self.candidate_indices_for_dc(&writers_snapshot, target_dc).await;
|
||||||
break;
|
if !candidate_indices.is_empty() {
|
||||||
|
break;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
drop(map_guard);
|
|
||||||
}
|
}
|
||||||
if candidate_indices.is_empty() {
|
if candidate_indices.is_empty() {
|
||||||
return Err(ProxyError::Proxy("No ME writers available for target DC".into()));
|
return Err(ProxyError::Proxy("No ME writers available for target DC".into()));
|
||||||
@@ -120,22 +138,15 @@ impl MePool {
|
|||||||
if w.draining.load(Ordering::Relaxed) {
|
if w.draining.load(Ordering::Relaxed) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
if let Ok(mut guard) = w.writer.try_lock() {
|
if w.tx.send(WriterCommand::Data(payload.clone())).await.is_ok() {
|
||||||
let send_res = guard.send(&payload).await;
|
self.registry
|
||||||
drop(guard);
|
.bind_writer(conn_id, w.id, w.tx.clone(), meta.clone())
|
||||||
match send_res {
|
.await;
|
||||||
Ok(()) => {
|
return Ok(());
|
||||||
self.registry
|
} else {
|
||||||
.bind_writer(conn_id, w.id, w.writer.clone(), meta.clone())
|
warn!(writer_id = w.id, "ME writer channel closed");
|
||||||
.await;
|
self.remove_writer_and_close_clients(w.id).await;
|
||||||
return Ok(());
|
continue;
|
||||||
}
|
|
||||||
Err(e) => {
|
|
||||||
warn!(error = %e, writer_id = w.id, "ME write failed");
|
|
||||||
self.remove_writer_and_close_clients(w.id).await;
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -143,15 +154,15 @@ impl MePool {
|
|||||||
if w.draining.load(Ordering::Relaxed) {
|
if w.draining.load(Ordering::Relaxed) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
match w.writer.lock().await.send(&payload).await {
|
match w.tx.send(WriterCommand::Data(payload.clone())).await {
|
||||||
Ok(()) => {
|
Ok(()) => {
|
||||||
self.registry
|
self.registry
|
||||||
.bind_writer(conn_id, w.id, w.writer.clone(), meta.clone())
|
.bind_writer(conn_id, w.id, w.tx.clone(), meta.clone())
|
||||||
.await;
|
.await;
|
||||||
return Ok(());
|
return Ok(());
|
||||||
}
|
}
|
||||||
Err(e) => {
|
Err(_) => {
|
||||||
warn!(error = %e, writer_id = w.id, "ME write failed (blocking)");
|
warn!(writer_id = w.id, "ME writer channel closed (blocking)");
|
||||||
self.remove_writer_and_close_clients(w.id).await;
|
self.remove_writer_and_close_clients(w.id).await;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -163,8 +174,8 @@ impl MePool {
|
|||||||
let mut p = Vec::with_capacity(12);
|
let mut p = Vec::with_capacity(12);
|
||||||
p.extend_from_slice(&RPC_CLOSE_EXT_U32.to_le_bytes());
|
p.extend_from_slice(&RPC_CLOSE_EXT_U32.to_le_bytes());
|
||||||
p.extend_from_slice(&conn_id.to_le_bytes());
|
p.extend_from_slice(&conn_id.to_le_bytes());
|
||||||
if let Err(e) = w.writer.lock().await.send_and_flush(&p).await {
|
if w.tx.send(WriterCommand::DataAndFlush(p)).await.is_err() {
|
||||||
debug!(error = %e, "ME close write failed");
|
debug!("ME close write failed");
|
||||||
self.remove_writer_and_close_clients(w.writer_id).await;
|
self.remove_writer_and_close_clients(w.writer_id).await;
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
@@ -176,7 +187,7 @@ impl MePool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
pub fn connection_count(&self) -> usize {
|
pub fn connection_count(&self) -> usize {
|
||||||
self.writers.try_read().map(|w| w.len()).unwrap_or(0)
|
self.conn_count.load(Ordering::Relaxed)
|
||||||
}
|
}
|
||||||
|
|
||||||
pub(super) async fn candidate_indices_for_dc(
|
pub(super) async fn candidate_indices_for_dc(
|
||||||
|
|||||||
@@ -122,6 +122,38 @@ pub fn get_local_addr(stream: &TcpStream) -> Option<SocketAddr> {
|
|||||||
stream.local_addr().ok()
|
stream.local_addr().ok()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// Resolve primary IP address of a network interface by name.
|
||||||
|
/// Returns the first address matching the requested family (IPv4/IPv6).
|
||||||
|
#[cfg(unix)]
|
||||||
|
pub fn resolve_interface_ip(name: &str, want_ipv6: bool) -> Option<IpAddr> {
|
||||||
|
use nix::ifaddrs::getifaddrs;
|
||||||
|
|
||||||
|
if let Ok(addrs) = getifaddrs() {
|
||||||
|
for iface in addrs {
|
||||||
|
if iface.interface_name == name {
|
||||||
|
if let Some(address) = iface.address {
|
||||||
|
if let Some(v4) = address.as_sockaddr_in() {
|
||||||
|
if !want_ipv6 {
|
||||||
|
return Some(IpAddr::V4(v4.ip()));
|
||||||
|
}
|
||||||
|
} else if let Some(v6) = address.as_sockaddr_in6() {
|
||||||
|
if want_ipv6 {
|
||||||
|
return Some(IpAddr::V6(v6.ip().clone()));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Stub for non-Unix platforms: interface name resolution unsupported.
|
||||||
|
#[cfg(not(unix))]
|
||||||
|
pub fn resolve_interface_ip(_name: &str, _want_ipv6: bool) -> Option<IpAddr> {
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
/// Get peer address of a socket
|
/// Get peer address of a socket
|
||||||
pub fn get_peer_addr(stream: &TcpStream) -> Option<SocketAddr> {
|
pub fn get_peer_addr(stream: &TcpStream) -> Option<SocketAddr> {
|
||||||
stream.peer_addr().ok()
|
stream.peer_addr().ok()
|
||||||
|
|||||||
@@ -5,6 +5,7 @@
|
|||||||
use std::collections::HashMap;
|
use std::collections::HashMap;
|
||||||
use std::net::{SocketAddr, IpAddr};
|
use std::net::{SocketAddr, IpAddr};
|
||||||
use std::sync::Arc;
|
use std::sync::Arc;
|
||||||
|
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||||
use std::time::Duration;
|
use std::time::Duration;
|
||||||
use tokio::net::TcpStream;
|
use tokio::net::TcpStream;
|
||||||
use tokio::sync::RwLock;
|
use tokio::sync::RwLock;
|
||||||
@@ -15,7 +16,7 @@ use tracing::{debug, warn, info, trace};
|
|||||||
use crate::config::{UpstreamConfig, UpstreamType};
|
use crate::config::{UpstreamConfig, UpstreamType};
|
||||||
use crate::error::{Result, ProxyError};
|
use crate::error::{Result, ProxyError};
|
||||||
use crate::protocol::constants::{TG_DATACENTERS_V4, TG_DATACENTERS_V6, TG_DATACENTER_PORT};
|
use crate::protocol::constants::{TG_DATACENTERS_V4, TG_DATACENTERS_V6, TG_DATACENTER_PORT};
|
||||||
use crate::transport::socket::create_outgoing_socket_bound;
|
use crate::transport::socket::{create_outgoing_socket_bound, resolve_interface_ip};
|
||||||
use crate::transport::socks::{connect_socks4, connect_socks5};
|
use crate::transport::socks::{connect_socks4, connect_socks5};
|
||||||
|
|
||||||
/// Number of Telegram datacenters
|
/// Number of Telegram datacenters
|
||||||
@@ -84,6 +85,8 @@ struct UpstreamState {
|
|||||||
dc_latency: [LatencyEma; NUM_DCS],
|
dc_latency: [LatencyEma; NUM_DCS],
|
||||||
/// Per-DC IP version preference (learned from connectivity tests)
|
/// Per-DC IP version preference (learned from connectivity tests)
|
||||||
dc_ip_pref: [IpPreference; NUM_DCS],
|
dc_ip_pref: [IpPreference; NUM_DCS],
|
||||||
|
/// Round-robin counter for bind_addresses selection
|
||||||
|
bind_rr: Arc<AtomicUsize>,
|
||||||
}
|
}
|
||||||
|
|
||||||
impl UpstreamState {
|
impl UpstreamState {
|
||||||
@@ -95,6 +98,7 @@ impl UpstreamState {
|
|||||||
last_check: std::time::Instant::now(),
|
last_check: std::time::Instant::now(),
|
||||||
dc_latency: [LatencyEma::new(0.3); NUM_DCS],
|
dc_latency: [LatencyEma::new(0.3); NUM_DCS],
|
||||||
dc_ip_pref: [IpPreference::Unknown; NUM_DCS],
|
dc_ip_pref: [IpPreference::Unknown; NUM_DCS],
|
||||||
|
bind_rr: Arc::new(AtomicUsize::new(0)),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -166,21 +170,85 @@ impl UpstreamManager {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
fn resolve_bind_address(
|
||||||
|
interface: &Option<String>,
|
||||||
|
bind_addresses: &Option<Vec<String>>,
|
||||||
|
target: SocketAddr,
|
||||||
|
rr: Option<&AtomicUsize>,
|
||||||
|
) -> Option<IpAddr> {
|
||||||
|
let want_ipv6 = target.is_ipv6();
|
||||||
|
|
||||||
|
if let Some(addrs) = bind_addresses {
|
||||||
|
let candidates: Vec<IpAddr> = addrs
|
||||||
|
.iter()
|
||||||
|
.filter_map(|s| s.parse::<IpAddr>().ok())
|
||||||
|
.filter(|ip| ip.is_ipv6() == want_ipv6)
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
if !candidates.is_empty() {
|
||||||
|
if let Some(counter) = rr {
|
||||||
|
let idx = counter.fetch_add(1, Ordering::Relaxed) % candidates.len();
|
||||||
|
return Some(candidates[idx]);
|
||||||
|
}
|
||||||
|
return candidates.first().copied();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Some(iface) = interface {
|
||||||
|
if let Ok(ip) = iface.parse::<IpAddr>() {
|
||||||
|
if ip.is_ipv6() == want_ipv6 {
|
||||||
|
return Some(ip);
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
#[cfg(unix)]
|
||||||
|
if let Some(ip) = resolve_interface_ip(iface, want_ipv6) {
|
||||||
|
return Some(ip);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
None
|
||||||
|
}
|
||||||
|
|
||||||
/// Select upstream using latency-weighted random selection.
|
/// Select upstream using latency-weighted random selection.
|
||||||
async fn select_upstream(&self, dc_idx: Option<i16>) -> Option<usize> {
|
async fn select_upstream(&self, dc_idx: Option<i16>, scope: Option<&str>) -> Option<usize> {
|
||||||
let upstreams = self.upstreams.read().await;
|
let upstreams = self.upstreams.read().await;
|
||||||
if upstreams.is_empty() {
|
if upstreams.is_empty() {
|
||||||
return None;
|
return None;
|
||||||
}
|
}
|
||||||
|
// Scope filter:
|
||||||
let healthy: Vec<usize> = upstreams.iter()
|
// If scope is set: only scoped and matched items
|
||||||
|
// If scope is not set: only unscoped items
|
||||||
|
let filtered_upstreams : Vec<usize> = upstreams.iter()
|
||||||
.enumerate()
|
.enumerate()
|
||||||
.filter(|(_, u)| u.healthy)
|
.filter(|(_, u)| {
|
||||||
|
scope.map_or(
|
||||||
|
u.config.scopes.is_empty(),
|
||||||
|
|req_scope| {
|
||||||
|
u.config.scopes
|
||||||
|
.split(',')
|
||||||
|
.map(str::trim)
|
||||||
|
.any(|s| s == req_scope)
|
||||||
|
}
|
||||||
|
)
|
||||||
|
})
|
||||||
.map(|(i, _)| i)
|
.map(|(i, _)| i)
|
||||||
.collect();
|
.collect();
|
||||||
|
|
||||||
|
// Healthy filter
|
||||||
|
let healthy: Vec<usize> = filtered_upstreams.iter()
|
||||||
|
.filter(|&&i| upstreams[i].healthy)
|
||||||
|
.copied()
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
if filtered_upstreams.is_empty() {
|
||||||
|
warn!(scope = scope, "No upstreams available! Using first (direct?)");
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
if healthy.is_empty() {
|
if healthy.is_empty() {
|
||||||
return Some(rand::rng().gen_range(0..upstreams.len()));
|
warn!(scope = scope, "No healthy upstreams available! Using random.");
|
||||||
|
return Some(filtered_upstreams[rand::rng().gen_range(0..filtered_upstreams.len())]);
|
||||||
}
|
}
|
||||||
|
|
||||||
if healthy.len() == 1 {
|
if healthy.len() == 1 {
|
||||||
@@ -222,18 +290,28 @@ impl UpstreamManager {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/// Connect to target through a selected upstream.
|
/// Connect to target through a selected upstream.
|
||||||
pub async fn connect(&self, target: SocketAddr, dc_idx: Option<i16>) -> Result<TcpStream> {
|
pub async fn connect(&self, target: SocketAddr, dc_idx: Option<i16>, scope: Option<&str>) -> Result<TcpStream> {
|
||||||
let idx = self.select_upstream(dc_idx).await
|
let idx = self.select_upstream(dc_idx, scope).await
|
||||||
.ok_or_else(|| ProxyError::Config("No upstreams available".to_string()))?;
|
.ok_or_else(|| ProxyError::Config("No upstreams available".to_string()))?;
|
||||||
|
|
||||||
let upstream = {
|
let mut upstream = {
|
||||||
let guard = self.upstreams.read().await;
|
let guard = self.upstreams.read().await;
|
||||||
guard[idx].config.clone()
|
guard[idx].config.clone()
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// Set scope for configuration copy
|
||||||
|
if let Some(s) = scope {
|
||||||
|
upstream.selected_scope = s.to_string();
|
||||||
|
}
|
||||||
|
|
||||||
let start = Instant::now();
|
let start = Instant::now();
|
||||||
|
|
||||||
match self.connect_via_upstream(&upstream, target).await {
|
let bind_rr = {
|
||||||
|
let guard = self.upstreams.read().await;
|
||||||
|
guard.get(idx).map(|u| u.bind_rr.clone())
|
||||||
|
};
|
||||||
|
|
||||||
|
match self.connect_via_upstream(&upstream, target, bind_rr).await {
|
||||||
Ok(stream) => {
|
Ok(stream) => {
|
||||||
let rtt_ms = start.elapsed().as_secs_f64() * 1000.0;
|
let rtt_ms = start.elapsed().as_secs_f64() * 1000.0;
|
||||||
let mut guard = self.upstreams.write().await;
|
let mut guard = self.upstreams.write().await;
|
||||||
@@ -265,13 +343,27 @@ impl UpstreamManager {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn connect_via_upstream(&self, config: &UpstreamConfig, target: SocketAddr) -> Result<TcpStream> {
|
async fn connect_via_upstream(
|
||||||
|
&self,
|
||||||
|
config: &UpstreamConfig,
|
||||||
|
target: SocketAddr,
|
||||||
|
bind_rr: Option<Arc<AtomicUsize>>,
|
||||||
|
) -> Result<TcpStream> {
|
||||||
match &config.upstream_type {
|
match &config.upstream_type {
|
||||||
UpstreamType::Direct { interface } => {
|
UpstreamType::Direct { interface, bind_addresses } => {
|
||||||
let bind_ip = interface.as_ref()
|
let bind_ip = Self::resolve_bind_address(
|
||||||
.and_then(|s| s.parse::<IpAddr>().ok());
|
interface,
|
||||||
|
bind_addresses,
|
||||||
|
target,
|
||||||
|
bind_rr.as_deref(),
|
||||||
|
);
|
||||||
|
|
||||||
let socket = create_outgoing_socket_bound(target, bind_ip)?;
|
let socket = create_outgoing_socket_bound(target, bind_ip)?;
|
||||||
|
if let Some(ip) = bind_ip {
|
||||||
|
debug!(bind = %ip, target = %target, "Bound outgoing socket");
|
||||||
|
} else if interface.is_some() || bind_addresses.is_some() {
|
||||||
|
debug!(target = %target, "No matching bind address for target family");
|
||||||
|
}
|
||||||
|
|
||||||
socket.set_nonblocking(true)?;
|
socket.set_nonblocking(true)?;
|
||||||
match socket.connect(&target.into()) {
|
match socket.connect(&target.into()) {
|
||||||
@@ -294,8 +386,12 @@ impl UpstreamManager {
|
|||||||
let proxy_addr: SocketAddr = address.parse()
|
let proxy_addr: SocketAddr = address.parse()
|
||||||
.map_err(|_| ProxyError::Config("Invalid SOCKS4 address".to_string()))?;
|
.map_err(|_| ProxyError::Config("Invalid SOCKS4 address".to_string()))?;
|
||||||
|
|
||||||
let bind_ip = interface.as_ref()
|
let bind_ip = Self::resolve_bind_address(
|
||||||
.and_then(|s| s.parse::<IpAddr>().ok());
|
interface,
|
||||||
|
&None,
|
||||||
|
proxy_addr,
|
||||||
|
bind_rr.as_deref(),
|
||||||
|
);
|
||||||
|
|
||||||
let socket = create_outgoing_socket_bound(proxy_addr, bind_ip)?;
|
let socket = create_outgoing_socket_bound(proxy_addr, bind_ip)?;
|
||||||
|
|
||||||
@@ -313,16 +409,24 @@ impl UpstreamManager {
|
|||||||
if let Some(e) = stream.take_error()? {
|
if let Some(e) = stream.take_error()? {
|
||||||
return Err(ProxyError::Io(e));
|
return Err(ProxyError::Io(e));
|
||||||
}
|
}
|
||||||
|
// replace socks user_id with config.selected_scope, if set
|
||||||
|
let scope: Option<&str> = Some(config.selected_scope.as_str())
|
||||||
|
.filter(|s| !s.is_empty());
|
||||||
|
let _user_id: Option<&str> = scope.or(user_id.as_deref());
|
||||||
|
|
||||||
connect_socks4(&mut stream, target, user_id.as_deref()).await?;
|
connect_socks4(&mut stream, target, _user_id).await?;
|
||||||
Ok(stream)
|
Ok(stream)
|
||||||
},
|
},
|
||||||
UpstreamType::Socks5 { address, interface, username, password } => {
|
UpstreamType::Socks5 { address, interface, username, password } => {
|
||||||
let proxy_addr: SocketAddr = address.parse()
|
let proxy_addr: SocketAddr = address.parse()
|
||||||
.map_err(|_| ProxyError::Config("Invalid SOCKS5 address".to_string()))?;
|
.map_err(|_| ProxyError::Config("Invalid SOCKS5 address".to_string()))?;
|
||||||
|
|
||||||
let bind_ip = interface.as_ref()
|
let bind_ip = Self::resolve_bind_address(
|
||||||
.and_then(|s| s.parse::<IpAddr>().ok());
|
interface,
|
||||||
|
&None,
|
||||||
|
proxy_addr,
|
||||||
|
bind_rr.as_deref(),
|
||||||
|
);
|
||||||
|
|
||||||
let socket = create_outgoing_socket_bound(proxy_addr, bind_ip)?;
|
let socket = create_outgoing_socket_bound(proxy_addr, bind_ip)?;
|
||||||
|
|
||||||
@@ -341,7 +445,14 @@ impl UpstreamManager {
|
|||||||
return Err(ProxyError::Io(e));
|
return Err(ProxyError::Io(e));
|
||||||
}
|
}
|
||||||
|
|
||||||
connect_socks5(&mut stream, target, username.as_deref(), password.as_deref()).await?;
|
debug!(config = ?config, "Socks5 connection");
|
||||||
|
// replace socks user:pass with config.selected_scope, if set
|
||||||
|
let scope: Option<&str> = Some(config.selected_scope.as_str())
|
||||||
|
.filter(|s| !s.is_empty());
|
||||||
|
let _username: Option<&str> = scope.or(username.as_deref());
|
||||||
|
let _password: Option<&str> = scope.or(password.as_deref());
|
||||||
|
|
||||||
|
connect_socks5(&mut stream, target, _username, _password).await?;
|
||||||
Ok(stream)
|
Ok(stream)
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
@@ -358,18 +469,18 @@ impl UpstreamManager {
|
|||||||
ipv4_enabled: bool,
|
ipv4_enabled: bool,
|
||||||
ipv6_enabled: bool,
|
ipv6_enabled: bool,
|
||||||
) -> Vec<StartupPingResult> {
|
) -> Vec<StartupPingResult> {
|
||||||
let upstreams: Vec<(usize, UpstreamConfig)> = {
|
let upstreams: Vec<(usize, UpstreamConfig, Arc<AtomicUsize>)> = {
|
||||||
let guard = self.upstreams.read().await;
|
let guard = self.upstreams.read().await;
|
||||||
guard.iter().enumerate()
|
guard.iter().enumerate()
|
||||||
.map(|(i, u)| (i, u.config.clone()))
|
.map(|(i, u)| (i, u.config.clone(), u.bind_rr.clone()))
|
||||||
.collect()
|
.collect()
|
||||||
};
|
};
|
||||||
|
|
||||||
let mut all_results = Vec::new();
|
let mut all_results = Vec::new();
|
||||||
|
|
||||||
for (upstream_idx, upstream_config) in &upstreams {
|
for (upstream_idx, upstream_config, bind_rr) in &upstreams {
|
||||||
let upstream_name = match &upstream_config.upstream_type {
|
let upstream_name = match &upstream_config.upstream_type {
|
||||||
UpstreamType::Direct { interface } => {
|
UpstreamType::Direct { interface, .. } => {
|
||||||
format!("direct{}", interface.as_ref().map(|i| format!(" ({})", i)).unwrap_or_default())
|
format!("direct{}", interface.as_ref().map(|i| format!(" ({})", i)).unwrap_or_default())
|
||||||
}
|
}
|
||||||
UpstreamType::Socks4 { address, .. } => format!("socks4://{}", address),
|
UpstreamType::Socks4 { address, .. } => format!("socks4://{}", address),
|
||||||
@@ -384,7 +495,7 @@ impl UpstreamManager {
|
|||||||
|
|
||||||
let result = tokio::time::timeout(
|
let result = tokio::time::timeout(
|
||||||
Duration::from_secs(DC_PING_TIMEOUT_SECS),
|
Duration::from_secs(DC_PING_TIMEOUT_SECS),
|
||||||
self.ping_single_dc(&upstream_config, addr_v6)
|
self.ping_single_dc(&upstream_config, Some(bind_rr.clone()), addr_v6)
|
||||||
).await;
|
).await;
|
||||||
|
|
||||||
let ping_result = match result {
|
let ping_result = match result {
|
||||||
@@ -435,7 +546,7 @@ impl UpstreamManager {
|
|||||||
|
|
||||||
let result = tokio::time::timeout(
|
let result = tokio::time::timeout(
|
||||||
Duration::from_secs(DC_PING_TIMEOUT_SECS),
|
Duration::from_secs(DC_PING_TIMEOUT_SECS),
|
||||||
self.ping_single_dc(&upstream_config, addr_v4)
|
self.ping_single_dc(&upstream_config, Some(bind_rr.clone()), addr_v4)
|
||||||
).await;
|
).await;
|
||||||
|
|
||||||
let ping_result = match result {
|
let ping_result = match result {
|
||||||
@@ -498,7 +609,7 @@ impl UpstreamManager {
|
|||||||
}
|
}
|
||||||
let result = tokio::time::timeout(
|
let result = tokio::time::timeout(
|
||||||
Duration::from_secs(DC_PING_TIMEOUT_SECS),
|
Duration::from_secs(DC_PING_TIMEOUT_SECS),
|
||||||
self.ping_single_dc(&upstream_config, addr)
|
self.ping_single_dc(&upstream_config, Some(bind_rr.clone()), addr)
|
||||||
).await;
|
).await;
|
||||||
|
|
||||||
let ping_result = match result {
|
let ping_result = match result {
|
||||||
@@ -567,9 +678,14 @@ impl UpstreamManager {
|
|||||||
all_results
|
all_results
|
||||||
}
|
}
|
||||||
|
|
||||||
async fn ping_single_dc(&self, config: &UpstreamConfig, target: SocketAddr) -> Result<f64> {
|
async fn ping_single_dc(
|
||||||
|
&self,
|
||||||
|
config: &UpstreamConfig,
|
||||||
|
bind_rr: Option<Arc<AtomicUsize>>,
|
||||||
|
target: SocketAddr,
|
||||||
|
) -> Result<f64> {
|
||||||
let start = Instant::now();
|
let start = Instant::now();
|
||||||
let _stream = self.connect_via_upstream(config, target).await?;
|
let _stream = self.connect_via_upstream(config, target, bind_rr).await?;
|
||||||
Ok(start.elapsed().as_secs_f64() * 1000.0)
|
Ok(start.elapsed().as_secs_f64() * 1000.0)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -609,15 +725,16 @@ impl UpstreamManager {
|
|||||||
let count = self.upstreams.read().await.len();
|
let count = self.upstreams.read().await.len();
|
||||||
|
|
||||||
for i in 0..count {
|
for i in 0..count {
|
||||||
let config = {
|
let (config, bind_rr) = {
|
||||||
let guard = self.upstreams.read().await;
|
let guard = self.upstreams.read().await;
|
||||||
guard[i].config.clone()
|
let u = &guard[i];
|
||||||
|
(u.config.clone(), u.bind_rr.clone())
|
||||||
};
|
};
|
||||||
|
|
||||||
let start = Instant::now();
|
let start = Instant::now();
|
||||||
let result = tokio::time::timeout(
|
let result = tokio::time::timeout(
|
||||||
Duration::from_secs(10),
|
Duration::from_secs(10),
|
||||||
self.connect_via_upstream(&config, dc_addr)
|
self.connect_via_upstream(&config, dc_addr, Some(bind_rr.clone()))
|
||||||
).await;
|
).await;
|
||||||
|
|
||||||
match result {
|
match result {
|
||||||
@@ -646,7 +763,7 @@ impl UpstreamManager {
|
|||||||
let start2 = Instant::now();
|
let start2 = Instant::now();
|
||||||
let result2 = tokio::time::timeout(
|
let result2 = tokio::time::timeout(
|
||||||
Duration::from_secs(10),
|
Duration::from_secs(10),
|
||||||
self.connect_via_upstream(&config, fallback_addr)
|
self.connect_via_upstream(&config, fallback_addr, Some(bind_rr.clone()))
|
||||||
).await;
|
).await;
|
||||||
|
|
||||||
let mut guard = self.upstreams.write().await;
|
let mut guard = self.upstreams.write().await;
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ Type=simple
|
|||||||
WorkingDirectory=/bin
|
WorkingDirectory=/bin
|
||||||
ExecStart=/bin/telemt /etc/telemt.toml
|
ExecStart=/bin/telemt /etc/telemt.toml
|
||||||
Restart=on-failure
|
Restart=on-failure
|
||||||
|
LimitNOFILE=65536
|
||||||
|
|
||||||
[Install]
|
[Install]
|
||||||
WantedBy=multi-user.target
|
WantedBy=multi-user.target
|
||||||
|
|||||||
Reference in New Issue
Block a user