diff --git a/.cargo/deny.toml b/.cargo/deny.toml
new file mode 100644
index 0000000..09a5dd9
--- /dev/null
+++ b/.cargo/deny.toml
@@ -0,0 +1,15 @@
+[bans]
+multiple-versions = "deny"
+wildcards = "allow"
+highlight = "all"
+
+# Explicitly flag the weak cryptography so the agent is forced to justify its existence
+[[bans.skip]]
+name = "md-5"
+version = "*"
+reason = "MUST VERIFY: Only allowed for legacy checksums, never for security."
+
+[[bans.skip]]
+name = "sha1"
+version = "*"
+reason = "MUST VERIFY: Only allowed for backwards compatibility."
diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index effe3ea..799f2ce 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -45,6 +45,18 @@ jobs:
- name: Run tests
run: cargo test --verbose
+ - name: Stress quota-lock suites (PR only)
+ if: github.event_name == 'pull_request'
+ env:
+ RUST_TEST_THREADS: 16
+ run: |
+ set -euo pipefail
+ for i in $(seq 1 12); do
+ echo "[quota-lock-stress] iteration ${i}/12"
+ cargo test quota_lock_ --bin telemt -- --nocapture --test-threads 16
+ cargo test relay_quota_wake --bin telemt -- --nocapture --test-threads 16
+ done
+
# clippy dont fail on warnings because of active development of telemt
# and many warnings
- name: Run clippy
diff --git a/.gitignore b/.gitignore
index 3a45e41..bc782ca 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,3 +21,4 @@ target
#.idea/
proxy-secret
+coverage-html/
\ No newline at end of file
diff --git a/AGENTS.md b/AGENTS.md
index e6c5f2e..c17cc76 100644
--- a/AGENTS.md
+++ b/AGENTS.md
@@ -5,6 +5,22 @@ Your responses are precise, minimal, and architecturally sound. You are working
---
+### Context: The Telemt Project
+
+You are working on **Telemt**, a high-performance, production-grade Telegram MTProxy implementation written in Rust. It is explicitly designed to operate in highly hostile network environments and evade advanced network censorship.
+
+**Adversarial Threat Model:**
+The proxy operates under constant surveillance by DPI (Deep Packet Inspection) systems and active scanners (state firewalls, mobile operator fraud controls). These entities actively probe IPs, analyze protocol handshakes, and look for known proxy signatures to block or throttle traffic.
+
+**Core Architectural Pillars:**
+1. **TLS-Fronting (TLS-F) & TCP-Splitting (TCP-S):** To the outside world, Telemt looks like a standard TLS server. If a client presents a valid MTProxy key, the connection is handled internally. If a censor's scanner, web browser, or unauthorized crawler connects, Telemt seamlessly splices the TCP connection (L4) to a real, legitimate HTTPS fallback server (e.g., Nginx) without modifying the `ClientHello` or terminating the TLS handshake.
+2. **Middle-End (ME) Orchestration:** A highly concurrent, generation-based pool managing upstream connections to Telegram Datacenters (DCs). It utilizes an **Adaptive Floor** (dynamically scaling writer connections based on traffic), **Hardswaps** (zero-downtime pool reconfiguration), and **STUN/NAT** reflection mechanisms.
+3. **Strict KDF Routing:** Cryptographic Key Derivation Functions (KDF) in this protocol strictly rely on the exact pairing of Source IP/Port and Destination IP/Port. Deviations or missing port logic will silently break the MTProto handshake.
+4. **Data Plane vs. Control Plane Isolation:** The Data Plane (readers, writers, payload relay, TCP splicing) must remain strictly non-blocking, zero-allocation in hot paths, and highly resilient to network backpressure. The Control Plane (API, metrics, pool generation swaps, config reloads) orchestrates the state asynchronously without stalling the Data Plane.
+
+Any modification you make must preserve Telemt's invisibility to censors, its strict memory-safety invariants, and its hot-path throughput.
+
+
### 0. Priority Resolution — Scope Control
This section resolves conflicts between code quality enforcement and scope limitation.
@@ -374,6 +390,12 @@ you MUST explain why existing invariants remain valid.
- Do not modify existing tests unless the task explicitly requires it.
- Do not weaken assertions.
- Preserve determinism in testable components.
+- Bug-first forces the discipline of proving you understand a bug before you fix it. Tests written after a fix almost always pass trivially and catch nothing new.
+- Invariants over scenarios is the core shift. The route_mode table alone would have caught both BUG-1 and BUG-2 before they were written — "snapshot equals watch state after any transition burst" is a two-line property test that fails immediately on the current diverged-atomics code.
+- Differential/model catches logic drift over time.
+- Scheduler pressure is specifically aimed at the concurrent state bugs that keep reappearing. A single-threaded happy-path test of set_mode will never find subtle bugs; 10,000 concurrent calls will find them on the first run.
+- Mutation gate answers your original complaint directly. It measures test power. If you can remove a bounds check and nothing breaks, the suite isn't covering that branch yet — it just says so explicitly.
+- Dead parameter is a code-smell rule: a parameter that no code path reads signals drift between a function's interface and its implementation, and must be either wired up or removed.
### 15. Security Constraints
diff --git a/Cargo.lock b/Cargo.lock
index 787e357..8159a22 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -20,7 +20,7 @@ checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0"
dependencies = [
"cfg-if",
"cipher",
- "cpufeatures",
+ "cpufeatures 0.2.17",
]
[[package]]
@@ -46,6 +46,15 @@ dependencies = [
"memchr",
]
+[[package]]
+name = "alloca"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e5a7d05ea6aea7e9e64d25b9156ba2fee3fdd659e34e41063cd2fc7cd020d7f4"
+dependencies = [
+ "cc",
+]
+
[[package]]
name = "allocator-api2"
version = "0.2.21"
@@ -81,9 +90,9 @@ checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c"
[[package]]
name = "arc-swap"
-version = "1.8.2"
+version = "1.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f9f3647c145568cec02c42054e07bdf9a5a698e15b466fb2341bfc393cd24aa5"
+checksum = "a07d1f37ff60921c83bdfc7407723bdefe89b44b98a9b772f225c8f9d67141a6"
dependencies = [
"rustversion",
]
@@ -102,9 +111,9 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"
[[package]]
name = "asn1-rs"
-version = "0.5.2"
+version = "0.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0"
+checksum = "56624a96882bb8c26d61312ae18cb45868e5a9992ea73c58e45c3101e56a1e60"
dependencies = [
"asn1-rs-derive",
"asn1-rs-impl",
@@ -112,31 +121,31 @@ dependencies = [
"nom",
"num-traits",
"rusticata-macros",
- "thiserror 1.0.69",
+ "thiserror 2.0.18",
"time",
]
[[package]]
name = "asn1-rs-derive"
-version = "0.4.0"
+version = "0.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c"
+checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c"
dependencies = [
"proc-macro2",
"quote",
- "syn 1.0.109",
- "synstructure 0.12.6",
+ "syn",
+ "synstructure",
]
[[package]]
name = "asn1-rs-impl"
-version = "0.1.0"
+version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed"
+checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7"
dependencies = [
"proc-macro2",
"quote",
- "syn 1.0.109",
+ "syn",
]
[[package]]
@@ -147,7 +156,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -162,6 +171,28 @@ version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
+[[package]]
+name = "aws-lc-rs"
+version = "1.16.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a054912289d18629dc78375ba2c3726a3afe3ff71b4edba9dedfca0e3446d1fc"
+dependencies = [
+ "aws-lc-sys",
+ "zeroize",
+]
+
+[[package]]
+name = "aws-lc-sys"
+version = "0.39.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1fa7e52a4c5c547c741610a2c6f123f3881e409b714cd27e6798ef020c514f0a"
+dependencies = [
+ "cc",
+ "cmake",
+ "dunce",
+ "fs_extra",
+]
+
[[package]]
name = "base64"
version = "0.22.1"
@@ -212,7 +243,7 @@ dependencies = [
"cc",
"cfg-if",
"constant_time_eq",
- "cpufeatures",
+ "cpufeatures 0.2.17",
]
[[package]]
@@ -273,21 +304,23 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a0dd1ca384932ff3641c8718a02769f1698e7563dc6974ffd03346116310423"
dependencies = [
"find-msvc-tools",
+ "jobserver",
+ "libc",
"shlex",
]
+[[package]]
+name = "cesu8"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6d43a04d8753f35258c91f8ec639f792891f748a1edbd759cf1dcea3382ad83c"
+
[[package]]
name = "cfg-if"
version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"
-[[package]]
-name = "cfg_aliases"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e"
-
[[package]]
name = "cfg_aliases"
version = "0.2.1"
@@ -302,7 +335,18 @@ checksum = "c3613f74bd2eac03dad61bd53dbe620703d4371614fe0bc3b9f04dd36fe4e818"
dependencies = [
"cfg-if",
"cipher",
- "cpufeatures",
+ "cpufeatures 0.2.17",
+]
+
+[[package]]
+name = "chacha20"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6f8d983286843e49675a4b7a2d174efe136dc93a18d69130dd18198a6c167601"
+dependencies = [
+ "cfg-if",
+ "cpufeatures 0.3.0",
+ "rand_core 0.10.0",
]
[[package]]
@@ -312,7 +356,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10cd79432192d1c0f4e1a0fef9527696cc039165d729fb41b3f4f4f354c2dc35"
dependencies = [
"aead",
- "chacha20",
+ "chacha20 0.9.1",
"cipher",
"poly1305",
"zeroize",
@@ -395,6 +439,25 @@ version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c8d4a3bb8b1e0c1050499d1815f5ab16d04f0959b233085fb31653fbfc9d98f9"
+[[package]]
+name = "cmake"
+version = "0.1.57"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d"
+dependencies = [
+ "cc",
+]
+
+[[package]]
+name = "combine"
+version = "4.6.7"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ba5a308b75df32fe02788e748662718f03fde005016435c444eea572398219fd"
+dependencies = [
+ "bytes",
+ "memchr",
+]
+
[[package]]
name = "const-oid"
version = "0.9.6"
@@ -407,6 +470,16 @@ version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3d52eff69cd5e647efe296129160853a42795992097e8af39800e1060caeea9b"
+[[package]]
+name = "core-foundation"
+version = "0.10.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6"
+dependencies = [
+ "core-foundation-sys",
+ "libc",
+]
+
[[package]]
name = "core-foundation-sys"
version = "0.8.7"
@@ -422,6 +495,15 @@ dependencies = [
"libc",
]
+[[package]]
+name = "cpufeatures"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8b2a41393f66f16b0823bb79094d54ac5fbd34ab292ddafb9a0456ac9f87d201"
+dependencies = [
+ "libc",
+]
+
[[package]]
name = "crc32c"
version = "0.6.8"
@@ -442,25 +524,24 @@ dependencies = [
[[package]]
name = "criterion"
-version = "0.5.1"
+version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f2b12d017a929603d80db1831cd3a24082f8137ce19c69e6447f54f5fc8d692f"
+checksum = "950046b2aa2492f9a536f5f4f9a3de7b9e2476e575e05bd6c333371add4d98f3"
dependencies = [
+ "alloca",
"anes",
"cast",
"ciborium",
"clap",
"criterion-plot",
- "is-terminal",
"itertools",
"num-traits",
- "once_cell",
"oorandom",
+ "page_size",
"plotters",
"rayon",
"regex",
"serde",
- "serde_derive",
"serde_json",
"tinytemplate",
"walkdir",
@@ -468,9 +549,9 @@ dependencies = [
[[package]]
name = "criterion-plot"
-version = "0.5.0"
+version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6b50826342786a51a89e2da3a28f1c32b06e387201bc2d19791f622c673706b1"
+checksum = "d8d80a2f4f5b554395e47b5d8305bc3d27813bacb73493eb1001e8f76dae29ea"
dependencies = [
"cast",
"itertools",
@@ -552,12 +633,39 @@ dependencies = [
]
[[package]]
-name = "dashmap"
-version = "5.5.3"
+name = "curve25519-dalek"
+version = "4.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856"
+checksum = "97fb8b7c4503de7d6ae7b42ab72a5a59857b4c937ec27a3d4539dba95b5ab2be"
dependencies = [
"cfg-if",
+ "cpufeatures 0.2.17",
+ "curve25519-dalek-derive",
+ "fiat-crypto",
+ "rustc_version",
+ "subtle",
+ "zeroize",
+]
+
+[[package]]
+name = "curve25519-dalek-derive"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3"
+dependencies = [
+ "proc-macro2",
+ "quote",
+ "syn",
+]
+
+[[package]]
+name = "dashmap"
+version = "6.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf"
+dependencies = [
+ "cfg-if",
+ "crossbeam-utils",
"hashbrown 0.14.5",
"lock_api",
"once_cell",
@@ -582,9 +690,9 @@ dependencies = [
[[package]]
name = "der-parser"
-version = "8.2.0"
+version = "10.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e"
+checksum = "07da5016415d5a3c4dd39b11ed26f915f52fc4e0dc197d87908bc916e51bc1a6"
dependencies = [
"asn1-rs",
"displaydoc",
@@ -622,9 +730,15 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
+[[package]]
+name = "dunce"
+version = "1.0.5"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813"
+
[[package]]
name = "dynosaur"
version = "0.3.0"
@@ -642,7 +756,7 @@ checksum = "0b0713d5c1d52e774c5cd7bb8b043d7c0fc4f921abfb678556140bfbe6ab2364"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -670,7 +784,7 @@ dependencies = [
"heck",
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -696,15 +810,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be"
[[package]]
-name = "filetime"
-version = "0.2.27"
+name = "fiat-crypto"
+version = "0.2.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f98844151eee8917efc50bd9e8318cb963ae8b297431495d3f758616ea5c57db"
-dependencies = [
- "cfg-if",
- "libc",
- "libredox",
-]
+checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d"
[[package]]
name = "find-msvc-tools"
@@ -739,6 +848,12 @@ dependencies = [
"percent-encoding",
]
+[[package]]
+name = "fs_extra"
+version = "1.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c"
+
[[package]]
name = "fsevent-sys"
version = "4.1.0"
@@ -804,7 +919,7 @@ checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -882,6 +997,7 @@ dependencies = [
"cfg-if",
"libc",
"r-efi 6.0.0",
+ "rand_core 0.10.0",
"wasip2",
"wasip3",
]
@@ -958,12 +1074,6 @@ version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea"
-[[package]]
-name = "hermit-abi"
-version = "0.5.2"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c"
-
[[package]]
name = "hex"
version = "0.4.3"
@@ -986,7 +1096,7 @@ dependencies = [
"idna",
"ipnet",
"once_cell",
- "rand",
+ "rand 0.9.2",
"ring",
"thiserror 2.0.18",
"tinyvec",
@@ -1008,7 +1118,7 @@ dependencies = [
"moka",
"once_cell",
"parking_lot",
- "rand",
+ "rand 0.9.2",
"resolv-conf",
"smallvec",
"thiserror 2.0.18",
@@ -1116,7 +1226,6 @@ dependencies = [
"tokio",
"tokio-rustls",
"tower-service",
- "webpki-roots 1.0.6",
]
[[package]]
@@ -1286,17 +1395,6 @@ dependencies = [
"serde_core",
]
-[[package]]
-name = "inotify"
-version = "0.9.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f8069d3ec154eb856955c1c0fbffefbf5f3c40a104ec912d4797314c1801abff"
-dependencies = [
- "bitflags 1.3.2",
- "inotify-sys",
- "libc",
-]
-
[[package]]
name = "inotify"
version = "0.11.1"
@@ -1347,9 +1445,9 @@ checksum = "d98f6fed1fde3f8c21bc40a1abb88dd75e67924f9cffc3ef95607bad8017f8e2"
[[package]]
name = "ipnetwork"
-version = "0.20.0"
+version = "0.21.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf466541e9d546596ee94f9f69590f89473455f88372423e0008fc1a7daf100e"
+checksum = "cf370abdafd54d13e54a620e8c3e1145f28e46cc9d704bc6d94414559df41763"
dependencies = [
"serde",
]
@@ -1364,22 +1462,11 @@ dependencies = [
"serde",
]
-[[package]]
-name = "is-terminal"
-version = "0.4.17"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46"
-dependencies = [
- "hermit-abi",
- "libc",
- "windows-sys 0.61.2",
-]
-
[[package]]
name = "itertools"
-version = "0.10.5"
+version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473"
+checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186"
dependencies = [
"either",
]
@@ -1390,6 +1477,38 @@ version = "1.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f42a60cbdf9a97f5d2305f08a87dc4e09308d1276d28c869c684d7777685682"
+[[package]]
+name = "jni"
+version = "0.21.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "1a87aa2bb7d2af34197c04845522473242e1aa17c12f4935d5856491a7fb8c97"
+dependencies = [
+ "cesu8",
+ "cfg-if",
+ "combine",
+ "jni-sys",
+ "log",
+ "thiserror 1.0.69",
+ "walkdir",
+ "windows-sys 0.45.0",
+]
+
+[[package]]
+name = "jni-sys"
+version = "0.3.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130"
+
+[[package]]
+name = "jobserver"
+version = "0.1.34"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33"
+dependencies = [
+ "getrandom 0.3.4",
+ "libc",
+]
+
[[package]]
name = "js-sys"
version = "0.3.91"
@@ -1438,18 +1557,6 @@ version = "0.2.183"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5b646652bf6661599e1da8901b3b9522896f01e736bad5f723fe7a3a27f899d"
-[[package]]
-name = "libredox"
-version = "0.1.14"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1744e39d1d6a9948f4f388969627434e31128196de472883b39f148769bfe30a"
-dependencies = [
- "bitflags 2.11.0",
- "libc",
- "plain",
- "redox_syscall 0.7.3",
-]
-
[[package]]
name = "linux-raw-sys"
version = "0.12.1"
@@ -1538,18 +1645,6 @@ version = "0.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a"
-[[package]]
-name = "mio"
-version = "0.8.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c"
-dependencies = [
- "libc",
- "log",
- "wasi",
- "windows-sys 0.48.0",
-]
-
[[package]]
name = "mio"
version = "1.1.1"
@@ -1581,13 +1676,13 @@ dependencies = [
[[package]]
name = "nix"
-version = "0.28.0"
+version = "0.31.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4"
+checksum = "5d6d0705320c1e6ba1d912b5e37cf18071b6c2e9b7fa8215a1e8a7651966f5d3"
dependencies = [
"bitflags 2.11.0",
"cfg-if",
- "cfg_aliases 0.1.1",
+ "cfg_aliases",
"libc",
"memoffset",
]
@@ -1602,25 +1697,6 @@ dependencies = [
"minimal-lexical",
]
-[[package]]
-name = "notify"
-version = "6.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6205bd8bb1e454ad2e27422015fb5e4f2bcc7e08fa8f27058670d208324a4d2d"
-dependencies = [
- "bitflags 2.11.0",
- "crossbeam-channel",
- "filetime",
- "fsevent-sys",
- "inotify 0.9.6",
- "kqueue",
- "libc",
- "log",
- "mio 0.8.11",
- "walkdir",
- "windows-sys 0.48.0",
-]
-
[[package]]
name = "notify"
version = "8.2.0"
@@ -1629,11 +1705,11 @@ checksum = "4d3d07927151ff8575b7087f245456e549fea62edf0ec4e565a5ee50c8402bc3"
dependencies = [
"bitflags 2.11.0",
"fsevent-sys",
- "inotify 0.11.1",
+ "inotify",
"kqueue",
"libc",
"log",
- "mio 1.1.1",
+ "mio",
"notify-types",
"walkdir",
"windows-sys 0.60.2",
@@ -1693,9 +1769,9 @@ dependencies = [
[[package]]
name = "oid-registry"
-version = "0.6.1"
+version = "0.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff"
+checksum = "12f40cff3dde1b6087cc5d5f5d4d65712f34016a03ed60e9c08dcc392736b5b7"
dependencies = [
"asn1-rs",
]
@@ -1722,6 +1798,22 @@ version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381"
+[[package]]
+name = "openssl-probe"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe"
+
+[[package]]
+name = "page_size"
+version = "0.6.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "30d5b2194ed13191c1999ae0704b7839fb18384fa22e49b57eeaa97d79ce40da"
+dependencies = [
+ "libc",
+ "winapi",
+]
+
[[package]]
name = "parking_lot"
version = "0.12.5"
@@ -1740,7 +1832,7 @@ checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1"
dependencies = [
"cfg-if",
"libc",
- "redox_syscall 0.5.18",
+ "redox_syscall",
"smallvec",
"windows-link",
]
@@ -1768,7 +1860,7 @@ checksum = "d9b20ed30f105399776b9c883e68e536ef602a16ae6f596d2c473591d6ad64c6"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -1793,12 +1885,6 @@ dependencies = [
"spki",
]
-[[package]]
-name = "plain"
-version = "0.2.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b4596b6d070b27117e987119b4dac604f3c58cfb0b191112e24771b2faeac1a6"
-
[[package]]
name = "plotters"
version = "0.3.7"
@@ -1833,7 +1919,7 @@ version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8159bd90725d2df49889a078b54f4f79e87f1f8a8444194cdca81d38f5393abf"
dependencies = [
- "cpufeatures",
+ "cpufeatures 0.2.17",
"opaque-debug",
"universal-hash",
]
@@ -1845,7 +1931,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25"
dependencies = [
"cfg-if",
- "cpufeatures",
+ "cpufeatures 0.2.17",
"opaque-debug",
"universal-hash",
]
@@ -1887,7 +1973,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b"
dependencies = [
"proc-macro2",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -1909,7 +1995,7 @@ dependencies = [
"bit-vec",
"bitflags 2.11.0",
"num-traits",
- "rand",
+ "rand 0.9.2",
"rand_chacha",
"rand_xorshift",
"regex-syntax",
@@ -1931,7 +2017,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20"
dependencies = [
"bytes",
- "cfg_aliases 0.2.1",
+ "cfg_aliases",
"pin-project-lite",
"quinn-proto",
"quinn-udp",
@@ -1950,10 +2036,11 @@ version = "0.11.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "434b42fec591c96ef50e21e886936e66d3cc3f737104fdb9b737c40ffb94c098"
dependencies = [
+ "aws-lc-rs",
"bytes",
"getrandom 0.3.4",
"lru-slab",
- "rand",
+ "rand 0.9.2",
"ring",
"rustc-hash",
"rustls",
@@ -1971,7 +2058,7 @@ version = "0.5.14"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd"
dependencies = [
- "cfg_aliases 0.2.1",
+ "cfg_aliases",
"libc",
"once_cell",
"socket2 0.6.3",
@@ -2010,6 +2097,17 @@ dependencies = [
"rand_core 0.9.5",
]
+[[package]]
+name = "rand"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "bc266eb313df6c5c09c1c7b1fbe2510961e5bcd3add930c1e31f7ed9da0feff8"
+dependencies = [
+ "chacha20 0.10.0",
+ "getrandom 0.4.2",
+ "rand_core 0.10.0",
+]
+
[[package]]
name = "rand_chacha"
version = "0.9.0"
@@ -2038,6 +2136,12 @@ dependencies = [
"getrandom 0.3.4",
]
+[[package]]
+name = "rand_core"
+version = "0.10.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "0c8d0fd677905edcbeedbf2edb6494d676f0e98d54d5cf9bda0b061cb8fb8aba"
+
[[package]]
name = "rand_xorshift"
version = "0.4.0"
@@ -2076,15 +2180,6 @@ dependencies = [
"bitflags 2.11.0",
]
-[[package]]
-name = "redox_syscall"
-version = "0.7.3"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6ce70a74e890531977d37e532c34d45e9055d2409ed08ddba14529471ed0be16"
-dependencies = [
- "bitflags 2.11.0",
-]
-
[[package]]
name = "regex"
version = "1.12.3"
@@ -2116,9 +2211,9 @@ checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a"
[[package]]
name = "reqwest"
-version = "0.12.28"
+version = "0.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147"
+checksum = "ab3f43e3283ab1488b624b44b0e988d0acea0b3214e694730a055cb6b2efa801"
dependencies = [
"base64",
"bytes",
@@ -2136,9 +2231,7 @@ dependencies = [
"quinn",
"rustls",
"rustls-pki-types",
- "serde",
- "serde_json",
- "serde_urlencoded",
+ "rustls-platform-verifier",
"sync_wrapper",
"tokio",
"tokio-rustls",
@@ -2149,7 +2242,6 @@ dependencies = [
"wasm-bindgen",
"wasm-bindgen-futures",
"web-sys",
- "webpki-roots 1.0.6",
]
[[package]]
@@ -2228,6 +2320,7 @@ version = "0.23.37"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "758025cb5fccfd3bc2fd74708fd4682be41d99e5dff73c377c0646c6012c73a4"
dependencies = [
+ "aws-lc-rs",
"once_cell",
"ring",
"rustls-pki-types",
@@ -2236,6 +2329,18 @@ dependencies = [
"zeroize",
]
+[[package]]
+name = "rustls-native-certs"
+version = "0.8.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63"
+dependencies = [
+ "openssl-probe",
+ "rustls-pki-types",
+ "schannel",
+ "security-framework",
+]
+
[[package]]
name = "rustls-pki-types"
version = "1.14.0"
@@ -2247,11 +2352,39 @@ dependencies = [
]
[[package]]
-name = "rustls-webpki"
-version = "0.103.9"
+name = "rustls-platform-verifier"
+version = "0.6.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53"
+checksum = "1d99feebc72bae7ab76ba994bb5e121b8d83d910ca40b36e0921f53becc41784"
dependencies = [
+ "core-foundation",
+ "core-foundation-sys",
+ "jni",
+ "log",
+ "once_cell",
+ "rustls",
+ "rustls-native-certs",
+ "rustls-platform-verifier-android",
+ "rustls-webpki",
+ "security-framework",
+ "security-framework-sys",
+ "webpki-root-certs",
+ "windows-sys 0.61.2",
+]
+
+[[package]]
+name = "rustls-platform-verifier-android"
+version = "0.1.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f"
+
+[[package]]
+name = "rustls-webpki"
+version = "0.103.10"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "df33b2b81ac578cabaf06b89b0631153a3f416b0a886e8a7a1707fb51abbd1ef"
+dependencies = [
+ "aws-lc-rs",
"ring",
"rustls-pki-types",
"untrusted",
@@ -2290,6 +2423,15 @@ dependencies = [
"winapi-util",
]
+[[package]]
+name = "schannel"
+version = "0.1.29"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "91c1b7e4904c873ef0710c1f407dde2e6287de2bebc1bbbf7d430bb7cbffd939"
+dependencies = [
+ "windows-sys 0.61.2",
+]
+
[[package]]
name = "scopeguard"
version = "1.2.0"
@@ -2304,7 +2446,30 @@ checksum = "22f968c5ea23d555e670b449c1c5e7b2fc399fdaec1d304a17cd48e288abc107"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
+]
+
+[[package]]
+name = "security-framework"
+version = "3.7.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "b7f4bc775c73d9a02cde8bf7b2ec4c9d12743edf609006c7facc23998404cd1d"
+dependencies = [
+ "bitflags 2.11.0",
+ "core-foundation",
+ "core-foundation-sys",
+ "libc",
+ "security-framework-sys",
+]
+
+[[package]]
+name = "security-framework-sys"
+version = "2.17.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "6ce2691df843ecc5d231c0b14ece2acc3efb62c0a398c7e1d875f3983ce020e3"
+dependencies = [
+ "core-foundation-sys",
+ "libc",
]
[[package]]
@@ -2350,7 +2515,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -2368,11 +2533,11 @@ dependencies = [
[[package]]
name = "serde_spanned"
-version = "0.6.9"
+version = "1.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3"
+checksum = "f8bbf91e5a4d6315eee45e704372590b30e260ee83af6639d64557f51b067776"
dependencies = [
- "serde",
+ "serde_core",
]
[[package]]
@@ -2394,7 +2559,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba"
dependencies = [
"cfg-if",
- "cpufeatures",
+ "cpufeatures 0.2.17",
"digest",
]
@@ -2405,7 +2570,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283"
dependencies = [
"cfg-if",
- "cpufeatures",
+ "cpufeatures 0.2.17",
"digest",
]
@@ -2428,10 +2593,10 @@ dependencies = [
"libc",
"log",
"lru_time_cache",
- "notify 8.2.0",
+ "notify",
"percent-encoding",
"pin-project",
- "rand",
+ "rand 0.9.2",
"sealed",
"sendfd",
"serde",
@@ -2462,7 +2627,7 @@ dependencies = [
"chacha20poly1305",
"hkdf",
"md-5",
- "rand",
+ "rand 0.9.2",
"ring-compat",
"sha1",
]
@@ -2555,23 +2720,18 @@ version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596"
+[[package]]
+name = "static_assertions"
+version = "1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f"
+
[[package]]
name = "subtle"
version = "2.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292"
-[[package]]
-name = "syn"
-version = "1.0.109"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237"
-dependencies = [
- "proc-macro2",
- "quote",
- "unicode-ident",
-]
-
[[package]]
name = "syn"
version = "2.0.117"
@@ -2592,18 +2752,6 @@ dependencies = [
"futures-core",
]
-[[package]]
-name = "synstructure"
-version = "0.12.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f"
-dependencies = [
- "proc-macro2",
- "quote",
- "syn 1.0.109",
- "unicode-xid",
-]
-
[[package]]
name = "synstructure"
version = "0.13.2"
@@ -2612,7 +2760,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -2623,7 +2771,7 @@ checksum = "7b2093cf4c8eb1e67749a6762251bc9cd836b6fc171623bd0a9d324d37af2417"
[[package]]
name = "telemt"
-version = "3.3.28"
+version = "3.3.29"
dependencies = [
"aes",
"anyhow",
@@ -2650,12 +2798,12 @@ dependencies = [
"lru",
"md-5",
"nix",
- "notify 6.1.1",
+ "notify",
"num-bigint",
"num-traits",
"parking_lot",
"proptest",
- "rand",
+ "rand 0.10.0",
"regex",
"reqwest",
"rustls",
@@ -2664,7 +2812,9 @@ dependencies = [
"sha1",
"sha2",
"shadowsocks",
- "socket2 0.5.10",
+ "socket2 0.6.3",
+ "static_assertions",
+ "subtle",
"thiserror 2.0.18",
"tokio",
"tokio-rustls",
@@ -2674,7 +2824,8 @@ dependencies = [
"tracing",
"tracing-subscriber",
"url",
- "webpki-roots 0.26.11",
+ "webpki-roots",
+ "x25519-dalek",
"x509-parser",
"zeroize",
]
@@ -2718,7 +2869,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -2729,7 +2880,7 @@ checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -2815,7 +2966,7 @@ checksum = "27ad5e34374e03cfffefc301becb44e9dc3c17584f414349ebe29ed26661822d"
dependencies = [
"bytes",
"libc",
- "mio 1.1.1",
+ "mio",
"parking_lot",
"pin-project-lite",
"signal-hook-registry",
@@ -2833,7 +2984,7 @@ checksum = "5c55a2eff8b69ce66c84f85e1da1c233edc36ceb85a2058d11b0d6a3c7e7569c"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -2904,44 +3055,42 @@ dependencies = [
[[package]]
name = "toml"
-version = "0.8.23"
+version = "1.0.7+spec-1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362"
-dependencies = [
- "serde",
- "serde_spanned",
- "toml_datetime",
- "toml_edit",
-]
-
-[[package]]
-name = "toml_datetime"
-version = "0.6.11"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c"
-dependencies = [
- "serde",
-]
-
-[[package]]
-name = "toml_edit"
-version = "0.22.27"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a"
+checksum = "dd28d57d8a6f6e458bc0b8784f8fdcc4b99a437936056fa122cb234f18656a96"
dependencies = [
"indexmap",
- "serde",
+ "serde_core",
"serde_spanned",
"toml_datetime",
- "toml_write",
+ "toml_parser",
+ "toml_writer",
"winnow",
]
[[package]]
-name = "toml_write"
-version = "0.1.2"
+name = "toml_datetime"
+version = "1.0.1+spec-1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801"
+checksum = "9b320e741db58cac564e26c607d3cc1fdc4a88fd36c879568c07856ed83ff3e9"
+dependencies = [
+ "serde_core",
+]
+
+[[package]]
+name = "toml_parser"
+version = "1.0.10+spec-1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7df25b4befd31c4816df190124375d5a20c6b6921e2cad937316de3fccd63420"
+dependencies = [
+ "winnow",
+]
+
+[[package]]
+name = "toml_writer"
+version = "1.0.7+spec-1.1.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f17aaa1c6e3dc22b1da4b6bba97d066e354c7945cac2f7852d4e4e7ca7a6b56d"
[[package]]
name = "tower"
@@ -3007,7 +3156,7 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -3057,7 +3206,7 @@ checksum = "70977707304198400eb4835a78f6a9f928bf41bba420deb8fdb175cd965d77a7"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -3245,7 +3394,7 @@ dependencies = [
"bumpalo",
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
"wasm-bindgen-shared",
]
@@ -3313,12 +3462,12 @@ dependencies = [
]
[[package]]
-name = "webpki-roots"
-version = "0.26.11"
+name = "webpki-root-certs"
+version = "1.0.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9"
+checksum = "804f18a4ac2676ffb4e8b5b5fa9ae38af06df08162314f96a68d2a363e21a8ca"
dependencies = [
- "webpki-roots 1.0.6",
+ "rustls-pki-types",
]
[[package]]
@@ -3336,6 +3485,22 @@ version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72069c3113ab32ab29e5584db3c6ec55d416895e60715417b5b883a357c3e471"
+[[package]]
+name = "winapi"
+version = "0.3.9"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419"
+dependencies = [
+ "winapi-i686-pc-windows-gnu",
+ "winapi-x86_64-pc-windows-gnu",
+]
+
+[[package]]
+name = "winapi-i686-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
+
[[package]]
name = "winapi-util"
version = "0.1.11"
@@ -3345,6 +3510,12 @@ dependencies = [
"windows-sys 0.61.2",
]
+[[package]]
+name = "winapi-x86_64-pc-windows-gnu"
+version = "0.4.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
+
[[package]]
name = "windows-core"
version = "0.62.2"
@@ -3366,7 +3537,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -3377,7 +3548,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -3404,6 +3575,15 @@ dependencies = [
"windows-link",
]
+[[package]]
+name = "windows-sys"
+version = "0.45.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0"
+dependencies = [
+ "windows-targets 0.42.2",
+]
+
[[package]]
name = "windows-sys"
version = "0.48.0"
@@ -3440,6 +3620,21 @@ dependencies = [
"windows-link",
]
+[[package]]
+name = "windows-targets"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071"
+dependencies = [
+ "windows_aarch64_gnullvm 0.42.2",
+ "windows_aarch64_msvc 0.42.2",
+ "windows_i686_gnu 0.42.2",
+ "windows_i686_msvc 0.42.2",
+ "windows_x86_64_gnu 0.42.2",
+ "windows_x86_64_gnullvm 0.42.2",
+ "windows_x86_64_msvc 0.42.2",
+]
+
[[package]]
name = "windows-targets"
version = "0.48.5"
@@ -3488,6 +3683,12 @@ dependencies = [
"windows_x86_64_msvc 0.53.1",
]
+[[package]]
+name = "windows_aarch64_gnullvm"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8"
+
[[package]]
name = "windows_aarch64_gnullvm"
version = "0.48.5"
@@ -3506,6 +3707,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53"
+[[package]]
+name = "windows_aarch64_msvc"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43"
+
[[package]]
name = "windows_aarch64_msvc"
version = "0.48.5"
@@ -3524,6 +3731,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006"
+[[package]]
+name = "windows_i686_gnu"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f"
+
[[package]]
name = "windows_i686_gnu"
version = "0.48.5"
@@ -3554,6 +3767,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c"
+[[package]]
+name = "windows_i686_msvc"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060"
+
[[package]]
name = "windows_i686_msvc"
version = "0.48.5"
@@ -3572,6 +3791,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2"
+[[package]]
+name = "windows_x86_64_gnu"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36"
+
[[package]]
name = "windows_x86_64_gnu"
version = "0.48.5"
@@ -3590,6 +3815,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499"
+[[package]]
+name = "windows_x86_64_gnullvm"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3"
+
[[package]]
name = "windows_x86_64_gnullvm"
version = "0.48.5"
@@ -3608,6 +3839,12 @@ version = "0.53.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1"
+[[package]]
+name = "windows_x86_64_msvc"
+version = "0.42.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0"
+
[[package]]
name = "windows_x86_64_msvc"
version = "0.48.5"
@@ -3628,12 +3865,9 @@ checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650"
[[package]]
name = "winnow"
-version = "0.7.15"
+version = "1.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "df79d97927682d2fd8adb29682d1140b343be4ac0f08fd68b7765d9c059d3945"
-dependencies = [
- "memchr",
-]
+checksum = "a90e88e4667264a994d34e6d1ab2d26d398dcdca8b7f52bec8668957517fc7d8"
[[package]]
name = "winreg"
@@ -3675,7 +3909,7 @@ dependencies = [
"heck",
"indexmap",
"prettyplease",
- "syn 2.0.117",
+ "syn",
"wasm-metadata",
"wit-bindgen-core",
"wit-component",
@@ -3691,7 +3925,7 @@ dependencies = [
"prettyplease",
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
"wit-bindgen-core",
"wit-bindgen-rust",
]
@@ -3740,10 +3974,22 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9"
[[package]]
-name = "x509-parser"
-version = "0.15.1"
+name = "x25519-dalek"
+version = "2.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da"
+checksum = "c7e468321c81fb07fa7f4c636c3972b9100f0346e5b6a9f2bd0603a52f7ed277"
+dependencies = [
+ "curve25519-dalek",
+ "rand_core 0.6.4",
+ "serde",
+ "zeroize",
+]
+
+[[package]]
+name = "x509-parser"
+version = "0.18.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d43b0f71ce057da06bc0851b23ee24f3f86190b07203dd8f567d0b706a185202"
dependencies = [
"asn1-rs",
"data-encoding",
@@ -3752,7 +3998,7 @@ dependencies = [
"nom",
"oid-registry",
"rusticata-macros",
- "thiserror 1.0.69",
+ "thiserror 2.0.18",
"time",
]
@@ -3775,8 +4021,8 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
- "synstructure 0.13.2",
+ "syn",
+ "synstructure",
]
[[package]]
@@ -3796,7 +4042,7 @@ checksum = "0e8bc7269b54418e7aeeef514aa68f8690b8c0489a06b0136e5f57c4c5ccab89"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -3816,8 +4062,8 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
- "synstructure 0.13.2",
+ "syn",
+ "synstructure",
]
[[package]]
@@ -3837,7 +4083,7 @@ checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
@@ -3870,7 +4116,7 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3"
dependencies = [
"proc-macro2",
"quote",
- "syn 2.0.117",
+ "syn",
]
[[package]]
diff --git a/Cargo.toml b/Cargo.toml
index 97855f3..53082db 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -1,6 +1,6 @@
[package]
name = "telemt"
-version = "3.3.28"
+version = "3.3.29"
edition = "2024"
[dependencies]
@@ -22,17 +22,19 @@ hmac = "0.12"
crc32fast = "1.4"
crc32c = "0.6"
zeroize = { version = "1.8", features = ["derive"] }
+subtle = "2.6"
+static_assertions = "1.1"
# Network
-socket2 = { version = "0.5", features = ["all"] }
-nix = { version = "0.28", default-features = false, features = ["net"] }
+socket2 = { version = "0.6", features = ["all"] }
+nix = { version = "0.31", default-features = false, features = ["net", "fs"] }
shadowsocks = { version = "1.24", features = ["aead-cipher-2022"] }
# Serialization
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
-toml = "0.8"
-x509-parser = "0.15"
+toml = "1.0"
+x509-parser = "0.18"
# Utils
bytes = "1.9"
@@ -40,10 +42,10 @@ thiserror = "2.0"
tracing = "0.1"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
parking_lot = "0.12"
-dashmap = "5.5"
+dashmap = "6.1"
arc-swap = "1.7"
lru = "0.16"
-rand = "0.9"
+rand = "0.10"
chrono = { version = "0.4", features = ["serde"] }
hex = "0.4"
base64 = "0.22"
@@ -52,23 +54,24 @@ regex = "1.11"
crossbeam-queue = "0.3"
num-bigint = "0.4"
num-traits = "0.2"
+x25519-dalek = "2"
anyhow = "1.0"
# HTTP
-reqwest = { version = "0.12", features = ["rustls-tls"], default-features = false }
-notify = { version = "6", features = ["macos_fsevent"] }
-ipnetwork = "0.20"
+reqwest = { version = "0.13", features = ["rustls"], default-features = false }
+notify = "8.2"
+ipnetwork = { version = "0.21", features = ["serde"] }
hyper = { version = "1", features = ["server", "http1"] }
hyper-util = { version = "0.1", features = ["tokio", "server-auto"] }
http-body-util = "0.1"
httpdate = "1.0"
tokio-rustls = { version = "0.26", default-features = false, features = ["tls12"] }
rustls = { version = "0.23", default-features = false, features = ["std", "tls12", "ring"] }
-webpki-roots = "0.26"
+webpki-roots = "1.0"
[dev-dependencies]
tokio-test = "0.4"
-criterion = "0.5"
+criterion = "0.8"
proptest = "1.4"
futures = "0.3"
diff --git a/benches/crypto_bench.rs b/benches/crypto_bench.rs
index 0089abe..940791c 100644
--- a/benches/crypto_bench.rs
+++ b/benches/crypto_bench.rs
@@ -1,5 +1,5 @@
// Cryptobench
-use criterion::{black_box, criterion_group, Criterion};
+use criterion::{Criterion, black_box, criterion_group};
fn bench_aes_ctr(c: &mut Criterion) {
c.bench_function("aes_ctr_encrypt_64kb", |b| {
@@ -9,4 +9,4 @@ fn bench_aes_ctr(c: &mut Criterion) {
black_box(enc.encrypt(&data))
})
});
-}
\ No newline at end of file
+}
diff --git a/docs/CONFIG_PARAMS.en.md b/docs/CONFIG_PARAMS.en.md
index 90da08a..3eee3a7 100644
--- a/docs/CONFIG_PARAMS.en.md
+++ b/docs/CONFIG_PARAMS.en.md
@@ -260,6 +260,129 @@ This document lists all configuration keys accepted by `config.toml`.
| tls_full_cert_ttl_secs | `u64` | `90` | — | TTL for sending full cert payload per (domain, client IP) tuple. |
| alpn_enforce | `bool` | `true` | — | Enforces ALPN echo behavior based on client preference. |
| mask_proxy_protocol | `u8` | `0` | — | PROXY protocol mode for mask backend (`0` disabled, `1` v1, `2` v2). |
+| mask_shape_hardening | `bool` | `true` | — | Enables client->mask shape-channel hardening by applying controlled tail padding to bucket boundaries on mask relay shutdown. |
+| mask_shape_hardening_aggressive_mode | `bool` | `false` | Requires `mask_shape_hardening = true`. | Opt-in aggressive shaping profile: allows shaping on backend-silent non-EOF paths and switches above-cap blur to strictly positive random tail. |
+| mask_shape_bucket_floor_bytes | `usize` | `512` | Must be `> 0`; should be `<= mask_shape_bucket_cap_bytes`. | Minimum bucket size used by shape-channel hardening. |
+| mask_shape_bucket_cap_bytes | `usize` | `4096` | Must be `>= mask_shape_bucket_floor_bytes`. | Maximum bucket size used by shape-channel hardening; traffic above cap is not padded further. |
+| mask_shape_above_cap_blur | `bool` | `false` | Requires `mask_shape_hardening = true`; requires `mask_shape_above_cap_blur_max_bytes > 0`. | Adds bounded randomized tail bytes even when forwarded size already exceeds cap. |
+| mask_shape_above_cap_blur_max_bytes | `usize` | `512` | Must be `<= 1048576`; must be `> 0` when `mask_shape_above_cap_blur = true`. | Maximum randomized extra bytes appended above cap. |
+| mask_timing_normalization_enabled | `bool` | `false` | Requires `mask_timing_normalization_floor_ms > 0`; requires `ceiling >= floor`. | Enables timing envelope normalization on masking outcomes. |
+| mask_timing_normalization_floor_ms | `u64` | `0` | Must be `> 0` when timing normalization is enabled; must be `<= ceiling`. | Lower bound (ms) for masking outcome normalization target. |
+| mask_timing_normalization_ceiling_ms | `u64` | `0` | Must be `>= floor`; must be `<= 60000`. | Upper bound (ms) for masking outcome normalization target. |
+
+### Shape-channel hardening notes (`[censorship]`)
+
+These parameters are designed to reduce one specific fingerprint source during masking: the exact number of bytes sent from proxy to `mask_host` for invalid or probing traffic.
+
+Without hardening, a censor can often correlate probe input length with backend-observed length very precisely (for example: `5 + body_sent` on early TLS reject paths). That creates a length-based classifier signal.
+
+When `mask_shape_hardening = true`, Telemt pads the **client->mask** stream tail to a bucket boundary at relay shutdown:
+
+- Total bytes sent to mask are first measured.
+- A bucket is selected using powers of two starting from `mask_shape_bucket_floor_bytes`.
+- Padding is added only if total bytes are below `mask_shape_bucket_cap_bytes`.
+- If bytes already exceed cap, no extra padding is added.
+
+This means multiple nearby probe sizes collapse into the same backend-observed size class, making active classification harder.
+
+What each parameter changes in practice:
+
+- `mask_shape_hardening`
+ Enables or disables this entire length-shaping stage on the fallback path.
+ When `false`, backend-observed length stays close to the real forwarded probe length.
+ When `true`, clean relay shutdown can append random padding bytes to move the total into a bucket.
+
+- `mask_shape_bucket_floor_bytes`
+ Sets the first bucket boundary used for small probes.
+ Example: with floor `512`, a malformed probe that would otherwise forward `37` bytes can be expanded to `512` bytes on clean EOF.
+ Larger floor values hide very small probes better, but increase egress cost.
+
+- `mask_shape_bucket_cap_bytes`
+ Sets the largest bucket Telemt will pad up to with bucket logic.
+ Example: with cap `4096`, a forwarded total of `1800` bytes may be padded to `2048` or `4096` depending on the bucket ladder, but a total already above `4096` will not be bucket-padded further.
+ Larger cap values increase the range over which size classes are collapsed, but also increase worst-case overhead.
+
+- Clean EOF matters in conservative mode
+ In the default profile, shape padding is intentionally conservative: it is applied on clean relay shutdown, not on every timeout/drip path.
+ This avoids introducing new timeout-tail artifacts that some backends or tests interpret as a separate fingerprint.
+
+Practical trade-offs:
+
+- Better anti-fingerprinting on size/shape channel.
+- Slightly higher egress overhead for small probes due to padding.
+- Behavior is intentionally conservative and enabled by default.
+
+Recommended starting profile:
+
+- `mask_shape_hardening = true` (default)
+- `mask_shape_bucket_floor_bytes = 512`
+- `mask_shape_bucket_cap_bytes = 4096`
+
+### Aggressive mode notes (`[censorship]`)
+
+`mask_shape_hardening_aggressive_mode` is an opt-in profile for higher anti-classifier pressure.
+
+- Default is `false` to preserve conservative timeout/no-tail behavior.
+- Requires `mask_shape_hardening = true`.
+- When enabled, backend-silent non-EOF masking paths may be shaped.
+- When enabled together with above-cap blur, the random extra tail uses `[1, max]` instead of `[0, max]`.
+
+What changes when aggressive mode is enabled:
+
+- Backend-silent timeout paths can be shaped
+ In default mode, a client that keeps the socket half-open and times out will usually not receive shape padding on that path.
+ In aggressive mode, Telemt may still shape that backend-silent session if no backend bytes were returned.
+ This is specifically aimed at active probes that try to avoid EOF in order to preserve an exact backend-observed length.
+
+- Above-cap blur always adds at least one byte
+ In default mode, above-cap blur may choose `0`, so some oversized probes still land on their exact base forwarded length.
+ In aggressive mode, that exact-base sample is removed by construction.
+
+- Tradeoff
+ Aggressive mode improves resistance to active length classifiers, but it is more opinionated and less conservative.
+ If your deployment prioritizes strict compatibility with timeout/no-tail semantics, leave it disabled.
+ If your threat model includes repeated active probing by a censor, this mode is the stronger profile.
+
+Use this mode only when your threat model prioritizes classifier resistance over strict compatibility with conservative masking semantics.
+
+### Above-cap blur notes (`[censorship]`)
+
+`mask_shape_above_cap_blur` adds a second-stage blur for very large probes that are already above `mask_shape_bucket_cap_bytes`.
+
+- A random tail in `[0, mask_shape_above_cap_blur_max_bytes]` is appended in default mode.
+- In aggressive mode, the random tail becomes strictly positive: `[1, mask_shape_above_cap_blur_max_bytes]`.
+- This reduces exact-size leakage above cap at bounded overhead.
+- Keep `mask_shape_above_cap_blur_max_bytes` conservative to avoid unnecessary egress growth.
+
+Operational meaning:
+
+- Without above-cap blur
+ A probe that forwards `5005` bytes will still look like `5005` bytes to the backend if it is already above cap.
+
+- With above-cap blur enabled
+ That same probe may look like any value in a bounded window above its base length.
+ Example with `mask_shape_above_cap_blur_max_bytes = 64`:
+ backend-observed size becomes `5005..5069` in default mode, or `5006..5069` in aggressive mode.
+
+- Choosing `mask_shape_above_cap_blur_max_bytes`
+ Small values reduce cost but preserve more separability between far-apart oversized classes.
+ Larger values blur oversized classes more aggressively, but add more egress overhead and more output variance.
+
+### Timing normalization envelope notes (`[censorship]`)
+
+`mask_timing_normalization_enabled` smooths timing differences between masking outcomes by applying a target duration envelope.
+
+- A random target is selected in `[mask_timing_normalization_floor_ms, mask_timing_normalization_ceiling_ms]`.
+- Fast paths are delayed up to the selected target.
+- Slow paths are not forced to finish by the ceiling (the envelope is best-effort shaping, not truncation).
+
+Recommended starting profile for timing shaping:
+
+- `mask_timing_normalization_enabled = true`
+- `mask_timing_normalization_floor_ms = 180`
+- `mask_timing_normalization_ceiling_ms = 320`
+
+If your backend or network is very bandwidth-constrained, reduce cap first. If probes are still too distinguishable in your environment, increase floor gradually.
## [access]
diff --git a/docs/VPS_DOUBLE_HOP.en.md b/docs/VPS_DOUBLE_HOP.en.md
new file mode 100644
index 0000000..9463b79
--- /dev/null
+++ b/docs/VPS_DOUBLE_HOP.en.md
@@ -0,0 +1,283 @@
+
+
+## Concept
+- **Server A** (_conditionally Russian Federation_):\
+ Entry point, receives Telegram proxy user traffic via **HAProxy** (port `443`)\
+ and sends it to the tunnel to Server **B**.\
+ Internal IP in the tunnel — `10.10.10.2`\
+ Port for HAProxy clients — `443\tcp`
+- **Server B** (_conditionally Netherlands_):\
+ Exit point, runs **telemt** and accepts client connections through Server **A**.\
+ The server must have unrestricted access to Telegram servers.\
+ Internal IP in the tunnel — `10.10.10.1`\
+ AmneziaWG port — `8443\udp`\
+ Port for telemt clients — `443\tcp`
+
+---
+
+## Step 1. Setting up the AmneziaWG tunnel (A <-> B)
+[AmneziaWG](https://github.com/amnezia-vpn/amneziawg-linux-kernel-module) must be installed on all servers.\
+All following commands are given for **Ubuntu 24.04**.\
+For RHEL-based distributions, installation instructions are available at the link above.
+
+### Installing AmneziaWG (Servers A and B)
+The following steps must be performed on each server:
+
+#### 1. Adding the AmneziaWG repository and installing required packages:
+```bash
+sudo apt install -y software-properties-common python3-launchpadlib gnupg2 linux-headers-$(uname -r) && \
+sudo add-apt-repository ppa:amnezia/ppa && \
+sudo apt-get install -y amneziawg
+```
+
+#### 2. Generating a unique key pair:
+```bash
+cd /etc/amnezia/amneziawg && \
+awg genkey | tee private.key | awg pubkey > public.key
+```
+
+As a result, you will get two files in the `/etc/amnezia/amneziawg` folder:\
+`private.key` - private, and\
+`public.key` - public server keys
+
+#### 3. Configuring network interfaces:
+Obfuscation parameters `S1`, `S2`, `H1`, `H2`, `H3`, `H4` must be strictly identical on both servers.\
+Parameters `Jc`, `Jmin` and `Jmax` can differ.\
+Parameters `I1-I5` ([Custom Protocol Signature](https://docs.amnezia.org/documentation/amnezia-wg/)) must be specified on the client side (Server **A**).
+
+Recommendations for choosing values:
+
+```text
+Jc — 1 ≤ Jc ≤ 128; from 4 to 12 inclusive
+Jmin — Jmax > Jmin < 1280*; recommended 8
+Jmax — Jmin < Jmax ≤ 1280*; recommended 80
+S1 — S1 ≤ 1132* (1280* - 148 = 1132); S1 + 56 ≠ S2;
+recommended range from 15 to 150 inclusive
+S2 — S2 ≤ 1188* (1280* - 92 = 1188);
+recommended range from 15 to 150 inclusive
+H1/H2/H3/H4 — must be unique and differ from each other;
+recommended range from 5 to 2147483647 inclusive
+
+* It is assumed that the Internet connection has an MTU of 1280.
+```
+
+> [!IMPORTANT]
+> It is recommended to use your own, unique values.\
+> You can use the [generator](https://htmlpreview.github.io/?https://gist.githubusercontent.com/avbor/955782b5c37b06240b243aa375baeac5/raw/e8b269ff0089a27effd88f8d925179b78e5666c4/awg-gen.html) to select parameters.
+
+#### Server B Configuration (Netherlands):
+
+Create the interface configuration file (`awg0`)
+```bash
+nano /etc/amnezia/amneziawg/awg0.conf
+```
+
+File content
+```ini
+[Interface]
+Address = 10.10.10.1/24
+ListenPort = 8443
+PrivateKey = <Server B private key>
+SaveConfig = true
+Jc = 4
+Jmin = 8
+Jmax = 80
+S1 = 29
+S2 = 15
+H1 = 2087563914
+H2 = 188817757
+H3 = 101784570
+H4 = 432174303
+
+[Peer]
+PublicKey = <Server A public key>
+AllowedIPs = 10.10.10.2/32
+```
+`ListenPort` - the port on which the server will wait for connections, you can choose any free one.\
+`<Server B private key>` - the content of the `private.key` file from Server **B**.\
+`<Server A public key>` - the content of the `public.key` file from Server **A**.
+
+Open the port on the firewall (if enabled):
+```bash
+sudo ufw allow from <Server A external IP> to any port 8443 proto udp
+```
+
+`<Server A external IP>` - the external IP address of Server **A**.
+
+#### Server A Configuration (Russian Federation):
+Create the interface configuration file (awg0)
+
+```bash
+nano /etc/amnezia/amneziawg/awg0.conf
+```
+
+File content
+```ini
+[Interface]
+Address = 10.10.10.2/24
+PrivateKey = <Server A private key>
+Jc = 4
+Jmin = 8
+Jmax = 80
+S1 = 29
+S2 = 15
+H1 = 2087563914
+H2 = 188817757
+H3 = 101784570
+H4 = 432174303
+I1 =
+I2 =
+I3 =
+I4 =
+I5 =
+
+[Peer]
+PublicKey = <Server B public key>
+Endpoint = <Server B public IP>:8443
+AllowedIPs = 10.10.10.1/32
+PersistentKeepalive = 25
+```
+
+`<Server A private key>` - the content of the `private.key` file from Server **A**.\
+`<Server B public key>` - the content of the `public.key` file from Server **B**.\
+`<Server B public IP>` - the public IP address of Server **B**.
+
+Enable the tunnel on both servers:
+```bash
+sudo systemctl enable --now awg-quick@awg0
+```
+
+Make sure Server B is accessible from Server A through the tunnel.
+```bash
+ping 10.10.10.1
+PING 10.10.10.1 (10.10.10.1) 56(84) bytes of data.
+64 bytes from 10.10.10.1: icmp_seq=1 ttl=64 time=35.1 ms
+64 bytes from 10.10.10.1: icmp_seq=2 ttl=64 time=35.0 ms
+64 bytes from 10.10.10.1: icmp_seq=3 ttl=64 time=35.1 ms
+^C
+```
+---
+
+## Step 2. Installing telemt on Server B (conditionally Netherlands)
+Installation and configuration are described [here](https://github.com/telemt/telemt/blob/main/docs/QUICK_START_GUIDE.ru.md) or [here](https://gitlab.com/An0nX/telemt-docker#-quick-start-docker-compose).\
+It is assumed that telemt expects connections on port `443\tcp`.
+
+In the telemt config, you must enable the `Proxy` protocol and restrict connections to it only through the tunnel.
+```toml
+[server]
+port = 443
+listen_addr_ipv4 = "10.10.10.1"
+proxy_protocol = true
+```
+
+Also, for correct link generation, specify the FQDN or IP address and port of Server `A`
+```toml
+[general.links]
+show = "*"
+public_host = "<Server A FQDN or IP>"
+public_port = 443
+```
+
+Open the port on the firewall (if enabled):
+```bash
+sudo ufw allow from 10.10.10.2 to any port 443 proto tcp
+```
+
+---
+
+## Step 3. Configuring HAProxy on Server A (Russian Federation)
+Since the version in the standard Ubuntu repository is relatively old, it makes sense to use the official Docker image.\
+[Instructions](https://docs.docker.com/engine/install/ubuntu/) for installing Docker on Ubuntu.
+
+> [!WARNING]
+> By default, regular users do not have rights to use ports < 1024.
+> Attempts to run HAProxy on port 443 can lead to errors:
+> ```
+> [ALERT] (8) : Binding [/usr/local/etc/haproxy/haproxy.cfg:17] for frontend tcp_in_443:
+> protocol tcpv4: cannot bind socket (Permission denied) for [0.0.0.0:443].
+> ```
+> There are two simple ways to bypass this restriction, choose one:
+> 1. At the OS level, change the net.ipv4.ip_unprivileged_port_start setting to allow users to use all ports:
+> ```
+> echo "net.ipv4.ip_unprivileged_port_start = 0" | sudo tee -a /etc/sysctl.conf && sudo sysctl -p
+> ```
+> or
+>
+> 2. Run HAProxy as root:
+> Uncomment the `user: "root"` parameter in docker-compose.yaml.
+
+#### Create a folder for HAProxy:
+```bash
+mkdir -p /opt/docker-compose/haproxy && cd $_
+```
+
+#### Create the docker-compose.yaml file
+`nano docker-compose.yaml`
+
+File content
+```yaml
+services:
+ haproxy:
+ image: haproxy:latest
+ container_name: haproxy
+ restart: unless-stopped
+ # user: "root"
+ network_mode: "host"
+ volumes:
+ - ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "1m"
+ max-file: "1"
+```
+
+#### Create the haproxy.cfg config file
+Accept connections on port 443\tcp and send them through the tunnel to Server `B` 10.10.10.1:443
+
+`nano haproxy.cfg`
+
+File content
+
+```haproxy
+global
+ log stdout format raw local0
+ maxconn 10000
+
+defaults
+ log global
+ mode tcp
+ option tcplog
+ option clitcpka
+ option srvtcpka
+ timeout connect 5s
+ timeout client 2h
+ timeout server 2h
+ timeout check 5s
+
+frontend tcp_in_443
+ bind *:443
+ maxconn 8000
+ option tcp-smart-accept
+ default_backend telemt_nodes
+
+backend telemt_nodes
+ option tcp-smart-connect
+ server server_b 10.10.10.1:443 check inter 5s rise 2 fall 3 send-proxy-v2
+
+
+```
+> [!WARNING]
+> **The file must end with an empty line, otherwise HAProxy will not start!**
+
+#### Allow port 443\tcp in the firewall (if enabled)
+```bash
+sudo ufw allow 443/tcp
+```
+
+#### Start the HAProxy container
+```bash
+docker compose up -d
+```
+
+If everything is configured correctly, you can now try connecting Telegram clients using links from the telemt log\api.
diff --git a/docs/VPS_DOUBLE_HOP.ru.md b/docs/VPS_DOUBLE_HOP.ru.md
new file mode 100644
index 0000000..625c64c
--- /dev/null
+++ b/docs/VPS_DOUBLE_HOP.ru.md
@@ -0,0 +1,287 @@
+
+
+## Концепция
+- **Сервер A** (_РФ_):\
+  Точка входа, принимает трафик пользователей Telegram-прокси через **HAProxy** (порт `443`)\
+  и отправляет в туннель на Сервер **B**.\
+  Внутренний IP в туннеле — `10.10.10.2`\
+  Порт для клиентов HAProxy — `443/tcp`
+- **Сервер B** (_условно Нидерланды_):\
+  Точка выхода, на нем работает **telemt** и принимает подключения клиентов через Сервер **A**.\
+  На сервере должен быть неограниченный доступ до серверов Telegram.\
+  Внутренний IP в туннеле — `10.10.10.1`\
+  Порт AmneziaWG — `8443/udp`\
+  Порт для клиентов telemt — `443/tcp`
+
+---
+
+## Шаг 1. Настройка туннеля AmneziaWG (A <-> B)
+
+На всех серверах необходимо установить [amneziawg](https://github.com/amnezia-vpn/amneziawg-linux-kernel-module).\
+Далее все команды даны для **Ubuntu 24.04**.\
+Для RHEL-based дистрибутивов инструкция по установке есть по ссылке выше.
+
+### Установка AmneziaWG (Сервера A и B)
+На каждом из серверов необходимо выполнить следующие шаги:
+
+#### 1. Добавление репозитория AmneziaWG и установка необходимых пакетов:
+```bash
+sudo apt install -y software-properties-common python3-launchpadlib gnupg2 linux-headers-$(uname -r) && \
+sudo add-apt-repository ppa:amnezia/ppa && \
+sudo apt-get install -y amneziawg
+```
+
+#### 2. Генерация уникальной пары ключей:
+```bash
+cd /etc/amnezia/amneziawg && \
+awg genkey | tee private.key | awg pubkey > public.key
+```
+В результате вы получите в папке `/etc/amnezia/amneziawg` два файла:\
+`private.key` - приватный и\
+`public.key` - публичный ключи сервера
+
+#### 3. Настройка сетевых интерфейсов:
+
+Параметры обфускации `S1`, `S2`, `H1`, `H2`, `H3`, `H4` должны быть строго идентичными на обоих серверах.\
+Параметры `Jc`, `Jmin` и `Jmax` могут отличаться.\
+Параметры `I1-I5` [(Custom Protocol Signature)](https://docs.amnezia.org/documentation/amnezia-wg/) нужно указывать на стороне _клиента_ (Сервер **А**).
+
+Рекомендации по выбору значений:
+```text
+Jc — 1 ≤ Jc ≤ 128; от 4 до 12 включительно
+Jmin — Jmax > Jmin < 1280*; рекомендовано 8
+Jmax — Jmin < Jmax ≤ 1280*; рекомендовано 80
+S1 — S1 ≤ 1132* (1280* - 148 = 1132); S1 + 56 ≠ S2;
+рекомендованный диапазон от 15 до 150 включительно
+S2 — S2 ≤ 1188* (1280* - 92 = 1188);
+рекомендованный диапазон от 15 до 150 включительно
+H1/H2/H3/H4 — должны быть уникальны и отличаться друг от друга;
+рекомендованный диапазон от 5 до 2147483647 включительно
+
+* Предполагается, что подключение к Интернету имеет MTU 1280.
+```
+> [!IMPORTANT]
+> Рекомендуется использовать собственные, уникальные значения.\
+> Для выбора параметров можете воспользоваться [генератором](https://htmlpreview.github.io/?https://gist.githubusercontent.com/avbor/955782b5c37b06240b243aa375baeac5/raw/e8b269ff0089a27effd88f8d925179b78e5666c4/awg-gen.html).
+
+#### Конфигурация Сервера B (_Нидерланды_):
+
+Создаем файл конфигурации интерфейса (`awg0`)
+```bash
+nano /etc/amnezia/amneziawg/awg0.conf
+```
+
+Содержимое файла
+```ini
+[Interface]
+Address = 10.10.10.1/24
+ListenPort = 8443
+PrivateKey =
+SaveConfig = true
+Jc = 4
+Jmin = 8
+Jmax = 80
+S1 = 29
+S2 = 15
+H1 = 2087563914
+H2 = 188817757
+H3 = 101784570
+H4 = 432174303
+
+[Peer]
+PublicKey =
+AllowedIPs = 10.10.10.2/32
+```
+
+`ListenPort` - порт, на котором сервер будет ждать подключения, можете выбрать любой свободный.\
+`` - содержимое файла `private.key` с сервера **B**.\
+`` - содержимое файла `public.key` с сервера **A**.
+
+Открываем порт на фаерволе (если включен):
+```bash
+sudo ufw allow from to any port 8443 proto udp
+```
+
+`` - внешний IP адрес Сервера **A**.
+
+#### Конфигурация Сервера A (_РФ_):
+
+Создаем файл конфигурации интерфейса (`awg0`)
+```bash
+nano /etc/amnezia/amneziawg/awg0.conf
+```
+
+Содержимое файла
+```ini
+[Interface]
+Address = 10.10.10.2/24
+PrivateKey =
+Jc = 4
+Jmin = 8
+Jmax = 80
+S1 = 29
+S2 = 15
+H1 = 2087563914
+H2 = 188817757
+H3 = 101784570
+H4 = 432174303
+I1 =
+I2 =
+I3 =
+I4 =
+I5 =
+
+[Peer]
+PublicKey =
+Endpoint = :8443
+AllowedIPs = 10.10.10.1/32
+PersistentKeepalive = 25
+```
+
+`` - содержимое файла `private.key` с сервера **A**.\
+`` - содержимое файла `public.key` с сервера **B**.\
+`` - публичный IP адрес сервера **B**.
+
+#### Включаем туннель на обоих серверах:
+```bash
+sudo systemctl enable --now awg-quick@awg0
+```
+
+Убедитесь, что с Сервера `A` доступен Сервер `B` через туннель.
+```bash
+ping 10.10.10.1
+PING 10.10.10.1 (10.10.10.1) 56(84) bytes of data.
+64 bytes from 10.10.10.1: icmp_seq=1 ttl=64 time=35.1 ms
+64 bytes from 10.10.10.1: icmp_seq=2 ttl=64 time=35.0 ms
+64 bytes from 10.10.10.1: icmp_seq=3 ttl=64 time=35.1 ms
+^C
+
+```
+
+---
+
+## Шаг 2. Установка telemt на Сервере B (_условно Нидерланды_)
+
+Установка и настройка описаны [здесь](https://github.com/telemt/telemt/blob/main/docs/QUICK_START_GUIDE.ru.md) или [здесь](https://gitlab.com/An0nX/telemt-docker#-quick-start-docker-compose).\
+Подразумевается, что telemt ожидает подключения на порту `443/tcp`.
+
+В конфиге telemt необходимо включить протокол `Proxy` и ограничить подключения к нему только через туннель.
+
+```toml
+[server]
+port = 443
+listen_addr_ipv4 = "10.10.10.1"
+proxy_protocol = true
+```
+
+А также, для правильной генерации ссылок, указать FQDN или IP адрес и порт Сервера `A`
+
+```toml
+[general.links]
+show = "*"
+public_host = ""
+public_port = 443
+```
+
+Открываем порт на фаерволе (если включен):
+```bash
+sudo ufw allow from 10.10.10.2 to any port 443 proto tcp
+```
+
+---
+
+## Шаг 3. Настройка HAProxy на Сервере A (_РФ_)
+
+Т.к. в стандартном репозитории Ubuntu версия относительно старая, имеет смысл воспользоваться официальным образом Docker.\
+[Инструкция](https://docs.docker.com/engine/install/ubuntu/) по установке Docker на Ubuntu.
+
+> [!WARNING]
+> По умолчанию у обычных пользователей нет прав на использование портов < 1024.\
+> Попытки запустить HAProxy на 443 порту могут приводить к ошибкам:
+> ```
+> [ALERT] (8) : Binding [/usr/local/etc/haproxy/haproxy.cfg:17] for frontend tcp_in_443:
+> protocol tcpv4: cannot bind socket (Permission denied) for [0.0.0.0:443].
+> ```
+> Есть два простых способа обойти это ограничение, выберите что-то одно:
+> 1. На уровне ОС изменить настройку net.ipv4.ip_unprivileged_port_start, разрешив пользователям использовать все порты:
+> ```
+> echo "net.ipv4.ip_unprivileged_port_start = 0" | sudo tee -a /etc/sysctl.conf && sudo sysctl -p
+> ```
+> или
+>
+> 2. Запустить HAProxy под root:\
+> Раскомментируйте в docker-compose.yaml параметр `user: "root"`.
+
+#### Создаем папку для HAProxy:
+```bash
+mkdir -p /opt/docker-compose/haproxy && cd $_
+```
+#### Создаем файл docker-compose.yaml
+
+`nano docker-compose.yaml`
+
+Содержимое файла
+```yaml
+services:
+ haproxy:
+ image: haproxy:latest
+ container_name: haproxy
+ restart: unless-stopped
+ # user: "root"
+ network_mode: "host"
+ volumes:
+ - ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
+ logging:
+ driver: "json-file"
+ options:
+ max-size: "1m"
+ max-file: "1"
+```
+#### Создаем файл конфига haproxy.cfg
+Принимаем подключения на порту 443/tcp и отправляем их через туннель на Сервер `B` 10.10.10.1:443
+
+`nano haproxy.cfg`
+
+Содержимое файла
+```haproxy
+global
+ log stdout format raw local0
+ maxconn 10000
+
+defaults
+ log global
+ mode tcp
+ option tcplog
+ option clitcpka
+ option srvtcpka
+ timeout connect 5s
+ timeout client 2h
+ timeout server 2h
+ timeout check 5s
+
+frontend tcp_in_443
+ bind *:443
+ maxconn 8000
+ option tcp-smart-accept
+ default_backend telemt_nodes
+
+backend telemt_nodes
+ option tcp-smart-connect
+    server server_b 10.10.10.1:443 check inter 5s rise 2 fall 3 send-proxy-v2
+
+
+```
+> [!WARNING]
+> **Файл должен заканчиваться пустой строкой, иначе HAProxy не запустится!**
+
+#### Разрешаем порт 443/tcp в фаерволе (если включен)
+```bash
+sudo ufw allow 443/tcp
+```
+
+#### Запускаем контейнер HAProxy
+```bash
+docker compose up -d
+```
+
+Если все настроено верно, то теперь можно пробовать подключить клиентов Telegram с использованием ссылок из лога/api telemt.
diff --git a/docs/model/FakeTLS.png b/docs/model/FakeTLS.png
new file mode 100644
index 0000000..5f6782e
Binary files /dev/null and b/docs/model/FakeTLS.png differ
diff --git a/docs/model/architecture.png b/docs/model/architecture.png
new file mode 100644
index 0000000..71d4a17
Binary files /dev/null and b/docs/model/architecture.png differ
diff --git a/src/api/http_utils.rs b/src/api/http_utils.rs
index e04bd04..9dfe526 100644
--- a/src/api/http_utils.rs
+++ b/src/api/http_utils.rs
@@ -24,10 +24,7 @@ pub(super) fn success_response(
.unwrap()
}
-pub(super) fn error_response(
- request_id: u64,
- failure: ApiFailure,
-) -> hyper::Response> {
+pub(super) fn error_response(request_id: u64, failure: ApiFailure) -> hyper::Response> {
let payload = ErrorResponse {
ok: false,
error: ErrorBody {
diff --git a/src/api/mod.rs b/src/api/mod.rs
index 0e2edd4..c1e3557 100644
--- a/src/api/mod.rs
+++ b/src/api/mod.rs
@@ -1,3 +1,5 @@
+#![allow(clippy::too_many_arguments)]
+
use std::convert::Infallible;
use std::net::{IpAddr, SocketAddr};
use std::path::PathBuf;
@@ -19,8 +21,8 @@ use crate::ip_tracker::UserIpTracker;
use crate::proxy::route_mode::RouteRuntimeController;
use crate::startup::StartupTracker;
use crate::stats::Stats;
-use crate::transport::middle_proxy::MePool;
use crate::transport::UpstreamManager;
+use crate::transport::middle_proxy::MePool;
mod config_store;
mod events;
@@ -36,8 +38,8 @@ mod runtime_zero;
mod users;
use config_store::{current_revision, parse_if_match};
-use http_utils::{error_response, read_json, read_optional_json, success_response};
use events::ApiEventStore;
+use http_utils::{error_response, read_json, read_optional_json, success_response};
use model::{
ApiFailure, CreateUserRequest, HealthData, PatchUserRequest, RotateSecretRequest, SummaryData,
};
@@ -55,11 +57,11 @@ use runtime_stats::{
MinimalCacheEntry, build_dcs_data, build_me_writers_data, build_minimal_all_data,
build_upstreams_data, build_zero_all_data,
};
+use runtime_watch::spawn_runtime_watchers;
use runtime_zero::{
build_limits_effective_data, build_runtime_gates_data, build_security_posture_data,
build_system_info_data,
};
-use runtime_watch::spawn_runtime_watchers;
use users::{create_user, delete_user, patch_user, rotate_secret, users_from_config};
pub(super) struct ApiRuntimeState {
@@ -208,15 +210,15 @@ async fn handle(
));
}
- if !api_cfg.whitelist.is_empty()
- && !api_cfg
- .whitelist
- .iter()
- .any(|net| net.contains(peer.ip()))
+ if !api_cfg.whitelist.is_empty() && !api_cfg.whitelist.iter().any(|net| net.contains(peer.ip()))
{
return Ok(error_response(
request_id,
- ApiFailure::new(StatusCode::FORBIDDEN, "forbidden", "Source IP is not allowed"),
+ ApiFailure::new(
+ StatusCode::FORBIDDEN,
+ "forbidden",
+ "Source IP is not allowed",
+ ),
));
}
@@ -347,7 +349,8 @@ async fn handle(
}
("GET", "/v1/runtime/connections/summary") => {
let revision = current_revision(&shared.config_path).await?;
- let data = build_runtime_connections_summary_data(shared.as_ref(), cfg.as_ref()).await;
+ let data =
+ build_runtime_connections_summary_data(shared.as_ref(), cfg.as_ref()).await;
Ok(success_response(StatusCode::OK, data, revision))
}
("GET", "/v1/runtime/events/recent") => {
@@ -389,13 +392,16 @@ async fn handle(
let (data, revision) = match result {
Ok(ok) => ok,
Err(error) => {
- shared.runtime_events.record("api.user.create.failed", error.code);
+ shared
+ .runtime_events
+ .record("api.user.create.failed", error.code);
return Err(error);
}
};
- shared
- .runtime_events
- .record("api.user.create.ok", format!("username={}", data.user.username));
+ shared.runtime_events.record(
+ "api.user.create.ok",
+ format!("username={}", data.user.username),
+ );
Ok(success_response(StatusCode::CREATED, data, revision))
}
_ => {
@@ -414,7 +420,8 @@ async fn handle(
detected_ip_v6,
)
.await;
- if let Some(user_info) = users.into_iter().find(|entry| entry.username == user)
+ if let Some(user_info) =
+ users.into_iter().find(|entry| entry.username == user)
{
return Ok(success_response(StatusCode::OK, user_info, revision));
}
@@ -435,7 +442,8 @@ async fn handle(
));
}
let expected_revision = parse_if_match(req.headers());
- let body = read_json::(req.into_body(), body_limit).await?;
+ let body =
+ read_json::(req.into_body(), body_limit).await?;
let result = patch_user(user, body, expected_revision, &shared).await;
let (data, revision) = match result {
Ok(ok) => ok,
@@ -475,10 +483,9 @@ async fn handle(
return Err(error);
}
};
- shared.runtime_events.record(
- "api.user.delete.ok",
- format!("username={}", deleted_user),
- );
+ shared
+ .runtime_events
+ .record("api.user.delete.ok", format!("username={}", deleted_user));
return Ok(success_response(StatusCode::OK, deleted_user, revision));
}
if method == Method::POST
diff --git a/src/api/model.rs b/src/api/model.rs
index 6578d35..8ae0c0b 100644
--- a/src/api/model.rs
+++ b/src/api/model.rs
@@ -1,10 +1,12 @@
use std::net::IpAddr;
+use std::sync::OnceLock;
use chrono::{DateTime, Utc};
use hyper::StatusCode;
-use rand::Rng;
use serde::{Deserialize, Serialize};
+use crate::crypto::SecureRandom;
+
const MAX_USERNAME_LEN: usize = 64;
#[derive(Debug)]
@@ -172,6 +174,24 @@ pub(super) struct ZeroMiddleProxyData {
pub(super) route_drop_queue_full_total: u64,
pub(super) route_drop_queue_full_base_total: u64,
pub(super) route_drop_queue_full_high_total: u64,
+ pub(super) d2c_batches_total: u64,
+ pub(super) d2c_batch_frames_total: u64,
+ pub(super) d2c_batch_bytes_total: u64,
+ pub(super) d2c_flush_reason_queue_drain_total: u64,
+ pub(super) d2c_flush_reason_batch_frames_total: u64,
+ pub(super) d2c_flush_reason_batch_bytes_total: u64,
+ pub(super) d2c_flush_reason_max_delay_total: u64,
+ pub(super) d2c_flush_reason_ack_immediate_total: u64,
+ pub(super) d2c_flush_reason_close_total: u64,
+ pub(super) d2c_data_frames_total: u64,
+ pub(super) d2c_ack_frames_total: u64,
+ pub(super) d2c_payload_bytes_total: u64,
+ pub(super) d2c_write_mode_coalesced_total: u64,
+ pub(super) d2c_write_mode_split_total: u64,
+ pub(super) d2c_quota_reject_pre_write_total: u64,
+ pub(super) d2c_quota_reject_post_write_total: u64,
+ pub(super) d2c_frame_buf_shrink_total: u64,
+ pub(super) d2c_frame_buf_shrink_bytes_total: u64,
pub(super) socks_kdf_strict_reject_total: u64,
pub(super) socks_kdf_compat_fallback_total: u64,
pub(super) endpoint_quarantine_total: u64,
@@ -196,8 +216,6 @@ pub(super) struct ZeroPoolData {
pub(super) pool_swap_total: u64,
pub(super) pool_drain_active: u64,
pub(super) pool_force_close_total: u64,
- pub(super) pool_drain_soft_evict_total: u64,
- pub(super) pool_drain_soft_evict_writer_total: u64,
pub(super) pool_stale_pick_total: u64,
pub(super) writer_removed_total: u64,
pub(super) writer_removed_unexpected_total: u64,
@@ -206,16 +224,6 @@ pub(super) struct ZeroPoolData {
pub(super) refill_failed_total: u64,
pub(super) writer_restored_same_endpoint_total: u64,
pub(super) writer_restored_fallback_total: u64,
- pub(super) teardown_attempt_total_normal: u64,
- pub(super) teardown_attempt_total_hard_detach: u64,
- pub(super) teardown_success_total_normal: u64,
- pub(super) teardown_success_total_hard_detach: u64,
- pub(super) teardown_timeout_total: u64,
- pub(super) teardown_escalation_total: u64,
- pub(super) teardown_noop_total: u64,
- pub(super) teardown_cleanup_side_effect_failures_total: u64,
- pub(super) teardown_duration_count_total: u64,
- pub(super) teardown_duration_sum_seconds_total: f64,
}
#[derive(Serialize, Clone)]
@@ -248,7 +256,6 @@ pub(super) struct MeWritersSummary {
pub(super) available_pct: f64,
pub(super) required_writers: usize,
pub(super) alive_writers: usize,
- pub(super) coverage_ratio: f64,
pub(super) coverage_pct: f64,
pub(super) fresh_alive_writers: usize,
pub(super) fresh_coverage_pct: f64,
@@ -297,7 +304,6 @@ pub(super) struct DcStatus {
pub(super) floor_max: usize,
pub(super) floor_capped: bool,
pub(super) alive_writers: usize,
- pub(super) coverage_ratio: f64,
pub(super) coverage_pct: f64,
pub(super) fresh_alive_writers: usize,
pub(super) fresh_coverage_pct: f64,
@@ -375,12 +381,6 @@ pub(super) struct MinimalMeRuntimeData {
pub(super) me_reconnect_backoff_cap_ms: u64,
pub(super) me_reconnect_fast_retry_count: u32,
pub(super) me_pool_drain_ttl_secs: u64,
- pub(super) me_instadrain: bool,
- pub(super) me_pool_drain_soft_evict_enabled: bool,
- pub(super) me_pool_drain_soft_evict_grace_secs: u64,
- pub(super) me_pool_drain_soft_evict_per_writer: u8,
- pub(super) me_pool_drain_soft_evict_budget_per_core: u16,
- pub(super) me_pool_drain_soft_evict_cooldown_ms: u64,
pub(super) me_pool_force_close_secs: u64,
pub(super) me_pool_min_fresh_ratio: f32,
pub(super) me_bind_stale_mode: &'static str,
@@ -502,7 +502,9 @@ pub(super) fn is_valid_username(user: &str) -> bool {
}
pub(super) fn random_user_secret() -> String {
+    static API_SECRET_RNG: OnceLock<SecureRandom> = OnceLock::new();
+ let rng = API_SECRET_RNG.get_or_init(SecureRandom::new);
let mut bytes = [0u8; 16];
- rand::rng().fill(&mut bytes);
+ rng.fill(&mut bytes);
hex::encode(bytes)
}
diff --git a/src/api/runtime_init.rs b/src/api/runtime_init.rs
index 4bd8943..b7601f5 100644
--- a/src/api/runtime_init.rs
+++ b/src/api/runtime_init.rs
@@ -167,11 +167,7 @@ async fn current_me_pool_stage_progress(shared: &ApiShared) -> Option {
let pool = shared.me_pool.read().await.clone()?;
let status = pool.api_status_snapshot().await;
let configured_dc_groups = status.configured_dc_groups;
- let covered_dc_groups = status
- .dcs
- .iter()
- .filter(|dc| dc.alive_writers > 0)
- .count();
+ let covered_dc_groups = status.dcs.iter().filter(|dc| dc.alive_writers > 0).count();
let dc_coverage = ratio_01(covered_dc_groups, configured_dc_groups);
let writer_coverage = ratio_01(status.alive_writers, status.required_writers);
diff --git a/src/api/runtime_min.rs b/src/api/runtime_min.rs
index 047fd9c..986f138 100644
--- a/src/api/runtime_min.rs
+++ b/src/api/runtime_min.rs
@@ -4,9 +4,6 @@ use std::time::{SystemTime, UNIX_EPOCH};
use serde::Serialize;
use crate::config::ProxyConfig;
-use crate::stats::{
- MeWriterCleanupSideEffectStep, MeWriterTeardownMode, MeWriterTeardownReason, Stats,
-};
use super::ApiShared;
@@ -101,50 +98,6 @@ pub(super) struct RuntimeMeQualityCountersData {
pub(super) reconnect_success_total: u64,
}
-#[derive(Serialize)]
-pub(super) struct RuntimeMeQualityTeardownAttemptData {
- pub(super) reason: &'static str,
- pub(super) mode: &'static str,
- pub(super) total: u64,
-}
-
-#[derive(Serialize)]
-pub(super) struct RuntimeMeQualityTeardownSuccessData {
- pub(super) mode: &'static str,
- pub(super) total: u64,
-}
-
-#[derive(Serialize)]
-pub(super) struct RuntimeMeQualityTeardownSideEffectData {
- pub(super) step: &'static str,
- pub(super) total: u64,
-}
-
-#[derive(Serialize)]
-pub(super) struct RuntimeMeQualityTeardownDurationBucketData {
- pub(super) le_seconds: &'static str,
- pub(super) total: u64,
-}
-
-#[derive(Serialize)]
-pub(super) struct RuntimeMeQualityTeardownDurationData {
- pub(super) mode: &'static str,
- pub(super) count: u64,
- pub(super) sum_seconds: f64,
- pub(super) buckets: Vec,
-}
-
-#[derive(Serialize)]
-pub(super) struct RuntimeMeQualityTeardownData {
- pub(super) attempts: Vec,
- pub(super) success: Vec,
- pub(super) timeout_total: u64,
- pub(super) escalation_total: u64,
- pub(super) noop_total: u64,
- pub(super) cleanup_side_effect_failures: Vec,
- pub(super) duration: Vec,
-}
-
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityRouteDropData {
pub(super) no_conn_total: u64,
@@ -179,14 +132,12 @@ pub(super) struct RuntimeMeQualityDcRttData {
pub(super) rtt_ema_ms: Option,
pub(super) alive_writers: usize,
pub(super) required_writers: usize,
- pub(super) coverage_ratio: f64,
pub(super) coverage_pct: f64,
}
#[derive(Serialize)]
pub(super) struct RuntimeMeQualityPayload {
pub(super) counters: RuntimeMeQualityCountersData,
- pub(super) teardown: RuntimeMeQualityTeardownData,
pub(super) route_drops: RuntimeMeQualityRouteDropData,
pub(super) family_states: Vec,
pub(super) drain_gate: RuntimeMeQualityDrainGateData,
@@ -457,7 +408,6 @@ pub(super) async fn build_runtime_me_quality_data(shared: &ApiShared) -> Runtime
reconnect_attempt_total: shared.stats.get_me_reconnect_attempts(),
reconnect_success_total: shared.stats.get_me_reconnect_success(),
},
- teardown: build_runtime_me_teardown_data(shared),
route_drops: RuntimeMeQualityRouteDropData {
no_conn_total: shared.stats.get_me_route_drop_no_conn(),
channel_closed_total: shared.stats.get_me_route_drop_channel_closed(),
@@ -480,7 +430,6 @@ pub(super) async fn build_runtime_me_quality_data(shared: &ApiShared) -> Runtime
rtt_ema_ms: dc.rtt_ms,
alive_writers: dc.alive_writers,
required_writers: dc.required_writers,
- coverage_ratio: dc.coverage_ratio,
coverage_pct: dc.coverage_pct,
})
.collect(),
@@ -488,81 +437,6 @@ pub(super) async fn build_runtime_me_quality_data(shared: &ApiShared) -> Runtime
}
}
-fn build_runtime_me_teardown_data(shared: &ApiShared) -> RuntimeMeQualityTeardownData {
- let attempts = MeWriterTeardownReason::ALL
- .iter()
- .copied()
- .flat_map(|reason| {
- MeWriterTeardownMode::ALL
- .iter()
- .copied()
- .map(move |mode| RuntimeMeQualityTeardownAttemptData {
- reason: reason.as_str(),
- mode: mode.as_str(),
- total: shared.stats.get_me_writer_teardown_attempt_total(reason, mode),
- })
- })
- .collect();
-
- let success = MeWriterTeardownMode::ALL
- .iter()
- .copied()
- .map(|mode| RuntimeMeQualityTeardownSuccessData {
- mode: mode.as_str(),
- total: shared.stats.get_me_writer_teardown_success_total(mode),
- })
- .collect();
-
- let cleanup_side_effect_failures = MeWriterCleanupSideEffectStep::ALL
- .iter()
- .copied()
- .map(|step| RuntimeMeQualityTeardownSideEffectData {
- step: step.as_str(),
- total: shared
- .stats
- .get_me_writer_cleanup_side_effect_failures_total(step),
- })
- .collect();
-
- let duration = MeWriterTeardownMode::ALL
- .iter()
- .copied()
- .map(|mode| {
- let count = shared.stats.get_me_writer_teardown_duration_count(mode);
- let mut buckets: Vec = Stats::me_writer_teardown_duration_bucket_labels()
- .iter()
- .enumerate()
- .map(|(bucket_idx, label)| RuntimeMeQualityTeardownDurationBucketData {
- le_seconds: label,
- total: shared
- .stats
- .get_me_writer_teardown_duration_bucket_total(mode, bucket_idx),
- })
- .collect();
- buckets.push(RuntimeMeQualityTeardownDurationBucketData {
- le_seconds: "+Inf",
- total: count,
- });
- RuntimeMeQualityTeardownDurationData {
- mode: mode.as_str(),
- count,
- sum_seconds: shared.stats.get_me_writer_teardown_duration_sum_seconds(mode),
- buckets,
- }
- })
- .collect();
-
- RuntimeMeQualityTeardownData {
- attempts,
- success,
- timeout_total: shared.stats.get_me_writer_teardown_timeout_total(),
- escalation_total: shared.stats.get_me_writer_teardown_escalation_total(),
- noop_total: shared.stats.get_me_writer_teardown_noop_total(),
- cleanup_side_effect_failures,
- duration,
- }
-}
-
pub(super) async fn build_runtime_upstream_quality_data(
shared: &ApiShared,
) -> RuntimeUpstreamQualityData {
diff --git a/src/api/runtime_stats.rs b/src/api/runtime_stats.rs
index 999e2cf..b66d1a5 100644
--- a/src/api/runtime_stats.rs
+++ b/src/api/runtime_stats.rs
@@ -1,9 +1,9 @@
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
use crate::config::ApiConfig;
-use crate::stats::{MeWriterTeardownMode, Stats};
-use crate::transport::upstream::IpPreference;
+use crate::stats::Stats;
use crate::transport::UpstreamRouteKind;
+use crate::transport::upstream::IpPreference;
use super::ApiShared;
use super::model::{
@@ -68,6 +68,25 @@ pub(super) fn build_zero_all_data(stats: &Stats, configured_users: usize) -> Zer
route_drop_queue_full_total: stats.get_me_route_drop_queue_full(),
route_drop_queue_full_base_total: stats.get_me_route_drop_queue_full_base(),
route_drop_queue_full_high_total: stats.get_me_route_drop_queue_full_high(),
+ d2c_batches_total: stats.get_me_d2c_batches_total(),
+ d2c_batch_frames_total: stats.get_me_d2c_batch_frames_total(),
+ d2c_batch_bytes_total: stats.get_me_d2c_batch_bytes_total(),
+ d2c_flush_reason_queue_drain_total: stats.get_me_d2c_flush_reason_queue_drain_total(),
+ d2c_flush_reason_batch_frames_total: stats.get_me_d2c_flush_reason_batch_frames_total(),
+ d2c_flush_reason_batch_bytes_total: stats.get_me_d2c_flush_reason_batch_bytes_total(),
+ d2c_flush_reason_max_delay_total: stats.get_me_d2c_flush_reason_max_delay_total(),
+ d2c_flush_reason_ack_immediate_total: stats
+ .get_me_d2c_flush_reason_ack_immediate_total(),
+ d2c_flush_reason_close_total: stats.get_me_d2c_flush_reason_close_total(),
+ d2c_data_frames_total: stats.get_me_d2c_data_frames_total(),
+ d2c_ack_frames_total: stats.get_me_d2c_ack_frames_total(),
+ d2c_payload_bytes_total: stats.get_me_d2c_payload_bytes_total(),
+ d2c_write_mode_coalesced_total: stats.get_me_d2c_write_mode_coalesced_total(),
+ d2c_write_mode_split_total: stats.get_me_d2c_write_mode_split_total(),
+ d2c_quota_reject_pre_write_total: stats.get_me_d2c_quota_reject_pre_write_total(),
+ d2c_quota_reject_post_write_total: stats.get_me_d2c_quota_reject_post_write_total(),
+ d2c_frame_buf_shrink_total: stats.get_me_d2c_frame_buf_shrink_total(),
+ d2c_frame_buf_shrink_bytes_total: stats.get_me_d2c_frame_buf_shrink_bytes_total(),
socks_kdf_strict_reject_total: stats.get_me_socks_kdf_strict_reject(),
socks_kdf_compat_fallback_total: stats.get_me_socks_kdf_compat_fallback(),
endpoint_quarantine_total: stats.get_me_endpoint_quarantine_total(),
@@ -96,8 +115,6 @@ pub(super) fn build_zero_all_data(stats: &Stats, configured_users: usize) -> Zer
pool_swap_total: stats.get_pool_swap_total(),
pool_drain_active: stats.get_pool_drain_active(),
pool_force_close_total: stats.get_pool_force_close_total(),
- pool_drain_soft_evict_total: stats.get_pool_drain_soft_evict_total(),
- pool_drain_soft_evict_writer_total: stats.get_pool_drain_soft_evict_writer_total(),
pool_stale_pick_total: stats.get_pool_stale_pick_total(),
writer_removed_total: stats.get_me_writer_removed_total(),
writer_removed_unexpected_total: stats.get_me_writer_removed_unexpected_total(),
@@ -106,29 +123,6 @@ pub(super) fn build_zero_all_data(stats: &Stats, configured_users: usize) -> Zer
refill_failed_total: stats.get_me_refill_failed_total(),
writer_restored_same_endpoint_total: stats.get_me_writer_restored_same_endpoint_total(),
writer_restored_fallback_total: stats.get_me_writer_restored_fallback_total(),
- teardown_attempt_total_normal: stats
- .get_me_writer_teardown_attempt_total_by_mode(MeWriterTeardownMode::Normal),
- teardown_attempt_total_hard_detach: stats
- .get_me_writer_teardown_attempt_total_by_mode(MeWriterTeardownMode::HardDetach),
- teardown_success_total_normal: stats
- .get_me_writer_teardown_success_total(MeWriterTeardownMode::Normal),
- teardown_success_total_hard_detach: stats
- .get_me_writer_teardown_success_total(MeWriterTeardownMode::HardDetach),
- teardown_timeout_total: stats.get_me_writer_teardown_timeout_total(),
- teardown_escalation_total: stats.get_me_writer_teardown_escalation_total(),
- teardown_noop_total: stats.get_me_writer_teardown_noop_total(),
- teardown_cleanup_side_effect_failures_total: stats
- .get_me_writer_cleanup_side_effect_failures_total_all(),
- teardown_duration_count_total: stats
- .get_me_writer_teardown_duration_count(MeWriterTeardownMode::Normal)
- .saturating_add(
- stats.get_me_writer_teardown_duration_count(MeWriterTeardownMode::HardDetach),
- ),
- teardown_duration_sum_seconds_total: stats
- .get_me_writer_teardown_duration_sum_seconds(MeWriterTeardownMode::Normal)
- + stats.get_me_writer_teardown_duration_sum_seconds(
- MeWriterTeardownMode::HardDetach,
- ),
},
desync: ZeroDesyncData {
secure_padding_invalid_total: stats.get_secure_padding_invalid(),
@@ -340,7 +334,6 @@ async fn get_minimal_payload_cached(
available_pct: status.available_pct,
required_writers: status.required_writers,
alive_writers: status.alive_writers,
- coverage_ratio: status.coverage_ratio,
coverage_pct: status.coverage_pct,
fresh_alive_writers: status.fresh_alive_writers,
fresh_coverage_pct: status.fresh_coverage_pct,
@@ -398,7 +391,6 @@ async fn get_minimal_payload_cached(
floor_max: entry.floor_max,
floor_capped: entry.floor_capped,
alive_writers: entry.alive_writers,
- coverage_ratio: entry.coverage_ratio,
coverage_pct: entry.coverage_pct,
fresh_alive_writers: entry.fresh_alive_writers,
fresh_coverage_pct: entry.fresh_coverage_pct,
@@ -452,12 +444,6 @@ async fn get_minimal_payload_cached(
me_reconnect_backoff_cap_ms: runtime.me_reconnect_backoff_cap_ms,
me_reconnect_fast_retry_count: runtime.me_reconnect_fast_retry_count,
me_pool_drain_ttl_secs: runtime.me_pool_drain_ttl_secs,
- me_instadrain: runtime.me_instadrain,
- me_pool_drain_soft_evict_enabled: runtime.me_pool_drain_soft_evict_enabled,
- me_pool_drain_soft_evict_grace_secs: runtime.me_pool_drain_soft_evict_grace_secs,
- me_pool_drain_soft_evict_per_writer: runtime.me_pool_drain_soft_evict_per_writer,
- me_pool_drain_soft_evict_budget_per_core: runtime.me_pool_drain_soft_evict_budget_per_core,
- me_pool_drain_soft_evict_cooldown_ms: runtime.me_pool_drain_soft_evict_cooldown_ms,
me_pool_force_close_secs: runtime.me_pool_force_close_secs,
me_pool_min_fresh_ratio: runtime.me_pool_min_fresh_ratio,
me_bind_stale_mode: runtime.me_bind_stale_mode,
@@ -526,7 +512,6 @@ fn disabled_me_writers(now_epoch_secs: u64, reason: &'static str) -> MeWritersDa
available_pct: 0.0,
required_writers: 0,
alive_writers: 0,
- coverage_ratio: 0.0,
coverage_pct: 0.0,
fresh_alive_writers: 0,
fresh_coverage_pct: 0.0,
diff --git a/src/api/runtime_zero.rs b/src/api/runtime_zero.rs
index ba89302..a6eb163 100644
--- a/src/api/runtime_zero.rs
+++ b/src/api/runtime_zero.rs
@@ -128,7 +128,8 @@ pub(super) fn build_system_info_data(
.runtime_state
.last_config_reload_epoch_secs
.load(Ordering::Relaxed);
- let last_config_reload_epoch_secs = (last_reload_epoch_secs > 0).then_some(last_reload_epoch_secs);
+ let last_config_reload_epoch_secs =
+ (last_reload_epoch_secs > 0).then_some(last_reload_epoch_secs);
let git_commit = option_env!("TELEMT_GIT_COMMIT")
.or(option_env!("VERGEN_GIT_SHA"))
@@ -153,7 +154,10 @@ pub(super) fn build_system_info_data(
uptime_seconds: shared.stats.uptime_secs(),
config_path: shared.config_path.display().to_string(),
config_hash: revision.to_string(),
- config_reload_count: shared.runtime_state.config_reload_count.load(Ordering::Relaxed),
+ config_reload_count: shared
+ .runtime_state
+ .config_reload_count
+ .load(Ordering::Relaxed),
last_config_reload_epoch_secs,
}
}
@@ -233,9 +237,7 @@ pub(super) fn build_limits_effective_data(cfg: &ProxyConfig) -> EffectiveLimitsD
adaptive_floor_writers_per_core_total: cfg
.general
.me_adaptive_floor_writers_per_core_total,
- adaptive_floor_cpu_cores_override: cfg
- .general
- .me_adaptive_floor_cpu_cores_override,
+ adaptive_floor_cpu_cores_override: cfg.general.me_adaptive_floor_cpu_cores_override,
adaptive_floor_max_extra_writers_single_per_core: cfg
.general
.me_adaptive_floor_max_extra_writers_single_per_core,
diff --git a/src/api/users.rs b/src/api/users.rs
index f339806..2ee8b98 100644
--- a/src/api/users.rs
+++ b/src/api/users.rs
@@ -46,7 +46,9 @@ pub(super) async fn create_user(
None => random_user_secret(),
};
- if let Some(ad_tag) = body.user_ad_tag.as_ref() && !is_valid_ad_tag(ad_tag) {
+ if let Some(ad_tag) = body.user_ad_tag.as_ref()
+ && !is_valid_ad_tag(ad_tag)
+ {
return Err(ApiFailure::bad_request(
"user_ad_tag must be exactly 32 hex characters",
));
@@ -65,12 +67,18 @@ pub(super) async fn create_user(
));
}
- cfg.access.users.insert(body.username.clone(), secret.clone());
+ cfg.access
+ .users
+ .insert(body.username.clone(), secret.clone());
if let Some(ad_tag) = body.user_ad_tag {
- cfg.access.user_ad_tags.insert(body.username.clone(), ad_tag);
+ cfg.access
+ .user_ad_tags
+ .insert(body.username.clone(), ad_tag);
}
if let Some(limit) = body.max_tcp_conns {
- cfg.access.user_max_tcp_conns.insert(body.username.clone(), limit);
+ cfg.access
+ .user_max_tcp_conns
+ .insert(body.username.clone(), limit);
}
if let Some(expiration) = expiration {
cfg.access
@@ -78,7 +86,9 @@ pub(super) async fn create_user(
.insert(body.username.clone(), expiration);
}
if let Some(quota) = body.data_quota_bytes {
- cfg.access.user_data_quota.insert(body.username.clone(), quota);
+ cfg.access
+ .user_data_quota
+ .insert(body.username.clone(), quota);
}
let updated_limit = body.max_unique_ips;
@@ -108,11 +118,15 @@ pub(super) async fn create_user(
touched_sections.push(AccessSection::UserMaxUniqueIps);
}
- let revision = save_access_sections_to_disk(&shared.config_path, &cfg, &touched_sections).await?;
+ let revision =
+ save_access_sections_to_disk(&shared.config_path, &cfg, &touched_sections).await?;
drop(_guard);
if let Some(limit) = updated_limit {
- shared.ip_tracker.set_user_limit(&body.username, limit).await;
+ shared
+ .ip_tracker
+ .set_user_limit(&body.username, limit)
+ .await;
}
let (detected_ip_v4, detected_ip_v6) = shared.detected_link_ips();
@@ -140,12 +154,7 @@ pub(super) async fn create_user(
recent_unique_ips: 0,
recent_unique_ips_list: Vec::new(),
total_octets: 0,
- links: build_user_links(
- &cfg,
- &secret,
- detected_ip_v4,
- detected_ip_v6,
- ),
+ links: build_user_links(&cfg, &secret, detected_ip_v4, detected_ip_v6),
});
Ok((CreateUserResponse { user, secret }, revision))
@@ -157,12 +166,16 @@ pub(super) async fn patch_user(
expected_revision: Option,
shared: &ApiShared,
) -> Result<(UserInfo, String), ApiFailure> {
- if let Some(secret) = body.secret.as_ref() && !is_valid_user_secret(secret) {
+ if let Some(secret) = body.secret.as_ref()
+ && !is_valid_user_secret(secret)
+ {
return Err(ApiFailure::bad_request(
"secret must be exactly 32 hex characters",
));
}
- if let Some(ad_tag) = body.user_ad_tag.as_ref() && !is_valid_ad_tag(ad_tag) {
+ if let Some(ad_tag) = body.user_ad_tag.as_ref()
+ && !is_valid_ad_tag(ad_tag)
+ {
return Err(ApiFailure::bad_request(
"user_ad_tag must be exactly 32 hex characters",
));
@@ -187,10 +200,14 @@ pub(super) async fn patch_user(
cfg.access.user_ad_tags.insert(user.to_string(), ad_tag);
}
if let Some(limit) = body.max_tcp_conns {
- cfg.access.user_max_tcp_conns.insert(user.to_string(), limit);
+ cfg.access
+ .user_max_tcp_conns
+ .insert(user.to_string(), limit);
}
if let Some(expiration) = expiration {
- cfg.access.user_expirations.insert(user.to_string(), expiration);
+ cfg.access
+ .user_expirations
+ .insert(user.to_string(), expiration);
}
if let Some(quota) = body.data_quota_bytes {
cfg.access.user_data_quota.insert(user.to_string(), quota);
@@ -198,7 +215,9 @@ pub(super) async fn patch_user(
let mut updated_limit = None;
if let Some(limit) = body.max_unique_ips {
- cfg.access.user_max_unique_ips.insert(user.to_string(), limit);
+ cfg.access
+ .user_max_unique_ips
+ .insert(user.to_string(), limit);
updated_limit = Some(limit);
}
@@ -263,7 +282,8 @@ pub(super) async fn rotate_secret(
AccessSection::UserDataQuota,
AccessSection::UserMaxUniqueIps,
];
- let revision = save_access_sections_to_disk(&shared.config_path, &cfg, &touched_sections).await?;
+ let revision =
+ save_access_sections_to_disk(&shared.config_path, &cfg, &touched_sections).await?;
drop(_guard);
let (detected_ip_v4, detected_ip_v6) = shared.detected_link_ips();
@@ -330,7 +350,8 @@ pub(super) async fn delete_user(
AccessSection::UserDataQuota,
AccessSection::UserMaxUniqueIps,
];
- let revision = save_access_sections_to_disk(&shared.config_path, &cfg, &touched_sections).await?;
+ let revision =
+ save_access_sections_to_disk(&shared.config_path, &cfg, &touched_sections).await?;
drop(_guard);
shared.ip_tracker.remove_user_limit(user).await;
shared.ip_tracker.clear_user_ips(user).await;
@@ -365,12 +386,7 @@ pub(super) async fn users_from_config(
.users
.get(&username)
.map(|secret| {
- build_user_links(
- cfg,
- secret,
- startup_detected_ip_v4,
- startup_detected_ip_v6,
- )
+ build_user_links(cfg, secret, startup_detected_ip_v4, startup_detected_ip_v6)
})
.unwrap_or(UserLinks {
classic: Vec::new(),
@@ -392,10 +408,8 @@ pub(super) async fn users_from_config(
.get(&username)
.copied()
.filter(|limit| *limit > 0)
- .or(
- (cfg.access.user_max_unique_ips_global_each > 0)
- .then_some(cfg.access.user_max_unique_ips_global_each),
- ),
+ .or((cfg.access.user_max_unique_ips_global_each > 0)
+ .then_some(cfg.access.user_max_unique_ips_global_each)),
current_connections: stats.get_user_curr_connects(&username),
active_unique_ips: active_ip_list.len(),
active_unique_ips_list: active_ip_list,
@@ -481,11 +495,11 @@ fn resolve_link_hosts(
push_unique_host(&mut hosts, host);
continue;
}
- if let Some(ip) = listener.announce_ip {
- if !ip.is_unspecified() {
- push_unique_host(&mut hosts, &ip.to_string());
- continue;
- }
+ if let Some(ip) = listener.announce_ip
+ && !ip.is_unspecified()
+ {
+ push_unique_host(&mut hosts, &ip.to_string());
+ continue;
}
if listener.ip.is_unspecified() {
let detected_ip = if listener.ip.is_ipv4() {
diff --git a/src/cli.rs b/src/cli.rs
index 87dcfb5..6dc0e2a 100644
--- a/src/cli.rs
+++ b/src/cli.rs
@@ -1,9 +1,9 @@
//! CLI commands: --init (fire-and-forget setup)
+use rand::Rng;
use std::fs;
use std::path::{Path, PathBuf};
use std::process::Command;
-use rand::Rng;
/// Options for the init command
pub struct InitOptions {
@@ -35,10 +35,10 @@ pub fn parse_init_args(args: &[String]) -> Option {
if !args.iter().any(|a| a == "--init") {
return None;
}
-
+
let mut opts = InitOptions::default();
let mut i = 0;
-
+
while i < args.len() {
match args[i].as_str() {
"--port" => {
@@ -78,7 +78,7 @@ pub fn parse_init_args(args: &[String]) -> Option {
}
i += 1;
}
-
+
Some(opts)
}
@@ -86,7 +86,7 @@ pub fn parse_init_args(args: &[String]) -> Option {
pub fn run_init(opts: InitOptions) -> Result<(), Box> {
eprintln!("[telemt] Fire-and-forget setup");
eprintln!();
-
+
// 1. Generate or validate secret
let secret = match opts.secret {
Some(s) => {
@@ -98,28 +98,28 @@ pub fn run_init(opts: InitOptions) -> Result<(), Box> {
}
None => generate_secret(),
};
-
+
eprintln!("[+] Secret: {}", secret);
eprintln!("[+] User: {}", opts.username);
eprintln!("[+] Port: {}", opts.port);
eprintln!("[+] Domain: {}", opts.domain);
-
+
// 2. Create config directory
fs::create_dir_all(&opts.config_dir)?;
let config_path = opts.config_dir.join("config.toml");
-
+
// 3. Write config
let config_content = generate_config(&opts.username, &secret, opts.port, &opts.domain);
fs::write(&config_path, &config_content)?;
eprintln!("[+] Config written to {}", config_path.display());
-
+
// 4. Write systemd unit
- let exe_path = std::env::current_exe()
- .unwrap_or_else(|_| PathBuf::from("/usr/local/bin/telemt"));
-
+ let exe_path =
+ std::env::current_exe().unwrap_or_else(|_| PathBuf::from("/usr/local/bin/telemt"));
+
let unit_path = Path::new("/etc/systemd/system/telemt.service");
let unit_content = generate_systemd_unit(&exe_path, &config_path);
-
+
match fs::write(unit_path, &unit_content) {
Ok(()) => {
eprintln!("[+] Systemd unit written to {}", unit_path.display());
@@ -128,31 +128,31 @@ pub fn run_init(opts: InitOptions) -> Result<(), Box> {
eprintln!("[!] Cannot write systemd unit (run as root?): {}", e);
eprintln!("[!] Manual unit file content:");
eprintln!("{}", unit_content);
-
+
// Still print links and config
print_links(&opts.username, &secret, opts.port, &opts.domain);
return Ok(());
}
}
-
+
// 5. Reload systemd
run_cmd("systemctl", &["daemon-reload"]);
-
+
// 6. Enable service
run_cmd("systemctl", &["enable", "telemt.service"]);
eprintln!("[+] Service enabled");
-
+
// 7. Start service (unless --no-start)
if !opts.no_start {
run_cmd("systemctl", &["start", "telemt.service"]);
eprintln!("[+] Service started");
-
+
// Brief delay then check status
std::thread::sleep(std::time::Duration::from_secs(1));
let status = Command::new("systemctl")
.args(["is-active", "telemt.service"])
.output();
-
+
match status {
Ok(out) if out.status.success() => {
eprintln!("[+] Service is running");
@@ -166,12 +166,12 @@ pub fn run_init(opts: InitOptions) -> Result<(), Box> {
eprintln!("[+] Service not started (--no-start)");
eprintln!("[+] Start manually: systemctl start telemt.service");
}
-
+
eprintln!();
-
+
// 8. Print links
print_links(&opts.username, &secret, opts.port, &opts.domain);
-
+
Ok(())
}
@@ -183,7 +183,7 @@ fn generate_secret() -> String {
fn generate_config(username: &str, secret: &str, port: u16, domain: &str) -> String {
format!(
-r#"# Telemt MTProxy — auto-generated config
+ r#"# Telemt MTProxy — auto-generated config
# Re-run `telemt --init` to regenerate
show_link = ["{username}"]
@@ -246,7 +246,7 @@ tls_full_cert_ttl_secs = 90
[access]
replay_check_len = 65536
-replay_window_secs = 1800
+replay_window_secs = 1800
ignore_time_skew = false
[access.users]
@@ -266,7 +266,7 @@ weight = 10
fn generate_systemd_unit(exe_path: &Path, config_path: &Path) -> String {
format!(
-r#"[Unit]
+ r#"[Unit]
Description=Telemt MTProxy
Documentation=https://github.com/telemt/telemt
After=network-online.target
@@ -309,11 +309,13 @@ fn run_cmd(cmd: &str, args: &[&str]) {
fn print_links(username: &str, secret: &str, port: u16, domain: &str) {
let domain_hex = hex::encode(domain);
-
+
println!("=== Proxy Links ===");
println!("[{}]", username);
- println!(" EE-TLS: tg://proxy?server=YOUR_SERVER_IP&port={}&secret=ee{}{}",
- port, secret, domain_hex);
+ println!(
+ " EE-TLS: tg://proxy?server=YOUR_SERVER_IP&port={}&secret=ee{}{}",
+ port, secret, domain_hex
+ );
println!();
println!("Replace YOUR_SERVER_IP with your server's public IP.");
println!("The proxy will auto-detect and display the correct link on startup.");
diff --git a/src/config/defaults.rs b/src/config/defaults.rs
index be540b0..66ffeda 100644
--- a/src/config/defaults.rs
+++ b/src/config/defaults.rs
@@ -1,6 +1,6 @@
-use std::collections::HashMap;
use ipnetwork::IpNetwork;
use serde::Deserialize;
+use std::collections::HashMap;
// Helper defaults kept private to the config module.
const DEFAULT_NETWORK_IPV6: Option = Some(false);
@@ -29,6 +29,8 @@ const DEFAULT_ME_D2C_FLUSH_BATCH_MAX_FRAMES: usize = 32;
const DEFAULT_ME_D2C_FLUSH_BATCH_MAX_BYTES: usize = 128 * 1024;
const DEFAULT_ME_D2C_FLUSH_BATCH_MAX_DELAY_US: u64 = 500;
const DEFAULT_ME_D2C_ACK_FLUSH_IMMEDIATE: bool = true;
+const DEFAULT_ME_QUOTA_SOFT_OVERSHOOT_BYTES: u64 = 64 * 1024;
+const DEFAULT_ME_D2C_FRAME_BUF_SHRINK_THRESHOLD_BYTES: usize = 256 * 1024;
const DEFAULT_DIRECT_RELAY_COPY_BUF_C2S_BYTES: usize = 64 * 1024;
const DEFAULT_DIRECT_RELAY_COPY_BUF_S2C_BYTES: usize = 256 * 1024;
const DEFAULT_ME_WRITER_PICK_SAMPLE_SIZE: u8 = 3;
@@ -86,13 +88,31 @@ pub(crate) fn default_replay_check_len() -> usize {
}
pub(crate) fn default_replay_window_secs() -> u64 {
- 1800
+ // Keep the replay-cache TTL wide: active probes are known to replay captured
+ // handshakes minutes-to-hours after the original connection, so a short TTL
+ // would let replayed handshakes through undetected. Override only if memory-bound.
+ 1800
}
pub(crate) fn default_handshake_timeout() -> u64 {
30
}
+pub(crate) fn default_relay_idle_policy_v2_enabled() -> bool {
+ true
+}
+
+pub(crate) fn default_relay_client_idle_soft_secs() -> u64 {
+ 120
+}
+
+pub(crate) fn default_relay_client_idle_hard_secs() -> u64 {
+ 360
+}
+
+pub(crate) fn default_relay_idle_grace_after_downstream_activity_secs() -> u64 {
+ 30
+}
+
pub(crate) fn default_connect_timeout() -> u64 {
10
}
@@ -125,10 +145,7 @@ pub(crate) fn default_weight() -> u16 {
}
pub(crate) fn default_metrics_whitelist() -> Vec {
- vec![
- "127.0.0.1/32".parse().unwrap(),
- "::1/128".parse().unwrap(),
- ]
+ vec!["127.0.0.1/32".parse().unwrap(), "::1/128".parse().unwrap()]
}
pub(crate) fn default_api_listen() -> String {
@@ -151,10 +168,18 @@ pub(crate) fn default_api_minimal_runtime_cache_ttl_ms() -> u64 {
1000
}
-pub(crate) fn default_api_runtime_edge_enabled() -> bool { false }
-pub(crate) fn default_api_runtime_edge_cache_ttl_ms() -> u64 { 1000 }
-pub(crate) fn default_api_runtime_edge_top_n() -> usize { 10 }
-pub(crate) fn default_api_runtime_edge_events_capacity() -> usize { 256 }
+pub(crate) fn default_api_runtime_edge_enabled() -> bool {
+ false
+}
+pub(crate) fn default_api_runtime_edge_cache_ttl_ms() -> u64 {
+ 1000
+}
+pub(crate) fn default_api_runtime_edge_top_n() -> usize {
+ 10
+}
+pub(crate) fn default_api_runtime_edge_events_capacity() -> usize {
+ 256
+}
pub(crate) fn default_proxy_protocol_header_timeout_ms() -> u64 {
500
@@ -364,6 +389,14 @@ pub(crate) fn default_me_d2c_ack_flush_immediate() -> bool {
DEFAULT_ME_D2C_ACK_FLUSH_IMMEDIATE
}
+pub(crate) fn default_me_quota_soft_overshoot_bytes() -> u64 {
+ DEFAULT_ME_QUOTA_SOFT_OVERSHOOT_BYTES
+}
+
+pub(crate) fn default_me_d2c_frame_buf_shrink_threshold_bytes() -> usize {
+ DEFAULT_ME_D2C_FRAME_BUF_SHRINK_THRESHOLD_BYTES
+}
+
pub(crate) fn default_direct_relay_copy_buf_c2s_bytes() -> usize {
DEFAULT_DIRECT_RELAY_COPY_BUF_C2S_BYTES
}
@@ -485,17 +518,53 @@ pub(crate) fn default_tls_full_cert_ttl_secs() -> u64 {
}
pub(crate) fn default_server_hello_delay_min_ms() -> u64 {
- 0
+ 8
}
pub(crate) fn default_server_hello_delay_max_ms() -> u64 {
- 0
+ 24
}
pub(crate) fn default_alpn_enforce() -> bool {
true
}
+pub(crate) fn default_mask_shape_hardening() -> bool {
+ true
+}
+
+pub(crate) fn default_mask_shape_hardening_aggressive_mode() -> bool {
+ false
+}
+
+pub(crate) fn default_mask_shape_bucket_floor_bytes() -> usize {
+ 512
+}
+
+pub(crate) fn default_mask_shape_bucket_cap_bytes() -> usize {
+ 4096
+}
+
+pub(crate) fn default_mask_shape_above_cap_blur() -> bool {
+ false
+}
+
+pub(crate) fn default_mask_shape_above_cap_blur_max_bytes() -> usize {
+ 512
+}
+
+pub(crate) fn default_mask_timing_normalization_enabled() -> bool {
+ false
+}
+
+pub(crate) fn default_mask_timing_normalization_floor_ms() -> u64 {
+ 0
+}
+
+pub(crate) fn default_mask_timing_normalization_ceiling_ms() -> u64 {
+ 0
+}
+
pub(crate) fn default_stun_servers() -> Vec {
vec![
"stun.l.google.com:5349".to_string(),
diff --git a/src/config/hot_reload.rs b/src/config/hot_reload.rs
index 4cf7676..e580b7f 100644
--- a/src/config/hot_reload.rs
+++ b/src/config/hot_reload.rs
@@ -31,38 +31,30 @@ use notify::{EventKind, RecursiveMode, Watcher, recommended_watcher};
use tokio::sync::{mpsc, watch};
use tracing::{error, info, warn};
-use crate::config::{
- LogLevel, MeBindStaleMode, MeFloorMode, MeSocksKdfPolicy, MeTelemetryLevel,
- MeWriterPickMode,
-};
use super::load::{LoadedConfig, ProxyConfig};
+use crate::config::{
+ LogLevel, MeBindStaleMode, MeFloorMode, MeSocksKdfPolicy, MeTelemetryLevel, MeWriterPickMode,
+};
-const HOT_RELOAD_STABLE_SNAPSHOTS: u8 = 2;
const HOT_RELOAD_DEBOUNCE: Duration = Duration::from_millis(50);
-const HOT_RELOAD_STABLE_RECHECK: Duration = Duration::from_millis(75);
// ── Hot fields ────────────────────────────────────────────────────────────────
/// Fields that are safe to swap without restarting listeners.
#[derive(Debug, Clone, PartialEq)]
pub struct HotFields {
- pub log_level: LogLevel,
- pub ad_tag: Option,
- pub dns_overrides: Vec,
- pub desync_all_full: bool,
- pub update_every_secs: u64,
- pub me_reinit_every_secs: u64,
- pub me_reinit_singleflight: bool,
+ pub log_level: LogLevel,
+ pub ad_tag: Option,
+ pub dns_overrides: Vec,
+ pub desync_all_full: bool,
+ pub update_every_secs: u64,
+ pub me_reinit_every_secs: u64,
+ pub me_reinit_singleflight: bool,
pub me_reinit_coalesce_window_ms: u64,
- pub hardswap: bool,
- pub me_pool_drain_ttl_secs: u64,
+ pub hardswap: bool,
+ pub me_pool_drain_ttl_secs: u64,
pub me_instadrain: bool,
pub me_pool_drain_threshold: u64,
- pub me_pool_drain_soft_evict_enabled: bool,
- pub me_pool_drain_soft_evict_grace_secs: u64,
- pub me_pool_drain_soft_evict_per_writer: u8,
- pub me_pool_drain_soft_evict_budget_per_core: u16,
- pub me_pool_drain_soft_evict_cooldown_ms: u64,
pub me_pool_min_fresh_ratio: f32,
pub me_reinit_drain_timeout_secs: u64,
pub me_hardswap_warmup_delay_min_ms: u64,
@@ -114,18 +106,20 @@ pub struct HotFields {
pub me_d2c_flush_batch_max_bytes: usize,
pub me_d2c_flush_batch_max_delay_us: u64,
pub me_d2c_ack_flush_immediate: bool,
+ pub me_quota_soft_overshoot_bytes: u64,
+ pub me_d2c_frame_buf_shrink_threshold_bytes: usize,
pub direct_relay_copy_buf_c2s_bytes: usize,
pub direct_relay_copy_buf_s2c_bytes: usize,
pub me_health_interval_ms_unhealthy: u64,
pub me_health_interval_ms_healthy: u64,
pub me_admission_poll_ms: u64,
pub me_warn_rate_limit_ms: u64,
- pub users: std::collections::HashMap,
- pub user_ad_tags: std::collections::HashMap,
- pub user_max_tcp_conns: std::collections::HashMap,
- pub user_expirations: std::collections::HashMap>,
- pub user_data_quota: std::collections::HashMap,
- pub user_max_unique_ips: std::collections::HashMap,
+ pub users: std::collections::HashMap,
+ pub user_ad_tags: std::collections::HashMap,
+ pub user_max_tcp_conns: std::collections::HashMap,
+ pub user_expirations: std::collections::HashMap>,
+ pub user_data_quota: std::collections::HashMap,
+ pub user_max_unique_ips: std::collections::HashMap,
pub user_max_unique_ips_global_each: usize,
pub user_max_unique_ips_mode: crate::config::UserMaxUniqueIpsMode,
pub user_max_unique_ips_window_secs: u64,
@@ -134,27 +128,18 @@ pub struct HotFields {
impl HotFields {
pub fn from_config(cfg: &ProxyConfig) -> Self {
Self {
- log_level: cfg.general.log_level.clone(),
- ad_tag: cfg.general.ad_tag.clone(),
- dns_overrides: cfg.network.dns_overrides.clone(),
- desync_all_full: cfg.general.desync_all_full,
- update_every_secs: cfg.general.effective_update_every_secs(),
- me_reinit_every_secs: cfg.general.me_reinit_every_secs,
- me_reinit_singleflight: cfg.general.me_reinit_singleflight,
+ log_level: cfg.general.log_level.clone(),
+ ad_tag: cfg.general.ad_tag.clone(),
+ dns_overrides: cfg.network.dns_overrides.clone(),
+ desync_all_full: cfg.general.desync_all_full,
+ update_every_secs: cfg.general.effective_update_every_secs(),
+ me_reinit_every_secs: cfg.general.me_reinit_every_secs,
+ me_reinit_singleflight: cfg.general.me_reinit_singleflight,
me_reinit_coalesce_window_ms: cfg.general.me_reinit_coalesce_window_ms,
- hardswap: cfg.general.hardswap,
- me_pool_drain_ttl_secs: cfg.general.me_pool_drain_ttl_secs,
+ hardswap: cfg.general.hardswap,
+ me_pool_drain_ttl_secs: cfg.general.me_pool_drain_ttl_secs,
me_instadrain: cfg.general.me_instadrain,
me_pool_drain_threshold: cfg.general.me_pool_drain_threshold,
- me_pool_drain_soft_evict_enabled: cfg.general.me_pool_drain_soft_evict_enabled,
- me_pool_drain_soft_evict_grace_secs: cfg.general.me_pool_drain_soft_evict_grace_secs,
- me_pool_drain_soft_evict_per_writer: cfg.general.me_pool_drain_soft_evict_per_writer,
- me_pool_drain_soft_evict_budget_per_core: cfg
- .general
- .me_pool_drain_soft_evict_budget_per_core,
- me_pool_drain_soft_evict_cooldown_ms: cfg
- .general
- .me_pool_drain_soft_evict_cooldown_ms,
me_pool_min_fresh_ratio: cfg.general.me_pool_min_fresh_ratio,
me_reinit_drain_timeout_secs: cfg.general.me_reinit_drain_timeout_secs,
me_hardswap_warmup_delay_min_ms: cfg.general.me_hardswap_warmup_delay_min_ms,
@@ -205,15 +190,11 @@ impl HotFields {
me_adaptive_floor_min_writers_multi_endpoint: cfg
.general
.me_adaptive_floor_min_writers_multi_endpoint,
- me_adaptive_floor_recover_grace_secs: cfg
- .general
- .me_adaptive_floor_recover_grace_secs,
+ me_adaptive_floor_recover_grace_secs: cfg.general.me_adaptive_floor_recover_grace_secs,
me_adaptive_floor_writers_per_core_total: cfg
.general
.me_adaptive_floor_writers_per_core_total,
- me_adaptive_floor_cpu_cores_override: cfg
- .general
- .me_adaptive_floor_cpu_cores_override,
+ me_adaptive_floor_cpu_cores_override: cfg.general.me_adaptive_floor_cpu_cores_override,
me_adaptive_floor_max_extra_writers_single_per_core: cfg
.general
.me_adaptive_floor_max_extra_writers_single_per_core,
@@ -232,26 +213,34 @@ impl HotFields {
me_adaptive_floor_max_warm_writers_global: cfg
.general
.me_adaptive_floor_max_warm_writers_global,
- me_route_backpressure_base_timeout_ms: cfg.general.me_route_backpressure_base_timeout_ms,
- me_route_backpressure_high_timeout_ms: cfg.general.me_route_backpressure_high_timeout_ms,
- me_route_backpressure_high_watermark_pct: cfg.general.me_route_backpressure_high_watermark_pct,
+ me_route_backpressure_base_timeout_ms: cfg
+ .general
+ .me_route_backpressure_base_timeout_ms,
+ me_route_backpressure_high_timeout_ms: cfg
+ .general
+ .me_route_backpressure_high_timeout_ms,
+ me_route_backpressure_high_watermark_pct: cfg
+ .general
+ .me_route_backpressure_high_watermark_pct,
me_reader_route_data_wait_ms: cfg.general.me_reader_route_data_wait_ms,
me_d2c_flush_batch_max_frames: cfg.general.me_d2c_flush_batch_max_frames,
me_d2c_flush_batch_max_bytes: cfg.general.me_d2c_flush_batch_max_bytes,
me_d2c_flush_batch_max_delay_us: cfg.general.me_d2c_flush_batch_max_delay_us,
me_d2c_ack_flush_immediate: cfg.general.me_d2c_ack_flush_immediate,
+ me_quota_soft_overshoot_bytes: cfg.general.me_quota_soft_overshoot_bytes,
+ me_d2c_frame_buf_shrink_threshold_bytes: cfg.general.me_d2c_frame_buf_shrink_threshold_bytes,
direct_relay_copy_buf_c2s_bytes: cfg.general.direct_relay_copy_buf_c2s_bytes,
direct_relay_copy_buf_s2c_bytes: cfg.general.direct_relay_copy_buf_s2c_bytes,
me_health_interval_ms_unhealthy: cfg.general.me_health_interval_ms_unhealthy,
me_health_interval_ms_healthy: cfg.general.me_health_interval_ms_healthy,
me_admission_poll_ms: cfg.general.me_admission_poll_ms,
me_warn_rate_limit_ms: cfg.general.me_warn_rate_limit_ms,
- users: cfg.access.users.clone(),
- user_ad_tags: cfg.access.user_ad_tags.clone(),
- user_max_tcp_conns: cfg.access.user_max_tcp_conns.clone(),
- user_expirations: cfg.access.user_expirations.clone(),
- user_data_quota: cfg.access.user_data_quota.clone(),
- user_max_unique_ips: cfg.access.user_max_unique_ips.clone(),
+ users: cfg.access.users.clone(),
+ user_ad_tags: cfg.access.user_ad_tags.clone(),
+ user_max_tcp_conns: cfg.access.user_max_tcp_conns.clone(),
+ user_expirations: cfg.access.user_expirations.clone(),
+ user_data_quota: cfg.access.user_data_quota.clone(),
+ user_max_unique_ips: cfg.access.user_max_unique_ips.clone(),
user_max_unique_ips_global_each: cfg.access.user_max_unique_ips_global_each,
user_max_unique_ips_mode: cfg.access.user_max_unique_ips_mode,
user_max_unique_ips_window_secs: cfg.access.user_max_unique_ips_window_secs,
@@ -346,16 +335,12 @@ impl WatchManifest {
#[derive(Debug, Default)]
struct ReloadState {
applied_snapshot_hash: Option,
- candidate_snapshot_hash: Option,
- candidate_hits: u8,
}
impl ReloadState {
fn new(applied_snapshot_hash: Option) -> Self {
Self {
applied_snapshot_hash,
- candidate_snapshot_hash: None,
- candidate_hits: 0,
}
}
@@ -363,32 +348,8 @@ impl ReloadState {
self.applied_snapshot_hash == Some(hash)
}
- fn observe_candidate(&mut self, hash: u64) -> u8 {
- if self.candidate_snapshot_hash == Some(hash) {
- self.candidate_hits = self.candidate_hits.saturating_add(1);
- } else {
- self.candidate_snapshot_hash = Some(hash);
- self.candidate_hits = 1;
- }
- self.candidate_hits
- }
-
- fn reset_candidate(&mut self) {
- self.candidate_snapshot_hash = None;
- self.candidate_hits = 0;
- }
-
fn mark_applied(&mut self, hash: u64) {
self.applied_snapshot_hash = Some(hash);
- self.reset_candidate();
- }
-
- fn pending_candidate(&self) -> Option<(u64, u8)> {
- let hash = self.candidate_snapshot_hash?;
- if self.candidate_hits < HOT_RELOAD_STABLE_SNAPSHOTS {
- return Some((hash, self.candidate_hits));
- }
- None
}
}
@@ -481,15 +442,6 @@ fn overlay_hot_fields(old: &ProxyConfig, new: &ProxyConfig) -> ProxyConfig {
cfg.general.me_pool_drain_ttl_secs = new.general.me_pool_drain_ttl_secs;
cfg.general.me_instadrain = new.general.me_instadrain;
cfg.general.me_pool_drain_threshold = new.general.me_pool_drain_threshold;
- cfg.general.me_pool_drain_soft_evict_enabled = new.general.me_pool_drain_soft_evict_enabled;
- cfg.general.me_pool_drain_soft_evict_grace_secs =
- new.general.me_pool_drain_soft_evict_grace_secs;
- cfg.general.me_pool_drain_soft_evict_per_writer =
- new.general.me_pool_drain_soft_evict_per_writer;
- cfg.general.me_pool_drain_soft_evict_budget_per_core =
- new.general.me_pool_drain_soft_evict_budget_per_core;
- cfg.general.me_pool_drain_soft_evict_cooldown_ms =
- new.general.me_pool_drain_soft_evict_cooldown_ms;
cfg.general.me_pool_min_fresh_ratio = new.general.me_pool_min_fresh_ratio;
cfg.general.me_reinit_drain_timeout_secs = new.general.me_reinit_drain_timeout_secs;
cfg.general.me_hardswap_warmup_delay_min_ms = new.general.me_hardswap_warmup_delay_min_ms;
@@ -536,10 +488,14 @@ fn overlay_hot_fields(old: &ProxyConfig, new: &ProxyConfig) -> ProxyConfig {
new.general.me_adaptive_floor_writers_per_core_total;
cfg.general.me_adaptive_floor_cpu_cores_override =
new.general.me_adaptive_floor_cpu_cores_override;
- cfg.general.me_adaptive_floor_max_extra_writers_single_per_core =
- new.general.me_adaptive_floor_max_extra_writers_single_per_core;
- cfg.general.me_adaptive_floor_max_extra_writers_multi_per_core =
- new.general.me_adaptive_floor_max_extra_writers_multi_per_core;
+ cfg.general
+ .me_adaptive_floor_max_extra_writers_single_per_core = new
+ .general
+ .me_adaptive_floor_max_extra_writers_single_per_core;
+ cfg.general
+ .me_adaptive_floor_max_extra_writers_multi_per_core = new
+ .general
+ .me_adaptive_floor_max_extra_writers_multi_per_core;
cfg.general.me_adaptive_floor_max_active_writers_per_core =
new.general.me_adaptive_floor_max_active_writers_per_core;
cfg.general.me_adaptive_floor_max_warm_writers_per_core =
@@ -559,6 +515,9 @@ fn overlay_hot_fields(old: &ProxyConfig, new: &ProxyConfig) -> ProxyConfig {
cfg.general.me_d2c_flush_batch_max_bytes = new.general.me_d2c_flush_batch_max_bytes;
cfg.general.me_d2c_flush_batch_max_delay_us = new.general.me_d2c_flush_batch_max_delay_us;
cfg.general.me_d2c_ack_flush_immediate = new.general.me_d2c_ack_flush_immediate;
+ cfg.general.me_quota_soft_overshoot_bytes = new.general.me_quota_soft_overshoot_bytes;
+ cfg.general.me_d2c_frame_buf_shrink_threshold_bytes =
+ new.general.me_d2c_frame_buf_shrink_threshold_bytes;
cfg.general.direct_relay_copy_buf_c2s_bytes = new.general.direct_relay_copy_buf_c2s_bytes;
cfg.general.direct_relay_copy_buf_s2c_bytes = new.general.direct_relay_copy_buf_s2c_bytes;
cfg.general.me_health_interval_ms_unhealthy = new.general.me_health_interval_ms_unhealthy;
@@ -598,8 +557,7 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
|| old.server.api.minimal_runtime_cache_ttl_ms
!= new.server.api.minimal_runtime_cache_ttl_ms
|| old.server.api.runtime_edge_enabled != new.server.api.runtime_edge_enabled
- || old.server.api.runtime_edge_cache_ttl_ms
- != new.server.api.runtime_edge_cache_ttl_ms
+ || old.server.api.runtime_edge_cache_ttl_ms != new.server.api.runtime_edge_cache_ttl_ms
|| old.server.api.runtime_edge_top_n != new.server.api.runtime_edge_top_n
|| old.server.api.runtime_edge_events_capacity
!= new.server.api.runtime_edge_events_capacity
@@ -615,8 +573,6 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
|| old.server.listen_tcp != new.server.listen_tcp
|| old.server.listen_unix_sock != new.server.listen_unix_sock
|| old.server.listen_unix_sock_perm != new.server.listen_unix_sock_perm
- || old.server.max_connections != new.server.max_connections
- || old.server.accept_permit_timeout_ms != new.server.accept_permit_timeout_ms
{
warned = true;
warn!("config reload: server listener settings changed; restart required");
@@ -637,6 +593,19 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
|| old.censorship.tls_full_cert_ttl_secs != new.censorship.tls_full_cert_ttl_secs
|| old.censorship.alpn_enforce != new.censorship.alpn_enforce
|| old.censorship.mask_proxy_protocol != new.censorship.mask_proxy_protocol
+ || old.censorship.mask_shape_hardening != new.censorship.mask_shape_hardening
+ || old.censorship.mask_shape_bucket_floor_bytes
+ != new.censorship.mask_shape_bucket_floor_bytes
+ || old.censorship.mask_shape_bucket_cap_bytes != new.censorship.mask_shape_bucket_cap_bytes
+ || old.censorship.mask_shape_above_cap_blur != new.censorship.mask_shape_above_cap_blur
+ || old.censorship.mask_shape_above_cap_blur_max_bytes
+ != new.censorship.mask_shape_above_cap_blur_max_bytes
+ || old.censorship.mask_timing_normalization_enabled
+ != new.censorship.mask_timing_normalization_enabled
+ || old.censorship.mask_timing_normalization_floor_ms
+ != new.censorship.mask_timing_normalization_floor_ms
+ || old.censorship.mask_timing_normalization_ceiling_ms
+ != new.censorship.mask_timing_normalization_ceiling_ms
{
warned = true;
warn!("config reload: censorship settings changed; restart required");
@@ -677,9 +646,6 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
}
if old.general.me_route_no_writer_mode != new.general.me_route_no_writer_mode
|| old.general.me_route_no_writer_wait_ms != new.general.me_route_no_writer_wait_ms
- || old.general.me_route_hybrid_max_wait_ms != new.general.me_route_hybrid_max_wait_ms
- || old.general.me_route_blocking_send_timeout_ms
- != new.general.me_route_blocking_send_timeout_ms
|| old.general.me_route_inline_recovery_attempts
!= new.general.me_route_inline_recovery_attempts
|| old.general.me_route_inline_recovery_wait_ms
@@ -688,10 +654,6 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig, non_hot_changed: b
warned = true;
warn!("config reload: general.me_route_no_writer_* changed; restart required");
}
- if old.general.me_c2me_send_timeout_ms != new.general.me_c2me_send_timeout_ms {
- warned = true;
- warn!("config reload: general.me_c2me_send_timeout_ms changed; restart required");
- }
if old.general.unknown_dc_log_path != new.general.unknown_dc_log_path
|| old.general.unknown_dc_file_log_enabled != new.general.unknown_dc_file_log_enabled
{
@@ -886,25 +848,6 @@ fn log_changes(
old_hot.me_pool_drain_threshold, new_hot.me_pool_drain_threshold,
);
}
- if old_hot.me_pool_drain_soft_evict_enabled != new_hot.me_pool_drain_soft_evict_enabled
- || old_hot.me_pool_drain_soft_evict_grace_secs
- != new_hot.me_pool_drain_soft_evict_grace_secs
- || old_hot.me_pool_drain_soft_evict_per_writer
- != new_hot.me_pool_drain_soft_evict_per_writer
- || old_hot.me_pool_drain_soft_evict_budget_per_core
- != new_hot.me_pool_drain_soft_evict_budget_per_core
- || old_hot.me_pool_drain_soft_evict_cooldown_ms
- != new_hot.me_pool_drain_soft_evict_cooldown_ms
- {
- info!(
- "config reload: me_pool_drain_soft_evict: enabled={} grace={}s per_writer={} budget_per_core={} cooldown={}ms",
- new_hot.me_pool_drain_soft_evict_enabled,
- new_hot.me_pool_drain_soft_evict_grace_secs,
- new_hot.me_pool_drain_soft_evict_per_writer,
- new_hot.me_pool_drain_soft_evict_budget_per_core,
- new_hot.me_pool_drain_soft_evict_cooldown_ms
- );
- }
if (old_hot.me_pool_min_fresh_ratio - new_hot.me_pool_min_fresh_ratio).abs() > f32::EPSILON {
info!(
@@ -938,8 +881,7 @@ fn log_changes(
{
info!(
"config reload: me_bind_stale: mode={:?} ttl={}s",
- new_hot.me_bind_stale_mode,
- new_hot.me_bind_stale_ttl_secs
+ new_hot.me_bind_stale_mode, new_hot.me_bind_stale_ttl_secs
);
}
if old_hot.me_secret_atomic_snapshot != new_hot.me_secret_atomic_snapshot
@@ -1019,8 +961,7 @@ fn log_changes(
if old_hot.me_socks_kdf_policy != new_hot.me_socks_kdf_policy {
info!(
"config reload: me_socks_kdf_policy: {:?} → {:?}",
- old_hot.me_socks_kdf_policy,
- new_hot.me_socks_kdf_policy,
+ old_hot.me_socks_kdf_policy, new_hot.me_socks_kdf_policy,
);
}
@@ -1074,8 +1015,7 @@ fn log_changes(
|| old_hot.me_route_backpressure_high_watermark_pct
!= new_hot.me_route_backpressure_high_watermark_pct
|| old_hot.me_reader_route_data_wait_ms != new_hot.me_reader_route_data_wait_ms
- || old_hot.me_health_interval_ms_unhealthy
- != new_hot.me_health_interval_ms_unhealthy
+ || old_hot.me_health_interval_ms_unhealthy != new_hot.me_health_interval_ms_unhealthy
|| old_hot.me_health_interval_ms_healthy != new_hot.me_health_interval_ms_healthy
|| old_hot.me_admission_poll_ms != new_hot.me_admission_poll_ms
|| old_hot.me_warn_rate_limit_ms != new_hot.me_warn_rate_limit_ms
@@ -1097,34 +1037,47 @@ fn log_changes(
|| old_hot.me_d2c_flush_batch_max_bytes != new_hot.me_d2c_flush_batch_max_bytes
|| old_hot.me_d2c_flush_batch_max_delay_us != new_hot.me_d2c_flush_batch_max_delay_us
|| old_hot.me_d2c_ack_flush_immediate != new_hot.me_d2c_ack_flush_immediate
+ || old_hot.me_quota_soft_overshoot_bytes != new_hot.me_quota_soft_overshoot_bytes
+ || old_hot.me_d2c_frame_buf_shrink_threshold_bytes
+ != new_hot.me_d2c_frame_buf_shrink_threshold_bytes
|| old_hot.direct_relay_copy_buf_c2s_bytes != new_hot.direct_relay_copy_buf_c2s_bytes
|| old_hot.direct_relay_copy_buf_s2c_bytes != new_hot.direct_relay_copy_buf_s2c_bytes
{
info!(
- "config reload: relay_tuning: me_d2c_frames={} me_d2c_bytes={} me_d2c_delay_us={} me_ack_flush_immediate={} direct_buf_c2s={} direct_buf_s2c={}",
+ "config reload: relay_tuning: me_d2c_frames={} me_d2c_bytes={} me_d2c_delay_us={} me_ack_flush_immediate={} me_quota_soft_overshoot_bytes={} me_d2c_frame_buf_shrink_threshold_bytes={} direct_buf_c2s={} direct_buf_s2c={}",
new_hot.me_d2c_flush_batch_max_frames,
new_hot.me_d2c_flush_batch_max_bytes,
new_hot.me_d2c_flush_batch_max_delay_us,
new_hot.me_d2c_ack_flush_immediate,
+ new_hot.me_quota_soft_overshoot_bytes,
+ new_hot.me_d2c_frame_buf_shrink_threshold_bytes,
new_hot.direct_relay_copy_buf_c2s_bytes,
new_hot.direct_relay_copy_buf_s2c_bytes,
);
}
if old_hot.users != new_hot.users {
- let mut added: Vec<&String> = new_hot.users.keys()
+ let mut added: Vec<&String> = new_hot
+ .users
+ .keys()
.filter(|u| !old_hot.users.contains_key(*u))
.collect();
added.sort();
- let mut removed: Vec<&String> = old_hot.users.keys()
+ let mut removed: Vec<&String> = old_hot
+ .users
+ .keys()
.filter(|u| !new_hot.users.contains_key(*u))
.collect();
removed.sort();
- let mut changed: Vec<&String> = new_hot.users.keys()
+ let mut changed: Vec<&String> = new_hot
+ .users
+ .keys()
.filter(|u| {
- old_hot.users.get(*u)
+ old_hot
+ .users
+ .get(*u)
.map(|s| s != &new_hot.users[*u])
.unwrap_or(false)
})
@@ -1134,10 +1087,18 @@ fn log_changes(
if !added.is_empty() {
info!(
"config reload: users added: [{}]",
- added.iter().map(|s| s.as_str()).collect::<Vec<_>>().join(", ")
+ added
+ .iter()
+ .map(|s| s.as_str())
+ .collect::<Vec<_>>()
+ .join(", ")
);
let host = resolve_link_host(new_cfg, detected_ip_v4, detected_ip_v6);
- let port = new_cfg.general.links.public_port.unwrap_or(new_cfg.server.port);
+ let port = new_cfg
+ .general
+ .links
+ .public_port
+ .unwrap_or(new_cfg.server.port);
for user in &added {
if let Some(secret) = new_hot.users.get(*user) {
print_user_links(user, secret, &host, port, new_cfg);
@@ -1147,13 +1108,21 @@ fn log_changes(
if !removed.is_empty() {
info!(
"config reload: users removed: [{}]",
- removed.iter().map(|s| s.as_str()).collect::<Vec<_>>().join(", ")
+ removed
+ .iter()
+ .map(|s| s.as_str())
+ .collect::<Vec<_>>()
+ .join(", ")
);
}
if !changed.is_empty() {
info!(
"config reload: users secret changed: [{}]",
- changed.iter().map(|s| s.as_str()).collect::<Vec<_>>().join(", ")
+ changed
+ .iter()
+ .map(|s| s.as_str())
+ .collect::<Vec<_>>()
+ .join(", ")
);
}
}
@@ -1184,8 +1153,7 @@ fn log_changes(
}
if old_hot.user_max_unique_ips_global_each != new_hot.user_max_unique_ips_global_each
|| old_hot.user_max_unique_ips_mode != new_hot.user_max_unique_ips_mode
- || old_hot.user_max_unique_ips_window_secs
- != new_hot.user_max_unique_ips_window_secs
+ || old_hot.user_max_unique_ips_window_secs != new_hot.user_max_unique_ips_window_secs
{
info!(
"config reload: user_max_unique_ips policy global_each={} mode={:?} window={}s",
@@ -1208,7 +1176,6 @@ fn reload_config(
let loaded = match ProxyConfig::load_with_metadata(config_path) {
Ok(loaded) => loaded,
Err(e) => {
- reload_state.reset_candidate();
error!("config reload: failed to parse {:?}: {}", config_path, e);
return None;
}
@@ -1221,8 +1188,10 @@ fn reload_config(
let next_manifest = WatchManifest::from_source_files(&source_files);
if let Err(e) = new_cfg.validate() {
- reload_state.reset_candidate();
- error!("config reload: validation failed: {}; keeping old config", e);
+ error!(
+ "config reload: validation failed: {}; keeping old config",
+ e
+ );
return Some(next_manifest);
}
@@ -1230,17 +1199,6 @@ fn reload_config(
return Some(next_manifest);
}
- let candidate_hits = reload_state.observe_candidate(rendered_hash);
- if candidate_hits < HOT_RELOAD_STABLE_SNAPSHOTS {
- info!(
- snapshot_hash = rendered_hash,
- candidate_hits,
- required_hits = HOT_RELOAD_STABLE_SNAPSHOTS,
- "config reload: candidate snapshot observed but not stable yet"
- );
- return Some(next_manifest);
- }
-
let old_cfg = config_tx.borrow().clone();
let applied_cfg = overlay_hot_fields(&old_cfg, &new_cfg);
let old_hot = HotFields::from_config(&old_cfg);
@@ -1260,7 +1218,6 @@ fn reload_config(
if old_hot.dns_overrides != applied_hot.dns_overrides
&& let Err(e) = crate::network::dns_overrides::install_entries(&applied_hot.dns_overrides)
{
- reload_state.reset_candidate();
error!(
"config reload: invalid network.dns_overrides: {}; keeping old config",
e
@@ -1281,73 +1238,6 @@ fn reload_config(
Some(next_manifest)
}
-async fn reload_with_internal_stable_rechecks(
- config_path: &PathBuf,
- config_tx: &watch::Sender>,
- log_tx: &watch::Sender,
- detected_ip_v4: Option,
- detected_ip_v6: Option,
- reload_state: &mut ReloadState,
-) -> Option {
- let mut next_manifest = reload_config(
- config_path,
- config_tx,
- log_tx,
- detected_ip_v4,
- detected_ip_v6,
- reload_state,
- );
- let mut rechecks_left = HOT_RELOAD_STABLE_SNAPSHOTS.saturating_sub(1);
-
- while rechecks_left > 0 {
- let Some((snapshot_hash, candidate_hits)) = reload_state.pending_candidate() else {
- break;
- };
-
- info!(
- snapshot_hash,
- candidate_hits,
- required_hits = HOT_RELOAD_STABLE_SNAPSHOTS,
- rechecks_left,
- recheck_delay_ms = HOT_RELOAD_STABLE_RECHECK.as_millis(),
- "config reload: scheduling internal stable recheck"
- );
- tokio::time::sleep(HOT_RELOAD_STABLE_RECHECK).await;
-
- let recheck_manifest = reload_config(
- config_path,
- config_tx,
- log_tx,
- detected_ip_v4,
- detected_ip_v6,
- reload_state,
- );
- if recheck_manifest.is_some() {
- next_manifest = recheck_manifest;
- }
-
- if reload_state.is_applied(snapshot_hash) {
- info!(
- snapshot_hash,
- "config reload: applied after internal stable recheck"
- );
- break;
- }
-
- if reload_state.pending_candidate().is_none() {
- info!(
- snapshot_hash,
- "config reload: internal stable recheck aborted"
- );
- break;
- }
-
- rechecks_left = rechecks_left.saturating_sub(1);
- }
-
- next_manifest
-}
-
// ── Public API ────────────────────────────────────────────────────────────────
/// Spawn the hot-reload watcher task.
@@ -1366,7 +1256,7 @@ pub fn spawn_config_watcher(
) -> (watch::Receiver>, watch::Receiver) {
let initial_level = initial.general.log_level.clone();
let (config_tx, config_rx) = watch::channel(initial);
- let (log_tx, log_rx) = watch::channel(initial_level);
+ let (log_tx, log_rx) = watch::channel(initial_level);
let config_path = normalize_watch_path(&config_path);
let initial_loaded = ProxyConfig::load_with_metadata(&config_path).ok();
@@ -1383,25 +1273,29 @@ pub fn spawn_config_watcher(
let tx_inotify = notify_tx.clone();
let manifest_for_inotify = manifest_state.clone();
- let mut inotify_watcher = match recommended_watcher(move |res: notify::Result<Event>| {
- let Ok(event) = res else { return };
- if !matches!(event.kind, EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_)) {
- return;
- }
- let is_our_file = manifest_for_inotify
- .read()
- .map(|manifest| manifest.matches_event_paths(&event.paths))
- .unwrap_or(false);
- if is_our_file {
- let _ = tx_inotify.try_send(());
- }
- }) {
- Ok(watcher) => Some(watcher),
- Err(e) => {
- warn!("config watcher: inotify unavailable: {}", e);
- None
- }
- };
+ let mut inotify_watcher =
+ match recommended_watcher(move |res: notify::Result<Event>| {
+ let Ok(event) = res else { return };
+ if !matches!(
+ event.kind,
+ EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_)
+ ) {
+ return;
+ }
+ let is_our_file = manifest_for_inotify
+ .read()
+ .map(|manifest| manifest.matches_event_paths(&event.paths))
+ .unwrap_or(false);
+ if is_our_file {
+ let _ = tx_inotify.try_send(());
+ }
+ }) {
+ Ok(watcher) => Some(watcher),
+ Err(e) => {
+ warn!("config watcher: inotify unavailable: {}", e);
+ None
+ }
+ };
apply_watch_manifest(
inotify_watcher.as_mut(),
Option::<&mut notify::poll::PollWatcher>::None,
@@ -1417,7 +1311,10 @@ pub fn spawn_config_watcher(
let mut poll_watcher = match notify::poll::PollWatcher::new(
move |res: notify::Result<Event>| {
let Ok(event) = res else { return };
- if !matches!(event.kind, EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_)) {
+ if !matches!(
+ event.kind,
+ EventKind::Modify(_) | EventKind::Create(_) | EventKind::Remove(_)
+ ) {
return;
}
let is_our_file = manifest_for_poll
@@ -1465,22 +1362,36 @@ pub fn spawn_config_watcher(
}
}
#[cfg(not(unix))]
- if notify_rx.recv().await.is_none() { break; }
+ if notify_rx.recv().await.is_none() {
+ break;
+ }
// Debounce: drain extra events that arrive within a short quiet window.
tokio::time::sleep(HOT_RELOAD_DEBOUNCE).await;
while notify_rx.try_recv().is_ok() {}
- if let Some(next_manifest) = reload_with_internal_stable_rechecks(
+ let mut next_manifest = reload_config(
&config_path,
&config_tx,
&log_tx,
detected_ip_v4,
detected_ip_v6,
&mut reload_state,
- )
- .await
- {
+ );
+ if next_manifest.is_none() {
+ tokio::time::sleep(HOT_RELOAD_DEBOUNCE).await;
+ while notify_rx.try_recv().is_ok() {}
+ next_manifest = reload_config(
+ &config_path,
+ &config_tx,
+ &log_tx,
+ detected_ip_v4,
+ detected_ip_v6,
+ &mut reload_state,
+ );
+ }
+
+ if let Some(next_manifest) = next_manifest {
apply_watch_manifest(
inotify_watcher.as_mut(),
poll_watcher.as_mut(),
@@ -1555,7 +1466,10 @@ mod tests {
new.server.port = old.server.port.saturating_add(1);
let applied = overlay_hot_fields(&old, &new);
- assert_eq!(HotFields::from_config(&old), HotFields::from_config(&applied));
+ assert_eq!(
+ HotFields::from_config(&old),
+ HotFields::from_config(&applied)
+ );
assert_eq!(applied.server.port, old.server.port);
}
@@ -1574,7 +1488,10 @@ mod tests {
applied.general.me_bind_stale_mode,
new.general.me_bind_stale_mode
);
- assert_ne!(HotFields::from_config(&old), HotFields::from_config(&applied));
+ assert_ne!(
+ HotFields::from_config(&old),
+ HotFields::from_config(&applied)
+ );
}
#[test]
@@ -1588,7 +1505,10 @@ mod tests {
applied.general.me_keepalive_interval_secs,
old.general.me_keepalive_interval_secs
);
- assert_eq!(HotFields::from_config(&old), HotFields::from_config(&applied));
+ assert_eq!(
+ HotFields::from_config(&old),
+ HotFields::from_config(&applied)
+ );
}
#[test]
@@ -1600,69 +1520,35 @@ mod tests {
let applied = overlay_hot_fields(&old, &new);
assert_eq!(applied.general.hardswap, new.general.hardswap);
- assert_eq!(applied.general.use_middle_proxy, old.general.use_middle_proxy);
+ assert_eq!(
+ applied.general.use_middle_proxy,
+ old.general.use_middle_proxy
+ );
assert!(!config_equal(&applied, &new));
}
#[test]
- fn reload_requires_stable_snapshot_before_hot_apply() {
+ fn reload_applies_hot_change_on_first_observed_snapshot() {
let initial_tag = "11111111111111111111111111111111";
let final_tag = "22222222222222222222222222222222";
let path = temp_config_path("telemt_hot_reload_stable");
write_reload_config(&path, Some(initial_tag), None);
let initial_cfg = Arc::new(ProxyConfig::load(&path).unwrap());
- let initial_hash = ProxyConfig::load_with_metadata(&path).unwrap().rendered_hash;
- let (config_tx, _config_rx) = watch::channel(initial_cfg.clone());
- let (log_tx, _log_rx) = watch::channel(initial_cfg.general.log_level.clone());
- let mut reload_state = ReloadState::new(Some(initial_hash));
-
- write_reload_config(&path, None, None);
- reload_config(&path, &config_tx, &log_tx, None, None, &mut reload_state).unwrap();
- assert_eq!(
- config_tx.borrow().general.ad_tag.as_deref(),
- Some(initial_tag)
- );
-
- write_reload_config(&path, Some(final_tag), None);
- reload_config(&path, &config_tx, &log_tx, None, None, &mut reload_state).unwrap();
- assert_eq!(
- config_tx.borrow().general.ad_tag.as_deref(),
- Some(initial_tag)
- );
-
- reload_config(&path, &config_tx, &log_tx, None, None, &mut reload_state).unwrap();
- assert_eq!(config_tx.borrow().general.ad_tag.as_deref(), Some(final_tag));
-
- let _ = std::fs::remove_file(path);
- }
-
- #[tokio::test]
- async fn reload_cycle_applies_after_single_external_event() {
- let initial_tag = "10101010101010101010101010101010";
- let final_tag = "20202020202020202020202020202020";
- let path = temp_config_path("telemt_hot_reload_single_event");
-
- write_reload_config(&path, Some(initial_tag), None);
- let initial_cfg = Arc::new(ProxyConfig::load(&path).unwrap());
- let initial_hash = ProxyConfig::load_with_metadata(&path).unwrap().rendered_hash;
+ let initial_hash = ProxyConfig::load_with_metadata(&path)
+ .unwrap()
+ .rendered_hash;
let (config_tx, _config_rx) = watch::channel(initial_cfg.clone());
let (log_tx, _log_rx) = watch::channel(initial_cfg.general.log_level.clone());
let mut reload_state = ReloadState::new(Some(initial_hash));
write_reload_config(&path, Some(final_tag), None);
- reload_with_internal_stable_rechecks(
- &path,
- &config_tx,
- &log_tx,
- None,
- None,
- &mut reload_state,
- )
- .await
- .unwrap();
+ reload_config(&path, &config_tx, &log_tx, None, None, &mut reload_state).unwrap();
+ assert_eq!(
+ config_tx.borrow().general.ad_tag.as_deref(),
+ Some(final_tag)
+ );
- assert_eq!(config_tx.borrow().general.ad_tag.as_deref(), Some(final_tag));
let _ = std::fs::remove_file(path);
}
@@ -1674,14 +1560,15 @@ mod tests {
write_reload_config(&path, Some(initial_tag), None);
let initial_cfg = Arc::new(ProxyConfig::load(&path).unwrap());
- let initial_hash = ProxyConfig::load_with_metadata(&path).unwrap().rendered_hash;
+ let initial_hash = ProxyConfig::load_with_metadata(&path)
+ .unwrap()
+ .rendered_hash;
let (config_tx, _config_rx) = watch::channel(initial_cfg.clone());
let (log_tx, _log_rx) = watch::channel(initial_cfg.general.log_level.clone());
let mut reload_state = ReloadState::new(Some(initial_hash));
write_reload_config(&path, Some(final_tag), Some(initial_cfg.server.port + 1));
reload_config(&path, &config_tx, &log_tx, None, None, &mut reload_state).unwrap();
- reload_config(&path, &config_tx, &log_tx, None, None, &mut reload_state).unwrap();
let applied = config_tx.borrow().clone();
assert_eq!(applied.general.ad_tag.as_deref(), Some(final_tag));
@@ -1689,4 +1576,36 @@ mod tests {
let _ = std::fs::remove_file(path);
}
+
+ #[test]
+ fn reload_recovers_after_parse_error_on_next_attempt() {
+ let initial_tag = "cccccccccccccccccccccccccccccccc";
+ let final_tag = "dddddddddddddddddddddddddddddddd";
+ let path = temp_config_path("telemt_hot_reload_parse_recovery");
+
+ write_reload_config(&path, Some(initial_tag), None);
+ let initial_cfg = Arc::new(ProxyConfig::load(&path).unwrap());
+ let initial_hash = ProxyConfig::load_with_metadata(&path)
+ .unwrap()
+ .rendered_hash;
+ let (config_tx, _config_rx) = watch::channel(initial_cfg.clone());
+ let (log_tx, _log_rx) = watch::channel(initial_cfg.general.log_level.clone());
+ let mut reload_state = ReloadState::new(Some(initial_hash));
+
+ std::fs::write(&path, "[access.users\nuser = \"broken\"\n").unwrap();
+ assert!(reload_config(&path, &config_tx, &log_tx, None, None, &mut reload_state).is_none());
+ assert_eq!(
+ config_tx.borrow().general.ad_tag.as_deref(),
+ Some(initial_tag)
+ );
+
+ write_reload_config(&path, Some(final_tag), None);
+ reload_config(&path, &config_tx, &log_tx, None, None, &mut reload_state).unwrap();
+ assert_eq!(
+ config_tx.borrow().general.ad_tag.as_deref(),
+ Some(final_tag)
+ );
+
+ let _ = std::fs::remove_file(path);
+ }
}
diff --git a/src/config/load.rs b/src/config/load.rs
index c797637..bf6d036 100644
--- a/src/config/load.rs
+++ b/src/config/load.rs
@@ -5,7 +5,7 @@ use std::hash::{DefaultHasher, Hash, Hasher};
use std::net::{IpAddr, SocketAddr};
use std::path::{Path, PathBuf};
-use rand::Rng;
+use rand::RngExt;
use serde::{Deserialize, Serialize};
use shadowsocks::config::ServerConfig as ShadowsocksServerConfig;
use tracing::warn;
@@ -360,6 +360,131 @@ impl ProxyConfig {
));
}
+ if config.timeouts.client_handshake == 0 {
+ return Err(ProxyError::Config(
+ "timeouts.client_handshake must be > 0".to_string(),
+ ));
+ }
+
+ let handshake_timeout_ms = config
+ .timeouts
+ .client_handshake
+ .checked_mul(1000)
+ .ok_or_else(|| {
+ ProxyError::Config(
+ "timeouts.client_handshake is too large to validate milliseconds budget"
+ .to_string(),
+ )
+ })?;
+
+ if config.censorship.server_hello_delay_max_ms >= handshake_timeout_ms {
+ return Err(ProxyError::Config(
+ "censorship.server_hello_delay_max_ms must be < timeouts.client_handshake * 1000"
+ .to_string(),
+ ));
+ }
+
+ if config.censorship.mask_shape_bucket_floor_bytes == 0 {
+ return Err(ProxyError::Config(
+ "censorship.mask_shape_bucket_floor_bytes must be > 0".to_string(),
+ ));
+ }
+
+ if config.censorship.mask_shape_bucket_cap_bytes
+ < config.censorship.mask_shape_bucket_floor_bytes
+ {
+ return Err(ProxyError::Config(
+ "censorship.mask_shape_bucket_cap_bytes must be >= censorship.mask_shape_bucket_floor_bytes"
+ .to_string(),
+ ));
+ }
+
+ if config.censorship.mask_shape_above_cap_blur && !config.censorship.mask_shape_hardening {
+ return Err(ProxyError::Config(
+ "censorship.mask_shape_above_cap_blur requires censorship.mask_shape_hardening = true"
+ .to_string(),
+ ));
+ }
+
+ if config.censorship.mask_shape_hardening_aggressive_mode
+ && !config.censorship.mask_shape_hardening
+ {
+ return Err(ProxyError::Config(
+ "censorship.mask_shape_hardening_aggressive_mode requires censorship.mask_shape_hardening = true"
+ .to_string(),
+ ));
+ }
+
+ if config.censorship.mask_shape_above_cap_blur
+ && config.censorship.mask_shape_above_cap_blur_max_bytes == 0
+ {
+ return Err(ProxyError::Config(
+ "censorship.mask_shape_above_cap_blur_max_bytes must be > 0 when censorship.mask_shape_above_cap_blur is enabled"
+ .to_string(),
+ ));
+ }
+
+ if config.censorship.mask_shape_above_cap_blur_max_bytes > 1_048_576 {
+ return Err(ProxyError::Config(
+ "censorship.mask_shape_above_cap_blur_max_bytes must be <= 1048576".to_string(),
+ ));
+ }
+
+ if config.censorship.mask_timing_normalization_ceiling_ms
+ < config.censorship.mask_timing_normalization_floor_ms
+ {
+ return Err(ProxyError::Config(
+ "censorship.mask_timing_normalization_ceiling_ms must be >= censorship.mask_timing_normalization_floor_ms"
+ .to_string(),
+ ));
+ }
+
+ if config.censorship.mask_timing_normalization_enabled
+ && config.censorship.mask_timing_normalization_floor_ms == 0
+ {
+ return Err(ProxyError::Config(
+ "censorship.mask_timing_normalization_floor_ms must be > 0 when censorship.mask_timing_normalization_enabled is true"
+ .to_string(),
+ ));
+ }
+
+ if config.censorship.mask_timing_normalization_ceiling_ms > 60_000 {
+ return Err(ProxyError::Config(
+ "censorship.mask_timing_normalization_ceiling_ms must be <= 60000".to_string(),
+ ));
+ }
+
+ if config.timeouts.relay_client_idle_soft_secs == 0 {
+ return Err(ProxyError::Config(
+ "timeouts.relay_client_idle_soft_secs must be > 0".to_string(),
+ ));
+ }
+
+ if config.timeouts.relay_client_idle_hard_secs == 0 {
+ return Err(ProxyError::Config(
+ "timeouts.relay_client_idle_hard_secs must be > 0".to_string(),
+ ));
+ }
+
+ if config.timeouts.relay_client_idle_hard_secs < config.timeouts.relay_client_idle_soft_secs
+ {
+ return Err(ProxyError::Config(
+ "timeouts.relay_client_idle_hard_secs must be >= timeouts.relay_client_idle_soft_secs"
+ .to_string(),
+ ));
+ }
+
+ if config
+ .timeouts
+ .relay_idle_grace_after_downstream_activity_secs
+ > config.timeouts.relay_client_idle_hard_secs
+ {
+ return Err(ProxyError::Config(
+ "timeouts.relay_idle_grace_after_downstream_activity_secs must be <= timeouts.relay_client_idle_hard_secs"
+ .to_string(),
+ ));
+ }
+
if config.general.me_writer_cmd_channel_capacity == 0 {
return Err(ProxyError::Config(
"general.me_writer_cmd_channel_capacity must be > 0".to_string(),
@@ -408,6 +533,19 @@ impl ProxyConfig {
));
}
+ if config.general.me_quota_soft_overshoot_bytes > 16 * 1024 * 1024 {
+ return Err(ProxyError::Config(
+ "general.me_quota_soft_overshoot_bytes must be within [0, 16777216]".to_string(),
+ ));
+ }
+
+ if !(4096..=16 * 1024 * 1024).contains(&config.general.me_d2c_frame_buf_shrink_threshold_bytes) {
+ return Err(ProxyError::Config(
+ "general.me_d2c_frame_buf_shrink_threshold_bytes must be within [4096, 16777216]"
+ .to_string(),
+ ));
+ }
+
if !(4096..=1024 * 1024).contains(&config.general.direct_relay_copy_buf_c2s_bytes) {
return Err(ProxyError::Config(
"general.direct_relay_copy_buf_c2s_bytes must be within [4096, 1048576]"
@@ -648,7 +786,8 @@ impl ProxyConfig {
}
if config.general.me_route_backpressure_base_timeout_ms > 5000 {
return Err(ProxyError::Config(
- "general.me_route_backpressure_base_timeout_ms must be within [1, 5000]".to_string(),
+ "general.me_route_backpressure_base_timeout_ms must be within [1, 5000]"
+ .to_string(),
));
}
@@ -661,7 +800,8 @@ impl ProxyConfig {
}
if config.general.me_route_backpressure_high_timeout_ms > 5000 {
return Err(ProxyError::Config(
- "general.me_route_backpressure_high_timeout_ms must be within [1, 5000]".to_string(),
+ "general.me_route_backpressure_high_timeout_ms must be within [1, 5000]"
+ .to_string(),
));
}
@@ -860,7 +1000,7 @@ impl ProxyConfig {
if !config.censorship.tls_emulation
&& config.censorship.fake_cert_len == default_fake_cert_len()
{
- config.censorship.fake_cert_len = rand::rng().gen_range(1024..4096);
+ config.censorship.fake_cert_len = rand::rng().random_range(1024..4096);
}
// Resolve listen_tcp: explicit value wins, otherwise auto-detect.
@@ -982,6 +1122,18 @@ impl ProxyConfig {
}
}
+#[cfg(test)]
+#[path = "tests/load_idle_policy_tests.rs"]
+mod load_idle_policy_tests;
+
+#[cfg(test)]
+#[path = "tests/load_security_tests.rs"]
+mod load_security_tests;
+
+#[cfg(test)]
+#[path = "tests/load_mask_shape_security_tests.rs"]
+mod load_mask_shape_security_tests;
+
#[cfg(test)]
mod tests {
use super::*;
@@ -1697,7 +1849,9 @@ mod tests {
let path = dir.join("telemt_me_route_backpressure_base_timeout_ms_out_of_range_test.toml");
std::fs::write(&path, toml).unwrap();
let err = ProxyConfig::load(&path).unwrap_err().to_string();
- assert!(err.contains("general.me_route_backpressure_base_timeout_ms must be within [1, 5000]"));
+ assert!(
+ err.contains("general.me_route_backpressure_base_timeout_ms must be within [1, 5000]")
+ );
let _ = std::fs::remove_file(path);
}
@@ -1718,7 +1872,9 @@ mod tests {
let path = dir.join("telemt_me_route_backpressure_high_timeout_ms_out_of_range_test.toml");
std::fs::write(&path, toml).unwrap();
let err = ProxyConfig::load(&path).unwrap_err().to_string();
- assert!(err.contains("general.me_route_backpressure_high_timeout_ms must be within [1, 5000]"));
+ assert!(
+ err.contains("general.me_route_backpressure_high_timeout_ms must be within [1, 5000]")
+ );
let _ = std::fs::remove_file(path);
}
diff --git a/src/config/mod.rs b/src/config/mod.rs
index c7187ad..dcb3bec 100644
--- a/src/config/mod.rs
+++ b/src/config/mod.rs
@@ -1,9 +1,9 @@
//! Configuration.
pub(crate) mod defaults;
-mod types;
-mod load;
pub mod hot_reload;
+mod load;
+mod types;
pub use load::ProxyConfig;
pub use types::*;
diff --git a/src/config/tests/load_idle_policy_tests.rs b/src/config/tests/load_idle_policy_tests.rs
new file mode 100644
index 0000000..c6a4e86
--- /dev/null
+++ b/src/config/tests/load_idle_policy_tests.rs
@@ -0,0 +1,80 @@
+use super::*;
+use std::fs;
+use std::path::PathBuf;
+use std::time::{SystemTime, UNIX_EPOCH};
+
+fn write_temp_config(contents: &str) -> PathBuf {
+ let nonce = SystemTime::now()
+ .duration_since(UNIX_EPOCH)
+ .expect("system time must be after unix epoch")
+ .as_nanos();
+ let path = std::env::temp_dir().join(format!("telemt-idle-policy-{nonce}.toml"));
+ fs::write(&path, contents).expect("temp config write must succeed");
+ path
+}
+
+fn remove_temp_config(path: &PathBuf) {
+ let _ = fs::remove_file(path);
+}
+
+#[test]
+fn load_rejects_relay_hard_idle_smaller_than_soft_idle_with_clear_error() {
+ let path = write_temp_config(
+ r#"
+[timeouts]
+relay_client_idle_soft_secs = 120
+relay_client_idle_hard_secs = 60
+"#,
+ );
+
+    let err = ProxyConfig::load(&path).expect_err("config with hard<soft must fail");
+    let msg = err.to_string();
+    assert!(
+        msg.contains(
+            "timeouts.relay_client_idle_hard_secs must be >= timeouts.relay_client_idle_soft_secs"
+ ),
+ "error must explain the violated hard>=soft invariant, got: {msg}"
+ );
+
+ remove_temp_config(&path);
+}
+
+#[test]
+fn load_rejects_relay_grace_larger_than_hard_idle_with_clear_error() {
+ let path = write_temp_config(
+ r#"
+[timeouts]
+relay_client_idle_soft_secs = 60
+relay_client_idle_hard_secs = 120
+relay_idle_grace_after_downstream_activity_secs = 121
+"#,
+ );
+
+ let err = ProxyConfig::load(&path).expect_err("config with grace>hard must fail");
+ let msg = err.to_string();
+ assert!(
+ msg.contains("timeouts.relay_idle_grace_after_downstream_activity_secs must be <= timeouts.relay_client_idle_hard_secs"),
+ "error must explain the violated grace<=hard invariant, got: {msg}"
+ );
+
+ remove_temp_config(&path);
+}
+
+#[test]
+fn load_rejects_zero_handshake_timeout_with_clear_error() {
+ let path = write_temp_config(
+ r#"
+[timeouts]
+client_handshake = 0
+"#,
+ );
+
+ let err = ProxyConfig::load(&path).expect_err("config with zero handshake timeout must fail");
+ let msg = err.to_string();
+ assert!(
+ msg.contains("timeouts.client_handshake must be > 0"),
+ "error must explain that handshake timeout must be positive, got: {msg}"
+ );
+
+ remove_temp_config(&path);
+}
diff --git a/src/config/tests/load_mask_shape_security_tests.rs b/src/config/tests/load_mask_shape_security_tests.rs
new file mode 100644
index 0000000..8986a49
--- /dev/null
+++ b/src/config/tests/load_mask_shape_security_tests.rs
@@ -0,0 +1,238 @@
+use super::*;
+use std::fs;
+use std::path::PathBuf;
+use std::time::{SystemTime, UNIX_EPOCH};
+
+fn write_temp_config(contents: &str) -> PathBuf {
+ let nonce = SystemTime::now()
+ .duration_since(UNIX_EPOCH)
+ .expect("system time must be after unix epoch")
+ .as_nanos();
+ let path = std::env::temp_dir().join(format!("telemt-load-mask-shape-security-{nonce}.toml"));
+ fs::write(&path, contents).expect("temp config write must succeed");
+ path
+}
+
+fn remove_temp_config(path: &PathBuf) {
+ let _ = fs::remove_file(path);
+}
+
+#[test]
+fn load_rejects_zero_mask_shape_bucket_floor_bytes() {
+ let path = write_temp_config(
+ r#"
+[censorship]
+mask_shape_bucket_floor_bytes = 0
+mask_shape_bucket_cap_bytes = 4096
+"#,
+ );
+
+ let err =
+ ProxyConfig::load(&path).expect_err("zero mask_shape_bucket_floor_bytes must be rejected");
+ let msg = err.to_string();
+ assert!(
+ msg.contains("censorship.mask_shape_bucket_floor_bytes must be > 0"),
+ "error must explain floor>0 invariant, got: {msg}"
+ );
+
+ remove_temp_config(&path);
+}
+
+#[test]
+fn load_rejects_mask_shape_bucket_cap_less_than_floor() {
+ let path = write_temp_config(
+ r#"
+[censorship]
+mask_shape_bucket_floor_bytes = 1024
+mask_shape_bucket_cap_bytes = 512
+"#,
+ );
+
+ let err =
+ ProxyConfig::load(&path).expect_err("mask_shape_bucket_cap_bytes < floor must be rejected");
+ let msg = err.to_string();
+ assert!(
+ msg.contains(
+ "censorship.mask_shape_bucket_cap_bytes must be >= censorship.mask_shape_bucket_floor_bytes"
+ ),
+ "error must explain cap>=floor invariant, got: {msg}"
+ );
+
+ remove_temp_config(&path);
+}
+
+#[test]
+fn load_accepts_mask_shape_bucket_cap_equal_to_floor() {
+ let path = write_temp_config(
+ r#"
+[censorship]
+mask_shape_hardening = true
+mask_shape_bucket_floor_bytes = 1024
+mask_shape_bucket_cap_bytes = 1024
+"#,
+ );
+
+ let cfg = ProxyConfig::load(&path).expect("equal cap and floor must be accepted");
+ assert!(cfg.censorship.mask_shape_hardening);
+ assert_eq!(cfg.censorship.mask_shape_bucket_floor_bytes, 1024);
+ assert_eq!(cfg.censorship.mask_shape_bucket_cap_bytes, 1024);
+
+ remove_temp_config(&path);
+}
+
+#[test]
+fn load_rejects_above_cap_blur_when_shape_hardening_disabled() {
+ let path = write_temp_config(
+ r#"
+[censorship]
+mask_shape_hardening = false
+mask_shape_above_cap_blur = true
+mask_shape_above_cap_blur_max_bytes = 64
+"#,
+ );
+
+ let err =
+ ProxyConfig::load(&path).expect_err("above-cap blur must require shape hardening enabled");
+ let msg = err.to_string();
+ assert!(
+ msg.contains(
+ "censorship.mask_shape_above_cap_blur requires censorship.mask_shape_hardening = true"
+ ),
+ "error must explain blur prerequisite, got: {msg}"
+ );
+
+ remove_temp_config(&path);
+}
+
+#[test]
+fn load_rejects_above_cap_blur_with_zero_max_bytes() {
+ let path = write_temp_config(
+ r#"
+[censorship]
+mask_shape_hardening = true
+mask_shape_above_cap_blur = true
+mask_shape_above_cap_blur_max_bytes = 0
+"#,
+ );
+
+ let err =
+ ProxyConfig::load(&path).expect_err("above-cap blur max bytes must be > 0 when enabled");
+ let msg = err.to_string();
+ assert!(
+ msg.contains("censorship.mask_shape_above_cap_blur_max_bytes must be > 0 when censorship.mask_shape_above_cap_blur is enabled"),
+ "error must explain blur max bytes invariant, got: {msg}"
+ );
+
+ remove_temp_config(&path);
+}
+
+#[test]
+fn load_rejects_timing_normalization_floor_zero_when_enabled() {
+ let path = write_temp_config(
+ r#"
+[censorship]
+mask_timing_normalization_enabled = true
+mask_timing_normalization_floor_ms = 0
+mask_timing_normalization_ceiling_ms = 200
+"#,
+ );
+
+ let err =
+ ProxyConfig::load(&path).expect_err("timing normalization floor must be > 0 when enabled");
+ let msg = err.to_string();
+ assert!(
+ msg.contains("censorship.mask_timing_normalization_floor_ms must be > 0 when censorship.mask_timing_normalization_enabled is true"),
+ "error must explain timing floor invariant, got: {msg}"
+ );
+
+ remove_temp_config(&path);
+}
+
+#[test]
+fn load_rejects_timing_normalization_ceiling_below_floor() {
+ let path = write_temp_config(
+ r#"
+[censorship]
+mask_timing_normalization_enabled = true
+mask_timing_normalization_floor_ms = 220
+mask_timing_normalization_ceiling_ms = 200
+"#,
+ );
+
+ let err = ProxyConfig::load(&path).expect_err("timing normalization ceiling must be >= floor");
+ let msg = err.to_string();
+ assert!(
+ msg.contains("censorship.mask_timing_normalization_ceiling_ms must be >= censorship.mask_timing_normalization_floor_ms"),
+ "error must explain timing ceiling/floor invariant, got: {msg}"
+ );
+
+ remove_temp_config(&path);
+}
+
+#[test]
+fn load_accepts_valid_timing_normalization_and_above_cap_blur_config() {
+ let path = write_temp_config(
+ r#"
+[censorship]
+mask_shape_hardening = true
+mask_shape_above_cap_blur = true
+mask_shape_above_cap_blur_max_bytes = 128
+mask_timing_normalization_enabled = true
+mask_timing_normalization_floor_ms = 150
+mask_timing_normalization_ceiling_ms = 240
+"#,
+ );
+
+ let cfg = ProxyConfig::load(&path)
+ .expect("valid blur and timing normalization settings must be accepted");
+ assert!(cfg.censorship.mask_shape_hardening);
+ assert!(cfg.censorship.mask_shape_above_cap_blur);
+ assert_eq!(cfg.censorship.mask_shape_above_cap_blur_max_bytes, 128);
+ assert!(cfg.censorship.mask_timing_normalization_enabled);
+ assert_eq!(cfg.censorship.mask_timing_normalization_floor_ms, 150);
+ assert_eq!(cfg.censorship.mask_timing_normalization_ceiling_ms, 240);
+
+ remove_temp_config(&path);
+}
+
+#[test]
+fn load_rejects_aggressive_shape_mode_when_shape_hardening_disabled() {
+ let path = write_temp_config(
+ r#"
+[censorship]
+mask_shape_hardening = false
+mask_shape_hardening_aggressive_mode = true
+"#,
+ );
+
+ let err = ProxyConfig::load(&path)
+ .expect_err("aggressive shape hardening mode must require shape hardening enabled");
+ let msg = err.to_string();
+ assert!(
+ msg.contains("censorship.mask_shape_hardening_aggressive_mode requires censorship.mask_shape_hardening = true"),
+ "error must explain aggressive-mode prerequisite, got: {msg}"
+ );
+
+ remove_temp_config(&path);
+}
+
+#[test]
+fn load_accepts_aggressive_shape_mode_when_shape_hardening_enabled() {
+ let path = write_temp_config(
+ r#"
+[censorship]
+mask_shape_hardening = true
+mask_shape_hardening_aggressive_mode = true
+mask_shape_above_cap_blur = true
+mask_shape_above_cap_blur_max_bytes = 8
+"#,
+ );
+
+ let cfg = ProxyConfig::load(&path)
+ .expect("aggressive shape hardening mode should be accepted when prerequisites are met");
+ assert!(cfg.censorship.mask_shape_hardening);
+ assert!(cfg.censorship.mask_shape_hardening_aggressive_mode);
+ assert!(cfg.censorship.mask_shape_above_cap_blur);
+
+ remove_temp_config(&path);
+}
diff --git a/src/config/tests/load_security_tests.rs b/src/config/tests/load_security_tests.rs
new file mode 100644
index 0000000..654a9c0
--- /dev/null
+++ b/src/config/tests/load_security_tests.rs
@@ -0,0 +1,88 @@
+use super::*;
+use std::fs;
+use std::path::PathBuf;
+use std::time::{SystemTime, UNIX_EPOCH};
+
+fn write_temp_config(contents: &str) -> PathBuf {
+ let nonce = SystemTime::now()
+ .duration_since(UNIX_EPOCH)
+ .expect("system time must be after unix epoch")
+ .as_nanos();
+ let path = std::env::temp_dir().join(format!("telemt-load-security-{nonce}.toml"));
+ fs::write(&path, contents).expect("temp config write must succeed");
+ path
+}
+
+fn remove_temp_config(path: &PathBuf) {
+ let _ = fs::remove_file(path);
+}
+
+#[test]
+fn load_rejects_server_hello_delay_equal_to_handshake_timeout_budget() {
+ let path = write_temp_config(
+ r#"
+[timeouts]
+client_handshake = 1
+
+[censorship]
+server_hello_delay_max_ms = 1000
+"#,
+ );
+
+ let err =
+ ProxyConfig::load(&path).expect_err("delay equal to handshake timeout must be rejected");
+ let msg = err.to_string();
+ assert!(
+ msg.contains(
+ "censorship.server_hello_delay_max_ms must be < timeouts.client_handshake * 1000"
+ ),
+ "error must explain delay0 enable bounded wait for compatibility.
#[serde(default = "default_me_reader_route_data_wait_ms")]
pub me_reader_route_data_wait_ms: u64,
@@ -489,6 +489,14 @@ pub struct GeneralConfig {
#[serde(default = "default_me_d2c_ack_flush_immediate")]
pub me_d2c_ack_flush_immediate: bool,
+ /// Additional bytes above strict per-user quota allowed in hot-path soft mode.
+ #[serde(default = "default_me_quota_soft_overshoot_bytes")]
+ pub me_quota_soft_overshoot_bytes: u64,
+
+ /// Shrink threshold for reusable ME->Client frame assembly buffer.
+ #[serde(default = "default_me_d2c_frame_buf_shrink_threshold_bytes")]
+ pub me_d2c_frame_buf_shrink_threshold_bytes: usize,
+
/// Copy buffer size for client->DC direction in direct relay.
#[serde(default = "default_direct_relay_copy_buf_c2s_bytes")]
pub direct_relay_copy_buf_c2s_bytes: usize,
@@ -945,6 +953,8 @@ impl Default for GeneralConfig {
me_d2c_flush_batch_max_bytes: default_me_d2c_flush_batch_max_bytes(),
me_d2c_flush_batch_max_delay_us: default_me_d2c_flush_batch_max_delay_us(),
me_d2c_ack_flush_immediate: default_me_d2c_ack_flush_immediate(),
+ me_quota_soft_overshoot_bytes: default_me_quota_soft_overshoot_bytes(),
+ me_d2c_frame_buf_shrink_threshold_bytes: default_me_d2c_frame_buf_shrink_threshold_bytes(),
direct_relay_copy_buf_c2s_bytes: default_direct_relay_copy_buf_c2s_bytes(),
direct_relay_copy_buf_s2c_bytes: default_direct_relay_copy_buf_s2c_bytes(),
me_warmup_stagger_enabled: default_true(),
@@ -1047,8 +1057,7 @@ impl Default for GeneralConfig {
me_pool_drain_soft_evict_per_writer: default_me_pool_drain_soft_evict_per_writer(),
me_pool_drain_soft_evict_budget_per_core:
default_me_pool_drain_soft_evict_budget_per_core(),
- me_pool_drain_soft_evict_cooldown_ms:
- default_me_pool_drain_soft_evict_cooldown_ms(),
+ me_pool_drain_soft_evict_cooldown_ms: default_me_pool_drain_soft_evict_cooldown_ms(),
me_bind_stale_mode: MeBindStaleMode::default(),
me_bind_stale_ttl_secs: default_me_bind_stale_ttl_secs(),
me_pool_min_fresh_ratio: default_me_pool_min_fresh_ratio(),
@@ -1228,6 +1237,13 @@ pub struct ServerConfig {
#[serde(default = "default_proxy_protocol_header_timeout_ms")]
pub proxy_protocol_header_timeout_ms: u64,
+ /// Trusted source CIDRs allowed to send incoming PROXY protocol headers.
+ ///
+ /// When non-empty, connections from addresses outside this allowlist are
+ /// rejected before `src_addr` is applied.
+ #[serde(default)]
+ pub proxy_protocol_trusted_cidrs: Vec<String>,
+
/// Port for the Prometheus-compatible metrics endpoint.
/// Enables metrics when set; binds on all interfaces (dual-stack) by default.
#[serde(default)]
@@ -1270,6 +1286,7 @@ impl Default for ServerConfig {
listen_tcp: None,
proxy_protocol: false,
proxy_protocol_header_timeout_ms: default_proxy_protocol_header_timeout_ms(),
+ proxy_protocol_trusted_cidrs: Vec::new(),
metrics_port: None,
metrics_listen: None,
metrics_whitelist: default_metrics_whitelist(),
@@ -1286,6 +1303,24 @@ pub struct TimeoutsConfig {
#[serde(default = "default_handshake_timeout")]
pub client_handshake: u64,
+ /// Enables soft/hard relay client idle policy for middle-relay sessions.
+ #[serde(default = "default_relay_idle_policy_v2_enabled")]
+ pub relay_idle_policy_v2_enabled: bool,
+
+ /// Soft idle threshold for middle-relay client uplink activity in seconds.
+ /// Hitting this threshold marks the session as idle-candidate, but does not close it.
+ #[serde(default = "default_relay_client_idle_soft_secs")]
+ pub relay_client_idle_soft_secs: u64,
+
+ /// Hard idle threshold for middle-relay client uplink activity in seconds.
+ /// Hitting this threshold closes the session.
+ #[serde(default = "default_relay_client_idle_hard_secs")]
+ pub relay_client_idle_hard_secs: u64,
+
+ /// Additional grace in seconds added to hard idle window after recent downstream activity.
+ #[serde(default = "default_relay_idle_grace_after_downstream_activity_secs")]
+ pub relay_idle_grace_after_downstream_activity_secs: u64,
+
#[serde(default = "default_connect_timeout")]
pub tg_connect: u64,
@@ -1308,6 +1343,11 @@ impl Default for TimeoutsConfig {
fn default() -> Self {
Self {
client_handshake: default_handshake_timeout(),
+ relay_idle_policy_v2_enabled: default_relay_idle_policy_v2_enabled(),
+ relay_client_idle_soft_secs: default_relay_client_idle_soft_secs(),
+ relay_client_idle_hard_secs: default_relay_client_idle_hard_secs(),
+ relay_idle_grace_after_downstream_activity_secs:
+ default_relay_idle_grace_after_downstream_activity_secs(),
tg_connect: default_connect_timeout(),
client_keepalive: default_keepalive(),
client_ack: default_ack_timeout(),
@@ -1381,6 +1421,46 @@ pub struct AntiCensorshipConfig {
/// Allows the backend to see the real client IP.
#[serde(default)]
pub mask_proxy_protocol: u8,
+
+ /// Enable shape-channel hardening on mask backend path by padding
+ /// client->mask stream tail to configured buckets on stream end.
+ #[serde(default = "default_mask_shape_hardening")]
+ pub mask_shape_hardening: bool,
+
+ /// Opt-in aggressive shape hardening mode.
+ /// When enabled, masking may shape some backend-silent timeout paths and
+ /// enforces strictly positive above-cap blur when blur is enabled.
+ #[serde(default = "default_mask_shape_hardening_aggressive_mode")]
+ pub mask_shape_hardening_aggressive_mode: bool,
+
+ /// Minimum bucket size for mask shape hardening padding.
+ #[serde(default = "default_mask_shape_bucket_floor_bytes")]
+ pub mask_shape_bucket_floor_bytes: usize,
+
+ /// Maximum bucket size for mask shape hardening padding.
+ #[serde(default = "default_mask_shape_bucket_cap_bytes")]
+ pub mask_shape_bucket_cap_bytes: usize,
+
+ /// Add bounded random tail bytes even when total bytes already exceed
+ /// mask_shape_bucket_cap_bytes.
+ #[serde(default = "default_mask_shape_above_cap_blur")]
+ pub mask_shape_above_cap_blur: bool,
+
+ /// Maximum random bytes appended above cap when above-cap blur is enabled.
+ #[serde(default = "default_mask_shape_above_cap_blur_max_bytes")]
+ pub mask_shape_above_cap_blur_max_bytes: usize,
+
+ /// Enable outcome-time normalization envelope for masking fallback.
+ #[serde(default = "default_mask_timing_normalization_enabled")]
+ pub mask_timing_normalization_enabled: bool,
+
+ /// Lower bound (ms) for masking outcome timing envelope.
+ #[serde(default = "default_mask_timing_normalization_floor_ms")]
+ pub mask_timing_normalization_floor_ms: u64,
+
+ /// Upper bound (ms) for masking outcome timing envelope.
+ #[serde(default = "default_mask_timing_normalization_ceiling_ms")]
+ pub mask_timing_normalization_ceiling_ms: u64,
}
impl Default for AntiCensorshipConfig {
@@ -1402,6 +1482,15 @@ impl Default for AntiCensorshipConfig {
tls_full_cert_ttl_secs: default_tls_full_cert_ttl_secs(),
alpn_enforce: default_alpn_enforce(),
mask_proxy_protocol: 0,
+ mask_shape_hardening: default_mask_shape_hardening(),
+ mask_shape_hardening_aggressive_mode: default_mask_shape_hardening_aggressive_mode(),
+ mask_shape_bucket_floor_bytes: default_mask_shape_bucket_floor_bytes(),
+ mask_shape_bucket_cap_bytes: default_mask_shape_bucket_cap_bytes(),
+ mask_shape_above_cap_blur: default_mask_shape_above_cap_blur(),
+ mask_shape_above_cap_blur_max_bytes: default_mask_shape_above_cap_blur_max_bytes(),
+ mask_timing_normalization_enabled: default_mask_timing_normalization_enabled(),
+ mask_timing_normalization_floor_ms: default_mask_timing_normalization_floor_ms(),
+ mask_timing_normalization_ceiling_ms: default_mask_timing_normalization_ceiling_ms(),
}
}
}
diff --git a/src/crypto/aes.rs b/src/crypto/aes.rs
index deda730..0726298 100644
--- a/src/crypto/aes.rs
+++ b/src/crypto/aes.rs
@@ -13,10 +13,13 @@
#![allow(dead_code)]
-use aes::Aes256;
-use ctr::{Ctr128BE, cipher::{KeyIvInit, StreamCipher}};
-use zeroize::Zeroize;
use crate::error::{ProxyError, Result};
+use aes::Aes256;
+use ctr::{
+ Ctr128BE,
+ cipher::{KeyIvInit, StreamCipher},
+};
+use zeroize::Zeroize;
type Aes256Ctr = Ctr128BE<Aes256>;
@@ -42,33 +45,39 @@ impl AesCtr {
cipher: Aes256Ctr::new(key.into(), (&iv_bytes).into()),
}
}
-
+
/// Create from key and IV slices
 pub fn from_key_iv(key: &[u8], iv: &[u8]) -> Result<Self> {
if key.len() != 32 {
- return Err(ProxyError::InvalidKeyLength { expected: 32, got: key.len() });
+ return Err(ProxyError::InvalidKeyLength {
+ expected: 32,
+ got: key.len(),
+ });
}
if iv.len() != 16 {
- return Err(ProxyError::InvalidKeyLength { expected: 16, got: iv.len() });
+ return Err(ProxyError::InvalidKeyLength {
+ expected: 16,
+ got: iv.len(),
+ });
}
-
+
let key: [u8; 32] = key.try_into().unwrap();
let iv = u128::from_be_bytes(iv.try_into().unwrap());
Ok(Self::new(&key, iv))
}
-
+
/// Encrypt/decrypt data in-place (CTR mode is symmetric)
pub fn apply(&mut self, data: &mut [u8]) {
self.cipher.apply_keystream(data);
}
-
+
/// Encrypt data, returning new buffer
 pub fn encrypt(&mut self, data: &[u8]) -> Vec<u8> {
let mut output = data.to_vec();
self.apply(&mut output);
output
}
-
+
/// Decrypt data (for CTR, identical to encrypt)
 pub fn decrypt(&mut self, data: &[u8]) -> Vec<u8> {
self.encrypt(data)
@@ -99,27 +108,33 @@ impl Drop for AesCbc {
impl AesCbc {
/// AES block size
const BLOCK_SIZE: usize = 16;
-
+
/// Create new AES-CBC cipher with key and IV
pub fn new(key: [u8; 32], iv: [u8; 16]) -> Self {
Self { key, iv }
}
-
+
/// Create from slices
 pub fn from_slices(key: &[u8], iv: &[u8]) -> Result<Self> {
if key.len() != 32 {
- return Err(ProxyError::InvalidKeyLength { expected: 32, got: key.len() });
+ return Err(ProxyError::InvalidKeyLength {
+ expected: 32,
+ got: key.len(),
+ });
}
if iv.len() != 16 {
- return Err(ProxyError::InvalidKeyLength { expected: 16, got: iv.len() });
+ return Err(ProxyError::InvalidKeyLength {
+ expected: 16,
+ got: iv.len(),
+ });
}
-
+
Ok(Self {
key: key.try_into().unwrap(),
iv: iv.try_into().unwrap(),
})
}
-
+
/// Encrypt a single block using raw AES (no chaining)
fn encrypt_block(&self, block: &[u8; 16], key_schedule: &aes::Aes256) -> [u8; 16] {
use aes::cipher::BlockEncrypt;
@@ -127,7 +142,7 @@ impl AesCbc {
key_schedule.encrypt_block((&mut output).into());
output
}
-
+
/// Decrypt a single block using raw AES (no chaining)
fn decrypt_block(&self, block: &[u8; 16], key_schedule: &aes::Aes256) -> [u8; 16] {
use aes::cipher::BlockDecrypt;
@@ -135,7 +150,7 @@ impl AesCbc {
key_schedule.decrypt_block((&mut output).into());
output
}
-
+
/// XOR two 16-byte blocks
fn xor_blocks(a: &[u8; 16], b: &[u8; 16]) -> [u8; 16] {
let mut result = [0u8; 16];
@@ -144,27 +159,28 @@ impl AesCbc {
}
result
}
-
+
/// Encrypt data using CBC mode with proper chaining
///
/// CBC Encryption: C[i] = AES_Encrypt(P[i] XOR C[i-1]), where C[-1] = IV
 pub fn encrypt(&self, data: &[u8]) -> Result<Vec<u8>> {
if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
- return Err(ProxyError::Crypto(
- format!("CBC data must be aligned to 16 bytes, got {}", data.len())
- ));
+ return Err(ProxyError::Crypto(format!(
+ "CBC data must be aligned to 16 bytes, got {}",
+ data.len()
+ )));
}
-
+
if data.is_empty() {
return Ok(Vec::new());
}
-
+
use aes::cipher::KeyInit;
let key_schedule = aes::Aes256::new((&self.key).into());
-
+
let mut result = Vec::with_capacity(data.len());
let mut prev_ciphertext = self.iv;
-
+
for chunk in data.chunks(Self::BLOCK_SIZE) {
let plaintext: [u8; 16] = chunk.try_into().unwrap();
let xored = Self::xor_blocks(&plaintext, &prev_ciphertext);
@@ -172,30 +188,31 @@ impl AesCbc {
prev_ciphertext = ciphertext;
result.extend_from_slice(&ciphertext);
}
-
+
Ok(result)
}
-
+
/// Decrypt data using CBC mode with proper chaining
///
/// CBC Decryption: P[i] = AES_Decrypt(C[i]) XOR C[i-1], where C[-1] = IV
 pub fn decrypt(&self, data: &[u8]) -> Result<Vec<u8>> {
if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
- return Err(ProxyError::Crypto(
- format!("CBC data must be aligned to 16 bytes, got {}", data.len())
- ));
+ return Err(ProxyError::Crypto(format!(
+ "CBC data must be aligned to 16 bytes, got {}",
+ data.len()
+ )));
}
-
+
if data.is_empty() {
return Ok(Vec::new());
}
-
+
use aes::cipher::KeyInit;
let key_schedule = aes::Aes256::new((&self.key).into());
-
+
let mut result = Vec::with_capacity(data.len());
let mut prev_ciphertext = self.iv;
-
+
for chunk in data.chunks(Self::BLOCK_SIZE) {
let ciphertext: [u8; 16] = chunk.try_into().unwrap();
let decrypted = self.decrypt_block(&ciphertext, &key_schedule);
@@ -203,75 +220,77 @@ impl AesCbc {
prev_ciphertext = ciphertext;
result.extend_from_slice(&plaintext);
}
-
+
Ok(result)
}
-
+
/// Encrypt data in-place
pub fn encrypt_in_place(&self, data: &mut [u8]) -> Result<()> {
if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
- return Err(ProxyError::Crypto(
- format!("CBC data must be aligned to 16 bytes, got {}", data.len())
- ));
+ return Err(ProxyError::Crypto(format!(
+ "CBC data must be aligned to 16 bytes, got {}",
+ data.len()
+ )));
}
-
+
if data.is_empty() {
return Ok(());
}
-
+
use aes::cipher::KeyInit;
let key_schedule = aes::Aes256::new((&self.key).into());
-
+
let mut prev_ciphertext = self.iv;
-
+
for i in (0..data.len()).step_by(Self::BLOCK_SIZE) {
let block = &mut data[i..i + Self::BLOCK_SIZE];
-
+
for j in 0..Self::BLOCK_SIZE {
block[j] ^= prev_ciphertext[j];
}
-
+
let block_array: &mut [u8; 16] = block.try_into().unwrap();
*block_array = self.encrypt_block(block_array, &key_schedule);
-
+
prev_ciphertext = *block_array;
}
-
+
Ok(())
}
-
+
/// Decrypt data in-place
pub fn decrypt_in_place(&self, data: &mut [u8]) -> Result<()> {
if !data.len().is_multiple_of(Self::BLOCK_SIZE) {
- return Err(ProxyError::Crypto(
- format!("CBC data must be aligned to 16 bytes, got {}", data.len())
- ));
+ return Err(ProxyError::Crypto(format!(
+ "CBC data must be aligned to 16 bytes, got {}",
+ data.len()
+ )));
}
-
+
if data.is_empty() {
return Ok(());
}
-
+
use aes::cipher::KeyInit;
let key_schedule = aes::Aes256::new((&self.key).into());
-
+
let mut prev_ciphertext = self.iv;
-
+
for i in (0..data.len()).step_by(Self::BLOCK_SIZE) {
let block = &mut data[i..i + Self::BLOCK_SIZE];
-
+
let current_ciphertext: [u8; 16] = block.try_into().unwrap();
-
+
let block_array: &mut [u8; 16] = block.try_into().unwrap();
*block_array = self.decrypt_block(block_array, &key_schedule);
-
+
for j in 0..Self::BLOCK_SIZE {
block[j] ^= prev_ciphertext[j];
}
-
+
prev_ciphertext = current_ciphertext;
}
-
+
Ok(())
}
}
@@ -318,227 +337,227 @@ impl Decryptor for PassthroughEncryptor {
#[cfg(test)]
mod tests {
use super::*;
-
+
// ============= AES-CTR Tests =============
-
+
#[test]
fn test_aes_ctr_roundtrip() {
let key = [0u8; 32];
let iv = 12345u128;
-
+
let original = b"Hello, MTProto!";
-
+
let mut enc = AesCtr::new(&key, iv);
let encrypted = enc.encrypt(original);
-
+
let mut dec = AesCtr::new(&key, iv);
let decrypted = dec.decrypt(&encrypted);
-
+
assert_eq!(original.as_slice(), decrypted.as_slice());
}
-
+
#[test]
fn test_aes_ctr_in_place() {
let key = [0x42u8; 32];
let iv = 999u128;
-
+
let original = b"Test data for in-place encryption";
let mut data = original.to_vec();
-
+
let mut cipher = AesCtr::new(&key, iv);
cipher.apply(&mut data);
-
+
assert_ne!(&data[..], original);
-
+
let mut cipher = AesCtr::new(&key, iv);
cipher.apply(&mut data);
-
+
assert_eq!(&data[..], original);
}
-
+
// ============= AES-CBC Tests =============
-
+
#[test]
fn test_aes_cbc_roundtrip() {
let key = [0u8; 32];
let iv = [0u8; 16];
-
+
let original = [0u8; 32];
-
+
let cipher = AesCbc::new(key, iv);
let encrypted = cipher.encrypt(&original).unwrap();
let decrypted = cipher.decrypt(&encrypted).unwrap();
-
+
assert_eq!(original.as_slice(), decrypted.as_slice());
}
-
+
#[test]
fn test_aes_cbc_chaining_works() {
let key = [0x42u8; 32];
let iv = [0x00u8; 16];
-
+
let plaintext = [0xAAu8; 32];
-
+
let cipher = AesCbc::new(key, iv);
let ciphertext = cipher.encrypt(&plaintext).unwrap();
-
+
let block1 = &ciphertext[0..16];
let block2 = &ciphertext[16..32];
-
+
assert_ne!(
block1, block2,
"CBC chaining broken: identical plaintext blocks produced identical ciphertext"
);
}
-
+
#[test]
fn test_aes_cbc_known_vector() {
let key = [0u8; 32];
let iv = [0u8; 16];
let plaintext = [0u8; 16];
-
+
let cipher = AesCbc::new(key, iv);
let ciphertext = cipher.encrypt(&plaintext).unwrap();
-
+
let decrypted = cipher.decrypt(&ciphertext).unwrap();
assert_eq!(plaintext.as_slice(), decrypted.as_slice());
-
+
assert_ne!(ciphertext.as_slice(), plaintext.as_slice());
}
-
+
#[test]
fn test_aes_cbc_multi_block() {
let key = [0x12u8; 32];
let iv = [0x34u8; 16];
-
+
 let plaintext: Vec<u8> = (0..80).collect();
-
+
let cipher = AesCbc::new(key, iv);
let ciphertext = cipher.encrypt(&plaintext).unwrap();
let decrypted = cipher.decrypt(&ciphertext).unwrap();
-
+
assert_eq!(plaintext, decrypted);
}
-
+
#[test]
fn test_aes_cbc_in_place() {
let key = [0x12u8; 32];
let iv = [0x34u8; 16];
-
+
let original = [0x56u8; 48];
let mut buffer = original;
-
+
let cipher = AesCbc::new(key, iv);
-
+
cipher.encrypt_in_place(&mut buffer).unwrap();
assert_ne!(&buffer[..], &original[..]);
-
+
cipher.decrypt_in_place(&mut buffer).unwrap();
assert_eq!(&buffer[..], &original[..]);
}
-
+
#[test]
fn test_aes_cbc_empty_data() {
let cipher = AesCbc::new([0u8; 32], [0u8; 16]);
-
+
let encrypted = cipher.encrypt(&[]).unwrap();
assert!(encrypted.is_empty());
-
+
let decrypted = cipher.decrypt(&[]).unwrap();
assert!(decrypted.is_empty());
}
-
+
#[test]
fn test_aes_cbc_unaligned_error() {
let cipher = AesCbc::new([0u8; 32], [0u8; 16]);
-
+
let result = cipher.encrypt(&[0u8; 15]);
assert!(result.is_err());
-
+
let result = cipher.encrypt(&[0u8; 17]);
assert!(result.is_err());
}
-
+
#[test]
fn test_aes_cbc_avalanche_effect() {
let key = [0xAB; 32];
let iv = [0xCD; 16];
-
+
let plaintext1 = [0u8; 32];
let mut plaintext2 = [0u8; 32];
plaintext2[0] = 0x01;
-
+
let cipher = AesCbc::new(key, iv);
-
+
let ciphertext1 = cipher.encrypt(&plaintext1).unwrap();
let ciphertext2 = cipher.encrypt(&plaintext2).unwrap();
-
+
assert_ne!(&ciphertext1[0..16], &ciphertext2[0..16]);
assert_ne!(&ciphertext1[16..32], &ciphertext2[16..32]);
}
-
+
#[test]
fn test_aes_cbc_iv_matters() {
let key = [0x55; 32];
let plaintext = [0x77u8; 16];
-
+
let cipher1 = AesCbc::new(key, [0u8; 16]);
let cipher2 = AesCbc::new(key, [1u8; 16]);
-
+
let ciphertext1 = cipher1.encrypt(&plaintext).unwrap();
let ciphertext2 = cipher2.encrypt(&plaintext).unwrap();
-
+
assert_ne!(ciphertext1, ciphertext2);
}
-
+
#[test]
fn test_aes_cbc_deterministic() {
let key = [0x99; 32];
let iv = [0x88; 16];
let plaintext = [0x77u8; 32];
-
+
let cipher = AesCbc::new(key, iv);
-
+
let ciphertext1 = cipher.encrypt(&plaintext).unwrap();
let ciphertext2 = cipher.encrypt(&plaintext).unwrap();
-
+
assert_eq!(ciphertext1, ciphertext2);
}
-
+
// ============= Zeroize Tests =============
-
+
#[test]
fn test_aes_cbc_zeroize_on_drop() {
let key = [0xAA; 32];
let iv = [0xBB; 16];
-
+
let cipher = AesCbc::new(key, iv);
// Verify key/iv are set
assert_eq!(cipher.key, [0xAA; 32]);
assert_eq!(cipher.iv, [0xBB; 16]);
-
+
drop(cipher);
// After drop, key/iv are zeroized (can't observe directly,
// but the Drop impl runs without panic)
}
-
+
// ============= Error Handling Tests =============
-
+
#[test]
fn test_invalid_key_length() {
let result = AesCtr::from_key_iv(&[0u8; 16], &[0u8; 16]);
assert!(result.is_err());
-
+
let result = AesCbc::from_slices(&[0u8; 16], &[0u8; 16]);
assert!(result.is_err());
}
-
+
#[test]
fn test_invalid_iv_length() {
let result = AesCtr::from_key_iv(&[0u8; 32], &[0u8; 8]);
assert!(result.is_err());
-
+
let result = AesCbc::from_slices(&[0u8; 32], &[0u8; 8]);
assert!(result.is_err());
}
-}
\ No newline at end of file
+}
diff --git a/src/crypto/hash.rs b/src/crypto/hash.rs
index fa3e441..9e1fa16 100644
--- a/src/crypto/hash.rs
+++ b/src/crypto/hash.rs
@@ -12,10 +12,10 @@
//! usages are intentional and protocol-mandated.
use hmac::{Hmac, Mac};
-use sha2::Sha256;
use md5::Md5;
use sha1::Sha1;
use sha2::Digest;
+use sha2::Sha256;
type HmacSha256 = Hmac<Sha256>;
@@ -28,8 +28,7 @@ pub fn sha256(data: &[u8]) -> [u8; 32] {
/// SHA-256 HMAC
pub fn sha256_hmac(key: &[u8], data: &[u8]) -> [u8; 32] {
- let mut mac = HmacSha256::new_from_slice(key)
- .expect("HMAC accepts any key length");
+ let mut mac = HmacSha256::new_from_slice(key).expect("HMAC accepts any key length");
mac.update(data);
mac.finalize().into_bytes().into()
}
@@ -124,27 +123,18 @@ pub fn derive_middleproxy_keys(
srv_ipv6: Option<&[u8; 16]>,
) -> ([u8; 32], [u8; 16]) {
let s = build_middleproxy_prekey(
- nonce_srv,
- nonce_clt,
- clt_ts,
- srv_ip,
- clt_port,
- purpose,
- clt_ip,
- srv_port,
- secret,
- clt_ipv6,
- srv_ipv6,
+ nonce_srv, nonce_clt, clt_ts, srv_ip, clt_port, purpose, clt_ip, srv_port, secret,
+ clt_ipv6, srv_ipv6,
);
let md5_1 = md5(&s[1..]);
let sha1_sum = sha1(&s);
let md5_2 = md5(&s[2..]);
-
+
let mut key = [0u8; 32];
key[..12].copy_from_slice(&md5_1[..12]);
key[12..].copy_from_slice(&sha1_sum);
-
+
(key, md5_2)
}
@@ -164,17 +154,8 @@ mod tests {
let secret = vec![0x55u8; 128];
let prekey = build_middleproxy_prekey(
- &nonce_srv,
- &nonce_clt,
- &clt_ts,
- srv_ip,
- &clt_port,
- b"CLIENT",
- clt_ip,
- &srv_port,
- &secret,
- None,
- None,
+ &nonce_srv, &nonce_clt, &clt_ts, srv_ip, &clt_port, b"CLIENT", clt_ip, &srv_port,
+ &secret, None, None,
);
let digest = sha256(&prekey);
assert_eq!(
diff --git a/src/crypto/mod.rs b/src/crypto/mod.rs
index 9108f34..cf2dcd2 100644
--- a/src/crypto/mod.rs
+++ b/src/crypto/mod.rs
@@ -4,7 +4,7 @@ pub mod aes;
pub mod hash;
pub mod random;
-pub use aes::{AesCtr, AesCbc};
+pub use aes::{AesCbc, AesCtr};
pub use hash::{
build_middleproxy_prekey, crc32, crc32c, derive_middleproxy_keys, sha256, sha256_hmac,
};
diff --git a/src/crypto/random.rs b/src/crypto/random.rs
index a88efc6..760f120 100644
--- a/src/crypto/random.rs
+++ b/src/crypto/random.rs
@@ -3,11 +3,11 @@
#![allow(deprecated)]
#![allow(dead_code)]
-use rand::{Rng, RngCore, SeedableRng};
-use rand::rngs::StdRng;
-use parking_lot::Mutex;
-use zeroize::Zeroize;
use crate::crypto::AesCtr;
+use parking_lot::Mutex;
+use rand::rngs::StdRng;
+use rand::{Rng, RngCore, SeedableRng};
+use zeroize::Zeroize;
/// Cryptographically secure PRNG with AES-CTR
pub struct SecureRandom {
@@ -34,16 +34,16 @@ impl SecureRandom {
pub fn new() -> Self {
let mut seed_source = rand::rng();
let mut rng = StdRng::from_rng(&mut seed_source);
-
+
let mut key = [0u8; 32];
rng.fill_bytes(&mut key);
let iv: u128 = rng.random();
-
+
let cipher = AesCtr::new(&key, iv);
-
+
// Zeroize local key copy — cipher already consumed it
key.zeroize();
-
+
Self {
inner: Mutex::new(SecureRandomInner {
rng,
@@ -53,7 +53,7 @@ impl SecureRandom {
}),
}
}
-
+
/// Fill a caller-provided buffer with random bytes.
pub fn fill(&self, out: &mut [u8]) {
let mut inner = self.inner.lock();
@@ -94,25 +94,25 @@ impl SecureRandom {
self.fill(&mut out);
out
}
-
+
/// Generate random number in range [0, max)
pub fn range(&self, max: usize) -> usize {
if max == 0 {
return 0;
}
let mut inner = self.inner.lock();
- inner.rng.gen_range(0..max)
+ inner.rng.random_range(0..max)
}
-
+
/// Generate random bits
pub fn bits(&self, k: usize) -> u64 {
if k == 0 {
return 0;
}
-
+
let bytes_needed = k.div_ceil(8);
let bytes = self.bytes(bytes_needed.min(8));
-
+
let mut result = 0u64;
for (i, &b) in bytes.iter().enumerate() {
if i >= 8 {
@@ -120,14 +120,14 @@ impl SecureRandom {
}
result |= (b as u64) << (i * 8);
}
-
+
if k < 64 {
result &= (1u64 << k) - 1;
}
-
+
result
}
-
+
/// Choose random element from slice
pub fn choose<'a, T>(&self, slice: &'a [T]) -> Option<&'a T> {
if slice.is_empty() {
@@ -136,22 +136,22 @@ impl SecureRandom {
Some(&slice[self.range(slice.len())])
}
}
-
+
/// Shuffle slice in place
 pub fn shuffle<T>(&self, slice: &mut [T]) {
let mut inner = self.inner.lock();
for i in (1..slice.len()).rev() {
- let j = inner.rng.gen_range(0..=i);
+ let j = inner.rng.random_range(0..=i);
slice.swap(i, j);
}
}
-
+
/// Generate random u32
pub fn u32(&self) -> u32 {
let mut inner = self.inner.lock();
inner.rng.random()
}
-
+
/// Generate random u64
pub fn u64(&self) -> u64 {
let mut inner = self.inner.lock();
@@ -169,7 +169,7 @@ impl Default for SecureRandom {
mod tests {
use super::*;
use std::collections::HashSet;
-
+
#[test]
fn test_bytes_uniqueness() {
let rng = SecureRandom::new();
@@ -177,7 +177,7 @@ mod tests {
let b = rng.bytes(32);
assert_ne!(a, b);
}
-
+
#[test]
fn test_bytes_length() {
let rng = SecureRandom::new();
@@ -186,63 +186,63 @@ mod tests {
assert_eq!(rng.bytes(100).len(), 100);
assert_eq!(rng.bytes(1000).len(), 1000);
}
-
+
#[test]
fn test_range() {
let rng = SecureRandom::new();
-
+
for _ in 0..1000 {
let n = rng.range(10);
assert!(n < 10);
}
-
+
assert_eq!(rng.range(1), 0);
assert_eq!(rng.range(0), 0);
}
-
+
#[test]
fn test_bits() {
let rng = SecureRandom::new();
-
+
for _ in 0..100 {
assert!(rng.bits(1) <= 1);
}
-
+
for _ in 0..100 {
assert!(rng.bits(8) <= 255);
}
}
-
+
#[test]
fn test_choose() {
let rng = SecureRandom::new();
let items = vec![1, 2, 3, 4, 5];
-
+
let mut seen = HashSet::new();
for _ in 0..1000 {
if let Some(&item) = rng.choose(&items) {
seen.insert(item);
}
}
-
+
assert_eq!(seen.len(), 5);
-
+
 let empty: Vec<i32> = vec![];
assert!(rng.choose(&empty).is_none());
}
-
+
#[test]
fn test_shuffle() {
let rng = SecureRandom::new();
let original = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
-
+
let mut shuffled = original.clone();
rng.shuffle(&mut shuffled);
-
+
let mut sorted = shuffled.clone();
sorted.sort();
assert_eq!(sorted, original);
-
+
assert_ne!(shuffled, original);
}
}
diff --git a/src/error.rs b/src/error.rs
index e4d66b9..d9aeb22 100644
--- a/src/error.rs
+++ b/src/error.rs
@@ -12,28 +12,15 @@ use thiserror::Error;
#[derive(Debug)]
pub enum StreamError {
/// Partial read: got fewer bytes than expected
- PartialRead {
- expected: usize,
- got: usize,
- },
+ PartialRead { expected: usize, got: usize },
/// Partial write: wrote fewer bytes than expected
- PartialWrite {
- expected: usize,
- written: usize,
- },
+ PartialWrite { expected: usize, written: usize },
/// Stream is in poisoned state and cannot be used
- Poisoned {
- reason: String,
- },
+ Poisoned { reason: String },
/// Buffer overflow: attempted to buffer more than allowed
- BufferOverflow {
- limit: usize,
- attempted: usize,
- },
+ BufferOverflow { limit: usize, attempted: usize },
/// Invalid frame format
- InvalidFrame {
- details: String,
- },
+ InvalidFrame { details: String },
/// Unexpected end of stream
UnexpectedEof,
/// Underlying I/O error
@@ -47,13 +34,21 @@ impl fmt::Display for StreamError {
write!(f, "partial read: expected {} bytes, got {}", expected, got)
}
Self::PartialWrite { expected, written } => {
- write!(f, "partial write: expected {} bytes, wrote {}", expected, written)
+ write!(
+ f,
+ "partial write: expected {} bytes, wrote {}",
+ expected, written
+ )
}
Self::Poisoned { reason } => {
write!(f, "stream poisoned: {}", reason)
}
Self::BufferOverflow { limit, attempted } => {
- write!(f, "buffer overflow: limit {}, attempted {}", limit, attempted)
+ write!(
+ f,
+ "buffer overflow: limit {}, attempted {}",
+ limit, attempted
+ )
}
Self::InvalidFrame { details } => {
write!(f, "invalid frame: {}", details)
@@ -90,9 +85,7 @@ impl From for std::io::Error {
StreamError::UnexpectedEof => {
std::io::Error::new(std::io::ErrorKind::UnexpectedEof, err)
}
- StreamError::Poisoned { .. } => {
- std::io::Error::other(err)
- }
+ StreamError::Poisoned { .. } => std::io::Error::other(err),
StreamError::BufferOverflow { .. } => {
std::io::Error::new(std::io::ErrorKind::OutOfMemory, err)
}
@@ -112,7 +105,7 @@ impl From for std::io::Error {
pub trait Recoverable {
/// Check if error is recoverable (can retry operation)
fn is_recoverable(&self) -> bool;
-
+
/// Check if connection can continue after this error
fn can_continue(&self) -> bool;
}
@@ -123,19 +116,22 @@ impl Recoverable for StreamError {
Self::PartialRead { .. } | Self::PartialWrite { .. } => true,
Self::Io(e) => matches!(
e.kind(),
- std::io::ErrorKind::WouldBlock
- | std::io::ErrorKind::Interrupted
- | std::io::ErrorKind::TimedOut
+ std::io::ErrorKind::WouldBlock
+ | std::io::ErrorKind::Interrupted
+ | std::io::ErrorKind::TimedOut
),
- Self::Poisoned { .. }
+ Self::Poisoned { .. }
| Self::BufferOverflow { .. }
| Self::InvalidFrame { .. }
| Self::UnexpectedEof => false,
}
}
-
+
fn can_continue(&self) -> bool {
- !matches!(self, Self::Poisoned { .. } | Self::UnexpectedEof | Self::BufferOverflow { .. })
+ !matches!(
+ self,
+ Self::Poisoned { .. } | Self::UnexpectedEof | Self::BufferOverflow { .. }
+ )
}
}
@@ -143,19 +139,19 @@ impl Recoverable for std::io::Error {
fn is_recoverable(&self) -> bool {
matches!(
self.kind(),
- std::io::ErrorKind::WouldBlock
- | std::io::ErrorKind::Interrupted
- | std::io::ErrorKind::TimedOut
+ std::io::ErrorKind::WouldBlock
+ | std::io::ErrorKind::Interrupted
+ | std::io::ErrorKind::TimedOut
)
}
-
+
fn can_continue(&self) -> bool {
!matches!(
self.kind(),
std::io::ErrorKind::BrokenPipe
- | std::io::ErrorKind::ConnectionReset
- | std::io::ErrorKind::ConnectionAborted
- | std::io::ErrorKind::NotConnected
+ | std::io::ErrorKind::ConnectionReset
+ | std::io::ErrorKind::ConnectionAborted
+ | std::io::ErrorKind::NotConnected
)
}
}
@@ -165,96 +161,88 @@ impl Recoverable for std::io::Error {
#[derive(Error, Debug)]
pub enum ProxyError {
// ============= Crypto Errors =============
-
#[error("Crypto error: {0}")]
Crypto(String),
-
+
#[error("Invalid key length: expected {expected}, got {got}")]
InvalidKeyLength { expected: usize, got: usize },
-
+
// ============= Stream Errors =============
-
#[error("Stream error: {0}")]
Stream(#[from] StreamError),
-
+
// ============= Protocol Errors =============
-
#[error("Invalid handshake: {0}")]
InvalidHandshake(String),
-
+
#[error("Invalid protocol tag: {0:02x?}")]
InvalidProtoTag([u8; 4]),
-
+
#[error("Invalid TLS record: type={record_type}, version={version:02x?}")]
InvalidTlsRecord { record_type: u8, version: [u8; 2] },
-
+
#[error("Replay attack detected from {addr}")]
ReplayAttack { addr: SocketAddr },
-
+
#[error("Time skew detected: client={client_time}, server={server_time}")]
TimeSkew { client_time: u32, server_time: u32 },
-
+
#[error("Invalid message length: {len} (min={min}, max={max})")]
InvalidMessageLength { len: usize, min: usize, max: usize },
-
+
#[error("Checksum mismatch: expected={expected:08x}, got={got:08x}")]
ChecksumMismatch { expected: u32, got: u32 },
-
+
#[error("Sequence number mismatch: expected={expected}, got={got}")]
SeqNoMismatch { expected: i32, got: i32 },
-
+
#[error("TLS handshake failed: {reason}")]
TlsHandshakeFailed { reason: String },
-
+
#[error("Telegram handshake timeout")]
TgHandshakeTimeout,
-
+
// ============= Network Errors =============
-
#[error("Connection timeout to {addr}")]
ConnectionTimeout { addr: String },
-
+
#[error("Connection refused by {addr}")]
ConnectionRefused { addr: String },
-
+
#[error("IO error: {0}")]
Io(#[from] std::io::Error),
-
+
// ============= Proxy Protocol Errors =============
-
#[error("Invalid proxy protocol header")]
InvalidProxyProtocol,
-
+
#[error("Proxy error: {0}")]
Proxy(String),
-
+
// ============= Config Errors =============
-
#[error("Config error: {0}")]
Config(String),
-
+
#[error("Invalid secret for user {user}: {reason}")]
InvalidSecret { user: String, reason: String },
-
+
// ============= User Errors =============
-
#[error("User {user} expired")]
UserExpired { user: String },
-
+
#[error("User {user} exceeded connection limit")]
ConnectionLimitExceeded { user: String },
-
+
#[error("User {user} exceeded data quota")]
DataQuotaExceeded { user: String },
-
+
#[error("Unknown user")]
UnknownUser,
-
+
#[error("Rate limited")]
RateLimited,
-
+
// ============= General Errors =============
-
#[error("Internal error: {0}")]
Internal(String),
}
@@ -269,7 +257,7 @@ impl Recoverable for ProxyError {
_ => false,
}
}
-
+
fn can_continue(&self) -> bool {
match self {
Self::Stream(e) => e.can_continue(),
@@ -301,17 +289,19 @@ impl HandshakeResult {
pub fn is_success(&self) -> bool {
matches!(self, HandshakeResult::Success(_))
}
-
+
/// Check if bad client
pub fn is_bad_client(&self) -> bool {
matches!(self, HandshakeResult::BadClient { .. })
}
-
+
/// Map the success value
pub fn map U>(self, f: F) -> HandshakeResult {
match self {
HandshakeResult::Success(v) => HandshakeResult::Success(f(v)),
- HandshakeResult::BadClient { reader, writer } => HandshakeResult::BadClient { reader, writer },
+ HandshakeResult::BadClient { reader, writer } => {
+ HandshakeResult::BadClient { reader, writer }
+ }
HandshakeResult::Error(e) => HandshakeResult::Error(e),
}
}
@@ -338,76 +328,104 @@ impl From for HandshakeResult {
#[cfg(test)]
mod tests {
use super::*;
-
+
#[test]
fn test_stream_error_display() {
- let err = StreamError::PartialRead { expected: 100, got: 50 };
+ let err = StreamError::PartialRead {
+ expected: 100,
+ got: 50,
+ };
assert!(err.to_string().contains("100"));
assert!(err.to_string().contains("50"));
-
- let err = StreamError::Poisoned { reason: "test".into() };
+
+ let err = StreamError::Poisoned {
+ reason: "test".into(),
+ };
assert!(err.to_string().contains("test"));
}
-
+
#[test]
fn test_stream_error_recoverable() {
- assert!(StreamError::PartialRead { expected: 10, got: 5 }.is_recoverable());
- assert!(StreamError::PartialWrite { expected: 10, written: 5 }.is_recoverable());
+ assert!(
+ StreamError::PartialRead {
+ expected: 10,
+ got: 5
+ }
+ .is_recoverable()
+ );
+ assert!(
+ StreamError::PartialWrite {
+ expected: 10,
+ written: 5
+ }
+ .is_recoverable()
+ );
assert!(!StreamError::Poisoned { reason: "x".into() }.is_recoverable());
assert!(!StreamError::UnexpectedEof.is_recoverable());
}
-
+
#[test]
fn test_stream_error_can_continue() {
assert!(!StreamError::Poisoned { reason: "x".into() }.can_continue());
assert!(!StreamError::UnexpectedEof.can_continue());
- assert!(StreamError::PartialRead { expected: 10, got: 5 }.can_continue());
+ assert!(
+ StreamError::PartialRead {
+ expected: 10,
+ got: 5
+ }
+ .can_continue()
+ );
}
-
+
#[test]
fn test_stream_error_to_io_error() {
let stream_err = StreamError::UnexpectedEof;
let io_err: std::io::Error = stream_err.into();
assert_eq!(io_err.kind(), std::io::ErrorKind::UnexpectedEof);
}
-
+
#[test]
fn test_handshake_result() {
let success: HandshakeResult = HandshakeResult::Success(42);
assert!(success.is_success());
assert!(!success.is_bad_client());
-
- let bad: HandshakeResult = HandshakeResult::BadClient { reader: (), writer: () };
+
+ let bad: HandshakeResult = HandshakeResult::BadClient {
+ reader: (),
+ writer: (),
+ };
assert!(!bad.is_success());
assert!(bad.is_bad_client());
}
-
+
#[test]
fn test_handshake_result_map() {
let success: HandshakeResult = HandshakeResult::Success(42);
let mapped = success.map(|x| x * 2);
-
+
match mapped {
HandshakeResult::Success(v) => assert_eq!(v, 84),
_ => panic!("Expected success"),
}
}
-
+
#[test]
fn test_proxy_error_recoverable() {
let err = ProxyError::RateLimited;
assert!(err.is_recoverable());
-
+
let err = ProxyError::InvalidHandshake("bad".into());
assert!(!err.is_recoverable());
}
-
+
#[test]
fn test_error_display() {
- let err = ProxyError::ConnectionTimeout { addr: "1.2.3.4:443".into() };
+ let err = ProxyError::ConnectionTimeout {
+ addr: "1.2.3.4:443".into(),
+ };
assert!(err.to_string().contains("1.2.3.4:443"));
-
+
let err = ProxyError::InvalidProxyProtocol;
assert!(err.to_string().contains("proxy protocol"));
}
-}
\ No newline at end of file
+}
diff --git a/src/ip_tracker.rs b/src/ip_tracker.rs
index fce20b6..76ea424 100644
--- a/src/ip_tracker.rs
+++ b/src/ip_tracker.rs
@@ -5,10 +5,11 @@
use std::collections::HashMap;
use std::net::IpAddr;
use std::sync::Arc;
+use std::sync::Mutex;
use std::sync::atomic::{AtomicU64, Ordering};
use std::time::{Duration, Instant};
-use tokio::sync::RwLock;
+use tokio::sync::{Mutex as AsyncMutex, RwLock};
use crate::config::UserMaxUniqueIpsMode;
@@ -21,6 +22,8 @@ pub struct UserIpTracker {
limit_mode: Arc>,
limit_window: Arc>,
last_compact_epoch_secs: Arc,
+ cleanup_queue: Arc>>,
+ cleanup_drain_lock: Arc>,
}
impl UserIpTracker {
@@ -33,6 +36,79 @@ impl UserIpTracker {
limit_mode: Arc::new(RwLock::new(UserMaxUniqueIpsMode::ActiveWindow)),
limit_window: Arc::new(RwLock::new(Duration::from_secs(30))),
last_compact_epoch_secs: Arc::new(AtomicU64::new(0)),
+ cleanup_queue: Arc::new(Mutex::new(Vec::new())),
+ cleanup_drain_lock: Arc::new(AsyncMutex::new(())),
+ }
+ }
+
+ pub fn enqueue_cleanup(&self, user: String, ip: IpAddr) {
+ match self.cleanup_queue.lock() {
+ Ok(mut queue) => queue.push((user, ip)),
+ Err(poisoned) => {
+ let mut queue = poisoned.into_inner();
+ queue.push((user.clone(), ip));
+ self.cleanup_queue.clear_poison();
+ tracing::warn!(
+ "UserIpTracker cleanup_queue lock poisoned; recovered and enqueued IP cleanup for {} ({})",
+ user,
+ ip
+ );
+ }
+ }
+ }
+
+ #[cfg(test)]
+ pub(crate) fn cleanup_queue_len_for_tests(&self) -> usize {
+ self.cleanup_queue
+ .lock()
+ .unwrap_or_else(|poisoned| poisoned.into_inner())
+ .len()
+ }
+
+ #[cfg(test)]
+ pub(crate) fn cleanup_queue_mutex_for_tests(&self) -> Arc>> {
+ Arc::clone(&self.cleanup_queue)
+ }
+
+ pub(crate) async fn drain_cleanup_queue(&self) {
+ // Serialize queue draining and active-IP mutation so check-and-add cannot
+ // observe stale active entries that are already queued for removal.
+ let _drain_guard = self.cleanup_drain_lock.lock().await;
+ let to_remove = {
+ match self.cleanup_queue.lock() {
+ Ok(mut queue) => {
+ if queue.is_empty() {
+ return;
+ }
+ std::mem::take(&mut *queue)
+ }
+ Err(poisoned) => {
+ let mut queue = poisoned.into_inner();
+ if queue.is_empty() {
+ self.cleanup_queue.clear_poison();
+ return;
+ }
+ let drained = std::mem::take(&mut *queue);
+ self.cleanup_queue.clear_poison();
+ drained
+ }
+ }
+ };
+
+ let mut active_ips = self.active_ips.write().await;
+ for (user, ip) in to_remove {
+ if let Some(user_ips) = active_ips.get_mut(&user) {
+ if let Some(count) = user_ips.get_mut(&ip) {
+ if *count > 1 {
+ *count -= 1;
+ } else {
+ user_ips.remove(&ip);
+ }
+ }
+ if user_ips.is_empty() {
+ active_ips.remove(&user);
+ }
+ }
}
}
@@ -65,7 +141,8 @@ impl UserIpTracker {
let mut active_ips = self.active_ips.write().await;
let mut recent_ips = self.recent_ips.write().await;
- let mut users = Vec::::with_capacity(active_ips.len().saturating_add(recent_ips.len()));
+ let mut users =
+ Vec::::with_capacity(active_ips.len().saturating_add(recent_ips.len()));
users.extend(active_ips.keys().cloned());
for user in recent_ips.keys() {
if !active_ips.contains_key(user) {
@@ -74,8 +151,14 @@ impl UserIpTracker {
}
for user in users {
- let active_empty = active_ips.get(&user).map(|ips| ips.is_empty()).unwrap_or(true);
- let recent_empty = recent_ips.get(&user).map(|ips| ips.is_empty()).unwrap_or(true);
+ let active_empty = active_ips
+ .get(&user)
+ .map(|ips| ips.is_empty())
+ .unwrap_or(true);
+ let recent_empty = recent_ips
+ .get(&user)
+ .map(|ips| ips.is_empty())
+ .unwrap_or(true);
if active_empty && recent_empty {
active_ips.remove(&user);
recent_ips.remove(&user);
@@ -118,6 +201,7 @@ impl UserIpTracker {
}
pub async fn check_and_add(&self, username: &str, ip: IpAddr) -> Result<(), String> {
+ self.drain_cleanup_queue().await;
self.maybe_compact_empty_users().await;
let default_max_ips = *self.default_max_ips.read().await;
let limit = {
@@ -194,6 +278,7 @@ impl UserIpTracker {
}
pub async fn get_recent_counts_for_users(&self, users: &[String]) -> HashMap {
+ self.drain_cleanup_queue().await;
let window = *self.limit_window.read().await;
let now = Instant::now();
let recent_ips = self.recent_ips.read().await;
@@ -214,6 +299,7 @@ impl UserIpTracker {
}
pub async fn get_active_ips_for_users(&self, users: &[String]) -> HashMap> {
+ self.drain_cleanup_queue().await;
let active_ips = self.active_ips.read().await;
let mut out = HashMap::with_capacity(users.len());
for user in users {
@@ -228,6 +314,7 @@ impl UserIpTracker {
}
pub async fn get_recent_ips_for_users(&self, users: &[String]) -> HashMap> {
+ self.drain_cleanup_queue().await;
let window = *self.limit_window.read().await;
let now = Instant::now();
let recent_ips = self.recent_ips.read().await;
@@ -250,11 +337,13 @@ impl UserIpTracker {
}
pub async fn get_active_ip_count(&self, username: &str) -> usize {
+ self.drain_cleanup_queue().await;
let active_ips = self.active_ips.read().await;
active_ips.get(username).map(|ips| ips.len()).unwrap_or(0)
}
pub async fn get_active_ips(&self, username: &str) -> Vec {
+ self.drain_cleanup_queue().await;
let active_ips = self.active_ips.read().await;
active_ips
.get(username)
@@ -263,6 +352,7 @@ impl UserIpTracker {
}
pub async fn get_stats(&self) -> Vec<(String, usize, usize)> {
+ self.drain_cleanup_queue().await;
let active_ips = self.active_ips.read().await;
let max_ips = self.max_ips.read().await;
let default_max_ips = *self.default_max_ips.read().await;
@@ -301,6 +391,7 @@ impl UserIpTracker {
}
pub async fn is_ip_active(&self, username: &str, ip: IpAddr) -> bool {
+ self.drain_cleanup_queue().await;
let active_ips = self.active_ips.read().await;
active_ips
.get(username)
diff --git a/src/maestro/connectivity.rs b/src/maestro/connectivity.rs
index c843223..0cb561d 100644
--- a/src/maestro/connectivity.rs
+++ b/src/maestro/connectivity.rs
@@ -1,3 +1,5 @@
+#![allow(clippy::too_many_arguments)]
+
use std::sync::Arc;
use std::time::Instant;
@@ -11,10 +13,10 @@ use crate::startup::{
COMPONENT_DC_CONNECTIVITY_PING, COMPONENT_ME_CONNECTIVITY_PING, COMPONENT_RUNTIME_READY,
StartupTracker,
};
+use crate::transport::UpstreamManager;
use crate::transport::middle_proxy::{
MePingFamily, MePingSample, MePool, format_me_route, format_sample_line, run_me_ping,
};
-use crate::transport::UpstreamManager;
pub(crate) async fn run_startup_connectivity(
config: &Arc,
@@ -47,11 +49,15 @@ pub(crate) async fn run_startup_connectivity(
let v4_ok = me_results.iter().any(|r| {
matches!(r.family, MePingFamily::V4)
- && r.samples.iter().any(|s| s.error.is_none() && s.handshake_ms.is_some())
+ && r.samples
+ .iter()
+ .any(|s| s.error.is_none() && s.handshake_ms.is_some())
});
let v6_ok = me_results.iter().any(|r| {
matches!(r.family, MePingFamily::V6)
- && r.samples.iter().any(|s| s.error.is_none() && s.handshake_ms.is_some())
+ && r.samples
+ .iter()
+ .any(|s| s.error.is_none() && s.handshake_ms.is_some())
});
info!("================= Telegram ME Connectivity =================");
@@ -131,8 +137,14 @@ pub(crate) async fn run_startup_connectivity(
.await;
for upstream_result in &ping_results {
- let v6_works = upstream_result.v6_results.iter().any(|r| r.rtt_ms.is_some());
- let v4_works = upstream_result.v4_results.iter().any(|r| r.rtt_ms.is_some());
+ let v6_works = upstream_result
+ .v6_results
+ .iter()
+ .any(|r| r.rtt_ms.is_some());
+ let v4_works = upstream_result
+ .v4_results
+ .iter()
+ .any(|r| r.rtt_ms.is_some());
if upstream_result.both_available {
if prefer_ipv6 {
diff --git a/src/maestro/helpers.rs b/src/maestro/helpers.rs
index f43e308..35f796f 100644
--- a/src/maestro/helpers.rs
+++ b/src/maestro/helpers.rs
@@ -1,5 +1,7 @@
-use std::time::Duration;
+#![allow(clippy::items_after_test_module)]
+
use std::path::PathBuf;
+use std::time::Duration;
use tokio::sync::watch;
use tracing::{debug, error, info, warn};
@@ -10,6 +12,19 @@ use crate::transport::middle_proxy::{
ProxyConfigData, fetch_proxy_config_with_raw, load_proxy_config_cache, save_proxy_config_cache,
};
+pub(crate) fn resolve_runtime_config_path(
+ config_path_cli: &str,
+ startup_cwd: &std::path::Path,
+) -> PathBuf {
+ let raw = PathBuf::from(config_path_cli);
+ let absolute = if raw.is_absolute() {
+ raw
+ } else {
+ startup_cwd.join(raw)
+ };
+ absolute.canonicalize().unwrap_or(absolute)
+}
+
pub(crate) fn parse_cli() -> (String, Option, bool, Option) {
let mut config_path = "config.toml".to_string();
let mut data_path: Option = None;
@@ -40,7 +55,9 @@ pub(crate) fn parse_cli() -> (String, Option, bool, Option) {
}
}
s if s.starts_with("--data-path=") => {
- data_path = Some(PathBuf::from(s.trim_start_matches("--data-path=").to_string()));
+ data_path = Some(PathBuf::from(
+ s.trim_start_matches("--data-path=").to_string(),
+ ));
}
"--silent" | "-s" => {
silent = true;
@@ -58,7 +75,9 @@ pub(crate) fn parse_cli() -> (String, Option, bool, Option) {
eprintln!("Usage: telemt [config.toml] [OPTIONS]");
eprintln!();
eprintln!("Options:");
- eprintln!(" --data-path Set data directory (absolute path; overrides config value)");
+ eprintln!(
+ " --data-path Set data directory (absolute path; overrides config value)"
+ );
eprintln!(" --silent, -s Suppress info logs");
eprintln!(" --log-level debug|verbose|normal|silent");
eprintln!(" --help, -h Show this help");
@@ -96,9 +115,52 @@ pub(crate) fn parse_cli() -> (String, Option, bool, Option) {
(config_path, data_path, silent, log_level)
}
+#[cfg(test)]
+mod tests {
+ use super::resolve_runtime_config_path;
+
+ #[test]
+ fn resolve_runtime_config_path_anchors_relative_to_startup_cwd() {
+ let nonce = std::time::SystemTime::now()
+ .duration_since(std::time::UNIX_EPOCH)
+ .unwrap()
+ .as_nanos();
+ let startup_cwd = std::env::temp_dir().join(format!("telemt_cfg_path_{nonce}"));
+ std::fs::create_dir_all(&startup_cwd).unwrap();
+ let target = startup_cwd.join("config.toml");
+ std::fs::write(&target, " ").unwrap();
+
+ let resolved = resolve_runtime_config_path("config.toml", &startup_cwd);
+ assert_eq!(resolved, target.canonicalize().unwrap());
+
+ let _ = std::fs::remove_file(&target);
+ let _ = std::fs::remove_dir(&startup_cwd);
+ }
+
+ #[test]
+ fn resolve_runtime_config_path_keeps_absolute_for_missing_file() {
+ let nonce = std::time::SystemTime::now()
+ .duration_since(std::time::UNIX_EPOCH)
+ .unwrap()
+ .as_nanos();
+ let startup_cwd = std::env::temp_dir().join(format!("telemt_cfg_path_missing_{nonce}"));
+ std::fs::create_dir_all(&startup_cwd).unwrap();
+
+ let resolved = resolve_runtime_config_path("missing.toml", &startup_cwd);
+ assert_eq!(resolved, startup_cwd.join("missing.toml"));
+
+ let _ = std::fs::remove_dir(&startup_cwd);
+ }
+}
+
pub(crate) fn print_proxy_links(host: &str, port: u16, config: &ProxyConfig) {
info!(target: "telemt::links", "--- Proxy Links ({}) ---", host);
- for user_name in config.general.links.show.resolve_users(&config.access.users) {
+ for user_name in config
+ .general
+ .links
+ .show
+ .resolve_users(&config.access.users)
+ {
if let Some(secret) = config.access.users.get(user_name) {
info!(target: "telemt::links", "User: {}", user_name);
if config.general.modes.classic {
@@ -239,7 +301,10 @@ pub(crate) async fn load_startup_proxy_config_snapshot(
return Some(cfg);
}
- warn!(snapshot = label, url, "Startup proxy-config is empty; trying disk cache");
+ warn!(
+ snapshot = label,
+ url, "Startup proxy-config is empty; trying disk cache"
+ );
if let Some(path) = cache_path {
match load_proxy_config_cache(path).await {
Ok(cached) if !cached.map.is_empty() => {
@@ -254,8 +319,7 @@ pub(crate) async fn load_startup_proxy_config_snapshot(
Ok(_) => {
warn!(
snapshot = label,
- path,
- "Startup proxy-config cache is empty; ignoring cache file"
+ path, "Startup proxy-config cache is empty; ignoring cache file"
);
}
Err(cache_err) => {
@@ -299,8 +363,7 @@ pub(crate) async fn load_startup_proxy_config_snapshot(
Ok(_) => {
warn!(
snapshot = label,
- path,
- "Startup proxy-config cache is empty; ignoring cache file"
+ path, "Startup proxy-config cache is empty; ignoring cache file"
);
}
Err(cache_err) => {
diff --git a/src/maestro/listeners.rs b/src/maestro/listeners.rs
index fe041d9..effaff8 100644
--- a/src/maestro/listeners.rs
+++ b/src/maestro/listeners.rs
@@ -12,17 +12,15 @@ use tracing::{debug, error, info, warn};
use crate::config::ProxyConfig;
use crate::crypto::SecureRandom;
use crate::ip_tracker::UserIpTracker;
-use crate::proxy::route_mode::{ROUTE_SWITCH_ERROR_MSG, RouteRuntimeController};
use crate::proxy::ClientHandler;
+use crate::proxy::route_mode::{ROUTE_SWITCH_ERROR_MSG, RouteRuntimeController};
use crate::startup::{COMPONENT_LISTENERS_BIND, StartupTracker};
use crate::stats::beobachten::BeobachtenStore;
use crate::stats::{ReplayChecker, Stats};
use crate::stream::BufferPool;
use crate::tls_front::TlsFrontCache;
use crate::transport::middle_proxy::MePool;
-use crate::transport::{
- ListenOptions, UpstreamManager, create_listener, find_listener_processes,
-};
+use crate::transport::{ListenOptions, UpstreamManager, create_listener, find_listener_processes};
use super::helpers::{is_expected_handshake_eof, print_proxy_links};
@@ -81,8 +79,9 @@ pub(crate) async fn bind_listeners(
Ok(socket) => {
let listener = TcpListener::from_std(socket.into())?;
info!("Listening on {}", addr);
- let listener_proxy_protocol =
- listener_conf.proxy_protocol.unwrap_or(config.server.proxy_protocol);
+ let listener_proxy_protocol = listener_conf
+ .proxy_protocol
+ .unwrap_or(config.server.proxy_protocol);
let public_host = if let Some(ref announce) = listener_conf.announce {
announce.clone()
@@ -100,8 +99,14 @@ pub(crate) async fn bind_listeners(
listener_conf.ip.to_string()
};
- if config.general.links.public_host.is_none() && !config.general.links.show.is_empty() {
- let link_port = config.general.links.public_port.unwrap_or(config.server.port);
+ if config.general.links.public_host.is_none()
+ && !config.general.links.show.is_empty()
+ {
+ let link_port = config
+ .general
+ .links
+ .public_port
+ .unwrap_or(config.server.port);
print_proxy_links(&public_host, link_port, config);
}
@@ -145,12 +150,14 @@ pub(crate) async fn bind_listeners(
let (host, port) = if let Some(ref h) = config.general.links.public_host {
(
h.clone(),
- config.general.links.public_port.unwrap_or(config.server.port),
+ config
+ .general
+ .links
+ .public_port
+ .unwrap_or(config.server.port),
)
} else {
- let ip = detected_ip_v4
- .or(detected_ip_v6)
- .map(|ip| ip.to_string());
+ let ip = detected_ip_v4.or(detected_ip_v6).map(|ip| ip.to_string());
if ip.is_none() {
warn!(
"show_link is configured but public IP could not be detected. Set public_host in config."
@@ -158,7 +165,11 @@ pub(crate) async fn bind_listeners(
}
(
ip.unwrap_or_else(|| "UNKNOWN".to_string()),
- config.general.links.public_port.unwrap_or(config.server.port),
+ config
+ .general
+ .links
+ .public_port
+ .unwrap_or(config.server.port),
)
};
@@ -178,13 +189,19 @@ pub(crate) async fn bind_listeners(
use std::os::unix::fs::PermissionsExt;
let perms = std::fs::Permissions::from_mode(mode);
if let Err(e) = std::fs::set_permissions(unix_path, perms) {
- error!("Failed to set unix socket permissions to {}: {}", perm_str, e);
+ error!(
+ "Failed to set unix socket permissions to {}: {}",
+ perm_str, e
+ );
} else {
info!("Listening on unix:{} (mode {})", unix_path, perm_str);
}
}
Err(e) => {
- warn!("Invalid listen_unix_sock_perm '{}': {}. Ignoring.", perm_str, e);
+ warn!(
+ "Invalid listen_unix_sock_perm '{}': {}. Ignoring.",
+ perm_str, e
+ );
info!("Listening on unix:{}", unix_path);
}
}
@@ -218,10 +235,8 @@ pub(crate) async fn bind_listeners(
drop(stream);
continue;
}
- let accept_permit_timeout_ms = config_rx_unix
- .borrow()
- .server
- .accept_permit_timeout_ms;
+ let accept_permit_timeout_ms =
+ config_rx_unix.borrow().server.accept_permit_timeout_ms;
let permit = if accept_permit_timeout_ms == 0 {
match max_connections_unix.clone().acquire_owned().await {
Ok(permit) => permit,
@@ -361,10 +376,8 @@ pub(crate) fn spawn_tcp_accept_loops(
drop(stream);
continue;
}
- let accept_permit_timeout_ms = config_rx
- .borrow()
- .server
- .accept_permit_timeout_ms;
+ let accept_permit_timeout_ms =
+ config_rx.borrow().server.accept_permit_timeout_ms;
let permit = if accept_permit_timeout_ms == 0 {
match max_connections_tcp.clone().acquire_owned().await {
Ok(permit) => permit,
diff --git a/src/maestro/me_startup.rs b/src/maestro/me_startup.rs
index eb45cc4..022f8ae 100644
--- a/src/maestro/me_startup.rs
+++ b/src/maestro/me_startup.rs
@@ -1,3 +1,5 @@
+#![allow(clippy::too_many_arguments)]
+
use std::sync::Arc;
use std::time::Duration;
@@ -12,8 +14,8 @@ use crate::startup::{
COMPONENT_ME_PROXY_CONFIG_V6, COMPONENT_ME_SECRET_FETCH, StartupMeStatus, StartupTracker,
};
use crate::stats::Stats;
-use crate::transport::middle_proxy::MePool;
use crate::transport::UpstreamManager;
+use crate::transport::middle_proxy::MePool;
use super::helpers::load_startup_proxy_config_snapshot;
@@ -229,8 +231,12 @@ pub(crate) async fn initialize_me_pool(
config.general.me_adaptive_floor_recover_grace_secs,
config.general.me_adaptive_floor_writers_per_core_total,
config.general.me_adaptive_floor_cpu_cores_override,
- config.general.me_adaptive_floor_max_extra_writers_single_per_core,
- config.general.me_adaptive_floor_max_extra_writers_multi_per_core,
+ config
+ .general
+ .me_adaptive_floor_max_extra_writers_single_per_core,
+ config
+ .general
+ .me_adaptive_floor_max_extra_writers_multi_per_core,
config.general.me_adaptive_floor_max_active_writers_per_core,
config.general.me_adaptive_floor_max_warm_writers_per_core,
config.general.me_adaptive_floor_max_active_writers_global,
@@ -268,8 +274,6 @@ pub(crate) async fn initialize_me_pool(
config.general.me_warn_rate_limit_ms,
config.general.me_route_no_writer_mode,
config.general.me_route_no_writer_wait_ms,
- config.general.me_route_hybrid_max_wait_ms,
- config.general.me_route_blocking_send_timeout_ms,
config.general.me_route_inline_recovery_attempts,
config.general.me_route_inline_recovery_wait_ms,
);
@@ -459,65 +463,71 @@ pub(crate) async fn initialize_me_pool(
"Middle-End pool initialized successfully"
);
- // ── Supervised background tasks ──────────────────
- let pool_clone = pool.clone();
- let rng_clone = rng.clone();
- let min_conns = pool_size;
- tokio::spawn(async move {
- loop {
- let p = pool_clone.clone();
- let r = rng_clone.clone();
- let res = tokio::spawn(async move {
- crate::transport::middle_proxy::me_health_monitor(
- p, r, min_conns,
- )
- .await;
- })
+ // ── Supervised background tasks ──────────────────
+ let pool_clone = pool.clone();
+ let rng_clone = rng.clone();
+ let min_conns = pool_size;
+ tokio::spawn(async move {
+ loop {
+ let p = pool_clone.clone();
+ let r = rng_clone.clone();
+ let res = tokio::spawn(async move {
+ crate::transport::middle_proxy::me_health_monitor(
+ p, r, min_conns,
+ )
.await;
- match res {
- Ok(()) => warn!("me_health_monitor exited unexpectedly, restarting"),
- Err(e) => {
- error!(error = %e, "me_health_monitor panicked, restarting in 1s");
- tokio::time::sleep(Duration::from_secs(1)).await;
- }
+ })
+ .await;
+ match res {
+ Ok(()) => warn!(
+ "me_health_monitor exited unexpectedly, restarting"
+ ),
+ Err(e) => {
+ error!(error = %e, "me_health_monitor panicked, restarting in 1s");
+ tokio::time::sleep(Duration::from_secs(1)).await;
}
}
- });
- let pool_drain_enforcer = pool.clone();
- tokio::spawn(async move {
- loop {
- let p = pool_drain_enforcer.clone();
- let res = tokio::spawn(async move {
+ }
+ });
+ let pool_drain_enforcer = pool.clone();
+ tokio::spawn(async move {
+ loop {
+ let p = pool_drain_enforcer.clone();
+ let res = tokio::spawn(async move {
crate::transport::middle_proxy::me_drain_timeout_enforcer(p).await;
})
.await;
- match res {
- Ok(()) => warn!("me_drain_timeout_enforcer exited unexpectedly, restarting"),
- Err(e) => {
- error!(error = %e, "me_drain_timeout_enforcer panicked, restarting in 1s");
- tokio::time::sleep(Duration::from_secs(1)).await;
- }
+ match res {
+ Ok(()) => warn!(
+ "me_drain_timeout_enforcer exited unexpectedly, restarting"
+ ),
+ Err(e) => {
+ error!(error = %e, "me_drain_timeout_enforcer panicked, restarting in 1s");
+ tokio::time::sleep(Duration::from_secs(1)).await;
}
}
- });
- let pool_watchdog = pool.clone();
- tokio::spawn(async move {
- loop {
- let p = pool_watchdog.clone();
- let res = tokio::spawn(async move {
+ }
+ });
+ let pool_watchdog = pool.clone();
+ tokio::spawn(async move {
+ loop {
+ let p = pool_watchdog.clone();
+ let res = tokio::spawn(async move {
crate::transport::middle_proxy::me_zombie_writer_watchdog(p).await;
})
.await;
- match res {
- Ok(()) => warn!("me_zombie_writer_watchdog exited unexpectedly, restarting"),
- Err(e) => {
- error!(error = %e, "me_zombie_writer_watchdog panicked, restarting in 1s");
- tokio::time::sleep(Duration::from_secs(1)).await;
- }
+ match res {
+ Ok(()) => warn!(
+ "me_zombie_writer_watchdog exited unexpectedly, restarting"
+ ),
+ Err(e) => {
+ error!(error = %e, "me_zombie_writer_watchdog panicked, restarting in 1s");
+ tokio::time::sleep(Duration::from_secs(1)).await;
}
}
- });
-
+ }
+ });
+
break Some(pool);
}
Err(e) => {
diff --git a/src/maestro/mod.rs b/src/maestro/mod.rs
index dce421c..7d3b168 100644
--- a/src/maestro/mod.rs
+++ b/src/maestro/mod.rs
@@ -11,9 +11,9 @@
// - admission: conditional-cast gate and route mode switching.
// - listeners: TCP/Unix listener bind and accept-loop orchestration.
// - shutdown: graceful shutdown sequence and uptime logging.
-mod helpers;
mod admission;
mod connectivity;
+mod helpers;
mod listeners;
mod me_startup;
mod runtime_tasks;
@@ -33,19 +33,19 @@ use crate::crypto::SecureRandom;
use crate::ip_tracker::UserIpTracker;
use crate::network::probe::{decide_network_capabilities, log_probe_result, run_probe};
use crate::proxy::route_mode::{RelayRouteMode, RouteRuntimeController};
+use crate::startup::{
+ COMPONENT_API_BOOTSTRAP, COMPONENT_CONFIG_LOAD, COMPONENT_ME_POOL_CONSTRUCT,
+ COMPONENT_ME_POOL_INIT_STAGE1, COMPONENT_ME_PROXY_CONFIG_V4, COMPONENT_ME_PROXY_CONFIG_V6,
+ COMPONENT_ME_SECRET_FETCH, COMPONENT_NETWORK_PROBE, COMPONENT_TRACING_INIT, StartupMeStatus,
+ StartupTracker,
+};
use crate::stats::beobachten::BeobachtenStore;
use crate::stats::telemetry::TelemetryPolicy;
use crate::stats::{ReplayChecker, Stats};
-use crate::startup::{
- COMPONENT_API_BOOTSTRAP, COMPONENT_CONFIG_LOAD,
- COMPONENT_ME_POOL_CONSTRUCT, COMPONENT_ME_POOL_INIT_STAGE1,
- COMPONENT_ME_PROXY_CONFIG_V4, COMPONENT_ME_PROXY_CONFIG_V6, COMPONENT_ME_SECRET_FETCH,
- COMPONENT_NETWORK_PROBE, COMPONENT_TRACING_INIT, StartupMeStatus, StartupTracker,
-};
use crate::stream::BufferPool;
-use crate::transport::middle_proxy::MePool;
use crate::transport::UpstreamManager;
-use helpers::parse_cli;
+use crate::transport::middle_proxy::MePool;
+use helpers::{parse_cli, resolve_runtime_config_path};
/// Runs the full telemt runtime startup pipeline and blocks until shutdown.
pub async fn run() -> std::result::Result<(), Box> {
@@ -56,20 +56,34 @@ pub async fn run() -> std::result::Result<(), Box> {
.as_secs();
let startup_tracker = Arc::new(StartupTracker::new(process_started_at_epoch_secs));
startup_tracker
- .start_component(COMPONENT_CONFIG_LOAD, Some("load and validate config".to_string()))
+ .start_component(
+ COMPONENT_CONFIG_LOAD,
+ Some("load and validate config".to_string()),
+ )
.await;
- let (config_path, data_path, cli_silent, cli_log_level) = parse_cli();
+ let (config_path_cli, data_path, cli_silent, cli_log_level) = parse_cli();
+ let startup_cwd = match std::env::current_dir() {
+ Ok(cwd) => cwd,
+ Err(e) => {
+ eprintln!("[telemt] Can't read current_dir: {}", e);
+ std::process::exit(1);
+ }
+ };
+ let config_path = resolve_runtime_config_path(&config_path_cli, &startup_cwd);
let mut config = match ProxyConfig::load(&config_path) {
Ok(c) => c,
Err(e) => {
- if std::path::Path::new(&config_path).exists() {
+ if config_path.exists() {
eprintln!("[telemt] Error: {}", e);
std::process::exit(1);
} else {
let default = ProxyConfig::default();
std::fs::write(&config_path, toml::to_string_pretty(&default).unwrap()).unwrap();
- eprintln!("[telemt] Created default config at {}", config_path);
+ eprintln!(
+ "[telemt] Created default config at {}",
+ config_path.display()
+ );
default
}
}
@@ -86,24 +100,38 @@ pub async fn run() -> std::result::Result<(), Box> {
if let Some(ref data_path) = config.general.data_path {
if !data_path.is_absolute() {
- eprintln!("[telemt] data_path must be absolute: {}", data_path.display());
+ eprintln!(
+ "[telemt] data_path must be absolute: {}",
+ data_path.display()
+ );
std::process::exit(1);
}
if data_path.exists() {
if !data_path.is_dir() {
- eprintln!("[telemt] data_path exists but is not a directory: {}", data_path.display());
+ eprintln!(
+ "[telemt] data_path exists but is not a directory: {}",
+ data_path.display()
+ );
std::process::exit(1);
}
} else {
if let Err(e) = std::fs::create_dir_all(data_path) {
- eprintln!("[telemt] Can't create data_path {}: {}", data_path.display(), e);
+ eprintln!(
+ "[telemt] Can't create data_path {}: {}",
+ data_path.display(),
+ e
+ );
std::process::exit(1);
}
}
if let Err(e) = std::env::set_current_dir(data_path) {
- eprintln!("[telemt] Can't use data_path {}: {}", data_path.display(), e);
+ eprintln!(
+ "[telemt] Can't use data_path {}: {}",
+ data_path.display(),
+ e
+ );
std::process::exit(1);
}
}
@@ -127,7 +155,10 @@ pub async fn run() -> std::result::Result<(), Box> {
let (filter_layer, filter_handle) = reload::Layer::new(EnvFilter::new("info"));
startup_tracker
- .start_component(COMPONENT_TRACING_INIT, Some("initialize tracing subscriber".to_string()))
+ .start_component(
+ COMPONENT_TRACING_INIT,
+ Some("initialize tracing subscriber".to_string()),
+ )
.await;
// Configure color output based on config
@@ -142,7 +173,10 @@ pub async fn run() -> std::result::Result<(), Box> {
.with(fmt_layer)
.init();
startup_tracker
- .complete_component(COMPONENT_TRACING_INIT, Some("tracing initialized".to_string()))
+ .complete_component(
+ COMPONENT_TRACING_INIT,
+ Some("tracing initialized".to_string()),
+ )
.await;
info!("Telemt MTProxy v{}", env!("CARGO_PKG_VERSION"));
@@ -208,7 +242,8 @@ pub async fn run() -> std::result::Result<(), Box> {
config.access.user_max_unique_ips_window_secs,
)
.await;
- if config.access.user_max_unique_ips_global_each > 0 || !config.access.user_max_unique_ips.is_empty()
+ if config.access.user_max_unique_ips_global_each > 0
+ || !config.access.user_max_unique_ips.is_empty()
{
info!(
global_each_limit = config.access.user_max_unique_ips_global_each,
@@ -235,7 +270,10 @@ pub async fn run() -> std::result::Result<(), Box> {
let route_runtime = Arc::new(RouteRuntimeController::new(initial_route_mode));
let api_me_pool = Arc::new(RwLock::new(None::>));
startup_tracker
- .start_component(COMPONENT_API_BOOTSTRAP, Some("spawn API listener task".to_string()))
+ .start_component(
+ COMPONENT_API_BOOTSTRAP,
+ Some("spawn API listener task".to_string()),
+ )
.await;
if config.server.api.enabled {
@@ -258,7 +296,7 @@ pub async fn run() -> std::result::Result<(), Box> {
let route_runtime_api = route_runtime.clone();
let config_rx_api = api_config_rx.clone();
let admission_rx_api = admission_rx.clone();
- let config_path_api = std::path::PathBuf::from(&config_path);
+ let config_path_api = config_path.clone();
let startup_tracker_api = startup_tracker.clone();
let detected_ips_rx_api = detected_ips_rx.clone();
tokio::spawn(async move {
@@ -318,7 +356,10 @@ pub async fn run() -> std::result::Result<(), Box> {
.await;
startup_tracker
- .start_component(COMPONENT_NETWORK_PROBE, Some("probe network capabilities".to_string()))
+ .start_component(
+ COMPONENT_NETWORK_PROBE,
+ Some("probe network capabilities".to_string()),
+ )
.await;
let probe = run_probe(
&config.network,
@@ -331,11 +372,8 @@ pub async fn run() -> std::result::Result<(), Box> {
probe.detected_ipv4.map(IpAddr::V4),
probe.detected_ipv6.map(IpAddr::V6),
));
- let decision = decide_network_capabilities(
- &config.network,
- &probe,
- config.general.middle_proxy_nat_ip,
- );
+ let decision =
+ decide_network_capabilities(&config.network, &probe, config.general.middle_proxy_nat_ip);
log_probe_result(&probe, &decision);
startup_tracker
.complete_component(
@@ -438,24 +476,16 @@ pub async fn run() -> std::result::Result<(), Box> {
// If ME failed to initialize, force direct-only mode.
if me_pool.is_some() {
- startup_tracker
- .set_transport_mode("middle_proxy")
- .await;
- startup_tracker
- .set_degraded(false)
- .await;
+ startup_tracker.set_transport_mode("middle_proxy").await;
+ startup_tracker.set_degraded(false).await;
info!("Transport: Middle-End Proxy - all DC-over-RPC");
} else {
let _ = use_middle_proxy;
use_middle_proxy = false;
// Make runtime config reflect direct-only mode for handlers.
config.general.use_middle_proxy = false;
- startup_tracker
- .set_transport_mode("direct")
- .await;
- startup_tracker
- .set_degraded(true)
- .await;
+ startup_tracker.set_transport_mode("direct").await;
+ startup_tracker.set_degraded(true).await;
if me2dc_fallback {
startup_tracker
.set_me_status(StartupMeStatus::Failed, "fallback_to_direct")
diff --git a/src/maestro/runtime_tasks.rs b/src/maestro/runtime_tasks.rs
index d9691a8..d553eb9 100644
--- a/src/maestro/runtime_tasks.rs
+++ b/src/maestro/runtime_tasks.rs
@@ -1,24 +1,27 @@
use std::net::IpAddr;
-use std::path::PathBuf;
+use std::path::Path;
use std::sync::Arc;
use tokio::sync::{mpsc, watch};
use tracing::{debug, warn};
-use tracing_subscriber::reload;
use tracing_subscriber::EnvFilter;
+use tracing_subscriber::reload;
-use crate::config::{LogLevel, ProxyConfig};
use crate::config::hot_reload::spawn_config_watcher;
+use crate::config::{LogLevel, ProxyConfig};
use crate::crypto::SecureRandom;
use crate::ip_tracker::UserIpTracker;
use crate::metrics;
use crate::network::probe::NetworkProbe;
-use crate::startup::{COMPONENT_CONFIG_WATCHER_START, COMPONENT_METRICS_START, COMPONENT_RUNTIME_READY, StartupTracker};
+use crate::startup::{
+ COMPONENT_CONFIG_WATCHER_START, COMPONENT_METRICS_START, COMPONENT_RUNTIME_READY,
+ StartupTracker,
+};
use crate::stats::beobachten::BeobachtenStore;
use crate::stats::telemetry::TelemetryPolicy;
use crate::stats::{ReplayChecker, Stats};
-use crate::transport::middle_proxy::{MePool, MeReinitTrigger};
use crate::transport::UpstreamManager;
+use crate::transport::middle_proxy::{MePool, MeReinitTrigger};
use super::helpers::write_beobachten_snapshot;
@@ -32,7 +35,7 @@ pub(crate) struct RuntimeWatches {
#[allow(clippy::too_many_arguments)]
pub(crate) async fn spawn_runtime_tasks(
config: &Arc,
- config_path: &str,
+ config_path: &Path,
probe: &NetworkProbe,
prefer_ipv6: bool,
decision_ipv4_dc: bool,
@@ -79,15 +82,13 @@ pub(crate) async fn spawn_runtime_tasks(
Some("spawn config hot-reload watcher".to_string()),
)
.await;
- let (config_rx, log_level_rx): (
- watch::Receiver>,
- watch::Receiver,
- ) = spawn_config_watcher(
- PathBuf::from(config_path),
- config.clone(),
- detected_ip_v4,
- detected_ip_v6,
- );
+ let (config_rx, log_level_rx): (watch::Receiver>, watch::Receiver) =
+ spawn_config_watcher(
+ config_path.to_path_buf(),
+ config.clone(),
+ detected_ip_v4,
+ detected_ip_v6,
+ );
startup_tracker
.complete_component(
COMPONENT_CONFIG_WATCHER_START,
@@ -114,7 +115,8 @@ pub(crate) async fn spawn_runtime_tasks(
break;
}
let cfg = config_rx_policy.borrow_and_update().clone();
- stats_policy.apply_telemetry_policy(TelemetryPolicy::from_config(&cfg.general.telemetry));
+ stats_policy
+ .apply_telemetry_policy(TelemetryPolicy::from_config(&cfg.general.telemetry));
if let Some(pool) = &me_pool_for_policy {
pool.update_runtime_transport_policy(
cfg.general.me_socks_kdf_policy,
@@ -130,7 +132,11 @@ pub(crate) async fn spawn_runtime_tasks(
let ip_tracker_policy = ip_tracker.clone();
let mut config_rx_ip_limits = config_rx.clone();
tokio::spawn(async move {
- let mut prev_limits = config_rx_ip_limits.borrow().access.user_max_unique_ips.clone();
+ let mut prev_limits = config_rx_ip_limits
+ .borrow()
+ .access
+ .user_max_unique_ips
+ .clone();
let mut prev_global_each = config_rx_ip_limits
.borrow()
.access
@@ -183,7 +189,9 @@ pub(crate) async fn spawn_runtime_tasks(
let sleep_secs = cfg.general.beobachten_flush_secs.max(1);
if cfg.general.beobachten {
- let ttl = std::time::Duration::from_secs(cfg.general.beobachten_minutes.saturating_mul(60));
+ let ttl = std::time::Duration::from_secs(
+ cfg.general.beobachten_minutes.saturating_mul(60),
+ );
let path = cfg.general.beobachten_file.clone();
let snapshot = beobachten_writer.snapshot_text(ttl);
if let Err(e) = write_beobachten_snapshot(&path, &snapshot).await {
@@ -227,8 +235,11 @@ pub(crate) async fn spawn_runtime_tasks(
let config_rx_clone_rot = config_rx.clone();
let reinit_tx_rotation = reinit_tx.clone();
tokio::spawn(async move {
- crate::transport::middle_proxy::me_rotation_task(config_rx_clone_rot, reinit_tx_rotation)
- .await;
+ crate::transport::middle_proxy::me_rotation_task(
+ config_rx_clone_rot,
+ reinit_tx_rotation,
+ )
+ .await;
});
}
diff --git a/src/maestro/shutdown.rs b/src/maestro/shutdown.rs
index b73df30..243c772 100644
--- a/src/maestro/shutdown.rs
+++ b/src/maestro/shutdown.rs
@@ -16,8 +16,11 @@ pub(crate) async fn wait_for_shutdown(process_started_at: Instant, me_pool: Opti
let uptime_secs = process_started_at.elapsed().as_secs();
info!("Uptime: {}", format_uptime(uptime_secs));
if let Some(pool) = &me_pool {
- match tokio::time::timeout(Duration::from_secs(2), pool.shutdown_send_close_conn_all())
- .await
+ match tokio::time::timeout(
+ Duration::from_secs(2),
+ pool.shutdown_send_close_conn_all(),
+ )
+ .await
{
Ok(total) => {
info!(
diff --git a/src/maestro/tls_bootstrap.rs b/src/maestro/tls_bootstrap.rs
index 73eec4c..342a2f9 100644
--- a/src/maestro/tls_bootstrap.rs
+++ b/src/maestro/tls_bootstrap.rs
@@ -1,7 +1,7 @@
use std::sync::Arc;
use std::time::Duration;
-use rand::Rng;
+use rand::RngExt;
use tracing::warn;
use crate::config::ProxyConfig;
diff --git a/src/main.rs b/src/main.rs
index 2cfbe28..c512e6b 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -7,6 +7,13 @@ mod crypto;
mod error;
mod ip_tracker;
#[cfg(test)]
+#[path = "tests/ip_tracker_hotpath_adversarial_tests.rs"]
+mod ip_tracker_hotpath_adversarial_tests;
+#[cfg(test)]
+#[path = "tests/ip_tracker_encapsulation_adversarial_tests.rs"]
+mod ip_tracker_encapsulation_adversarial_tests;
+#[cfg(test)]
+#[path = "tests/ip_tracker_regression_tests.rs"]
mod ip_tracker_regression_tests;
mod maestro;
mod metrics;
diff --git a/src/metrics.rs b/src/metrics.rs
index b7272b2..a821d4d 100644
--- a/src/metrics.rs
+++ b/src/metrics.rs
@@ -1,5 +1,5 @@
-use std::convert::Infallible;
use std::collections::{BTreeSet, HashMap};
+use std::convert::Infallible;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
@@ -11,14 +11,12 @@ use hyper::service::service_fn;
use hyper::{Request, Response, StatusCode};
use ipnetwork::IpNetwork;
use tokio::net::TcpListener;
-use tracing::{info, warn, debug};
+use tracing::{debug, info, warn};
use crate::config::ProxyConfig;
use crate::ip_tracker::UserIpTracker;
+use crate::stats::Stats;
use crate::stats::beobachten::BeobachtenStore;
-use crate::stats::{
- MeWriterCleanupSideEffectStep, MeWriterTeardownMode, MeWriterTeardownReason, Stats,
-};
use crate::transport::{ListenOptions, create_listener};
pub async fn serve(
@@ -64,7 +62,10 @@ pub async fn serve(
let addr_v4 = SocketAddr::from(([0, 0, 0, 0], port));
match bind_metrics_listener(addr_v4, false) {
Ok(listener) => {
- info!("Metrics endpoint: http://{}/metrics and /beobachten", addr_v4);
+ info!(
+ "Metrics endpoint: http://{}/metrics and /beobachten",
+ addr_v4
+ );
listener_v4 = Some(listener);
}
Err(e) => {
@@ -75,7 +76,10 @@ pub async fn serve(
let addr_v6 = SocketAddr::from(([0, 0, 0, 0, 0, 0, 0, 0], port));
match bind_metrics_listener(addr_v6, true) {
Ok(listener) => {
- info!("Metrics endpoint: http://[::]:{}/metrics and /beobachten", port);
+ info!(
+ "Metrics endpoint: http://[::]:{}/metrics and /beobachten",
+ port
+ );
listener_v6 = Some(listener);
}
Err(e) => {
@@ -111,12 +115,7 @@ pub async fn serve(
.await;
});
serve_listener(
- listener4,
- stats,
- beobachten,
- ip_tracker,
- config_rx,
- whitelist,
+ listener4, stats, beobachten, ip_tracker, config_rx, whitelist,
)
.await;
}
@@ -233,7 +232,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
let _ = writeln!(out, "# TYPE telemt_uptime_seconds gauge");
let _ = writeln!(out, "telemt_uptime_seconds {:.1}", stats.uptime_secs());
- let _ = writeln!(out, "# HELP telemt_telemetry_core_enabled Runtime core telemetry switch");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_telemetry_core_enabled Runtime core telemetry switch"
+ );
let _ = writeln!(out, "# TYPE telemt_telemetry_core_enabled gauge");
let _ = writeln!(
out,
@@ -241,7 +243,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
if core_enabled { 1 } else { 0 }
);
- let _ = writeln!(out, "# HELP telemt_telemetry_user_enabled Runtime per-user telemetry switch");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_telemetry_user_enabled Runtime per-user telemetry switch"
+ );
let _ = writeln!(out, "# TYPE telemt_telemetry_user_enabled gauge");
let _ = writeln!(
out,
@@ -249,7 +254,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
if user_enabled { 1 } else { 0 }
);
- let _ = writeln!(out, "# HELP telemt_telemetry_me_level Runtime ME telemetry level flag");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_telemetry_me_level Runtime ME telemetry level flag"
+ );
let _ = writeln!(out, "# TYPE telemt_telemetry_me_level gauge");
let _ = writeln!(
out,
@@ -279,126 +287,40 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
- let _ = writeln!(out, "# HELP telemt_connections_total Total accepted connections");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_connections_total Total accepted connections"
+ );
let _ = writeln!(out, "# TYPE telemt_connections_total counter");
let _ = writeln!(
out,
"telemt_connections_total {}",
- if core_enabled { stats.get_connects_all() } else { 0 }
+ if core_enabled {
+ stats.get_connects_all()
+ } else {
+ 0
+ }
);
- let _ = writeln!(out, "# HELP telemt_connections_bad_total Bad/rejected connections");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_connections_bad_total Bad/rejected connections"
+ );
let _ = writeln!(out, "# TYPE telemt_connections_bad_total counter");
let _ = writeln!(
out,
"telemt_connections_bad_total {}",
- if core_enabled { stats.get_connects_bad() } else { 0 }
- );
- let _ = writeln!(out, "# HELP telemt_connections_current Current active connections");
- let _ = writeln!(out, "# TYPE telemt_connections_current gauge");
- let _ = writeln!(
- out,
- "telemt_connections_current {}",
if core_enabled {
- stats.get_current_connections_total()
- } else {
- 0
- }
- );
- let _ = writeln!(out, "# HELP telemt_connections_direct_current Current active direct connections");
- let _ = writeln!(out, "# TYPE telemt_connections_direct_current gauge");
- let _ = writeln!(
- out,
- "telemt_connections_direct_current {}",
- if core_enabled {
- stats.get_current_connections_direct()
- } else {
- 0
- }
- );
- let _ = writeln!(out, "# HELP telemt_connections_me_current Current active middle-end connections");
- let _ = writeln!(out, "# TYPE telemt_connections_me_current gauge");
- let _ = writeln!(
- out,
- "telemt_connections_me_current {}",
- if core_enabled {
- stats.get_current_connections_me()
- } else {
- 0
- }
- );
- let _ = writeln!(
- out,
- "# HELP telemt_relay_adaptive_promotions_total Adaptive relay tier promotions"
- );
- let _ = writeln!(out, "# TYPE telemt_relay_adaptive_promotions_total counter");
- let _ = writeln!(
- out,
- "telemt_relay_adaptive_promotions_total {}",
- if core_enabled {
- stats.get_relay_adaptive_promotions_total()
- } else {
- 0
- }
- );
- let _ = writeln!(
- out,
- "# HELP telemt_relay_adaptive_demotions_total Adaptive relay tier demotions"
- );
- let _ = writeln!(out, "# TYPE telemt_relay_adaptive_demotions_total counter");
- let _ = writeln!(
- out,
- "telemt_relay_adaptive_demotions_total {}",
- if core_enabled {
- stats.get_relay_adaptive_demotions_total()
- } else {
- 0
- }
- );
- let _ = writeln!(
- out,
- "# HELP telemt_relay_adaptive_hard_promotions_total Adaptive relay hard promotions triggered by write pressure"
- );
- let _ = writeln!(
- out,
- "# TYPE telemt_relay_adaptive_hard_promotions_total counter"
- );
- let _ = writeln!(
- out,
- "telemt_relay_adaptive_hard_promotions_total {}",
- if core_enabled {
- stats.get_relay_adaptive_hard_promotions_total()
- } else {
- 0
- }
- );
- let _ = writeln!(out, "# HELP telemt_reconnect_evict_total Reconnect-driven session evictions");
- let _ = writeln!(out, "# TYPE telemt_reconnect_evict_total counter");
- let _ = writeln!(
- out,
- "telemt_reconnect_evict_total {}",
- if core_enabled {
- stats.get_reconnect_evict_total()
- } else {
- 0
- }
- );
- let _ = writeln!(
- out,
- "# HELP telemt_reconnect_stale_close_total Sessions closed because they became stale after reconnect"
- );
- let _ = writeln!(out, "# TYPE telemt_reconnect_stale_close_total counter");
- let _ = writeln!(
- out,
- "telemt_reconnect_stale_close_total {}",
- if core_enabled {
- stats.get_reconnect_stale_close_total()
+ stats.get_connects_bad()
} else {
0
}
);
- let _ = writeln!(out, "# HELP telemt_handshake_timeouts_total Handshake timeouts");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_handshake_timeouts_total Handshake timeouts"
+ );
let _ = writeln!(out, "# TYPE telemt_handshake_timeouts_total counter");
let _ = writeln!(
out,
@@ -477,7 +399,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
out,
"# HELP telemt_upstream_connect_attempts_per_request Histogram-like buckets for attempts per upstream connect request cycle"
);
- let _ = writeln!(out, "# TYPE telemt_upstream_connect_attempts_per_request counter");
+ let _ = writeln!(
+ out,
+ "# TYPE telemt_upstream_connect_attempts_per_request counter"
+ );
let _ = writeln!(
out,
"telemt_upstream_connect_attempts_per_request{{bucket=\"1\"}} {}",
@@ -519,7 +444,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
out,
"# HELP telemt_upstream_connect_duration_success_total Histogram-like buckets of successful upstream connect cycle duration"
);
- let _ = writeln!(out, "# TYPE telemt_upstream_connect_duration_success_total counter");
+ let _ = writeln!(
+ out,
+ "# TYPE telemt_upstream_connect_duration_success_total counter"
+ );
let _ = writeln!(
out,
"telemt_upstream_connect_duration_success_total{{bucket=\"le_100ms\"}} {}",
@@ -561,7 +489,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
out,
"# HELP telemt_upstream_connect_duration_fail_total Histogram-like buckets of failed upstream connect cycle duration"
);
- let _ = writeln!(out, "# TYPE telemt_upstream_connect_duration_fail_total counter");
+ let _ = writeln!(
+ out,
+ "# TYPE telemt_upstream_connect_duration_fail_total counter"
+ );
let _ = writeln!(
out,
"telemt_upstream_connect_duration_fail_total{{bucket=\"le_100ms\"}} {}",
@@ -599,7 +530,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
- let _ = writeln!(out, "# HELP telemt_me_keepalive_sent_total ME keepalive frames sent");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_keepalive_sent_total ME keepalive frames sent"
+ );
let _ = writeln!(out, "# TYPE telemt_me_keepalive_sent_total counter");
let _ = writeln!(
out,
@@ -611,7 +545,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
- let _ = writeln!(out, "# HELP telemt_me_keepalive_failed_total ME keepalive send failures");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_keepalive_failed_total ME keepalive send failures"
+ );
let _ = writeln!(out, "# TYPE telemt_me_keepalive_failed_total counter");
let _ = writeln!(
out,
@@ -623,7 +560,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
- let _ = writeln!(out, "# HELP telemt_me_keepalive_pong_total ME keepalive pong replies");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_keepalive_pong_total ME keepalive pong replies"
+ );
let _ = writeln!(out, "# TYPE telemt_me_keepalive_pong_total counter");
let _ = writeln!(
out,
@@ -635,7 +575,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
- let _ = writeln!(out, "# HELP telemt_me_keepalive_timeout_total ME keepalive ping timeouts");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_keepalive_timeout_total ME keepalive ping timeouts"
+ );
let _ = writeln!(out, "# TYPE telemt_me_keepalive_timeout_total counter");
let _ = writeln!(
out,
@@ -651,7 +594,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
out,
"# HELP telemt_me_rpc_proxy_req_signal_sent_total Service RPC_PROXY_REQ activity signals sent"
);
- let _ = writeln!(out, "# TYPE telemt_me_rpc_proxy_req_signal_sent_total counter");
+ let _ = writeln!(
+ out,
+ "# TYPE telemt_me_rpc_proxy_req_signal_sent_total counter"
+ );
let _ = writeln!(
out,
"telemt_me_rpc_proxy_req_signal_sent_total {}",
@@ -734,7 +680,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
- let _ = writeln!(out, "# HELP telemt_me_reconnect_attempts_total ME reconnect attempts");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_reconnect_attempts_total ME reconnect attempts"
+ );
let _ = writeln!(out, "# TYPE telemt_me_reconnect_attempts_total counter");
let _ = writeln!(
out,
@@ -746,7 +695,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
- let _ = writeln!(out, "# HELP telemt_me_reconnect_success_total ME reconnect successes");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_reconnect_success_total ME reconnect successes"
+ );
let _ = writeln!(out, "# TYPE telemt_me_reconnect_success_total counter");
let _ = writeln!(
out,
@@ -758,7 +710,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
- let _ = writeln!(out, "# HELP telemt_me_handshake_reject_total ME handshake rejects from upstream");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_handshake_reject_total ME handshake rejects from upstream"
+ );
let _ = writeln!(out, "# TYPE telemt_me_handshake_reject_total counter");
let _ = writeln!(
out,
@@ -770,20 +725,25 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
- let _ = writeln!(out, "# HELP telemt_me_handshake_error_code_total ME handshake reject errors by code");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_handshake_error_code_total ME handshake reject errors by code"
+ );
let _ = writeln!(out, "# TYPE telemt_me_handshake_error_code_total counter");
if me_allows_normal {
for (error_code, count) in stats.get_me_handshake_error_code_counts() {
let _ = writeln!(
out,
"telemt_me_handshake_error_code_total{{error_code=\"{}\"}} {}",
- error_code,
- count
+ error_code, count
);
}
}
- let _ = writeln!(out, "# HELP telemt_me_reader_eof_total ME reader EOF terminations");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_reader_eof_total ME reader EOF terminations"
+ );
let _ = writeln!(out, "# TYPE telemt_me_reader_eof_total counter");
let _ = writeln!(
out,
@@ -810,6 +770,69 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
+ let _ = writeln!(
+ out,
+ "# HELP telemt_relay_idle_soft_mark_total Middle-relay sessions marked as soft-idle candidates"
+ );
+ let _ = writeln!(out, "# TYPE telemt_relay_idle_soft_mark_total counter");
+ let _ = writeln!(
+ out,
+ "telemt_relay_idle_soft_mark_total {}",
+ if me_allows_normal {
+ stats.get_relay_idle_soft_mark_total()
+ } else {
+ 0
+ }
+ );
+
+ let _ = writeln!(
+ out,
+ "# HELP telemt_relay_idle_hard_close_total Middle-relay sessions closed by hard-idle policy"
+ );
+ let _ = writeln!(out, "# TYPE telemt_relay_idle_hard_close_total counter");
+ let _ = writeln!(
+ out,
+ "telemt_relay_idle_hard_close_total {}",
+ if me_allows_normal {
+ stats.get_relay_idle_hard_close_total()
+ } else {
+ 0
+ }
+ );
+
+ let _ = writeln!(
+ out,
+ "# HELP telemt_relay_pressure_evict_total Middle-relay sessions evicted under resource pressure"
+ );
+ let _ = writeln!(out, "# TYPE telemt_relay_pressure_evict_total counter");
+ let _ = writeln!(
+ out,
+ "telemt_relay_pressure_evict_total {}",
+ if me_allows_normal {
+ stats.get_relay_pressure_evict_total()
+ } else {
+ 0
+ }
+ );
+
+ let _ = writeln!(
+ out,
+ "# HELP telemt_relay_protocol_desync_close_total Middle-relay sessions closed due to protocol desync"
+ );
+ let _ = writeln!(
+ out,
+ "# TYPE telemt_relay_protocol_desync_close_total counter"
+ );
+ let _ = writeln!(
+ out,
+ "telemt_relay_protocol_desync_close_total {}",
+ if me_allows_normal {
+ stats.get_relay_protocol_desync_close_total()
+ } else {
+ 0
+ }
+ );
+
let _ = writeln!(out, "# HELP telemt_me_crc_mismatch_total ME CRC mismatches");
let _ = writeln!(out, "# TYPE telemt_me_crc_mismatch_total counter");
let _ = writeln!(
@@ -822,7 +845,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
- let _ = writeln!(out, "# HELP telemt_me_seq_mismatch_total ME sequence mismatches");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_seq_mismatch_total ME sequence mismatches"
+ );
let _ = writeln!(out, "# TYPE telemt_me_seq_mismatch_total counter");
let _ = writeln!(
out,
@@ -834,7 +860,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
- let _ = writeln!(out, "# HELP telemt_me_route_drop_no_conn_total ME route drops: no conn");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_route_drop_no_conn_total ME route drops: no conn"
+ );
let _ = writeln!(out, "# TYPE telemt_me_route_drop_no_conn_total counter");
let _ = writeln!(
out,
@@ -846,8 +875,14 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
- let _ = writeln!(out, "# HELP telemt_me_route_drop_channel_closed_total ME route drops: channel closed");
- let _ = writeln!(out, "# TYPE telemt_me_route_drop_channel_closed_total counter");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_route_drop_channel_closed_total ME route drops: channel closed"
+ );
+ let _ = writeln!(
+ out,
+ "# TYPE telemt_me_route_drop_channel_closed_total counter"
+ );
let _ = writeln!(
out,
"telemt_me_route_drop_channel_closed_total {}",
@@ -858,7 +893,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
- let _ = writeln!(out, "# HELP telemt_me_route_drop_queue_full_total ME route drops: queue full");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_route_drop_queue_full_total ME route drops: queue full"
+ );
let _ = writeln!(out, "# TYPE telemt_me_route_drop_queue_full_total counter");
let _ = writeln!(
out,
@@ -897,6 +935,462 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_d2c_batches_total Total DC->Client flush batches"
+ );
+ let _ = writeln!(out, "# TYPE telemt_me_d2c_batches_total counter");
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_batches_total {}",
+ if me_allows_normal {
+ stats.get_me_d2c_batches_total()
+ } else {
+ 0
+ }
+ );
+
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_d2c_batch_frames_total Total DC->Client frames flushed in batches"
+ );
+ let _ = writeln!(out, "# TYPE telemt_me_d2c_batch_frames_total counter");
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_batch_frames_total {}",
+ if me_allows_normal {
+ stats.get_me_d2c_batch_frames_total()
+ } else {
+ 0
+ }
+ );
+
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_d2c_batch_bytes_total Total DC->Client bytes flushed in batches"
+ );
+ let _ = writeln!(out, "# TYPE telemt_me_d2c_batch_bytes_total counter");
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_batch_bytes_total {}",
+ if me_allows_normal {
+ stats.get_me_d2c_batch_bytes_total()
+ } else {
+ 0
+ }
+ );
+
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_d2c_flush_reason_total DC->Client flush reasons"
+ );
+ let _ = writeln!(out, "# TYPE telemt_me_d2c_flush_reason_total counter");
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_flush_reason_total{{reason=\"queue_drain\"}} {}",
+ if me_allows_normal {
+ stats.get_me_d2c_flush_reason_queue_drain_total()
+ } else {
+ 0
+ }
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_flush_reason_total{{reason=\"batch_frames\"}} {}",
+ if me_allows_normal {
+ stats.get_me_d2c_flush_reason_batch_frames_total()
+ } else {
+ 0
+ }
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_flush_reason_total{{reason=\"batch_bytes\"}} {}",
+ if me_allows_normal {
+ stats.get_me_d2c_flush_reason_batch_bytes_total()
+ } else {
+ 0
+ }
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_flush_reason_total{{reason=\"max_delay\"}} {}",
+ if me_allows_normal {
+ stats.get_me_d2c_flush_reason_max_delay_total()
+ } else {
+ 0
+ }
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_flush_reason_total{{reason=\"ack_immediate\"}} {}",
+ if me_allows_normal {
+ stats.get_me_d2c_flush_reason_ack_immediate_total()
+ } else {
+ 0
+ }
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_flush_reason_total{{reason=\"close\"}} {}",
+ if me_allows_normal {
+ stats.get_me_d2c_flush_reason_close_total()
+ } else {
+ 0
+ }
+ );
+
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_d2c_data_frames_total DC->Client data frames"
+ );
+ let _ = writeln!(out, "# TYPE telemt_me_d2c_data_frames_total counter");
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_data_frames_total {}",
+ if me_allows_normal {
+ stats.get_me_d2c_data_frames_total()
+ } else {
+ 0
+ }
+ );
+
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_d2c_ack_frames_total DC->Client quick-ack frames"
+ );
+ let _ = writeln!(out, "# TYPE telemt_me_d2c_ack_frames_total counter");
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_ack_frames_total {}",
+ if me_allows_normal {
+ stats.get_me_d2c_ack_frames_total()
+ } else {
+ 0
+ }
+ );
+
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_d2c_payload_bytes_total DC->Client payload bytes before transport framing"
+ );
+ let _ = writeln!(out, "# TYPE telemt_me_d2c_payload_bytes_total counter");
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_payload_bytes_total {}",
+ if me_allows_normal {
+ stats.get_me_d2c_payload_bytes_total()
+ } else {
+ 0
+ }
+ );
+
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_d2c_write_mode_total DC->Client writer mode selection"
+ );
+ let _ = writeln!(out, "# TYPE telemt_me_d2c_write_mode_total counter");
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_write_mode_total{{mode=\"coalesced\"}} {}",
+ if me_allows_normal {
+ stats.get_me_d2c_write_mode_coalesced_total()
+ } else {
+ 0
+ }
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_write_mode_total{{mode=\"split\"}} {}",
+ if me_allows_normal {
+ stats.get_me_d2c_write_mode_split_total()
+ } else {
+ 0
+ }
+ );
+
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_d2c_quota_reject_total DC->Client quota rejects"
+ );
+ let _ = writeln!(out, "# TYPE telemt_me_d2c_quota_reject_total counter");
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_quota_reject_total{{stage=\"pre_write\"}} {}",
+ if me_allows_normal {
+ stats.get_me_d2c_quota_reject_pre_write_total()
+ } else {
+ 0
+ }
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_quota_reject_total{{stage=\"post_write\"}} {}",
+ if me_allows_normal {
+ stats.get_me_d2c_quota_reject_post_write_total()
+ } else {
+ 0
+ }
+ );
+
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_d2c_frame_buf_shrink_total DC->Client reusable frame buffer shrink events"
+ );
+ let _ = writeln!(out, "# TYPE telemt_me_d2c_frame_buf_shrink_total counter");
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_frame_buf_shrink_total {}",
+ if me_allows_normal {
+ stats.get_me_d2c_frame_buf_shrink_total()
+ } else {
+ 0
+ }
+ );
+
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_d2c_frame_buf_shrink_bytes_total DC->Client reusable frame buffer bytes released"
+ );
+ let _ = writeln!(
+ out,
+ "# TYPE telemt_me_d2c_frame_buf_shrink_bytes_total counter"
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_frame_buf_shrink_bytes_total {}",
+ if me_allows_normal {
+ stats.get_me_d2c_frame_buf_shrink_bytes_total()
+ } else {
+ 0
+ }
+ );
+
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_d2c_batch_frames_bucket_total DC->Client batch frame count buckets"
+ );
+ let _ = writeln!(
+ out,
+ "# TYPE telemt_me_d2c_batch_frames_bucket_total counter"
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_batch_frames_bucket_total{{bucket=\"1\"}} {}",
+ if me_allows_debug {
+ stats.get_me_d2c_batch_frames_bucket_1()
+ } else {
+ 0
+ }
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_batch_frames_bucket_total{{bucket=\"2_4\"}} {}",
+ if me_allows_debug {
+ stats.get_me_d2c_batch_frames_bucket_2_4()
+ } else {
+ 0
+ }
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_batch_frames_bucket_total{{bucket=\"5_8\"}} {}",
+ if me_allows_debug {
+ stats.get_me_d2c_batch_frames_bucket_5_8()
+ } else {
+ 0
+ }
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_batch_frames_bucket_total{{bucket=\"9_16\"}} {}",
+ if me_allows_debug {
+ stats.get_me_d2c_batch_frames_bucket_9_16()
+ } else {
+ 0
+ }
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_batch_frames_bucket_total{{bucket=\"17_32\"}} {}",
+ if me_allows_debug {
+ stats.get_me_d2c_batch_frames_bucket_17_32()
+ } else {
+ 0
+ }
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_batch_frames_bucket_total{{bucket=\"gt_32\"}} {}",
+ if me_allows_debug {
+ stats.get_me_d2c_batch_frames_bucket_gt_32()
+ } else {
+ 0
+ }
+ );
+
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_d2c_batch_bytes_bucket_total DC->Client batch byte size buckets"
+ );
+ let _ = writeln!(
+ out,
+ "# TYPE telemt_me_d2c_batch_bytes_bucket_total counter"
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_batch_bytes_bucket_total{{bucket=\"0_1k\"}} {}",
+ if me_allows_debug {
+ stats.get_me_d2c_batch_bytes_bucket_0_1k()
+ } else {
+ 0
+ }
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_batch_bytes_bucket_total{{bucket=\"1k_4k\"}} {}",
+ if me_allows_debug {
+ stats.get_me_d2c_batch_bytes_bucket_1k_4k()
+ } else {
+ 0
+ }
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_batch_bytes_bucket_total{{bucket=\"4k_16k\"}} {}",
+ if me_allows_debug {
+ stats.get_me_d2c_batch_bytes_bucket_4k_16k()
+ } else {
+ 0
+ }
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_batch_bytes_bucket_total{{bucket=\"16k_64k\"}} {}",
+ if me_allows_debug {
+ stats.get_me_d2c_batch_bytes_bucket_16k_64k()
+ } else {
+ 0
+ }
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_batch_bytes_bucket_total{{bucket=\"64k_128k\"}} {}",
+ if me_allows_debug {
+ stats.get_me_d2c_batch_bytes_bucket_64k_128k()
+ } else {
+ 0
+ }
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_batch_bytes_bucket_total{{bucket=\"gt_128k\"}} {}",
+ if me_allows_debug {
+ stats.get_me_d2c_batch_bytes_bucket_gt_128k()
+ } else {
+ 0
+ }
+ );
+
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_d2c_flush_duration_us_bucket_total DC->Client flush duration buckets"
+ );
+ let _ = writeln!(
+ out,
+ "# TYPE telemt_me_d2c_flush_duration_us_bucket_total counter"
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_flush_duration_us_bucket_total{{bucket=\"0_50\"}} {}",
+ if me_allows_debug {
+ stats.get_me_d2c_flush_duration_us_bucket_0_50()
+ } else {
+ 0
+ }
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_flush_duration_us_bucket_total{{bucket=\"51_200\"}} {}",
+ if me_allows_debug {
+ stats.get_me_d2c_flush_duration_us_bucket_51_200()
+ } else {
+ 0
+ }
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_flush_duration_us_bucket_total{{bucket=\"201_1000\"}} {}",
+ if me_allows_debug {
+ stats.get_me_d2c_flush_duration_us_bucket_201_1000()
+ } else {
+ 0
+ }
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_flush_duration_us_bucket_total{{bucket=\"1001_5000\"}} {}",
+ if me_allows_debug {
+ stats.get_me_d2c_flush_duration_us_bucket_1001_5000()
+ } else {
+ 0
+ }
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_flush_duration_us_bucket_total{{bucket=\"5001_20000\"}} {}",
+ if me_allows_debug {
+ stats.get_me_d2c_flush_duration_us_bucket_5001_20000()
+ } else {
+ 0
+ }
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_flush_duration_us_bucket_total{{bucket=\"gt_20000\"}} {}",
+ if me_allows_debug {
+ stats.get_me_d2c_flush_duration_us_bucket_gt_20000()
+ } else {
+ 0
+ }
+ );
+
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_d2c_batch_timeout_armed_total DC->Client max-delay timer armed events"
+ );
+ let _ = writeln!(
+ out,
+ "# TYPE telemt_me_d2c_batch_timeout_armed_total counter"
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_batch_timeout_armed_total {}",
+ if me_allows_debug {
+ stats.get_me_d2c_batch_timeout_armed_total()
+ } else {
+ 0
+ }
+ );
+
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_d2c_batch_timeout_fired_total DC->Client max-delay timer fired events"
+ );
+ let _ = writeln!(
+ out,
+ "# TYPE telemt_me_d2c_batch_timeout_fired_total counter"
+ );
+ let _ = writeln!(
+ out,
+ "telemt_me_d2c_batch_timeout_fired_total {}",
+ if me_allows_debug {
+ stats.get_me_d2c_batch_timeout_fired_total()
+ } else {
+ 0
+ }
+ );
+
let _ = writeln!(
out,
"# HELP telemt_me_writer_pick_total ME writer-pick outcomes by mode and result"
@@ -1015,7 +1509,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
out,
"# HELP telemt_me_writer_pick_mode_switch_total Writer-pick mode switches via runtime updates"
);
- let _ = writeln!(out, "# TYPE telemt_me_writer_pick_mode_switch_total counter");
+ let _ = writeln!(
+ out,
+ "# TYPE telemt_me_writer_pick_mode_switch_total counter"
+ );
let _ = writeln!(
out,
"telemt_me_writer_pick_mode_switch_total {}",
@@ -1065,7 +1562,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
- let _ = writeln!(out, "# HELP telemt_me_kdf_drift_total ME KDF input drift detections");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_kdf_drift_total ME KDF input drift detections"
+ );
let _ = writeln!(out, "# TYPE telemt_me_kdf_drift_total counter");
let _ = writeln!(
out,
@@ -1111,7 +1611,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
out,
"# HELP telemt_me_hardswap_pending_ttl_expired_total Pending hardswap generations reset by TTL expiration"
);
- let _ = writeln!(out, "# TYPE telemt_me_hardswap_pending_ttl_expired_total counter");
+ let _ = writeln!(
+ out,
+ "# TYPE telemt_me_hardswap_pending_ttl_expired_total counter"
+ );
let _ = writeln!(
out,
"telemt_me_hardswap_pending_ttl_expired_total {}",
@@ -1343,10 +1846,7 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
out,
"# HELP telemt_me_adaptive_floor_global_cap_raw Runtime raw global adaptive floor cap"
);
- let _ = writeln!(
- out,
- "# TYPE telemt_me_adaptive_floor_global_cap_raw gauge"
- );
+ let _ = writeln!(out, "# TYPE telemt_me_adaptive_floor_global_cap_raw gauge");
let _ = writeln!(
out,
"telemt_me_adaptive_floor_global_cap_raw {}",
@@ -1529,7 +2029,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
- let _ = writeln!(out, "# HELP telemt_secure_padding_invalid_total Invalid secure frame lengths");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_secure_padding_invalid_total Invalid secure frame lengths"
+ );
let _ = writeln!(out, "# TYPE telemt_secure_padding_invalid_total counter");
let _ = writeln!(
out,
@@ -1541,7 +2044,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
- let _ = writeln!(out, "# HELP telemt_desync_total Total crypto-desync detections");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_desync_total Total crypto-desync detections"
+ );
let _ = writeln!(out, "# TYPE telemt_desync_total counter");
let _ = writeln!(
out,
@@ -1553,7 +2059,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
- let _ = writeln!(out, "# HELP telemt_desync_full_logged_total Full forensic desync logs emitted");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_desync_full_logged_total Full forensic desync logs emitted"
+ );
let _ = writeln!(out, "# TYPE telemt_desync_full_logged_total counter");
let _ = writeln!(
out,
@@ -1565,7 +2074,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
- let _ = writeln!(out, "# HELP telemt_desync_suppressed_total Suppressed desync forensic events");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_desync_suppressed_total Suppressed desync forensic events"
+ );
let _ = writeln!(out, "# TYPE telemt_desync_suppressed_total counter");
let _ = writeln!(
out,
@@ -1577,7 +2089,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
- let _ = writeln!(out, "# HELP telemt_desync_frames_bucket_total Desync count by frames_ok bucket");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_desync_frames_bucket_total Desync count by frames_ok bucket"
+ );
let _ = writeln!(out, "# TYPE telemt_desync_frames_bucket_total counter");
let _ = writeln!(
out,
@@ -1616,7 +2131,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
- let _ = writeln!(out, "# HELP telemt_pool_swap_total Successful ME pool swaps");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_pool_swap_total Successful ME pool swaps"
+ );
let _ = writeln!(out, "# TYPE telemt_pool_swap_total counter");
let _ = writeln!(
out,
@@ -1628,7 +2146,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
- let _ = writeln!(out, "# HELP telemt_pool_drain_active Active draining ME writers");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_pool_drain_active Active draining ME writers"
+ );
let _ = writeln!(out, "# TYPE telemt_pool_drain_active gauge");
let _ = writeln!(
out,
@@ -1640,7 +2161,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
- let _ = writeln!(out, "# HELP telemt_pool_force_close_total Forced close events for draining writers");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_pool_force_close_total Forced close events for draining writers"
+ );
let _ = writeln!(out, "# TYPE telemt_pool_force_close_total counter");
let _ = writeln!(
out,
@@ -1654,35 +2178,8 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
let _ = writeln!(
out,
- "# HELP telemt_pool_drain_soft_evict_total Soft-evicted client sessions on stuck draining writers"
+ "# HELP telemt_pool_stale_pick_total Stale writer fallback picks for new binds"
);
- let _ = writeln!(out, "# TYPE telemt_pool_drain_soft_evict_total counter");
- let _ = writeln!(
- out,
- "telemt_pool_drain_soft_evict_total {}",
- if me_allows_normal {
- stats.get_pool_drain_soft_evict_total()
- } else {
- 0
- }
- );
-
- let _ = writeln!(
- out,
- "# HELP telemt_pool_drain_soft_evict_writer_total Draining writers with at least one soft eviction"
- );
- let _ = writeln!(out, "# TYPE telemt_pool_drain_soft_evict_writer_total counter");
- let _ = writeln!(
- out,
- "telemt_pool_drain_soft_evict_writer_total {}",
- if me_allows_normal {
- stats.get_pool_drain_soft_evict_writer_total()
- } else {
- 0
- }
- );
-
- let _ = writeln!(out, "# HELP telemt_pool_stale_pick_total Stale writer fallback picks for new binds");
let _ = writeln!(out, "# TYPE telemt_pool_stale_pick_total counter");
let _ = writeln!(
out,
@@ -1696,56 +2193,8 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
let _ = writeln!(
out,
- "# HELP telemt_me_writer_close_signal_drop_total Close-signal drops for already-removed ME writers"
+ "# HELP telemt_me_writer_removed_total Total ME writer removals"
);
- let _ = writeln!(out, "# TYPE telemt_me_writer_close_signal_drop_total counter");
- let _ = writeln!(
- out,
- "telemt_me_writer_close_signal_drop_total {}",
- if me_allows_normal {
- stats.get_me_writer_close_signal_drop_total()
- } else {
- 0
- }
- );
-
- let _ = writeln!(
- out,
- "# HELP telemt_me_writer_close_signal_channel_full_total Close-signal drops caused by full writer command channels"
- );
- let _ = writeln!(
- out,
- "# TYPE telemt_me_writer_close_signal_channel_full_total counter"
- );
- let _ = writeln!(
- out,
- "telemt_me_writer_close_signal_channel_full_total {}",
- if me_allows_normal {
- stats.get_me_writer_close_signal_channel_full_total()
- } else {
- 0
- }
- );
-
- let _ = writeln!(
- out,
- "# HELP telemt_me_draining_writers_reap_progress_total Draining-writer removals processed by reap cleanup"
- );
- let _ = writeln!(
- out,
- "# TYPE telemt_me_draining_writers_reap_progress_total counter"
- );
- let _ = writeln!(
- out,
- "telemt_me_draining_writers_reap_progress_total {}",
- if me_allows_normal {
- stats.get_me_draining_writers_reap_progress_total()
- } else {
- 0
- }
- );
-
- let _ = writeln!(out, "# HELP telemt_me_writer_removed_total Total ME writer removals");
let _ = writeln!(out, "# TYPE telemt_me_writer_removed_total counter");
let _ = writeln!(
out,
@@ -1761,7 +2210,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
out,
"# HELP telemt_me_writer_removed_unexpected_total Unexpected ME writer removals that triggered refill"
);
- let _ = writeln!(out, "# TYPE telemt_me_writer_removed_unexpected_total counter");
+ let _ = writeln!(
+ out,
+ "# TYPE telemt_me_writer_removed_unexpected_total counter"
+ );
let _ = writeln!(
out,
"telemt_me_writer_removed_unexpected_total {}",
@@ -1774,168 +2226,8 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
let _ = writeln!(
out,
- "# HELP telemt_me_writer_teardown_attempt_total ME writer teardown attempts by reason and mode"
+ "# HELP telemt_me_refill_triggered_total Immediate ME refill runs started"
);
- let _ = writeln!(out, "# TYPE telemt_me_writer_teardown_attempt_total counter");
- for reason in MeWriterTeardownReason::ALL {
- for mode in MeWriterTeardownMode::ALL {
- let _ = writeln!(
- out,
- "telemt_me_writer_teardown_attempt_total{{reason=\"{}\",mode=\"{}\"}} {}",
- reason.as_str(),
- mode.as_str(),
- if me_allows_normal {
- stats.get_me_writer_teardown_attempt_total(reason, mode)
- } else {
- 0
- }
- );
- }
- }
-
- let _ = writeln!(
- out,
- "# HELP telemt_me_writer_teardown_success_total ME writer teardown successes by mode"
- );
- let _ = writeln!(out, "# TYPE telemt_me_writer_teardown_success_total counter");
- for mode in MeWriterTeardownMode::ALL {
- let _ = writeln!(
- out,
- "telemt_me_writer_teardown_success_total{{mode=\"{}\"}} {}",
- mode.as_str(),
- if me_allows_normal {
- stats.get_me_writer_teardown_success_total(mode)
- } else {
- 0
- }
- );
- }
-
- let _ = writeln!(
- out,
- "# HELP telemt_me_writer_teardown_timeout_total Teardown operations that timed out"
- );
- let _ = writeln!(out, "# TYPE telemt_me_writer_teardown_timeout_total counter");
- let _ = writeln!(
- out,
- "telemt_me_writer_teardown_timeout_total {}",
- if me_allows_normal {
- stats.get_me_writer_teardown_timeout_total()
- } else {
- 0
- }
- );
-
- let _ = writeln!(
- out,
- "# HELP telemt_me_writer_teardown_escalation_total Watchdog teardown escalations to hard detach"
- );
- let _ = writeln!(
- out,
- "# TYPE telemt_me_writer_teardown_escalation_total counter"
- );
- let _ = writeln!(
- out,
- "telemt_me_writer_teardown_escalation_total {}",
- if me_allows_normal {
- stats.get_me_writer_teardown_escalation_total()
- } else {
- 0
- }
- );
-
- let _ = writeln!(
- out,
- "# HELP telemt_me_writer_teardown_noop_total Teardown operations that became no-op"
- );
- let _ = writeln!(out, "# TYPE telemt_me_writer_teardown_noop_total counter");
- let _ = writeln!(
- out,
- "telemt_me_writer_teardown_noop_total {}",
- if me_allows_normal {
- stats.get_me_writer_teardown_noop_total()
- } else {
- 0
- }
- );
-
- let _ = writeln!(
- out,
- "# HELP telemt_me_writer_teardown_duration_seconds ME writer teardown latency histogram by mode"
- );
- let _ = writeln!(
- out,
- "# TYPE telemt_me_writer_teardown_duration_seconds histogram"
- );
- let bucket_labels = Stats::me_writer_teardown_duration_bucket_labels();
- for mode in MeWriterTeardownMode::ALL {
- for (bucket_idx, label) in bucket_labels.iter().enumerate() {
- let _ = writeln!(
- out,
- "telemt_me_writer_teardown_duration_seconds_bucket{{mode=\"{}\",le=\"{}\"}} {}",
- mode.as_str(),
- label,
- if me_allows_normal {
- stats.get_me_writer_teardown_duration_bucket_total(mode, bucket_idx)
- } else {
- 0
- }
- );
- }
- let _ = writeln!(
- out,
- "telemt_me_writer_teardown_duration_seconds_bucket{{mode=\"{}\",le=\"+Inf\"}} {}",
- mode.as_str(),
- if me_allows_normal {
- stats.get_me_writer_teardown_duration_count(mode)
- } else {
- 0
- }
- );
- let _ = writeln!(
- out,
- "telemt_me_writer_teardown_duration_seconds_sum{{mode=\"{}\"}} {:.6}",
- mode.as_str(),
- if me_allows_normal {
- stats.get_me_writer_teardown_duration_sum_seconds(mode)
- } else {
- 0.0
- }
- );
- let _ = writeln!(
- out,
- "telemt_me_writer_teardown_duration_seconds_count{{mode=\"{}\"}} {}",
- mode.as_str(),
- if me_allows_normal {
- stats.get_me_writer_teardown_duration_count(mode)
- } else {
- 0
- }
- );
- }
-
- let _ = writeln!(
- out,
- "# HELP telemt_me_writer_cleanup_side_effect_failures_total Failed cleanup side effects by step"
- );
- let _ = writeln!(
- out,
- "# TYPE telemt_me_writer_cleanup_side_effect_failures_total counter"
- );
- for step in MeWriterCleanupSideEffectStep::ALL {
- let _ = writeln!(
- out,
- "telemt_me_writer_cleanup_side_effect_failures_total{{step=\"{}\"}} {}",
- step.as_str(),
- if me_allows_normal {
- stats.get_me_writer_cleanup_side_effect_failures_total(step)
- } else {
- 0
- }
- );
- }
-
- let _ = writeln!(out, "# HELP telemt_me_refill_triggered_total Immediate ME refill runs started");
let _ = writeln!(out, "# TYPE telemt_me_refill_triggered_total counter");
let _ = writeln!(
out,
@@ -1951,7 +2243,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
out,
"# HELP telemt_me_refill_skipped_inflight_total Immediate ME refill skips due to inflight dedup"
);
- let _ = writeln!(out, "# TYPE telemt_me_refill_skipped_inflight_total counter");
+ let _ = writeln!(
+ out,
+ "# TYPE telemt_me_refill_skipped_inflight_total counter"
+ );
let _ = writeln!(
out,
"telemt_me_refill_skipped_inflight_total {}",
@@ -1962,7 +2257,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
}
);
- let _ = writeln!(out, "# HELP telemt_me_refill_failed_total Immediate ME refill failures");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_me_refill_failed_total Immediate ME refill failures"
+ );
let _ = writeln!(out, "# TYPE telemt_me_refill_failed_total counter");
let _ = writeln!(
out,
@@ -1978,7 +2276,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
out,
"# HELP telemt_me_writer_restored_same_endpoint_total Refilled ME writer restored on the same endpoint"
);
- let _ = writeln!(out, "# TYPE telemt_me_writer_restored_same_endpoint_total counter");
+ let _ = writeln!(
+ out,
+ "# TYPE telemt_me_writer_restored_same_endpoint_total counter"
+ );
let _ = writeln!(
out,
"telemt_me_writer_restored_same_endpoint_total {}",
@@ -1993,7 +2294,10 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
out,
"# HELP telemt_me_writer_restored_fallback_total Refilled ME writer restored via fallback endpoint"
);
- let _ = writeln!(out, "# TYPE telemt_me_writer_restored_fallback_total counter");
+ let _ = writeln!(
+ out,
+ "# TYPE telemt_me_writer_restored_fallback_total counter"
+ );
let _ = writeln!(
out,
"telemt_me_writer_restored_fallback_total {}",
@@ -2071,17 +2375,35 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
unresolved_writer_losses
);
- let _ = writeln!(out, "# HELP telemt_user_connections_total Per-user total connections");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_user_connections_total Per-user total connections"
+ );
let _ = writeln!(out, "# TYPE telemt_user_connections_total counter");
- let _ = writeln!(out, "# HELP telemt_user_connections_current Per-user active connections");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_user_connections_current Per-user active connections"
+ );
let _ = writeln!(out, "# TYPE telemt_user_connections_current gauge");
- let _ = writeln!(out, "# HELP telemt_user_octets_from_client Per-user bytes received");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_user_octets_from_client Per-user bytes received"
+ );
let _ = writeln!(out, "# TYPE telemt_user_octets_from_client counter");
- let _ = writeln!(out, "# HELP telemt_user_octets_to_client Per-user bytes sent");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_user_octets_to_client Per-user bytes sent"
+ );
let _ = writeln!(out, "# TYPE telemt_user_octets_to_client counter");
- let _ = writeln!(out, "# HELP telemt_user_msgs_from_client Per-user messages received");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_user_msgs_from_client Per-user messages received"
+ );
let _ = writeln!(out, "# TYPE telemt_user_msgs_from_client counter");
- let _ = writeln!(out, "# HELP telemt_user_msgs_to_client Per-user messages sent");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_user_msgs_to_client Per-user messages sent"
+ );
let _ = writeln!(out, "# TYPE telemt_user_msgs_to_client counter");
let _ = writeln!(
out,
@@ -2121,12 +2443,45 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
for entry in stats.iter_user_stats() {
let user = entry.key();
let s = entry.value();
- let _ = writeln!(out, "telemt_user_connections_total{{user=\"{}\"}} {}", user, s.connects.load(std::sync::atomic::Ordering::Relaxed));
- let _ = writeln!(out, "telemt_user_connections_current{{user=\"{}\"}} {}", user, s.curr_connects.load(std::sync::atomic::Ordering::Relaxed));
- let _ = writeln!(out, "telemt_user_octets_from_client{{user=\"{}\"}} {}", user, s.octets_from_client.load(std::sync::atomic::Ordering::Relaxed));
- let _ = writeln!(out, "telemt_user_octets_to_client{{user=\"{}\"}} {}", user, s.octets_to_client.load(std::sync::atomic::Ordering::Relaxed));
- let _ = writeln!(out, "telemt_user_msgs_from_client{{user=\"{}\"}} {}", user, s.msgs_from_client.load(std::sync::atomic::Ordering::Relaxed));
- let _ = writeln!(out, "telemt_user_msgs_to_client{{user=\"{}\"}} {}", user, s.msgs_to_client.load(std::sync::atomic::Ordering::Relaxed));
+ let _ = writeln!(
+ out,
+ "telemt_user_connections_total{{user=\"{}\"}} {}",
+ user,
+ s.connects.load(std::sync::atomic::Ordering::Relaxed)
+ );
+ let _ = writeln!(
+ out,
+ "telemt_user_connections_current{{user=\"{}\"}} {}",
+ user,
+ s.curr_connects.load(std::sync::atomic::Ordering::Relaxed)
+ );
+ let _ = writeln!(
+ out,
+ "telemt_user_octets_from_client{{user=\"{}\"}} {}",
+ user,
+ s.octets_from_client
+ .load(std::sync::atomic::Ordering::Relaxed)
+ );
+ let _ = writeln!(
+ out,
+ "telemt_user_octets_to_client{{user=\"{}\"}} {}",
+ user,
+ s.octets_to_client
+ .load(std::sync::atomic::Ordering::Relaxed)
+ );
+ let _ = writeln!(
+ out,
+ "telemt_user_msgs_from_client{{user=\"{}\"}} {}",
+ user,
+ s.msgs_from_client
+ .load(std::sync::atomic::Ordering::Relaxed)
+ );
+ let _ = writeln!(
+ out,
+ "telemt_user_msgs_to_client{{user=\"{}\"}} {}",
+ user,
+ s.msgs_to_client.load(std::sync::atomic::Ordering::Relaxed)
+ );
}
let ip_stats = ip_tracker.get_stats().await;
@@ -2144,16 +2499,25 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
.get_recent_counts_for_users(&unique_users_vec)
.await;
- let _ = writeln!(out, "# HELP telemt_user_unique_ips_current Per-user current number of unique active IPs");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_user_unique_ips_current Per-user current number of unique active IPs"
+ );
let _ = writeln!(out, "# TYPE telemt_user_unique_ips_current gauge");
let _ = writeln!(
out,
"# HELP telemt_user_unique_ips_recent_window Per-user unique IPs seen in configured observation window"
);
let _ = writeln!(out, "# TYPE telemt_user_unique_ips_recent_window gauge");
- let _ = writeln!(out, "# HELP telemt_user_unique_ips_limit Effective per-user unique IP limit (0 means unlimited)");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_user_unique_ips_limit Effective per-user unique IP limit (0 means unlimited)"
+ );
let _ = writeln!(out, "# TYPE telemt_user_unique_ips_limit gauge");
- let _ = writeln!(out, "# HELP telemt_user_unique_ips_utilization Per-user unique IP usage ratio (0 for unlimited)");
+ let _ = writeln!(
+ out,
+ "# HELP telemt_user_unique_ips_utilization Per-user unique IP usage ratio (0 for unlimited)"
+ );
let _ = writeln!(out, "# TYPE telemt_user_unique_ips_utilization gauge");
for user in unique_users {
@@ -2164,29 +2528,34 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
.get(&user)
.copied()
.filter(|limit| *limit > 0)
- .or(
- (config.access.user_max_unique_ips_global_each > 0)
- .then_some(config.access.user_max_unique_ips_global_each),
- )
+ .or((config.access.user_max_unique_ips_global_each > 0)
+ .then_some(config.access.user_max_unique_ips_global_each))
.unwrap_or(0);
let utilization = if limit > 0 {
current as f64 / limit as f64
} else {
0.0
};
- let _ = writeln!(out, "telemt_user_unique_ips_current{{user=\"{}\"}} {}", user, current);
+ let _ = writeln!(
+ out,
+ "telemt_user_unique_ips_current{{user=\"{}\"}} {}",
+ user, current
+ );
let _ = writeln!(
out,
"telemt_user_unique_ips_recent_window{{user=\"{}\"}} {}",
user,
recent_counts.get(&user).copied().unwrap_or(0)
);
- let _ = writeln!(out, "telemt_user_unique_ips_limit{{user=\"{}\"}} {}", user, limit);
+ let _ = writeln!(
+ out,
+ "telemt_user_unique_ips_limit{{user=\"{}\"}} {}",
+ user, limit
+ );
let _ = writeln!(
out,
"telemt_user_unique_ips_utilization{{user=\"{}\"}} {:.6}",
- user,
- utilization
+ user, utilization
);
}
}
@@ -2197,8 +2566,8 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp
#[cfg(test)]
mod tests {
use super::*;
- use std::net::IpAddr;
use http_body_util::BodyExt;
+ use std::net::IpAddr;
#[tokio::test]
async fn test_render_metrics_format() {
@@ -2213,8 +2582,6 @@ mod tests {
stats.increment_connects_all();
stats.increment_connects_all();
stats.increment_connects_bad();
- stats.increment_current_connections_direct();
- stats.increment_current_connections_me();
stats.increment_handshake_timeouts();
stats.increment_upstream_connect_attempt_total();
stats.increment_upstream_connect_attempt_total();
@@ -2230,6 +2597,20 @@ mod tests {
stats.increment_me_rpc_proxy_req_signal_response_total();
stats.increment_me_rpc_proxy_req_signal_close_sent_total();
stats.increment_me_idle_close_by_peer_total();
+ stats.increment_relay_idle_soft_mark_total();
+ stats.increment_relay_idle_hard_close_total();
+ stats.increment_relay_pressure_evict_total();
+ stats.increment_relay_protocol_desync_close_total();
+ stats.increment_me_d2c_batches_total();
+ stats.add_me_d2c_batch_frames_total(3);
+ stats.add_me_d2c_batch_bytes_total(2048);
+ stats.increment_me_d2c_flush_reason(crate::stats::MeD2cFlushReason::AckImmediate);
+ stats.increment_me_d2c_data_frames_total();
+ stats.increment_me_d2c_ack_frames_total();
+ stats.add_me_d2c_payload_bytes_total(1800);
+ stats.increment_me_d2c_write_mode(crate::stats::MeD2cWriteMode::Coalesced);
+ stats.increment_me_d2c_quota_reject_total(crate::stats::MeD2cQuotaRejectStage::PostWrite);
+ stats.observe_me_d2c_frame_buf_shrink(4096);
stats.increment_user_connects("alice");
stats.increment_user_curr_connects("alice");
stats.add_user_octets_from("alice", 1024);
@@ -2246,21 +2627,15 @@ mod tests {
assert!(output.contains("telemt_connections_total 2"));
assert!(output.contains("telemt_connections_bad_total 1"));
- assert!(output.contains("telemt_connections_current 2"));
- assert!(output.contains("telemt_connections_direct_current 1"));
- assert!(output.contains("telemt_connections_me_current 1"));
assert!(output.contains("telemt_handshake_timeouts_total 1"));
assert!(output.contains("telemt_upstream_connect_attempt_total 2"));
assert!(output.contains("telemt_upstream_connect_success_total 1"));
assert!(output.contains("telemt_upstream_connect_fail_total 1"));
assert!(output.contains("telemt_upstream_connect_failfast_hard_error_total 1"));
+ assert!(output.contains("telemt_upstream_connect_attempts_per_request{bucket=\"2\"} 1"));
assert!(
- output.contains("telemt_upstream_connect_attempts_per_request{bucket=\"2\"} 1")
- );
- assert!(
- output.contains(
- "telemt_upstream_connect_duration_success_total{bucket=\"101_500ms\"} 1"
- )
+ output
+ .contains("telemt_upstream_connect_duration_success_total{bucket=\"101_500ms\"} 1")
);
assert!(
output.contains("telemt_upstream_connect_duration_fail_total{bucket=\"gt_1000ms\"} 1")
@@ -2271,6 +2646,21 @@ mod tests {
assert!(output.contains("telemt_me_rpc_proxy_req_signal_response_total 1"));
assert!(output.contains("telemt_me_rpc_proxy_req_signal_close_sent_total 1"));
assert!(output.contains("telemt_me_idle_close_by_peer_total 1"));
+ assert!(output.contains("telemt_relay_idle_soft_mark_total 1"));
+ assert!(output.contains("telemt_relay_idle_hard_close_total 1"));
+ assert!(output.contains("telemt_relay_pressure_evict_total 1"));
+ assert!(output.contains("telemt_relay_protocol_desync_close_total 1"));
+ assert!(output.contains("telemt_me_d2c_batches_total 1"));
+ assert!(output.contains("telemt_me_d2c_batch_frames_total 3"));
+ assert!(output.contains("telemt_me_d2c_batch_bytes_total 2048"));
+ assert!(output.contains("telemt_me_d2c_flush_reason_total{reason=\"ack_immediate\"} 1"));
+ assert!(output.contains("telemt_me_d2c_data_frames_total 1"));
+ assert!(output.contains("telemt_me_d2c_ack_frames_total 1"));
+ assert!(output.contains("telemt_me_d2c_payload_bytes_total 1800"));
+ assert!(output.contains("telemt_me_d2c_write_mode_total{mode=\"coalesced\"} 1"));
+ assert!(output.contains("telemt_me_d2c_quota_reject_total{stage=\"post_write\"} 1"));
+ assert!(output.contains("telemt_me_d2c_frame_buf_shrink_total 1"));
+ assert!(output.contains("telemt_me_d2c_frame_buf_shrink_bytes_total 4096"));
assert!(output.contains("telemt_user_connections_total{user=\"alice\"} 1"));
assert!(output.contains("telemt_user_connections_current{user=\"alice\"} 1"));
assert!(output.contains("telemt_user_octets_from_client{user=\"alice\"} 1024"));
@@ -2291,9 +2681,6 @@ mod tests {
let output = render_metrics(&stats, &config, &tracker).await;
assert!(output.contains("telemt_connections_total 0"));
assert!(output.contains("telemt_connections_bad_total 0"));
- assert!(output.contains("telemt_connections_current 0"));
- assert!(output.contains("telemt_connections_direct_current 0"));
- assert!(output.contains("telemt_connections_me_current 0"));
assert!(output.contains("telemt_handshake_timeouts_total 0"));
assert!(output.contains("telemt_user_unique_ips_current{user="));
assert!(output.contains("telemt_user_unique_ips_recent_window{user="));
@@ -2327,42 +2714,24 @@ mod tests {
assert!(output.contains("# TYPE telemt_uptime_seconds gauge"));
assert!(output.contains("# TYPE telemt_connections_total counter"));
assert!(output.contains("# TYPE telemt_connections_bad_total counter"));
- assert!(output.contains("# TYPE telemt_connections_current gauge"));
- assert!(output.contains("# TYPE telemt_connections_direct_current gauge"));
- assert!(output.contains("# TYPE telemt_connections_me_current gauge"));
- assert!(output.contains("# TYPE telemt_relay_adaptive_promotions_total counter"));
- assert!(output.contains("# TYPE telemt_relay_adaptive_demotions_total counter"));
- assert!(output.contains("# TYPE telemt_relay_adaptive_hard_promotions_total counter"));
- assert!(output.contains("# TYPE telemt_reconnect_evict_total counter"));
- assert!(output.contains("# TYPE telemt_reconnect_stale_close_total counter"));
assert!(output.contains("# TYPE telemt_handshake_timeouts_total counter"));
assert!(output.contains("# TYPE telemt_upstream_connect_attempt_total counter"));
assert!(output.contains("# TYPE telemt_me_rpc_proxy_req_signal_sent_total counter"));
assert!(output.contains("# TYPE telemt_me_idle_close_by_peer_total counter"));
+ assert!(output.contains("# TYPE telemt_relay_idle_soft_mark_total counter"));
+ assert!(output.contains("# TYPE telemt_relay_idle_hard_close_total counter"));
+ assert!(output.contains("# TYPE telemt_relay_pressure_evict_total counter"));
+ assert!(output.contains("# TYPE telemt_relay_protocol_desync_close_total counter"));
+ assert!(output.contains("# TYPE telemt_me_d2c_batches_total counter"));
+ assert!(output.contains("# TYPE telemt_me_d2c_flush_reason_total counter"));
+ assert!(output.contains("# TYPE telemt_me_d2c_write_mode_total counter"));
+ assert!(output.contains("# TYPE telemt_me_d2c_batch_frames_bucket_total counter"));
+ assert!(output.contains("# TYPE telemt_me_d2c_flush_duration_us_bucket_total counter"));
assert!(output.contains("# TYPE telemt_me_writer_removed_total counter"));
- assert!(output.contains("# TYPE telemt_me_writer_teardown_attempt_total counter"));
- assert!(output.contains("# TYPE telemt_me_writer_teardown_success_total counter"));
- assert!(output.contains("# TYPE telemt_me_writer_teardown_timeout_total counter"));
- assert!(output.contains("# TYPE telemt_me_writer_teardown_escalation_total counter"));
- assert!(output.contains("# TYPE telemt_me_writer_teardown_noop_total counter"));
- assert!(output.contains(
- "# TYPE telemt_me_writer_teardown_duration_seconds histogram"
- ));
- assert!(output.contains(
- "# TYPE telemt_me_writer_cleanup_side_effect_failures_total counter"
- ));
- assert!(output.contains("# TYPE telemt_me_writer_close_signal_drop_total counter"));
- assert!(output.contains(
- "# TYPE telemt_me_writer_close_signal_channel_full_total counter"
- ));
- assert!(output.contains(
- "# TYPE telemt_me_draining_writers_reap_progress_total counter"
- ));
- assert!(output.contains("# TYPE telemt_pool_drain_soft_evict_total counter"));
- assert!(output.contains("# TYPE telemt_pool_drain_soft_evict_writer_total counter"));
- assert!(output.contains(
- "# TYPE telemt_me_writer_removed_unexpected_minus_restored_total gauge"
- ));
+ assert!(
+ output
+ .contains("# TYPE telemt_me_writer_removed_unexpected_minus_restored_total gauge")
+ );
assert!(output.contains("# TYPE telemt_user_unique_ips_current gauge"));
assert!(output.contains("# TYPE telemt_user_unique_ips_recent_window gauge"));
assert!(output.contains("# TYPE telemt_user_unique_ips_limit gauge"));
@@ -2379,14 +2748,17 @@ mod tests {
stats.increment_connects_all();
stats.increment_connects_all();
- let req = Request::builder()
- .uri("/metrics")
- .body(())
+ let req = Request::builder().uri("/metrics").body(()).unwrap();
+ let resp = handle(req, &stats, &beobachten, &tracker, &config)
+ .await
.unwrap();
- let resp = handle(req, &stats, &beobachten, &tracker, &config).await.unwrap();
assert_eq!(resp.status(), StatusCode::OK);
let body = resp.into_body().collect().await.unwrap().to_bytes();
- assert!(std::str::from_utf8(body.as_ref()).unwrap().contains("telemt_connections_total 3"));
+ assert!(
+ std::str::from_utf8(body.as_ref())
+ .unwrap()
+ .contains("telemt_connections_total 3")
+ );
config.general.beobachten = true;
config.general.beobachten_minutes = 10;
@@ -2395,10 +2767,7 @@ mod tests {
"203.0.113.10".parse::().unwrap(),
Duration::from_secs(600),
);
- let req_beob = Request::builder()
- .uri("/beobachten")
- .body(())
- .unwrap();
+ let req_beob = Request::builder().uri("/beobachten").body(()).unwrap();
let resp_beob = handle(req_beob, &stats, &beobachten, &tracker, &config)
.await
.unwrap();
@@ -2408,10 +2777,7 @@ mod tests {
assert!(beob_text.contains("[TLS-scanner]"));
assert!(beob_text.contains("203.0.113.10-1"));
- let req404 = Request::builder()
- .uri("/other")
- .body(())
- .unwrap();
+ let req404 = Request::builder().uri("/other").body(()).unwrap();
let resp404 = handle(req404, &stats, &beobachten, &tracker, &config)
.await
.unwrap();
diff --git a/src/network/dns_overrides.rs b/src/network/dns_overrides.rs
index 447863a..86fb325 100644
--- a/src/network/dns_overrides.rs
+++ b/src/network/dns_overrides.rs
@@ -26,9 +26,7 @@ fn parse_ip_spec(ip_spec: &str) -> Result {
}
let ip = ip_spec.parse::().map_err(|_| {
- ProxyError::Config(format!(
- "network.dns_overrides IP is invalid: '{ip_spec}'"
- ))
+ ProxyError::Config(format!("network.dns_overrides IP is invalid: '{ip_spec}'"))
})?;
if matches!(ip, IpAddr::V6(_)) {
return Err(ProxyError::Config(format!(
@@ -103,9 +101,9 @@ pub fn validate_entries(entries: &[String]) -> Result<()> {
/// Replace runtime DNS overrides with a new validated snapshot.
pub fn install_entries(entries: &[String]) -> Result<()> {
let parsed = parse_entries(entries)?;
- let mut guard = overrides_store()
- .write()
- .map_err(|_| ProxyError::Config("network.dns_overrides runtime lock is poisoned".to_string()))?;
+ let mut guard = overrides_store().write().map_err(|_| {
+ ProxyError::Config("network.dns_overrides runtime lock is poisoned".to_string())
+ })?;
*guard = parsed;
Ok(())
}
diff --git a/src/network/probe.rs b/src/network/probe.rs
index a9e369d..1787b92 100644
--- a/src/network/probe.rs
+++ b/src/network/probe.rs
@@ -1,4 +1,5 @@
#![allow(dead_code)]
+#![allow(clippy::items_after_test_module)]
use std::collections::HashMap;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket};
@@ -10,7 +11,9 @@ use tracing::{debug, info, warn};
use crate::config::{NetworkConfig, UpstreamConfig, UpstreamType};
use crate::error::Result;
-use crate::network::stun::{stun_probe_family_with_bind, DualStunResult, IpFamily, StunProbeResult};
+use crate::network::stun::{
+ DualStunResult, IpFamily, StunProbeResult, stun_probe_family_with_bind,
+};
use crate::transport::UpstreamManager;
#[derive(Debug, Clone, Default)]
@@ -78,13 +81,8 @@ pub async fn run_probe(
warn!("STUN probe is enabled but network.stun_servers is empty");
DualStunResult::default()
} else {
- probe_stun_servers_parallel(
- &servers,
- stun_nat_probe_concurrency.max(1),
- None,
- None,
- )
- .await
+ probe_stun_servers_parallel(&servers, stun_nat_probe_concurrency.max(1), None, None)
+ .await
}
} else if nat_probe {
info!("STUN probe is disabled by network.stun_use=false");
@@ -99,7 +97,8 @@ pub async fn run_probe(
let UpstreamType::Direct {
interface,
bind_addresses,
- } = &upstream.upstream_type else {
+ } = &upstream.upstream_type
+ else {
continue;
};
if let Some(addrs) = bind_addresses.as_ref().filter(|v| !v.is_empty()) {
@@ -199,11 +198,10 @@ pub async fn run_probe(
if nat_probe
&& probe.reflected_ipv4.is_none()
&& probe.detected_ipv4.map(is_bogon_v4).unwrap_or(false)
+ && let Some(public_ip) = detect_public_ipv4_http(&config.http_ip_detect_urls).await
{
- if let Some(public_ip) = detect_public_ipv4_http(&config.http_ip_detect_urls).await {
- probe.reflected_ipv4 = Some(SocketAddr::new(IpAddr::V4(public_ip), 0));
- info!(public_ip = %public_ip, "STUN unavailable, using HTTP public IPv4 fallback");
- }
+ probe.reflected_ipv4 = Some(SocketAddr::new(IpAddr::V4(public_ip), 0));
+ info!(public_ip = %public_ip, "STUN unavailable, using HTTP public IPv4 fallback");
}
probe.ipv4_nat_detected = match (probe.detected_ipv4, probe.reflected_ipv4) {
@@ -217,12 +215,20 @@ pub async fn run_probe(
probe.ipv4_usable = config.ipv4
&& probe.detected_ipv4.is_some()
- && (!probe.ipv4_is_bogon || probe.reflected_ipv4.map(|r| !is_bogon(r.ip())).unwrap_or(false));
+ && (!probe.ipv4_is_bogon
+ || probe
+ .reflected_ipv4
+ .map(|r| !is_bogon(r.ip()))
+ .unwrap_or(false));
let ipv6_enabled = config.ipv6.unwrap_or(probe.detected_ipv6.is_some());
probe.ipv6_usable = ipv6_enabled
&& probe.detected_ipv6.is_some()
- && (!probe.ipv6_is_bogon || probe.reflected_ipv6.map(|r| !is_bogon(r.ip())).unwrap_or(false));
+ && (!probe.ipv6_is_bogon
+ || probe
+ .reflected_ipv6
+ .map(|r| !is_bogon(r.ip()))
+ .unwrap_or(false));
Ok(probe)
}
@@ -280,8 +286,6 @@ async fn probe_stun_servers_parallel(
while next_idx < servers.len() && join_set.len() < concurrency {
let stun_addr = servers[next_idx].clone();
next_idx += 1;
- let bind_v4 = bind_v4;
- let bind_v6 = bind_v6;
join_set.spawn(async move {
let res = timeout(STUN_BATCH_TIMEOUT, async {
let v4 = stun_probe_family_with_bind(&stun_addr, IpFamily::V4, bind_v4).await?;
@@ -300,11 +304,15 @@ async fn probe_stun_servers_parallel(
match task {
Ok((stun_addr, Ok(Ok(result)))) => {
if let Some(v4) = result.v4 {
- let entry = best_v4_by_ip.entry(v4.reflected_addr.ip()).or_insert((0, v4));
+ let entry = best_v4_by_ip
+ .entry(v4.reflected_addr.ip())
+ .or_insert((0, v4));
entry.0 += 1;
}
if let Some(v6) = result.v6 {
- let entry = best_v6_by_ip.entry(v6.reflected_addr.ip()).or_insert((0, v6));
+ let entry = best_v6_by_ip
+ .entry(v6.reflected_addr.ip())
+ .or_insert((0, v6));
entry.0 += 1;
}
if result.v4.is_some() || result.v6.is_some() {
@@ -324,17 +332,11 @@ async fn probe_stun_servers_parallel(
}
let mut out = DualStunResult::default();
- if let Some((_, best)) = best_v4_by_ip
- .into_values()
- .max_by_key(|(count, _)| *count)
- {
+ if let Some((_, best)) = best_v4_by_ip.into_values().max_by_key(|(count, _)| *count) {
info!("STUN-Quorum reached, IP: {}", best.reflected_addr.ip());
out.v4 = Some(best);
}
- if let Some((_, best)) = best_v6_by_ip
- .into_values()
- .max_by_key(|(count, _)| *count)
- {
+ if let Some((_, best)) = best_v6_by_ip.into_values().max_by_key(|(count, _)| *count) {
info!("STUN-Quorum reached, IP: {}", best.reflected_addr.ip());
out.v6 = Some(best);
}
@@ -347,7 +349,8 @@ pub fn decide_network_capabilities(
middle_proxy_nat_ip: Option,
) -> NetworkDecision {
let ipv4_dc = config.ipv4 && probe.detected_ipv4.is_some();
- let ipv6_dc = config.ipv6.unwrap_or(probe.detected_ipv6.is_some()) && probe.detected_ipv6.is_some();
+ let ipv6_dc =
+ config.ipv6.unwrap_or(probe.detected_ipv6.is_some()) && probe.detected_ipv6.is_some();
let nat_ip_v4 = matches!(middle_proxy_nat_ip, Some(IpAddr::V4(_)));
let nat_ip_v6 = matches!(middle_proxy_nat_ip, Some(IpAddr::V6(_)));
@@ -534,10 +537,26 @@ pub fn is_bogon_v6(ip: Ipv6Addr) -> bool {
pub fn log_probe_result(probe: &NetworkProbe, decision: &NetworkDecision) {
info!(
- ipv4 = probe.detected_ipv4.as_ref().map(|v| v.to_string()).unwrap_or_else(|| "-".into()),
- ipv6 = probe.detected_ipv6.as_ref().map(|v| v.to_string()).unwrap_or_else(|| "-".into()),
- reflected_v4 = probe.reflected_ipv4.as_ref().map(|v| v.ip().to_string()).unwrap_or_else(|| "-".into()),
- reflected_v6 = probe.reflected_ipv6.as_ref().map(|v| v.ip().to_string()).unwrap_or_else(|| "-".into()),
+ ipv4 = probe
+ .detected_ipv4
+ .as_ref()
+ .map(|v| v.to_string())
+ .unwrap_or_else(|| "-".into()),
+ ipv6 = probe
+ .detected_ipv6
+ .as_ref()
+ .map(|v| v.to_string())
+ .unwrap_or_else(|| "-".into()),
+ reflected_v4 = probe
+ .reflected_ipv4
+ .as_ref()
+ .map(|v| v.ip().to_string())
+ .unwrap_or_else(|| "-".into()),
+ reflected_v6 = probe
+ .reflected_ipv6
+ .as_ref()
+ .map(|v| v.ip().to_string())
+ .unwrap_or_else(|| "-".into()),
ipv4_bogon = probe.ipv4_is_bogon,
ipv6_bogon = probe.ipv6_is_bogon,
ipv4_me = decision.ipv4_me,
diff --git a/src/network/stun.rs b/src/network/stun.rs
index c3a235f..d1e088c 100644
--- a/src/network/stun.rs
+++ b/src/network/stun.rs
@@ -2,13 +2,20 @@
#![allow(dead_code)]
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
+use std::sync::OnceLock;
-use tokio::net::{lookup_host, UdpSocket};
-use tokio::time::{timeout, Duration, sleep};
+use tokio::net::{UdpSocket, lookup_host};
+use tokio::time::{Duration, sleep, timeout};
+use crate::crypto::SecureRandom;
use crate::error::{ProxyError, Result};
use crate::network::dns_overrides::{resolve, split_host_port};
+fn stun_rng() -> &'static SecureRandom {
+ static STUN_RNG: OnceLock = OnceLock::new();
+ STUN_RNG.get_or_init(SecureRandom::new)
+}
+
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum IpFamily {
V4,
@@ -34,13 +41,13 @@ pub async fn stun_probe_dual(stun_addr: &str) -> Result {
stun_probe_family(stun_addr, IpFamily::V6),
);
- Ok(DualStunResult {
- v4: v4?,
- v6: v6?,
- })
+ Ok(DualStunResult { v4: v4?, v6: v6? })
}
-pub async fn stun_probe_family(stun_addr: &str, family: IpFamily) -> Result