From 829dc16fa3ccdd12c64a97ee3d561d813c3f8a47 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Mon, 23 Feb 2026 14:35:47 +0300 Subject: [PATCH 01/98] Update Cargo.toml --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 15563dc..dbdb024 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "telemt" -version = "3.0.11" +version = "3.0.12" edition = "2024" [dependencies] From 1dfe38c5dbf46b8068395161b3cb3738fb927181 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Mon, 23 Feb 2026 14:36:14 +0300 Subject: [PATCH 02/98] Update Cargo.lock --- Cargo.lock | 544 +++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 507 insertions(+), 37 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8c0e1d4..fb45f19 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -55,6 +55,45 @@ version = "1.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f0e0fee31ef5ed1ba1316088939cea399010ed7731dba877ed44aeb407a75ea" +[[package]] +name = "asn1-rs" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f6fd5ddaf0351dff5b8da21b2fb4ff8e08ddd02857f0bf69c47639106c0fff0" +dependencies = [ + "asn1-rs-derive", + "asn1-rs-impl", + "displaydoc", + "nom", + "num-traits", + "rusticata-macros", + "thiserror 1.0.69", + "time", +] + +[[package]] +name = "asn1-rs-derive" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "726535892e8eae7e70657b4c8ea93d26b8553afb1ce617caee529ef96d7dee6c" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", + "synstructure 0.12.6", +] + +[[package]] +name = "asn1-rs-impl" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2777730b2039ac0f95f093556e61b6d26cebed5393ca6f152717777cec3a42ed" +dependencies = [ + "proc-macro2", + 
"quote", + "syn 1.0.109", +] + [[package]] name = "atomic-waker" version = "1.1.2" @@ -88,6 +127,12 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5e764a1d40d510daf35e07be9eb06e75770908c27d411ee6c92109c9840eaaf7" +[[package]] +name = "bitflags" +version = "1.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" + [[package]] name = "bitflags" version = "2.10.0" @@ -155,6 +200,12 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" +[[package]] +name = "cfg_aliases" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" + [[package]] name = "cfg_aliases" version = "0.2.1" @@ -252,6 +303,15 @@ dependencies = [ "libc", ] +[[package]] +name = "crc32c" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3a47af21622d091a8f0fb295b88bc886ac74efcc613efc19f5d0b21de5c89e47" +dependencies = [ + "rustc_version", +] + [[package]] name = "crc32fast" version = "1.5.0" @@ -297,6 +357,15 @@ dependencies = [ "itertools", ] +[[package]] +name = "crossbeam-channel" +version = "0.5.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" +dependencies = [ + "crossbeam-utils", +] + [[package]] name = "crossbeam-deque" version = "0.8.6" @@ -369,6 +438,35 @@ dependencies = [ "parking_lot_core", ] +[[package]] +name = "data-encoding" +version = "2.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7a1e2f27636f116493b8b860f5546edb47c8d8f8ea73e1d2a20be88e28d1fea" + +[[package]] +name = "der-parser" +version = "8.2.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbd676fbbab537128ef0278adb5576cf363cff6aa22a7b24effe97347cfab61e" +dependencies = [ + "asn1-rs", + "displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + +[[package]] +name = "deranged" +version = "0.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7cd812cc2bc1d69d4764bd80df88b4317eaef9e773c75226407d9bc0876b211c" +dependencies = [ + "powerfmt", +] + [[package]] name = "digest" version = "0.10.7" @@ -388,7 +486,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.114", ] [[package]] @@ -419,6 +517,17 @@ version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" +[[package]] +name = "filetime" +version = "0.2.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f98844151eee8917efc50bd9e8318cb963ae8b297431495d3f758616ea5c57db" +dependencies = [ + "cfg-if", + "libc", + "libredox", +] + [[package]] name = "find-msvc-tools" version = "0.1.9" @@ -452,6 +561,15 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "fsevent-sys" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76ee7a02da4d231650c7cea31349b889be2f45ddb3ef3032d2ec8185f6313fd2" +dependencies = [ + "libc", +] + [[package]] name = "futures" version = "0.3.31" @@ -508,7 +626,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.114", ] [[package]] @@ -756,7 +874,7 @@ dependencies = [ "tokio", "tokio-rustls", "tower-service", - "webpki-roots", + "webpki-roots 1.0.6", ] [[package]] @@ -926,6 +1044,26 @@ dependencies = [ "serde_core", ] +[[package]] +name = "inotify" +version = "0.9.6" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8069d3ec154eb856955c1c0fbffefbf5f3c40a104ec912d4797314c1801abff" +dependencies = [ + "bitflags 1.3.2", + "inotify-sys", + "libc", +] + +[[package]] +name = "inotify-sys" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e05c02b5e89bff3b946cedeca278abc628fe811e604f027c45a8aa3cf793d0eb" +dependencies = [ + "libc", +] + [[package]] name = "inout" version = "0.1.4" @@ -942,6 +1080,15 @@ version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" +[[package]] +name = "ipnetwork" +version = "0.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf466541e9d546596ee94f9f69590f89473455f88372423e0008fc1a7daf100e" +dependencies = [ + "serde", +] + [[package]] name = "iri-string" version = "0.7.10" @@ -988,6 +1135,26 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "kqueue" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eac30106d7dce88daf4a3fcb4879ea939476d5074a9b7ddd0fb97fa4bed5596a" +dependencies = [ + "kqueue-sys", + "libc", +] + +[[package]] +name = "kqueue-sys" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed9625ffda8729b85e45cf04090035ac368927b8cebc34898e7c120f52e4838b" +dependencies = [ + "bitflags 1.3.2", + "libc", +] + [[package]] name = "lazy_static" version = "1.5.0" @@ -1006,6 +1173,17 @@ version = "0.2.181" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "459427e2af2b9c839b132acb702a1c654d95e10f8c326bfc2ad11310e458b1c5" +[[package]] +name = "libredox" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616" +dependencies = [ + "bitflags 2.10.0", + "libc", + "redox_syscall 
0.7.1", +] + [[package]] name = "linux-raw-sys" version = "0.11.0" @@ -1073,6 +1251,33 @@ version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" +[[package]] +name = "memoffset" +version = "0.9.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" +dependencies = [ + "autocfg", +] + +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + +[[package]] +name = "mio" +version = "0.8.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +dependencies = [ + "libc", + "log", + "wasi", + "windows-sys 0.48.0", +] + [[package]] name = "mio" version = "1.1.1" @@ -1084,6 +1289,48 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "nix" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" +dependencies = [ + "bitflags 2.10.0", + "cfg-if", + "cfg_aliases 0.1.1", + "libc", + "memoffset", +] + +[[package]] +name = "nom" +version = "7.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d273983c5a657a70a3e8f2a01329822f3b8c8172b73826411a55751e404a0a4a" +dependencies = [ + "memchr", + "minimal-lexical", +] + +[[package]] +name = "notify" +version = "6.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6205bd8bb1e454ad2e27422015fb5e4f2bcc7e08fa8f27058670d208324a4d2d" +dependencies = [ + "bitflags 2.10.0", + "crossbeam-channel", + "filetime", + "fsevent-sys", + "inotify", + "kqueue", + "libc", + "log", + "mio 0.8.11", + "walkdir", + "windows-sys 0.48.0", +] + 
[[package]] name = "nu-ansi-term" version = "0.50.3" @@ -1103,6 +1350,12 @@ dependencies = [ "num-traits", ] +[[package]] +name = "num-conv" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050" + [[package]] name = "num-integer" version = "0.1.46" @@ -1121,6 +1374,15 @@ dependencies = [ "autocfg", ] +[[package]] +name = "oid-registry" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bedf36ffb6ba96c2eb7144ef6270557b52e54b20c0a8e1eb2ff99a6c6959bff" +dependencies = [ + "asn1-rs", +] + [[package]] name = "once_cell" version = "1.21.3" @@ -1151,7 +1413,7 @@ checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.5.18", "smallvec", "windows-link", ] @@ -1211,6 +1473,12 @@ dependencies = [ "zerovec", ] +[[package]] +name = "powerfmt" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + [[package]] name = "ppv-lite86" version = "0.2.21" @@ -1227,7 +1495,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn", + "syn 2.0.114", ] [[package]] @@ -1247,7 +1515,7 @@ checksum = "37566cb3fdacef14c0737f9546df7cfeadbfbc9fef10991038bf5015d0c80532" dependencies = [ "bit-set", "bit-vec", - "bitflags", + "bitflags 2.10.0", "num-traits", "rand", "rand_chacha", @@ -1271,14 +1539,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" dependencies = [ "bytes", - "cfg_aliases", + "cfg_aliases 0.2.1", "pin-project-lite", "quinn-proto", "quinn-udp", "rustc-hash", "rustls", "socket2 0.6.2", - "thiserror", + 
"thiserror 2.0.18", "tokio", "tracing", "web-time", @@ -1299,7 +1567,7 @@ dependencies = [ "rustls", "rustls-pki-types", "slab", - "thiserror", + "thiserror 2.0.18", "tinyvec", "tracing", "web-time", @@ -1311,7 +1579,7 @@ version = "0.5.14" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" dependencies = [ - "cfg_aliases", + "cfg_aliases 0.2.1", "libc", "once_cell", "socket2 0.6.2", @@ -1398,7 +1666,16 @@ version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags", + "bitflags 2.10.0", +] + +[[package]] +name = "redox_syscall" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35985aa610addc02e24fc232012c86fd11f14111180f902b67e2d5331f8ebf2b" +dependencies = [ + "bitflags 2.10.0", ] [[package]] @@ -1465,7 +1742,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots", + "webpki-roots 1.0.6", ] [[package]] @@ -1488,13 +1765,31 @@ version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] + +[[package]] +name = "rusticata-macros" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +dependencies = [ + "nom", +] + [[package]] name = "rustix" version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" dependencies = [ - "bitflags", + 
"bitflags 2.10.0", "errno", "libc", "linux-raw-sys", @@ -1608,7 +1903,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.114", ] [[package]] @@ -1736,6 +2031,17 @@ version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + [[package]] name = "syn" version = "2.0.114" @@ -1756,6 +2062,18 @@ dependencies = [ "futures-core", ] +[[package]] +name = "synstructure" +version = "0.12.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", + "unicode-xid", +] + [[package]] name = "synstructure" version = "0.13.2" @@ -1764,18 +2082,20 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.114", ] [[package]] name = "telemt" -version = "3.0.0" +version = "3.0.10" dependencies = [ "aes", + "anyhow", "base64", "bytes", "cbc", "chrono", + "crc32c", "crc32fast", "criterion", "crossbeam-queue", @@ -1788,9 +2108,12 @@ dependencies = [ "httpdate", "hyper", "hyper-util", + "ipnetwork", "libc", "lru", "md-5", + "nix", + "notify", "num-bigint", "num-traits", "parking_lot", @@ -1798,19 +2121,23 @@ dependencies = [ "rand", "regex", "reqwest", + "rustls", "serde", "serde_json", "sha1", "sha2", "socket2 0.5.10", - "thiserror", + "thiserror 2.0.18", "tokio", + "tokio-rustls", "tokio-test", "tokio-util", "toml", "tracing", "tracing-subscriber", "url", + "webpki-roots 0.26.11", + "x509-parser", "zeroize", 
] @@ -1827,13 +2154,33 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "thiserror" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +dependencies = [ + "thiserror-impl 1.0.69", +] + [[package]] name = "thiserror" version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ - "thiserror-impl", + "thiserror-impl 2.0.18", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.114", ] [[package]] @@ -1844,7 +2191,7 @@ checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.114", ] [[package]] @@ -1856,6 +2203,37 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "time" +version = "0.3.47" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" +dependencies = [ + "deranged", + "itoa", + "num-conv", + "powerfmt", + "serde_core", + "time-core", + "time-macros", +] + +[[package]] +name = "time-core" +version = "0.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" + +[[package]] +name = "time-macros" +version = "0.2.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215" +dependencies = [ + "num-conv", + "time-core", +] + [[package]] name = "tinystr" version = "0.8.2" @@ -1899,7 +2277,7 @@ checksum = 
"72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" dependencies = [ "bytes", "libc", - "mio", + "mio 1.1.1", "parking_lot", "pin-project-lite", "signal-hook-registry", @@ -1917,7 +2295,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.114", ] [[package]] @@ -2031,7 +2409,7 @@ version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ - "bitflags", + "bitflags 2.10.0", "bytes", "futures-util", "http", @@ -2074,7 +2452,7 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.114", ] [[package]] @@ -2280,7 +2658,7 @@ dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn", + "syn 2.0.114", "wasm-bindgen-shared", ] @@ -2321,7 +2699,7 @@ version = "0.244.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" dependencies = [ - "bitflags", + "bitflags 2.10.0", "hashbrown 0.15.5", "indexmap", "semver", @@ -2347,6 +2725,15 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "webpki-roots" +version = "0.26.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "521bc38abb08001b01866da9f51eb7c5d647a19260e00054a8c7fd5f9e57f7a9" +dependencies = [ + "webpki-roots 1.0.6", +] + [[package]] name = "webpki-roots" version = "1.0.6" @@ -2386,7 +2773,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.114", ] [[package]] @@ -2397,7 +2784,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.114", ] [[package]] @@ -2424,6 +2811,15 @@ dependencies = [ "windows-link", ] 
+[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets 0.48.5", +] + [[package]] name = "windows-sys" version = "0.52.0" @@ -2451,6 +2847,21 @@ dependencies = [ "windows-link", ] +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm 0.48.5", + "windows_aarch64_msvc 0.48.5", + "windows_i686_gnu 0.48.5", + "windows_i686_msvc 0.48.5", + "windows_x86_64_gnu 0.48.5", + "windows_x86_64_gnullvm 0.48.5", + "windows_x86_64_msvc 0.48.5", +] + [[package]] name = "windows-targets" version = "0.52.6" @@ -2484,6 +2895,12 @@ dependencies = [ "windows_x86_64_msvc 0.53.1", ] +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + [[package]] name = "windows_aarch64_gnullvm" version = "0.52.6" @@ -2496,6 +2913,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + [[package]] name = "windows_aarch64_msvc" version = "0.52.6" @@ -2508,6 +2931,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" +[[package]] +name = "windows_i686_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + [[package]] name = "windows_i686_gnu" version = "0.52.6" @@ -2532,6 +2961,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + [[package]] name = "windows_i686_msvc" version = "0.52.6" @@ -2544,6 +2979,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + [[package]] name = "windows_x86_64_gnu" version = "0.52.6" @@ -2556,6 +2997,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + [[package]] name = "windows_x86_64_gnullvm" version = "0.52.6" @@ -2568,6 +3015,12 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + [[package]] name = "windows_x86_64_msvc" version = "0.52.6" @@ -2619,7 +3072,7 @@ dependencies = [ "heck", 
"indexmap", "prettyplease", - "syn", + "syn 2.0.114", "wasm-metadata", "wit-bindgen-core", "wit-component", @@ -2635,7 +3088,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn", + "syn 2.0.114", "wit-bindgen-core", "wit-bindgen-rust", ] @@ -2647,7 +3100,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" dependencies = [ "anyhow", - "bitflags", + "bitflags 2.10.0", "indexmap", "log", "serde", @@ -2683,6 +3136,23 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" +[[package]] +name = "x509-parser" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7069fba5b66b9193bd2c5d3d4ff12b839118f6bcbef5328efafafb5395cf63da" +dependencies = [ + "asn1-rs", + "data-encoding", + "der-parser", + "lazy_static", + "nom", + "oid-registry", + "rusticata-macros", + "thiserror 1.0.69", + "time", +] + [[package]] name = "yoke" version = "0.8.1" @@ -2702,8 +3172,8 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn", - "synstructure", + "syn 2.0.114", + "synstructure 0.13.2", ] [[package]] @@ -2723,7 +3193,7 @@ checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.114", ] [[package]] @@ -2743,8 +3213,8 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn", - "synstructure", + "syn 2.0.114", + "synstructure 0.13.2", ] [[package]] @@ -2764,7 +3234,7 @@ checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.114", ] [[package]] @@ -2797,7 +3267,7 @@ checksum = 
"eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn", + "syn 2.0.114", ] [[package]] From d08ddd718a9715ea5837d1641bcb1d6d64931273 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Mon, 23 Feb 2026 15:28:02 +0300 Subject: [PATCH 03/98] Desync Full Forensics Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com> --- src/cli.rs | 1 + src/config/defaults.rs | 4 + src/config/hot_reload.rs | 10 ++ src/config/types.rs | 6 + src/metrics.rs | 35 ++++++ src/proxy/middle_relay.rs | 226 ++++++++++++++++++++++++++++++++------ src/stats/mod.rs | 53 +++++++++ 7 files changed, 304 insertions(+), 31 deletions(-) diff --git a/src/cli.rs b/src/cli.rs index cf98121..7e31f26 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -194,6 +194,7 @@ prefer_ipv6 = false fast_mode = true use_middle_proxy = false log_level = "normal" +desync_all_full = false [network] ipv4 = true diff --git a/src/config/defaults.rs b/src/config/defaults.rs index 90dd6f9..5216e29 100644 --- a/src/config/defaults.rs +++ b/src/config/defaults.rs @@ -118,6 +118,10 @@ pub(crate) fn default_max_client_frame() -> usize { 16 * 1024 * 1024 } +pub(crate) fn default_desync_all_full() -> bool { + false +} + pub(crate) fn default_tls_new_session_tickets() -> u8 { 0 } diff --git a/src/config/hot_reload.rs b/src/config/hot_reload.rs index 6c3b8ff..56cfa0f 100644 --- a/src/config/hot_reload.rs +++ b/src/config/hot_reload.rs @@ -10,6 +10,7 @@ //! | `general` | `ad_tag` | Passed on next connection | //! | `general` | `middle_proxy_pool_size` | Passed on next connection | //! | `general` | `me_keepalive_*` | Passed on next connection | +//! | `general` | `desync_all_full` | Applied immediately | //! | `access` | All user/quota fields | Effective immediately | //! //! 
Fields that require re-binding sockets (`server.port`, `censorship.*`, @@ -34,6 +35,7 @@ pub struct HotFields { pub log_level: LogLevel, pub ad_tag: Option, pub middle_proxy_pool_size: usize, + pub desync_all_full: bool, pub me_keepalive_enabled: bool, pub me_keepalive_interval_secs: u64, pub me_keepalive_jitter_secs: u64, @@ -47,6 +49,7 @@ impl HotFields { log_level: cfg.general.log_level.clone(), ad_tag: cfg.general.ad_tag.clone(), middle_proxy_pool_size: cfg.general.middle_proxy_pool_size, + desync_all_full: cfg.general.desync_all_full, me_keepalive_enabled: cfg.general.me_keepalive_enabled, me_keepalive_interval_secs: cfg.general.me_keepalive_interval_secs, me_keepalive_jitter_secs: cfg.general.me_keepalive_jitter_secs, @@ -175,6 +178,13 @@ fn log_changes( ); } + if old_hot.desync_all_full != new_hot.desync_all_full { + info!( + "config reload: desync_all_full: {} → {}", + old_hot.desync_all_full, new_hot.desync_all_full, + ); + } + if old_hot.me_keepalive_enabled != new_hot.me_keepalive_enabled || old_hot.me_keepalive_interval_secs != new_hot.me_keepalive_interval_secs || old_hot.me_keepalive_jitter_secs != new_hot.me_keepalive_jitter_secs diff --git a/src/config/types.rs b/src/config/types.rs index a303db8..39ba683 100644 --- a/src/config/types.rs +++ b/src/config/types.rs @@ -201,6 +201,11 @@ pub struct GeneralConfig { #[serde(default = "default_max_client_frame")] pub max_client_frame: usize, + /// Emit full crypto-desync forensic logs for every event. + /// When false, full forensic details are emitted once per key window. + #[serde(default = "default_desync_all_full")] + pub desync_all_full: bool, + /// Enable staggered warmup of extra ME writers. 
#[serde(default = "default_true")] pub me_warmup_stagger_enabled: bool, @@ -310,6 +315,7 @@ impl Default for GeneralConfig { links: LinksConfig::default(), crypto_pending_buffer: default_crypto_pending_buffer(), max_client_frame: default_max_client_frame(), + desync_all_full: default_desync_all_full(), fast_mode_min_tls_record: default_fast_mode_min_tls_record(), proxy_secret_auto_reload_secs: default_proxy_secret_reload_secs(), proxy_config_auto_reload_secs: default_proxy_config_reload_secs(), diff --git a/src/metrics.rs b/src/metrics.rs index e00091f..326d333 100644 --- a/src/metrics.rs +++ b/src/metrics.rs @@ -140,6 +140,41 @@ fn render_metrics(stats: &Stats) -> String { let _ = writeln!(out, "# TYPE telemt_secure_padding_invalid_total counter"); let _ = writeln!(out, "telemt_secure_padding_invalid_total {}", stats.get_secure_padding_invalid()); + let _ = writeln!(out, "# HELP telemt_desync_total Total crypto-desync detections"); + let _ = writeln!(out, "# TYPE telemt_desync_total counter"); + let _ = writeln!(out, "telemt_desync_total {}", stats.get_desync_total()); + + let _ = writeln!(out, "# HELP telemt_desync_full_logged_total Full forensic desync logs emitted"); + let _ = writeln!(out, "# TYPE telemt_desync_full_logged_total counter"); + let _ = writeln!(out, "telemt_desync_full_logged_total {}", stats.get_desync_full_logged()); + + let _ = writeln!(out, "# HELP telemt_desync_suppressed_total Suppressed desync forensic events"); + let _ = writeln!(out, "# TYPE telemt_desync_suppressed_total counter"); + let _ = writeln!(out, "telemt_desync_suppressed_total {}", stats.get_desync_suppressed()); + + let _ = writeln!(out, "# HELP telemt_desync_frames_bucket_total Desync count by frames_ok bucket"); + let _ = writeln!(out, "# TYPE telemt_desync_frames_bucket_total counter"); + let _ = writeln!( + out, + "telemt_desync_frames_bucket_total{{bucket=\"0\"}} {}", + stats.get_desync_frames_bucket_0() + ); + let _ = writeln!( + out, + 
"telemt_desync_frames_bucket_total{{bucket=\"1_2\"}} {}", + stats.get_desync_frames_bucket_1_2() + ); + let _ = writeln!( + out, + "telemt_desync_frames_bucket_total{{bucket=\"3_10\"}} {}", + stats.get_desync_frames_bucket_3_10() + ); + let _ = writeln!( + out, + "telemt_desync_frames_bucket_total{{bucket=\"gt_10\"}} {}", + stats.get_desync_frames_bucket_gt_10() + ); + let _ = writeln!(out, "# HELP telemt_user_connections_total Per-user total connections"); let _ = writeln!(out, "# TYPE telemt_user_connections_total counter"); let _ = writeln!(out, "# HELP telemt_user_connections_current Per-user active connections"); diff --git a/src/proxy/middle_relay.rs b/src/proxy/middle_relay.rs index 3b98112..d55e5a2 100644 --- a/src/proxy/middle_relay.rs +++ b/src/proxy/middle_relay.rs @@ -1,5 +1,10 @@ -use std::net::SocketAddr; -use std::sync::Arc; +use std::collections::HashMap; +use std::collections::hash_map::DefaultHasher; +use std::hash::{Hash, Hasher}; +use std::net::{IpAddr, SocketAddr}; +use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::{Arc, Mutex, OnceLock}; +use std::time::{Duration, Instant}; use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; use tokio::sync::{mpsc, oneshot}; @@ -19,6 +24,148 @@ enum C2MeCommand { Close, } +const DESYNC_DEDUP_WINDOW: Duration = Duration::from_secs(60); +const DESYNC_ERROR_CLASS: &str = "frame_too_large_crypto_desync"; +static DESYNC_DEDUP: OnceLock>> = OnceLock::new(); + +struct RelayForensicsState { + trace_id: u64, + conn_id: u64, + user: String, + peer: SocketAddr, + peer_hash: u64, + started_at: Instant, + bytes_c2me: u64, + bytes_me2c: Arc, + desync_all_full: bool, +} + +fn hash_value(value: &T) -> u64 { + let mut hasher = DefaultHasher::new(); + value.hash(&mut hasher); + hasher.finish() +} + +fn hash_ip(ip: IpAddr) -> u64 { + hash_value(&ip) +} + +fn should_emit_full_desync(key: u64, all_full: bool, now: Instant) -> bool { + if all_full { + return true; + } + + let dedup = 
DESYNC_DEDUP.get_or_init(|| Mutex::new(HashMap::new())); + let mut guard = dedup.lock().expect("desync dedup mutex poisoned"); + guard.retain(|_, seen_at| now.duration_since(*seen_at) < DESYNC_DEDUP_WINDOW); + + match guard.get_mut(&key) { + Some(seen_at) => { + if now.duration_since(*seen_at) >= DESYNC_DEDUP_WINDOW { + *seen_at = now; + true + } else { + false + } + } + None => { + guard.insert(key, now); + true + } + } +} + +fn report_desync_frame_too_large( + state: &RelayForensicsState, + proto_tag: ProtoTag, + frame_counter: u64, + max_frame: usize, + len: usize, + raw_len_bytes: Option<[u8; 4]>, + stats: &Stats, +) -> ProxyError { + let len_buf = raw_len_bytes.unwrap_or((len as u32).to_le_bytes()); + let looks_like_tls = raw_len_bytes + .map(|b| b[0] == 0x16 && b[1] == 0x03) + .unwrap_or(false); + let looks_like_http = raw_len_bytes + .map(|b| matches!(b[0], b'G' | b'P' | b'H' | b'C' | b'D')) + .unwrap_or(false); + let now = Instant::now(); + let dedup_key = hash_value(&( + state.user.as_str(), + state.peer_hash, + proto_tag, + DESYNC_ERROR_CLASS, + )); + let emit_full = should_emit_full_desync(dedup_key, state.desync_all_full, now); + let duration_ms = state.started_at.elapsed().as_millis() as u64; + let bytes_me2c = state.bytes_me2c.load(Ordering::Relaxed); + + stats.increment_desync_total(); + stats.observe_desync_frames_ok(frame_counter); + if emit_full { + stats.increment_desync_full_logged(); + warn!( + trace_id = format_args!("0x{:016x}", state.trace_id), + conn_id = state.conn_id, + user = %state.user, + peer_hash = format_args!("0x{:016x}", state.peer_hash), + proto = ?proto_tag, + mode = "middle_proxy", + is_tls = true, + duration_ms, + bytes_c2me = state.bytes_c2me, + bytes_me2c, + raw_len = len, + raw_len_hex = format_args!("0x{:08x}", len), + raw_bytes = format_args!( + "{:02x} {:02x} {:02x} {:02x}", + len_buf[0], len_buf[1], len_buf[2], len_buf[3] + ), + max_frame, + tls_like = looks_like_tls, + http_like = looks_like_http, + frames_ok = 
frame_counter, + dedup_window_secs = DESYNC_DEDUP_WINDOW.as_secs(), + desync_all_full = state.desync_all_full, + full_reason = if state.desync_all_full { "desync_all_full" } else { "first_in_dedup_window" }, + error_class = DESYNC_ERROR_CLASS, + "Frame too large — crypto desync forensics" + ); + debug!( + trace_id = format_args!("0x{:016x}", state.trace_id), + conn_id = state.conn_id, + user = %state.user, + peer = %state.peer, + "Frame too large forensic peer detail" + ); + } else { + stats.increment_desync_suppressed(); + debug!( + trace_id = format_args!("0x{:016x}", state.trace_id), + conn_id = state.conn_id, + user = %state.user, + peer_hash = format_args!("0x{:016x}", state.peer_hash), + proto = ?proto_tag, + duration_ms, + bytes_c2me = state.bytes_c2me, + bytes_me2c, + raw_len = len, + frames_ok = frame_counter, + dedup_window_secs = DESYNC_DEDUP_WINDOW.as_secs(), + error_class = DESYNC_ERROR_CLASS, + "Frame too large — crypto desync forensic suppressed" + ); + } + + ProxyError::Proxy(format!( + "Frame too large: {len} (max {max_frame}), frames_ok={frame_counter}, conn_id={}, trace_id=0x{:016x}", + state.conn_id, + state.trace_id + )) +} + pub(crate) async fn handle_via_middle_proxy( mut crypto_reader: CryptoReader, crypto_writer: CryptoWriter, @@ -48,14 +195,30 @@ where ); let (conn_id, me_rx) = me_pool.registry().register().await; + let trace_id = conn_id; + let bytes_me2c = Arc::new(AtomicU64::new(0)); + let mut forensics = RelayForensicsState { + trace_id, + conn_id, + user: user.clone(), + peer, + peer_hash: hash_ip(peer.ip()), + started_at: Instant::now(), + bytes_c2me: 0, + bytes_me2c: bytes_me2c.clone(), + desync_all_full: config.general.desync_all_full, + }; stats.increment_user_connects(&user); stats.increment_user_curr_connects(&user); let proto_flags = proto_flags_for_tag(proto_tag, me_pool.has_proxy_tag()); debug!( + trace_id = format_args!("0x{:016x}", trace_id), user = %user, conn_id, + peer_hash = format_args!("0x{:016x}", 
forensics.peer_hash), + desync_all_full = forensics.desync_all_full, proto_flags = format_args!("0x{:08x}", proto_flags), "ME relay started" ); @@ -93,6 +256,7 @@ where let stats_clone = stats.clone(); let rng_clone = rng.clone(); let user_clone = user.clone(); + let bytes_me2c_clone = bytes_me2c.clone(); let me_writer = tokio::spawn(async move { let mut writer = crypto_writer; let mut frame_buf = Vec::with_capacity(16 * 1024); @@ -102,6 +266,7 @@ where match msg { Some(MeResponse::Data { flags, data }) => { trace!(conn_id, bytes = data.len(), flags, "ME->C data"); + bytes_me2c_clone.fetch_add(data.len() as u64, Ordering::Relaxed); stats_clone.add_user_octets_to(&user_clone, data.len() as u64); write_client_payload( &mut writer, @@ -118,6 +283,7 @@ where match next { MeResponse::Data { flags, data } => { trace!(conn_id, bytes = data.len(), flags, "ME->C data (batched)"); + bytes_me2c_clone.fetch_add(data.len() as u64, Ordering::Relaxed); stats_clone.add_user_octets_to(&user_clone, data.len() as u64); write_client_payload( &mut writer, @@ -173,12 +339,15 @@ where &mut crypto_reader, proto_tag, frame_limit, - &user, + &forensics, &mut frame_counter, &stats, ).await { Ok(Some((payload, quickack))) => { trace!(conn_id, bytes = payload.len(), "C->ME frame"); + forensics.bytes_c2me = forensics + .bytes_c2me + .saturating_add(payload.len() as u64); stats.add_user_octets_from(&user, payload.len() as u64); let mut flags = proto_flags; if quickack { @@ -237,7 +406,16 @@ where (_, _, Err(e)) => Err(e), }; - debug!(user = %user, conn_id, "ME relay cleanup"); + debug!( + user = %user, + conn_id, + trace_id = format_args!("0x{:016x}", trace_id), + duration_ms = forensics.started_at.elapsed().as_millis() as u64, + bytes_c2me = forensics.bytes_c2me, + bytes_me2c = forensics.bytes_me2c.load(Ordering::Relaxed), + frames_ok = frame_counter, + "ME relay cleanup" + ); me_pool.registry().unregister(conn_id).await; stats.decrement_user_curr_connects(&user); result @@ -247,7 +425,7 @@ 
async fn read_client_payload( client_reader: &mut CryptoReader, proto_tag: ProtoTag, max_frame: usize, - user: &str, + forensics: &RelayForensicsState, frame_counter: &mut u64, stats: &Stats, ) -> Result, bool)>> @@ -302,7 +480,9 @@ where } if len < 4 && proto_tag != ProtoTag::Abridged { warn!( - user = %user, + trace_id = format_args!("0x{:016x}", forensics.trace_id), + conn_id = forensics.conn_id, + user = %forensics.user, len, proto = ?proto_tag, "Frame too small — corrupt or probe" @@ -311,31 +491,15 @@ where } if len > max_frame { - let len_buf = raw_len_bytes.unwrap_or((len as u32).to_le_bytes()); - let looks_like_tls = raw_len_bytes - .map(|b| b[0] == 0x16 && b[1] == 0x03) - .unwrap_or(false); - let looks_like_http = raw_len_bytes - .map(|b| matches!(b[0], b'G' | b'P' | b'H' | b'C' | b'D')) - .unwrap_or(false); - warn!( - user = %user, - raw_len = len, - raw_len_hex = format_args!("0x{:08x}", len), - raw_bytes = format_args!( - "{:02x} {:02x} {:02x} {:02x}", - len_buf[0], len_buf[1], len_buf[2], len_buf[3] - ), - proto = ?proto_tag, - tls_like = looks_like_tls, - http_like = looks_like_http, - frames_ok = *frame_counter, - "Frame too large — crypto desync forensics" - ); - return Err(ProxyError::Proxy(format!( - "Frame too large: {len} (max {max_frame}), frames_ok={}", - *frame_counter - ))); + return Err(report_desync_frame_too_large( + forensics, + proto_tag, + *frame_counter, + max_frame, + len, + raw_len_bytes, + stats, + )); } let secure_payload_len = if proto_tag == ProtoTag::Secure { diff --git a/src/stats/mod.rs b/src/stats/mod.rs index 307da6d..4c16d25 100644 --- a/src/stats/mod.rs +++ b/src/stats/mod.rs @@ -31,6 +31,13 @@ pub struct Stats { me_route_drop_channel_closed: AtomicU64, me_route_drop_queue_full: AtomicU64, secure_padding_invalid: AtomicU64, + desync_total: AtomicU64, + desync_full_logged: AtomicU64, + desync_suppressed: AtomicU64, + desync_frames_bucket_0: AtomicU64, + desync_frames_bucket_1_2: AtomicU64, + desync_frames_bucket_3_10: 
AtomicU64, + desync_frames_bucket_gt_10: AtomicU64, user_stats: DashMap, start_time: parking_lot::RwLock>, } @@ -76,6 +83,31 @@ impl Stats { pub fn increment_secure_padding_invalid(&self) { self.secure_padding_invalid.fetch_add(1, Ordering::Relaxed); } + pub fn increment_desync_total(&self) { + self.desync_total.fetch_add(1, Ordering::Relaxed); + } + pub fn increment_desync_full_logged(&self) { + self.desync_full_logged.fetch_add(1, Ordering::Relaxed); + } + pub fn increment_desync_suppressed(&self) { + self.desync_suppressed.fetch_add(1, Ordering::Relaxed); + } + pub fn observe_desync_frames_ok(&self, frames_ok: u64) { + match frames_ok { + 0 => { + self.desync_frames_bucket_0.fetch_add(1, Ordering::Relaxed); + } + 1..=2 => { + self.desync_frames_bucket_1_2.fetch_add(1, Ordering::Relaxed); + } + 3..=10 => { + self.desync_frames_bucket_3_10.fetch_add(1, Ordering::Relaxed); + } + _ => { + self.desync_frames_bucket_gt_10.fetch_add(1, Ordering::Relaxed); + } + } + } pub fn get_connects_all(&self) -> u64 { self.connects_all.load(Ordering::Relaxed) } pub fn get_connects_bad(&self) -> u64 { self.connects_bad.load(Ordering::Relaxed) } pub fn get_me_keepalive_sent(&self) -> u64 { self.me_keepalive_sent.load(Ordering::Relaxed) } @@ -96,6 +128,27 @@ impl Stats { pub fn get_secure_padding_invalid(&self) -> u64 { self.secure_padding_invalid.load(Ordering::Relaxed) } + pub fn get_desync_total(&self) -> u64 { + self.desync_total.load(Ordering::Relaxed) + } + pub fn get_desync_full_logged(&self) -> u64 { + self.desync_full_logged.load(Ordering::Relaxed) + } + pub fn get_desync_suppressed(&self) -> u64 { + self.desync_suppressed.load(Ordering::Relaxed) + } + pub fn get_desync_frames_bucket_0(&self) -> u64 { + self.desync_frames_bucket_0.load(Ordering::Relaxed) + } + pub fn get_desync_frames_bucket_1_2(&self) -> u64 { + self.desync_frames_bucket_1_2.load(Ordering::Relaxed) + } + pub fn get_desync_frames_bucket_3_10(&self) -> u64 { + 
self.desync_frames_bucket_3_10.load(Ordering::Relaxed) + } + pub fn get_desync_frames_bucket_gt_10(&self) -> u64 { + self.desync_frames_bucket_gt_10.load(Ordering::Relaxed) + } pub fn increment_user_connects(&self, user: &str) { self.user_stats.entry(user.to_string()).or_default() From d8dcbbb61e92fd6019b88776f6c4d76caff94552 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Mon, 23 Feb 2026 16:04:19 +0300 Subject: [PATCH 04/98] ME Pool Updater + Soft-staged Reinit w/o Reconcile Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com> --- src/cli.rs | 2 + src/config/defaults.rs | 8 + src/config/hot_reload.rs | 20 +++ src/config/load.rs | 28 ++++ src/config/types.rs | 25 ++- src/main.rs | 26 ++-- src/transport/middle_proxy/config_updater.rs | 153 ++++++++++++++----- src/transport/middle_proxy/pool.rs | 134 ++++++++++++++-- 8 files changed, 336 insertions(+), 60 deletions(-) diff --git a/src/cli.rs b/src/cli.rs index 7e31f26..3525a22 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -195,6 +195,8 @@ fast_mode = true use_middle_proxy = false log_level = "normal" desync_all_full = false +update_every = 43200 +me_reinit_drain_timeout_secs = 300 [network] ipv4 = true diff --git a/src/config/defaults.rs b/src/config/defaults.rs index 5216e29..6c3e60d 100644 --- a/src/config/defaults.rs +++ b/src/config/defaults.rs @@ -171,6 +171,14 @@ pub(crate) fn default_proxy_config_reload_secs() -> u64 { 12 * 60 * 60 } +pub(crate) fn default_update_every_secs() -> u64 { + 12 * 60 * 60 +} + +pub(crate) fn default_me_reinit_drain_timeout_secs() -> u64 { + 300 +} + pub(crate) fn default_ntp_check() -> bool { true } diff --git a/src/config/hot_reload.rs b/src/config/hot_reload.rs index 56cfa0f..5c7263f 100644 --- a/src/config/hot_reload.rs +++ b/src/config/hot_reload.rs @@ -11,6 +11,8 @@ //! | `general` | `middle_proxy_pool_size` | Passed on next connection | //! | `general` | `me_keepalive_*` | Passed on next connection | //! 
| `general` | `desync_all_full` | Applied immediately | +//! | `general` | `update_every` | Applied to ME updater immediately | +//! | `general` | `me_reinit_drain_timeout_secs`| Applied on next ME map update | //! | `access` | All user/quota fields | Effective immediately | //! //! Fields that require re-binding sockets (`server.port`, `censorship.*`, @@ -36,6 +38,8 @@ pub struct HotFields { pub ad_tag: Option, pub middle_proxy_pool_size: usize, pub desync_all_full: bool, + pub update_every_secs: u64, + pub me_reinit_drain_timeout_secs: u64, pub me_keepalive_enabled: bool, pub me_keepalive_interval_secs: u64, pub me_keepalive_jitter_secs: u64, @@ -50,6 +54,8 @@ impl HotFields { ad_tag: cfg.general.ad_tag.clone(), middle_proxy_pool_size: cfg.general.middle_proxy_pool_size, desync_all_full: cfg.general.desync_all_full, + update_every_secs: cfg.general.effective_update_every_secs(), + me_reinit_drain_timeout_secs: cfg.general.me_reinit_drain_timeout_secs, me_keepalive_enabled: cfg.general.me_keepalive_enabled, me_keepalive_interval_secs: cfg.general.me_keepalive_interval_secs, me_keepalive_jitter_secs: cfg.general.me_keepalive_jitter_secs, @@ -185,6 +191,20 @@ fn log_changes( ); } + if old_hot.update_every_secs != new_hot.update_every_secs { + info!( + "config reload: update_every(effective): {}s → {}s", + old_hot.update_every_secs, new_hot.update_every_secs, + ); + } + + if old_hot.me_reinit_drain_timeout_secs != new_hot.me_reinit_drain_timeout_secs { + info!( + "config reload: me_reinit_drain_timeout_secs: {}s → {}s", + old_hot.me_reinit_drain_timeout_secs, new_hot.me_reinit_drain_timeout_secs, + ); + } + if old_hot.me_keepalive_enabled != new_hot.me_keepalive_enabled || old_hot.me_keepalive_interval_secs != new_hot.me_keepalive_interval_secs || old_hot.me_keepalive_jitter_secs != new_hot.me_keepalive_jitter_secs diff --git a/src/config/load.rs b/src/config/load.rs index 827687a..5a8b8a5 100644 --- a/src/config/load.rs +++ b/src/config/load.rs @@ -117,6 +117,34 @@ 
impl ProxyConfig { let mut config: ProxyConfig = toml::from_str(&processed).map_err(|e| ProxyError::Config(e.to_string()))?; + if let Some(update_every) = config.general.update_every { + if update_every == 0 { + return Err(ProxyError::Config( + "general.update_every must be > 0".to_string(), + )); + } + } else { + let legacy_secret = config.general.proxy_secret_auto_reload_secs; + let legacy_config = config.general.proxy_config_auto_reload_secs; + let effective = legacy_secret.min(legacy_config); + if effective == 0 { + return Err(ProxyError::Config( + "legacy proxy_*_auto_reload_secs values must be > 0 when general.update_every is not set".to_string(), + )); + } + + if legacy_secret != default_proxy_secret_reload_secs() + || legacy_config != default_proxy_config_reload_secs() + { + warn!( + proxy_secret_auto_reload_secs = legacy_secret, + proxy_config_auto_reload_secs = legacy_config, + effective_update_every_secs = effective, + "proxy_*_auto_reload_secs are deprecated; set general.update_every" + ); + } + } + // Validate secrets. for (user, secret) in &config.access.users { if !secret.chars().all(|c| c.is_ascii_hexdigit()) || secret.len() != 32 { diff --git a/src/config/types.rs b/src/config/types.rs index 39ba683..54a20f3 100644 --- a/src/config/types.rs +++ b/src/config/types.rs @@ -257,11 +257,23 @@ pub struct GeneralConfig { #[serde(default = "default_fast_mode_min_tls_record")] pub fast_mode_min_tls_record: usize, - /// Automatically reload proxy-secret every N seconds. + /// Unified ME updater interval in seconds for getProxyConfig/getProxyConfigV6/getProxySecret. + /// When omitted, effective value falls back to legacy proxy_*_auto_reload_secs fields. + #[serde(default)] + pub update_every: Option, + + /// Drain timeout in seconds for stale ME writers after endpoint map changes. + /// Set to 0 to keep stale writers draining indefinitely (no force-close). 
+ #[serde(default = "default_me_reinit_drain_timeout_secs")] + pub me_reinit_drain_timeout_secs: u64, + + /// Deprecated legacy setting; kept for backward compatibility fallback. + /// Use `update_every` instead. #[serde(default = "default_proxy_secret_reload_secs")] pub proxy_secret_auto_reload_secs: u64, - /// Automatically reload proxy-multi.conf every N seconds. + /// Deprecated legacy setting; kept for backward compatibility fallback. + /// Use `update_every` instead. #[serde(default = "default_proxy_config_reload_secs")] pub proxy_config_auto_reload_secs: u64, @@ -317,6 +329,8 @@ impl Default for GeneralConfig { max_client_frame: default_max_client_frame(), desync_all_full: default_desync_all_full(), fast_mode_min_tls_record: default_fast_mode_min_tls_record(), + update_every: Some(default_update_every_secs()), + me_reinit_drain_timeout_secs: default_me_reinit_drain_timeout_secs(), proxy_secret_auto_reload_secs: default_proxy_secret_reload_secs(), proxy_config_auto_reload_secs: default_proxy_config_reload_secs(), ntp_check: default_ntp_check(), @@ -327,6 +341,13 @@ impl Default for GeneralConfig { } } +impl GeneralConfig { + pub fn effective_update_every_secs(&self) -> u64 { + self.update_every + .unwrap_or_else(|| self.proxy_secret_auto_reload_secs.min(self.proxy_config_auto_reload_secs)) + } +} + /// `[general.links]` — proxy link generation settings. 
#[derive(Debug, Clone, Serialize, Deserialize, Default)] pub struct LinksConfig { diff --git a/src/main.rs b/src/main.rs index 61debb9..af1a069 100644 --- a/src/main.rs +++ b/src/main.rs @@ -392,18 +392,6 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).awai .await; }); - // Periodic updater: getProxyConfig + proxy-secret - let pool_clone2 = pool.clone(); - let rng_clone2 = rng.clone(); - tokio::spawn(async move { - crate::transport::middle_proxy::me_config_updater( - pool_clone2, - rng_clone2, - std::time::Duration::from_secs(12 * 3600), - ) - .await; - }); - Some(pool) } Err(e) => { @@ -702,6 +690,20 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).awai detected_ip_v6, ); + if let Some(ref pool) = me_pool { + let pool_clone = pool.clone(); + let rng_clone = rng.clone(); + let config_rx_clone = config_rx.clone(); + tokio::spawn(async move { + crate::transport::middle_proxy::me_config_updater( + pool_clone, + rng_clone, + config_rx_clone, + ) + .await; + }); + } + let mut listeners = Vec::new(); for listener_conf in &config.server.listeners { diff --git a/src/transport/middle_proxy/config_updater.rs b/src/transport/middle_proxy/config_updater.rs index d2bb550..479a880 100644 --- a/src/transport/middle_proxy/config_updater.rs +++ b/src/transport/middle_proxy/config_updater.rs @@ -4,8 +4,10 @@ use std::sync::Arc; use std::time::Duration; use httpdate; +use tokio::sync::watch; use tracing::{debug, info, warn}; +use crate::config::ProxyConfig; use crate::error::Result; use super::MePool; @@ -128,49 +130,126 @@ pub async fn fetch_proxy_config(url: &str) -> Result { Ok(ProxyConfigData { map, default_dc }) } -pub async fn me_config_updater(pool: Arc, rng: Arc, interval: Duration) { - let mut tick = tokio::time::interval(interval); - // skip immediate tick to avoid double-fetch right after startup - tick.tick().await; +async fn run_update_cycle(pool: &Arc, rng: &Arc, cfg: &ProxyConfig) { + let mut maps_changed = 
false; + + // Update proxy config v4 + let cfg_v4 = retry_fetch("https://core.telegram.org/getProxyConfig").await; + if let Some(cfg_v4) = cfg_v4 { + let changed = pool.update_proxy_maps(cfg_v4.map.clone(), None).await; + if let Some(dc) = cfg_v4.default_dc { + pool.default_dc + .store(dc, std::sync::atomic::Ordering::Relaxed); + } + if changed { + maps_changed = true; + info!("ME config updated (v4)"); + } else { + debug!("ME config v4 unchanged"); + } + } + + // Update proxy config v6 (optional) + let cfg_v6 = retry_fetch("https://core.telegram.org/getProxyConfigV6").await; + if let Some(cfg_v6) = cfg_v6 { + let changed = pool.update_proxy_maps(HashMap::new(), Some(cfg_v6.map)).await; + if changed { + maps_changed = true; + info!("ME config updated (v6)"); + } else { + debug!("ME config v6 unchanged"); + } + } + + if maps_changed { + let drain_timeout = if cfg.general.me_reinit_drain_timeout_secs == 0 { + None + } else { + Some(Duration::from_secs(cfg.general.me_reinit_drain_timeout_secs)) + }; + pool.zero_downtime_reinit_after_map_change(rng.as_ref(), drain_timeout) + .await; + } + + pool.reset_stun_state(); + + // Update proxy-secret + match download_proxy_secret().await { + Ok(secret) => { + if pool.update_secret(secret).await { + info!("proxy-secret updated and pool reconnect scheduled"); + } + } + Err(e) => warn!(error = %e, "proxy-secret update failed"), + } +} + +pub async fn me_config_updater( + pool: Arc, + rng: Arc, + mut config_rx: watch::Receiver>, +) { + let mut update_every_secs = config_rx + .borrow() + .general + .effective_update_every_secs() + .max(1); + let mut update_every = Duration::from_secs(update_every_secs); + let mut next_tick = tokio::time::Instant::now() + update_every; + info!(update_every_secs, "ME config updater started"); + loop { - tick.tick().await; + let sleep = tokio::time::sleep_until(next_tick); + tokio::pin!(sleep); - // Update proxy config v4 - let cfg_v4 = retry_fetch("https://core.telegram.org/getProxyConfig").await; - 
if let Some(cfg) = cfg_v4 { - let changed = pool.update_proxy_maps(cfg.map.clone(), None).await; - if let Some(dc) = cfg.default_dc { - pool.default_dc.store(dc, std::sync::atomic::Ordering::Relaxed); + tokio::select! { + _ = &mut sleep => { + let cfg = config_rx.borrow().clone(); + run_update_cycle(&pool, &rng, cfg.as_ref()).await; + let refreshed_secs = cfg.general.effective_update_every_secs().max(1); + if refreshed_secs != update_every_secs { + info!( + old_update_every_secs = update_every_secs, + new_update_every_secs = refreshed_secs, + "ME config updater interval changed" + ); + update_every_secs = refreshed_secs; + update_every = Duration::from_secs(update_every_secs); + } + next_tick = tokio::time::Instant::now() + update_every; } - if changed { - info!("ME config updated (v4), reconciling connections"); - pool.reconcile_connections(&rng).await; - } else { - debug!("ME config v4 unchanged"); - } - } + changed = config_rx.changed() => { + if changed.is_err() { + warn!("ME config updater stopped: config channel closed"); + break; + } + let cfg = config_rx.borrow().clone(); + let new_secs = cfg.general.effective_update_every_secs().max(1); + if new_secs == update_every_secs { + continue; + } - // Update proxy config v6 (optional) - let cfg_v6 = retry_fetch("https://core.telegram.org/getProxyConfigV6").await; - if let Some(cfg_v6) = cfg_v6 { - let changed = pool.update_proxy_maps(HashMap::new(), Some(cfg_v6.map)).await; - if changed { - info!("ME config updated (v6), reconciling connections"); - pool.reconcile_connections(&rng).await; - } else { - debug!("ME config v6 unchanged"); - } - } - pool.reset_stun_state(); - - // Update proxy-secret - match download_proxy_secret().await { - Ok(secret) => { - if pool.update_secret(secret).await { - info!("proxy-secret updated and pool reconnect scheduled"); + if new_secs < update_every_secs { + info!( + old_update_every_secs = update_every_secs, + new_update_every_secs = new_secs, + "ME config updater interval 
decreased, running immediate refresh" + ); + update_every_secs = new_secs; + update_every = Duration::from_secs(update_every_secs); + run_update_cycle(&pool, &rng, cfg.as_ref()).await; + next_tick = tokio::time::Instant::now() + update_every; + } else { + info!( + old_update_every_secs = update_every_secs, + new_update_every_secs = new_secs, + "ME config updater interval increased" + ); + update_every_secs = new_secs; + update_every = Duration::from_secs(update_every_secs); + next_tick = tokio::time::Instant::now() + update_every; } } - Err(e) => warn!(error = %e, "proxy-secret update failed"), } } } diff --git a/src/transport/middle_proxy/pool.rs b/src/transport/middle_proxy/pool.rs index 8faeabf..858d4bf 100644 --- a/src/transport/middle_proxy/pool.rs +++ b/src/transport/middle_proxy/pool.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use std::net::{IpAddr, Ipv6Addr, SocketAddr}; use std::sync::Arc; use std::sync::atomic::{AtomicBool, AtomicI32, AtomicU64, AtomicUsize, Ordering}; @@ -178,7 +178,6 @@ impl MePool { } pub async fn reconcile_connections(self: &Arc, rng: &SecureRandom) { - use std::collections::HashSet; let writers = self.writers.read().await; let current: HashSet = writers .iter() @@ -210,6 +209,101 @@ impl MePool { } } + async fn desired_dc_endpoints(&self) -> HashMap> { + let mut out: HashMap> = HashMap::new(); + + if self.decision.ipv4_me { + let map_v4 = self.proxy_map_v4.read().await.clone(); + for (dc, addrs) in map_v4 { + let entry = out.entry(dc.abs()).or_default(); + for (ip, port) in addrs { + entry.insert(SocketAddr::new(ip, port)); + } + } + } + + if self.decision.ipv6_me { + let map_v6 = self.proxy_map_v6.read().await.clone(); + for (dc, addrs) in map_v6 { + let entry = out.entry(dc.abs()).or_default(); + for (ip, port) in addrs { + entry.insert(SocketAddr::new(ip, port)); + } + } + } + + out + } + + pub async fn zero_downtime_reinit_after_map_change( + self: &Arc, + rng: &SecureRandom, + 
drain_timeout: Option, + ) { + self.reconcile_connections(rng).await; + + let desired_by_dc = self.desired_dc_endpoints().await; + if desired_by_dc.is_empty() { + warn!("ME endpoint map is empty after update; skipping stale writer drain"); + return; + } + + let writers = self.writers.read().await; + let active_writer_addrs: HashSet = writers + .iter() + .filter(|w| !w.draining.load(Ordering::Relaxed)) + .map(|w| w.addr) + .collect(); + + let mut missing_dc = Vec::::new(); + for (dc, endpoints) in &desired_by_dc { + if endpoints.is_empty() { + continue; + } + if !endpoints.iter().any(|addr| active_writer_addrs.contains(addr)) { + missing_dc.push(*dc); + } + } + + if !missing_dc.is_empty() { + missing_dc.sort_unstable(); + warn!( + missing_dc = ?missing_dc, + "ME reinit coverage incomplete after map update; keeping stale writers" + ); + return; + } + + let desired_addrs: HashSet = desired_by_dc + .values() + .flat_map(|set| set.iter().copied()) + .collect(); + + let stale_writer_ids: Vec = writers + .iter() + .filter(|w| !w.draining.load(Ordering::Relaxed)) + .filter(|w| !desired_addrs.contains(&w.addr)) + .map(|w| w.id) + .collect(); + drop(writers); + + if stale_writer_ids.is_empty() { + debug!("ME map update completed with no stale writers"); + return; + } + + let drain_timeout_secs = drain_timeout.map(|d| d.as_secs()).unwrap_or(0); + info!( + stale_writers = stale_writer_ids.len(), + drain_timeout_secs, + "ME map update covered; draining stale writers" + ); + for writer_id in stale_writer_ids { + self.mark_writer_draining_with_timeout(writer_id, drain_timeout) + .await; + } + } + pub async fn update_proxy_maps( &self, new_v4: HashMap>, @@ -631,23 +725,40 @@ impl MePool { self.registry.writer_lost(writer_id).await } - pub(crate) async fn mark_writer_draining(self: &Arc, writer_id: u64) { - { + pub(crate) async fn mark_writer_draining_with_timeout( + self: &Arc, + writer_id: u64, + timeout: Option, + ) { + let timeout = timeout.filter(|d| !d.is_zero()); + let found 
= { let mut ws = self.writers.write().await; if let Some(w) = ws.iter_mut().find(|w| w.id == writer_id) { w.draining.store(true, Ordering::Relaxed); + true + } else { + false } + }; + + if !found { + return; } + let timeout_secs = timeout.map(|d| d.as_secs()).unwrap_or(0); + debug!(writer_id, timeout_secs, "ME writer marked draining"); + let pool = Arc::downgrade(self); tokio::spawn(async move { - let deadline = Instant::now() + Duration::from_secs(300); + let deadline = timeout.map(|t| Instant::now() + t); loop { if let Some(p) = pool.upgrade() { - if Instant::now() >= deadline { - warn!(writer_id, "Drain timeout, force-closing"); - let _ = p.remove_writer_and_close_clients(writer_id).await; - break; + if let Some(deadline_at) = deadline { + if Instant::now() >= deadline_at { + warn!(writer_id, "Drain timeout, force-closing"); + let _ = p.remove_writer_and_close_clients(writer_id).await; + break; + } } if p.registry.is_writer_empty(writer_id).await { let _ = p.remove_writer_only(writer_id).await; @@ -661,6 +772,11 @@ impl MePool { }); } + pub(crate) async fn mark_writer_draining(self: &Arc, writer_id: u64) { + self.mark_writer_draining_with_timeout(writer_id, Some(Duration::from_secs(300))) + .await; + } + } fn hex_dump(data: &[u8]) -> String { From fc2ac3d10f19602565d51a5a4244259234cc4140 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Mon, 23 Feb 2026 16:09:09 +0300 Subject: [PATCH 05/98] ME Pool Reinit polishing --- src/config/load.rs | 64 ++++++++++++++++++++++++++++++ src/config/types.rs | 2 + src/transport/middle_proxy/pool.rs | 2 + 3 files changed, 68 insertions(+) diff --git a/src/config/load.rs b/src/config/load.rs index 5a8b8a5..fa61539 100644 --- a/src/config/load.rs +++ b/src/config/load.rs @@ -375,4 +375,68 @@ mod tests { .unwrap_or(false)); let _ = std::fs::remove_file(path); } + + #[test] + fn update_every_overrides_legacy_fields() { + let toml = r#" + [general] + update_every = 123 + 
proxy_secret_auto_reload_secs = 700 + proxy_config_auto_reload_secs = 800 + + [censorship] + tls_domain = "example.com" + + [access.users] + user = "00000000000000000000000000000000" + "#; + let dir = std::env::temp_dir(); + let path = dir.join("telemt_update_every_override_test.toml"); + std::fs::write(&path, toml).unwrap(); + let cfg = ProxyConfig::load(&path).unwrap(); + assert_eq!(cfg.general.effective_update_every_secs(), 123); + let _ = std::fs::remove_file(path); + } + + #[test] + fn update_every_fallback_to_legacy_min() { + let toml = r#" + [general] + proxy_secret_auto_reload_secs = 600 + proxy_config_auto_reload_secs = 120 + + [censorship] + tls_domain = "example.com" + + [access.users] + user = "00000000000000000000000000000000" + "#; + let dir = std::env::temp_dir(); + let path = dir.join("telemt_update_every_legacy_min_test.toml"); + std::fs::write(&path, toml).unwrap(); + let cfg = ProxyConfig::load(&path).unwrap(); + assert_eq!(cfg.general.update_every, None); + assert_eq!(cfg.general.effective_update_every_secs(), 120); + let _ = std::fs::remove_file(path); + } + + #[test] + fn update_every_zero_is_rejected() { + let toml = r#" + [general] + update_every = 0 + + [censorship] + tls_domain = "example.com" + + [access.users] + user = "00000000000000000000000000000000" + "#; + let dir = std::env::temp_dir(); + let path = dir.join("telemt_update_every_zero_test.toml"); + std::fs::write(&path, toml).unwrap(); + let err = ProxyConfig::load(&path).unwrap_err().to_string(); + assert!(err.contains("general.update_every must be > 0")); + let _ = std::fs::remove_file(path); + } } diff --git a/src/config/types.rs b/src/config/types.rs index 54a20f3..eb16885 100644 --- a/src/config/types.rs +++ b/src/config/types.rs @@ -342,6 +342,8 @@ impl Default for GeneralConfig { } impl GeneralConfig { + /// Resolve the active updater interval for ME infrastructure refresh tasks. + /// `update_every` has priority, otherwise legacy proxy_*_auto_reload_secs are used. 
pub fn effective_update_every_secs(&self) -> u64 { self.update_every .unwrap_or_else(|| self.proxy_secret_auto_reload_secs.min(self.proxy_config_auto_reload_secs)) diff --git a/src/transport/middle_proxy/pool.rs b/src/transport/middle_proxy/pool.rs index 858d4bf..bd7c9cc 100644 --- a/src/transport/middle_proxy/pool.rs +++ b/src/transport/middle_proxy/pool.rs @@ -240,6 +240,7 @@ impl MePool { rng: &SecureRandom, drain_timeout: Option, ) { + // Stage 1: prewarm writers for new endpoint maps before draining old ones. self.reconcile_connections(rng).await; let desired_by_dc = self.desired_dc_endpoints().await; @@ -269,6 +270,7 @@ impl MePool { missing_dc.sort_unstable(); warn!( missing_dc = ?missing_dc, + // Keep stale writers alive when fresh coverage is incomplete. "ME reinit coverage incomplete after map update; keeping stale writers" ); return; From 75bfbe6e95e248573cb59d3ca32c88eed4feb0cf Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Mon, 23 Feb 2026 16:10:39 +0300 Subject: [PATCH 06/98] Update defaults.rs --- src/config/defaults.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/config/defaults.rs b/src/config/defaults.rs index 6c3e60d..01cdcb0 100644 --- a/src/config/defaults.rs +++ b/src/config/defaults.rs @@ -172,7 +172,7 @@ pub(crate) fn default_proxy_config_reload_secs() -> u64 { } pub(crate) fn default_update_every_secs() -> u64 { - 12 * 60 * 60 + 2 * 60 * 60 } pub(crate) fn default_me_reinit_drain_timeout_secs() -> u64 { From 872b47067aef23643d791691b0dc962ed7d4c318 Mon Sep 17 00:00:00 2001 From: Vladislav Yaroslavlev Date: Mon, 23 Feb 2026 17:22:56 +0300 Subject: [PATCH 07/98] Improve CLI help text with comprehensive options - Add version number to help header - Restructure help into USAGE, ARGS, OPTIONS, INIT OPTIONS, EXAMPLES sections - Include all command-line options with descriptions - Add usage examples for common scenarios --- src/main.rs | 43 
++++++++++++++++++++++++++----------------- 1 file changed, 26 insertions(+), 17 deletions(-) diff --git a/src/main.rs b/src/main.rs index 61debb9..119d3fd 100644 --- a/src/main.rs +++ b/src/main.rs @@ -73,27 +73,36 @@ fn parse_cli() -> (String, bool, Option) { log_level = Some(s.trim_start_matches("--log-level=").to_string()); } "--help" | "-h" => { - eprintln!("Usage: telemt [config.toml] [OPTIONS]"); + eprintln!("telemt - Telegram MTProto Proxy v{}", env!("CARGO_PKG_VERSION")); eprintln!(); - eprintln!("Options:"); - eprintln!(" --silent, -s Suppress info logs"); - eprintln!(" --log-level debug|verbose|normal|silent"); - eprintln!(" --help, -h Show this help"); + eprintln!("USAGE:"); + eprintln!(" telemt [CONFIG] [OPTIONS]"); + eprintln!(" telemt --init [INIT_OPTIONS]"); eprintln!(); - eprintln!("Setup (fire-and-forget):"); - eprintln!( - " --init Generate config, install systemd service, start" - ); + eprintln!("ARGS:"); + eprintln!(" Path to config file (default: config.toml)"); + eprintln!(); + eprintln!("OPTIONS:"); + eprintln!(" -s, --silent Suppress info logs (equivalent to --log-level silent)"); + eprintln!(" --log-level Set log level [possible values: debug, verbose, normal, silent]"); + eprintln!(" -h, --help Show this help message"); + eprintln!(" -V, --version Print version number"); + eprintln!(); + eprintln!("INIT OPTIONS (fire-and-forget setup):"); + eprintln!(" --init Generate config, install systemd service, and start"); eprintln!(" --port Listen port (default: 443)"); - eprintln!( - " --domain TLS domain for masking (default: www.google.com)" - ); - eprintln!( - " --secret 32-char hex secret (auto-generated if omitted)" - ); - eprintln!(" --user Username (default: user)"); + eprintln!(" --domain TLS domain for masking (default: www.google.com)"); + eprintln!(" --secret 32-char hex secret (auto-generated if omitted)"); + eprintln!(" --user Username for proxy access (default: user)"); eprintln!(" --config-dir Config directory (default: 
/etc/telemt)"); - eprintln!(" --no-start Don't start the service after install"); + eprintln!(" --no-start Create config and service but don't start"); + eprintln!(); + eprintln!("EXAMPLES:"); + eprintln!(" telemt # Run with default config"); + eprintln!(" telemt /etc/telemt/config.toml # Run with specific config"); + eprintln!(" telemt --log-level debug # Run with debug logging"); + eprintln!(" telemt --init # Quick setup with defaults"); + eprintln!(" telemt --init --port 8443 --user admin # Custom setup"); std::process::exit(0); } "--version" | "-V" => { From a917dcc1624e3b73dca11f44e127d6b4e7bc8b86 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Mon, 23 Feb 2026 18:34:23 +0300 Subject: [PATCH 08/98] Update Dockerfile --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 662ec22..7abe548 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ # ========================== # Stage 1: Build # ========================== -FROM rust:1.85-slim-bookworm AS builder +FROM rust:1.88-slim-bookworm AS builder RUN apt-get update && apt-get install -y --no-install-recommends \ pkg-config \ @@ -40,4 +40,4 @@ EXPOSE 443 EXPOSE 9090 ENTRYPOINT ["/app/telemt"] -CMD ["config.toml"] \ No newline at end of file +CMD ["config.toml"] From 9aed6c86317a49b4810eaf2a0a0e597bbf8e073b Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Mon, 23 Feb 2026 18:47:26 +0300 Subject: [PATCH 09/98] Update Cargo.toml --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index dbdb024..fd1d892 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "telemt" -version = "3.0.12" +version = "3.0.13" edition = "2024" [dependencies] From e2e471a78c87b97811c79328b1e3678b9bbd19a6 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Mon, 23 Feb 2026 19:43:03 +0300 
Subject: [PATCH 10/98] Delete AGENTS.md --- AGENTS.md | 40 ---------------------------------------- 1 file changed, 40 deletions(-) delete mode 100644 AGENTS.md diff --git a/AGENTS.md b/AGENTS.md deleted file mode 100644 index dc582ae..0000000 --- a/AGENTS.md +++ /dev/null @@ -1,40 +0,0 @@ -# AGENTS.md - -** Use general system promt from AGENTS_SYSTEM_PROMT.md ** -** Additional techiques and architectury details are here ** - -This file provides guidance to agents when working with code in this repository. - -## Build & Test Commands -```bash -cargo build --release # Production build -cargo test # Run all tests -cargo test --lib error # Run tests for specific module (error module) -cargo bench --bench crypto_bench # Run crypto benchmarks -cargo clippy -- -D warnings # Lint with clippy -``` - -## Project-Specific Conventions - -### Rust Edition -- Uses **Rust edition 2024** (not 2021) - specified in Cargo.toml - -### Error Handling Pattern -- Custom [`Recoverable`](src/error.rs:110) trait distinguishes recoverable vs fatal errors -- [`HandshakeResult`](src/error.rs:292) returns streams on bad client for masking - do not drop them -- Always use [`ProxyError`](src/error.rs:168) from [`src/error.rs`](src/error.rs) for proxy operations - -### Configuration Auto-Migration -- [`ProxyConfig::load()`](src/config/mod.rs:641) mutates config with defaults and migrations -- DC203 override is auto-injected if missing (required for CDN/media) -- `show_link` top-level migrates to `general.links.show` - -### Middle-End Proxy Requirements -- Requires public IP on interface OR 1:1 NAT with STUN probing -- Falls back to direct mode on STUN/interface mismatch unless `stun_iface_mismatch_ignore=true` -- Proxy-secret from Telegram is separate from user secrets - -### TLS Fronting Behavior -- Invalid handshakes are transparently proxied to `mask_host` for DPI evasion -- `fake_cert_len` is randomized at startup (1024-4096 bytes) -- `mask_unix_sock` and `mask_host` are mutually exclusive 
From f86ced8e62fb50bb4ede3504510d93f3ad6d2374 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Mon, 23 Feb 2026 19:43:34 +0300 Subject: [PATCH 11/98] Rename AGENTS_SYSTEM_PROMT.md to AGENTS.md --- AGENTS_SYSTEM_PROMT.md => AGENTS.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename AGENTS_SYSTEM_PROMT.md => AGENTS.md (100%) diff --git a/AGENTS_SYSTEM_PROMT.md b/AGENTS.md similarity index 100% rename from AGENTS_SYSTEM_PROMT.md rename to AGENTS.md From 81843cc56cf785c6db12bd740f2e5dfb3fc3b94b Mon Sep 17 00:00:00 2001 From: Dimasssss Date: Mon, 23 Feb 2026 20:46:56 +0300 Subject: [PATCH 12/98] Update types.rs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit По умолчанию использовало me_reconnect_max_concurrent_per_dc = 4 --- src/config/types.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/config/types.rs b/src/config/types.rs index eb16885..2f098c9 100644 --- a/src/config/types.rs +++ b/src/config/types.rs @@ -316,7 +316,7 @@ impl Default for GeneralConfig { me_warmup_stagger_enabled: true, me_warmup_step_delay_ms: default_warmup_step_delay_ms(), me_warmup_step_jitter_ms: default_warmup_step_jitter_ms(), - me_reconnect_max_concurrent_per_dc: 4, + me_reconnect_max_concurrent_per_dc: 1, me_reconnect_backoff_base_ms: default_reconnect_backoff_base_ms(), me_reconnect_backoff_cap_ms: default_reconnect_backoff_cap_ms(), me_reconnect_fast_retry_count: 8, From 02cfe1305c0344d9e6fd5e536505ba390ab0be90 Mon Sep 17 00:00:00 2001 From: Dimasssss Date: Mon, 23 Feb 2026 20:50:39 +0300 Subject: [PATCH 13/98] Update config.toml --- config.toml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/config.toml b/config.toml index be27ca6..b7fcf26 100644 --- a/config.toml +++ b/config.toml @@ -42,6 +42,13 @@ me_reconnect_max_concurrent_per_dc = 1 # Parallel reconnects per DC - EXPERIM me_reconnect_backoff_base_ms = 500 # Backoff start me_reconnect_backoff_cap_ms = 30000 
# Backoff cap me_reconnect_fast_retry_count = 11 # Quick retries before backoff +update_every = 7200 # Resolve the active updater interval for ME infrastructure refresh tasks. +crypto_pending_buffer = 262144 # Max pending ciphertext buffer per client writer (bytes). Controls FakeTLS backpressure vs throughput. +max_client_frame = 16777216 # Maximum allowed client MTProto frame size (bytes). +desync_all_full = false # Emit full crypto-desync forensic logs for every event. When false, full forensic details are emitted once per key window. +me_reinit_drain_timeout_secs = 300 # Drain timeout in seconds for stale ME writers after endpoint map changes. Set to 0 to keep stale writers draining indefinitely (no force-close). +auto_degradation_enabled = true # Enable auto-degradation from ME to Direct-DC. +degradation_min_unavailable_dc_groups = 2 # Minimum unavailable ME DC groups before degrading. [general.modes] classic = false From 890bd98b173cd014e493e7a89f51f235665419a1 Mon Sep 17 00:00:00 2001 From: Dimasssss Date: Mon, 23 Feb 2026 21:10:25 +0300 Subject: [PATCH 14/98] Update types.rs --- src/config/types.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/config/types.rs b/src/config/types.rs index 2f098c9..eb16885 100644 --- a/src/config/types.rs +++ b/src/config/types.rs @@ -316,7 +316,7 @@ impl Default for GeneralConfig { me_warmup_stagger_enabled: true, me_warmup_step_delay_ms: default_warmup_step_delay_ms(), me_warmup_step_jitter_ms: default_warmup_step_jitter_ms(), - me_reconnect_max_concurrent_per_dc: 1, + me_reconnect_max_concurrent_per_dc: 4, me_reconnect_backoff_base_ms: default_reconnect_backoff_base_ms(), me_reconnect_backoff_cap_ms: default_reconnect_backoff_cap_ms(), me_reconnect_fast_retry_count: 8, From 3f0c53b010df1222c1d7ba698c1bbf52d31d9a24 Mon Sep 17 00:00:00 2001 From: Dimasssss Date: Mon, 23 Feb 2026 21:10:53 +0300 Subject: [PATCH 15/98] Update config.toml --- config.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/config.toml b/config.toml index b7fcf26..e146598 100644 --- a/config.toml +++ b/config.toml @@ -38,7 +38,7 @@ me_warmup_stagger_enabled = true me_warmup_step_delay_ms = 500 # Base delay between extra connects me_warmup_step_jitter_ms = 300 # Jitter for warmup delay # Reconnect policy knobs. -me_reconnect_max_concurrent_per_dc = 1 # Parallel reconnects per DC - EXPERIMENTAL! UNSTABLE! +me_reconnect_max_concurrent_per_dc = 4 # Parallel reconnects per DC - EXPERIMENTAL! UNSTABLE! me_reconnect_backoff_base_ms = 500 # Backoff start me_reconnect_backoff_cap_ms = 30000 # Backoff cap me_reconnect_fast_retry_count = 11 # Quick retries before backoff From 1f486e0df2337da029e36c635060d590ec5106ed Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Mon, 23 Feb 2026 21:30:22 +0300 Subject: [PATCH 16/98] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index a88b8df..8d0c41a 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,7 @@ - Улучшение обработки ошибок в edge-case транспортных сценариях Релиз: -[3.0.9](https://github.com/telemt/telemt/releases/tag/3.0.9) +[3.0.12](https://github.com/telemt/telemt/releases/tag/3.0.12) --- @@ -69,7 +69,7 @@ Additionally, we implemented a set of robustness enhancements designed to: - Improve error handling in edge-case transport scenarios Release: -[3.0.9](https://github.com/telemt/telemt/releases/tag/3.0.9) +[3.0.12](https://github.com/telemt/telemt/releases/tag/3.0.12) --- From 0e2d42624f1bb12145b8f12363ab922bb63560c7 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Tue, 24 Feb 2026 00:04:12 +0300 Subject: [PATCH 17/98] ME Pool Hardswap --- src/cli.rs | 5 +- src/config/defaults.rs | 17 +- src/config/hot_reload.rs | 30 +++ src/config/load.rs | 59 +++++ src/config/types.rs | 24 ++ src/main.rs | 47 ++-- src/metrics.rs | 24 ++ src/proxy/middle_relay.rs | 3 + src/stats/mod.rs | 45 
++++ src/transport/middle_proxy/config_updater.rs | 20 +- src/transport/middle_proxy/health.rs | 1 + src/transport/middle_proxy/pool.rs | 253 +++++++++++++++++-- src/transport/middle_proxy/send.rs | 27 +- 13 files changed, 491 insertions(+), 64 deletions(-) diff --git a/src/cli.rs b/src/cli.rs index 3525a22..a1182a7 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -196,7 +196,10 @@ use_middle_proxy = false log_level = "normal" desync_all_full = false update_every = 43200 -me_reinit_drain_timeout_secs = 300 +hardswap = false +me_pool_drain_ttl_secs = 90 +me_pool_min_fresh_ratio = 0.8 +me_reinit_drain_timeout_secs = 120 [network] ipv4 = true diff --git a/src/config/defaults.rs b/src/config/defaults.rs index 01cdcb0..775692e 100644 --- a/src/config/defaults.rs +++ b/src/config/defaults.rs @@ -1,4 +1,3 @@ -use std::net::IpAddr; use std::collections::HashMap; use ipnetwork::IpNetwork; use serde::Deserialize; @@ -172,11 +171,23 @@ pub(crate) fn default_proxy_config_reload_secs() -> u64 { } pub(crate) fn default_update_every_secs() -> u64 { - 2 * 60 * 60 + 12 * 60 * 60 } pub(crate) fn default_me_reinit_drain_timeout_secs() -> u64 { - 300 + 120 +} + +pub(crate) fn default_me_pool_drain_ttl_secs() -> u64 { + 90 +} + +pub(crate) fn default_me_pool_min_fresh_ratio() -> f32 { + 0.8 +} + +pub(crate) fn default_hardswap() -> bool { + false } pub(crate) fn default_ntp_check() -> bool { diff --git a/src/config/hot_reload.rs b/src/config/hot_reload.rs index 5c7263f..7f121f6 100644 --- a/src/config/hot_reload.rs +++ b/src/config/hot_reload.rs @@ -12,6 +12,9 @@ //! | `general` | `me_keepalive_*` | Passed on next connection | //! | `general` | `desync_all_full` | Applied immediately | //! | `general` | `update_every` | Applied to ME updater immediately | +//! | `general` | `hardswap` | Applied on next ME map update | +//! | `general` | `me_pool_drain_ttl_secs` | Applied on next ME map update | +//! | `general` | `me_pool_min_fresh_ratio` | Applied on next ME map update | //! 
| `general` | `me_reinit_drain_timeout_secs`| Applied on next ME map update | //! | `access` | All user/quota fields | Effective immediately | //! @@ -39,6 +42,9 @@ pub struct HotFields { pub middle_proxy_pool_size: usize, pub desync_all_full: bool, pub update_every_secs: u64, + pub hardswap: bool, + pub me_pool_drain_ttl_secs: u64, + pub me_pool_min_fresh_ratio: f32, pub me_reinit_drain_timeout_secs: u64, pub me_keepalive_enabled: bool, pub me_keepalive_interval_secs: u64, @@ -55,6 +61,9 @@ impl HotFields { middle_proxy_pool_size: cfg.general.middle_proxy_pool_size, desync_all_full: cfg.general.desync_all_full, update_every_secs: cfg.general.effective_update_every_secs(), + hardswap: cfg.general.hardswap, + me_pool_drain_ttl_secs: cfg.general.me_pool_drain_ttl_secs, + me_pool_min_fresh_ratio: cfg.general.me_pool_min_fresh_ratio, me_reinit_drain_timeout_secs: cfg.general.me_reinit_drain_timeout_secs, me_keepalive_enabled: cfg.general.me_keepalive_enabled, me_keepalive_interval_secs: cfg.general.me_keepalive_interval_secs, @@ -198,6 +207,27 @@ fn log_changes( ); } + if old_hot.hardswap != new_hot.hardswap { + info!( + "config reload: hardswap: {} → {}", + old_hot.hardswap, new_hot.hardswap, + ); + } + + if old_hot.me_pool_drain_ttl_secs != new_hot.me_pool_drain_ttl_secs { + info!( + "config reload: me_pool_drain_ttl_secs: {}s → {}s", + old_hot.me_pool_drain_ttl_secs, new_hot.me_pool_drain_ttl_secs, + ); + } + + if (old_hot.me_pool_min_fresh_ratio - new_hot.me_pool_min_fresh_ratio).abs() > f32::EPSILON { + info!( + "config reload: me_pool_min_fresh_ratio: {:.3} → {:.3}", + old_hot.me_pool_min_fresh_ratio, new_hot.me_pool_min_fresh_ratio, + ); + } + if old_hot.me_reinit_drain_timeout_secs != new_hot.me_reinit_drain_timeout_secs { info!( "config reload: me_reinit_drain_timeout_secs: {}s → {}s", diff --git a/src/config/load.rs b/src/config/load.rs index fa61539..4d59e60 100644 --- a/src/config/load.rs +++ b/src/config/load.rs @@ -145,6 +145,24 @@ impl ProxyConfig { } } 
+ if !(0.0..=1.0).contains(&config.general.me_pool_min_fresh_ratio) { + return Err(ProxyError::Config( + "general.me_pool_min_fresh_ratio must be within [0.0, 1.0]".to_string(), + )); + } + + if config.general.effective_me_pool_force_close_secs() > 0 + && config.general.effective_me_pool_force_close_secs() + < config.general.me_pool_drain_ttl_secs + { + warn!( + me_pool_drain_ttl_secs = config.general.me_pool_drain_ttl_secs, + me_reinit_drain_timeout_secs = config.general.effective_me_pool_force_close_secs(), + "force-close timeout is lower than drain TTL; bumping force-close timeout to TTL" + ); + config.general.me_reinit_drain_timeout_secs = config.general.me_pool_drain_ttl_secs; + } + // Validate secrets. for (user, secret) in &config.access.users { if !secret.chars().all(|c| c.is_ascii_hexdigit()) || secret.len() != 32 { @@ -439,4 +457,45 @@ mod tests { assert!(err.contains("general.update_every must be > 0")); let _ = std::fs::remove_file(path); } + + #[test] + fn me_pool_min_fresh_ratio_out_of_range_is_rejected() { + let toml = r#" + [general] + me_pool_min_fresh_ratio = 1.5 + + [censorship] + tls_domain = "example.com" + + [access.users] + user = "00000000000000000000000000000000" + "#; + let dir = std::env::temp_dir(); + let path = dir.join("telemt_me_pool_min_ratio_invalid_test.toml"); + std::fs::write(&path, toml).unwrap(); + let err = ProxyConfig::load(&path).unwrap_err().to_string(); + assert!(err.contains("general.me_pool_min_fresh_ratio must be within [0.0, 1.0]")); + let _ = std::fs::remove_file(path); + } + + #[test] + fn force_close_bumped_when_below_drain_ttl() { + let toml = r#" + [general] + me_pool_drain_ttl_secs = 90 + me_reinit_drain_timeout_secs = 30 + + [censorship] + tls_domain = "example.com" + + [access.users] + user = "00000000000000000000000000000000" + "#; + let dir = std::env::temp_dir(); + let path = dir.join("telemt_force_close_bump_test.toml"); + std::fs::write(&path, toml).unwrap(); + let cfg = ProxyConfig::load(&path).unwrap(); 
+ assert_eq!(cfg.general.me_reinit_drain_timeout_secs, 90); + let _ = std::fs::remove_file(path); + } } diff --git a/src/config/types.rs b/src/config/types.rs index eb16885..b33b5fd 100644 --- a/src/config/types.rs +++ b/src/config/types.rs @@ -206,6 +206,11 @@ pub struct GeneralConfig { #[serde(default = "default_desync_all_full")] pub desync_all_full: bool, + /// Enable C-like hard-swap for ME pool generations. + /// When true, Telemt prewarms a new generation and switches once full coverage is reached. + #[serde(default = "default_hardswap")] + pub hardswap: bool, + /// Enable staggered warmup of extra ME writers. #[serde(default = "default_true")] pub me_warmup_stagger_enabled: bool, @@ -262,6 +267,16 @@ pub struct GeneralConfig { #[serde(default)] pub update_every: Option, + /// Drain-TTL in seconds for stale ME writers after endpoint map changes. + /// During TTL, stale writers may be used only as fallback for new bindings. + #[serde(default = "default_me_pool_drain_ttl_secs")] + pub me_pool_drain_ttl_secs: u64, + + /// Minimum desired-DC coverage ratio required before draining stale writers. + /// Range: 0.0..=1.0. + #[serde(default = "default_me_pool_min_fresh_ratio")] + pub me_pool_min_fresh_ratio: f32, + /// Drain timeout in seconds for stale ME writers after endpoint map changes. /// Set to 0 to keep stale writers draining indefinitely (no force-close). 
#[serde(default = "default_me_reinit_drain_timeout_secs")] @@ -328,8 +343,11 @@ impl Default for GeneralConfig { crypto_pending_buffer: default_crypto_pending_buffer(), max_client_frame: default_max_client_frame(), desync_all_full: default_desync_all_full(), + hardswap: default_hardswap(), fast_mode_min_tls_record: default_fast_mode_min_tls_record(), update_every: Some(default_update_every_secs()), + me_pool_drain_ttl_secs: default_me_pool_drain_ttl_secs(), + me_pool_min_fresh_ratio: default_me_pool_min_fresh_ratio(), me_reinit_drain_timeout_secs: default_me_reinit_drain_timeout_secs(), proxy_secret_auto_reload_secs: default_proxy_secret_reload_secs(), proxy_config_auto_reload_secs: default_proxy_config_reload_secs(), @@ -348,6 +366,12 @@ impl GeneralConfig { self.update_every .unwrap_or_else(|| self.proxy_secret_auto_reload_secs.min(self.proxy_config_auto_reload_secs)) } + + /// Resolve force-close timeout for stale writers. + /// `me_reinit_drain_timeout_secs` remains backward-compatible alias. + pub fn effective_me_pool_force_close_secs(&self) -> u64 { + self.me_reinit_drain_timeout_secs + } } /// `[general.links]` — proxy link generation settings. 
diff --git a/src/main.rs b/src/main.rs index 3a6ad1a..0601215 100644 --- a/src/main.rs +++ b/src/main.rs @@ -73,36 +73,27 @@ fn parse_cli() -> (String, bool, Option) { log_level = Some(s.trim_start_matches("--log-level=").to_string()); } "--help" | "-h" => { - eprintln!("telemt - Telegram MTProto Proxy v{}", env!("CARGO_PKG_VERSION")); + eprintln!("Usage: telemt [config.toml] [OPTIONS]"); eprintln!(); - eprintln!("USAGE:"); - eprintln!(" telemt [CONFIG] [OPTIONS]"); - eprintln!(" telemt --init [INIT_OPTIONS]"); + eprintln!("Options:"); + eprintln!(" --silent, -s Suppress info logs"); + eprintln!(" --log-level debug|verbose|normal|silent"); + eprintln!(" --help, -h Show this help"); eprintln!(); - eprintln!("ARGS:"); - eprintln!(" Path to config file (default: config.toml)"); - eprintln!(); - eprintln!("OPTIONS:"); - eprintln!(" -s, --silent Suppress info logs (equivalent to --log-level silent)"); - eprintln!(" --log-level Set log level [possible values: debug, verbose, normal, silent]"); - eprintln!(" -h, --help Show this help message"); - eprintln!(" -V, --version Print version number"); - eprintln!(); - eprintln!("INIT OPTIONS (fire-and-forget setup):"); - eprintln!(" --init Generate config, install systemd service, and start"); + eprintln!("Setup (fire-and-forget):"); + eprintln!( + " --init Generate config, install systemd service, start" + ); eprintln!(" --port Listen port (default: 443)"); - eprintln!(" --domain TLS domain for masking (default: www.google.com)"); - eprintln!(" --secret 32-char hex secret (auto-generated if omitted)"); - eprintln!(" --user Username for proxy access (default: user)"); + eprintln!( + " --domain TLS domain for masking (default: www.google.com)" + ); + eprintln!( + " --secret 32-char hex secret (auto-generated if omitted)" + ); + eprintln!(" --user Username (default: user)"); eprintln!(" --config-dir Config directory (default: /etc/telemt)"); - eprintln!(" --no-start Create config and service but don't start"); - eprintln!(); - 
eprintln!("EXAMPLES:"); - eprintln!(" telemt # Run with default config"); - eprintln!(" telemt /etc/telemt/config.toml # Run with specific config"); - eprintln!(" telemt --log-level debug # Run with debug logging"); - eprintln!(" telemt --init # Quick setup with defaults"); - eprintln!(" telemt --init --port 8443 --user admin # Custom setup"); + eprintln!(" --no-start Don't start the service after install"); std::process::exit(0); } "--version" | "-V" => { @@ -371,6 +362,10 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).awai config.general.me_reconnect_backoff_base_ms, config.general.me_reconnect_backoff_cap_ms, config.general.me_reconnect_fast_retry_count, + config.general.hardswap, + config.general.me_pool_drain_ttl_secs, + config.general.effective_me_pool_force_close_secs(), + config.general.me_pool_min_fresh_ratio, ); let pool_size = config.general.middle_proxy_pool_size.max(1); diff --git a/src/metrics.rs b/src/metrics.rs index 326d333..d11c302 100644 --- a/src/metrics.rs +++ b/src/metrics.rs @@ -175,6 +175,30 @@ fn render_metrics(stats: &Stats) -> String { stats.get_desync_frames_bucket_gt_10() ); + let _ = writeln!(out, "# HELP telemt_pool_swap_total Successful ME pool swaps"); + let _ = writeln!(out, "# TYPE telemt_pool_swap_total counter"); + let _ = writeln!(out, "telemt_pool_swap_total {}", stats.get_pool_swap_total()); + + let _ = writeln!(out, "# HELP telemt_pool_drain_active Active draining ME writers"); + let _ = writeln!(out, "# TYPE telemt_pool_drain_active gauge"); + let _ = writeln!(out, "telemt_pool_drain_active {}", stats.get_pool_drain_active()); + + let _ = writeln!(out, "# HELP telemt_pool_force_close_total Forced close events for draining writers"); + let _ = writeln!(out, "# TYPE telemt_pool_force_close_total counter"); + let _ = writeln!( + out, + "telemt_pool_force_close_total {}", + stats.get_pool_force_close_total() + ); + + let _ = writeln!(out, "# HELP telemt_pool_stale_pick_total Stale writer fallback 
picks for new binds"); + let _ = writeln!(out, "# TYPE telemt_pool_stale_pick_total counter"); + let _ = writeln!( + out, + "telemt_pool_stale_pick_total {}", + stats.get_pool_stale_pick_total() + ); + let _ = writeln!(out, "# HELP telemt_user_connections_total Per-user total connections"); let _ = writeln!(out, "# TYPE telemt_user_connections_total counter"); let _ = writeln!(out, "# HELP telemt_user_connections_current Per-user active connections"); diff --git a/src/proxy/middle_relay.rs b/src/proxy/middle_relay.rs index d55e5a2..a6a11e1 100644 --- a/src/proxy/middle_relay.rs +++ b/src/proxy/middle_relay.rs @@ -184,6 +184,7 @@ where let user = success.user.clone(); let peer = success.peer; let proto_tag = success.proto_tag; + let pool_generation = me_pool.current_generation(); info!( user = %user, @@ -191,6 +192,7 @@ where dc = success.dc_idx, proto = ?proto_tag, mode = "middle_proxy", + pool_generation, "Routing via Middle-End" ); @@ -220,6 +222,7 @@ where peer_hash = format_args!("0x{:016x}", forensics.peer_hash), desync_all_full = forensics.desync_all_full, proto_flags = format_args!("0x{:08x}", proto_flags), + pool_generation, "ME relay started" ); diff --git a/src/stats/mod.rs b/src/stats/mod.rs index 4c16d25..1994b36 100644 --- a/src/stats/mod.rs +++ b/src/stats/mod.rs @@ -38,6 +38,10 @@ pub struct Stats { desync_frames_bucket_1_2: AtomicU64, desync_frames_bucket_3_10: AtomicU64, desync_frames_bucket_gt_10: AtomicU64, + pool_swap_total: AtomicU64, + pool_drain_active: AtomicU64, + pool_force_close_total: AtomicU64, + pool_stale_pick_total: AtomicU64, user_stats: DashMap, start_time: parking_lot::RwLock>, } @@ -108,6 +112,35 @@ impl Stats { } } } + pub fn increment_pool_swap_total(&self) { + self.pool_swap_total.fetch_add(1, Ordering::Relaxed); + } + pub fn increment_pool_drain_active(&self) { + self.pool_drain_active.fetch_add(1, Ordering::Relaxed); + } + pub fn decrement_pool_drain_active(&self) { + let mut current = 
self.pool_drain_active.load(Ordering::Relaxed); + loop { + if current == 0 { + break; + } + match self.pool_drain_active.compare_exchange_weak( + current, + current - 1, + Ordering::Relaxed, + Ordering::Relaxed, + ) { + Ok(_) => break, + Err(actual) => current = actual, + } + } + } + pub fn increment_pool_force_close_total(&self) { + self.pool_force_close_total.fetch_add(1, Ordering::Relaxed); + } + pub fn increment_pool_stale_pick_total(&self) { + self.pool_stale_pick_total.fetch_add(1, Ordering::Relaxed); + } pub fn get_connects_all(&self) -> u64 { self.connects_all.load(Ordering::Relaxed) } pub fn get_connects_bad(&self) -> u64 { self.connects_bad.load(Ordering::Relaxed) } pub fn get_me_keepalive_sent(&self) -> u64 { self.me_keepalive_sent.load(Ordering::Relaxed) } @@ -149,6 +182,18 @@ impl Stats { pub fn get_desync_frames_bucket_gt_10(&self) -> u64 { self.desync_frames_bucket_gt_10.load(Ordering::Relaxed) } + pub fn get_pool_swap_total(&self) -> u64 { + self.pool_swap_total.load(Ordering::Relaxed) + } + pub fn get_pool_drain_active(&self) -> u64 { + self.pool_drain_active.load(Ordering::Relaxed) + } + pub fn get_pool_force_close_total(&self) -> u64 { + self.pool_force_close_total.load(Ordering::Relaxed) + } + pub fn get_pool_stale_pick_total(&self) -> u64 { + self.pool_stale_pick_total.load(Ordering::Relaxed) + } pub fn increment_user_connects(&self, user: &str) { self.user_stats.entry(user.to_string()).or_default() diff --git a/src/transport/middle_proxy/config_updater.rs b/src/transport/middle_proxy/config_updater.rs index 479a880..96d5f91 100644 --- a/src/transport/middle_proxy/config_updater.rs +++ b/src/transport/middle_proxy/config_updater.rs @@ -131,6 +131,13 @@ pub async fn fetch_proxy_config(url: &str) -> Result { } async fn run_update_cycle(pool: &Arc, rng: &Arc, cfg: &ProxyConfig) { + pool.update_runtime_reinit_policy( + cfg.general.hardswap, + cfg.general.me_pool_drain_ttl_secs, + cfg.general.effective_me_pool_force_close_secs(), + 
cfg.general.me_pool_min_fresh_ratio, + ); + let mut maps_changed = false; // Update proxy config v4 @@ -162,12 +169,7 @@ async fn run_update_cycle(pool: &Arc, rng: &Arc, cfg: &Pro } if maps_changed { - let drain_timeout = if cfg.general.me_reinit_drain_timeout_secs == 0 { - None - } else { - Some(Duration::from_secs(cfg.general.me_reinit_drain_timeout_secs)) - }; - pool.zero_downtime_reinit_after_map_change(rng.as_ref(), drain_timeout) + pool.zero_downtime_reinit_after_map_change(rng.as_ref()) .await; } @@ -224,6 +226,12 @@ pub async fn me_config_updater( break; } let cfg = config_rx.borrow().clone(); + pool.update_runtime_reinit_policy( + cfg.general.hardswap, + cfg.general.me_pool_drain_ttl_secs, + cfg.general.effective_me_pool_force_close_secs(), + cfg.general.me_pool_min_fresh_ratio, + ); let new_secs = cfg.general.effective_update_every_secs().max(1); if new_secs == update_every_secs { continue; diff --git a/src/transport/middle_proxy/health.rs b/src/transport/middle_proxy/health.rs index d4d4a70..18814cd 100644 --- a/src/transport/middle_proxy/health.rs +++ b/src/transport/middle_proxy/health.rs @@ -68,6 +68,7 @@ async fn check_family( .read() .await .iter() + .filter(|w| !w.draining.load(std::sync::atomic::Ordering::Relaxed)) .map(|w| w.addr) .collect(); diff --git a/src/transport/middle_proxy/pool.rs b/src/transport/middle_proxy/pool.rs index bd7c9cc..8e159db 100644 --- a/src/transport/middle_proxy/pool.rs +++ b/src/transport/middle_proxy/pool.rs @@ -1,14 +1,14 @@ use std::collections::{HashMap, HashSet}; use std::net::{IpAddr, Ipv6Addr, SocketAddr}; use std::sync::Arc; -use std::sync::atomic::{AtomicBool, AtomicI32, AtomicU64, AtomicUsize, Ordering}; +use std::sync::atomic::{AtomicBool, AtomicI32, AtomicU32, AtomicU64, AtomicUsize, Ordering}; use bytes::BytesMut; use rand::Rng; use rand::seq::SliceRandom; use tokio::sync::{Mutex, RwLock, mpsc, Notify}; use tokio_util::sync::CancellationToken; use tracing::{debug, info, warn}; -use std::time::{Duration, 
Instant}; +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; use crate::crypto::SecureRandom; use crate::error::{ProxyError, Result}; @@ -27,10 +27,13 @@ const ME_ACTIVE_PING_JITTER_SECS: i64 = 5; pub struct MeWriter { pub id: u64, pub addr: SocketAddr, + pub generation: u64, pub tx: mpsc::Sender, pub cancel: CancellationToken, pub degraded: Arc, pub draining: Arc, + pub draining_started_at_epoch_secs: Arc, + pub allow_drain_fallback: Arc, } pub struct MePool { @@ -73,6 +76,11 @@ pub struct MePool { pub(super) writer_available: Arc, pub(super) conn_count: AtomicUsize, pub(super) stats: Arc, + pub(super) generation: AtomicU64, + pub(super) hardswap: AtomicBool, + pub(super) me_pool_drain_ttl_secs: AtomicU64, + pub(super) me_pool_force_close_secs: AtomicU64, + pub(super) me_pool_min_fresh_ratio_permille: AtomicU32, pool_size: usize, } @@ -83,6 +91,22 @@ pub struct NatReflectionCache { } impl MePool { + fn ratio_to_permille(ratio: f32) -> u32 { + let clamped = ratio.clamp(0.0, 1.0); + (clamped * 1000.0).round() as u32 + } + + fn permille_to_ratio(permille: u32) -> f32 { + (permille.min(1000) as f32) / 1000.0 + } + + fn now_epoch_secs() -> u64 { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_secs() + } + pub fn new( proxy_tag: Option>, proxy_secret: Vec, @@ -110,6 +134,10 @@ impl MePool { me_reconnect_backoff_base_ms: u64, me_reconnect_backoff_cap_ms: u64, me_reconnect_fast_retry_count: u32, + hardswap: bool, + me_pool_drain_ttl_secs: u64, + me_pool_force_close_secs: u64, + me_pool_min_fresh_ratio: f32, ) -> Arc { Arc::new(Self { registry: Arc::new(ConnRegistry::new()), @@ -152,6 +180,11 @@ impl MePool { nat_reflection_cache: Arc::new(Mutex::new(NatReflectionCache::default())), writer_available: Arc::new(Notify::new()), conn_count: AtomicUsize::new(0), + generation: AtomicU64::new(1), + hardswap: AtomicBool::new(hardswap), + me_pool_drain_ttl_secs: AtomicU64::new(me_pool_drain_ttl_secs), + me_pool_force_close_secs: 
AtomicU64::new(me_pool_force_close_secs), + me_pool_min_fresh_ratio_permille: AtomicU32::new(Self::ratio_to_permille(me_pool_min_fresh_ratio)), }) } @@ -159,6 +192,25 @@ impl MePool { self.proxy_tag.is_some() } + pub fn current_generation(&self) -> u64 { + self.generation.load(Ordering::Relaxed) + } + + pub fn update_runtime_reinit_policy( + &self, + hardswap: bool, + drain_ttl_secs: u64, + force_close_secs: u64, + min_fresh_ratio: f32, + ) { + self.hardswap.store(hardswap, Ordering::Relaxed); + self.me_pool_drain_ttl_secs.store(drain_ttl_secs, Ordering::Relaxed); + self.me_pool_force_close_secs + .store(force_close_secs, Ordering::Relaxed); + self.me_pool_min_fresh_ratio_permille + .store(Self::ratio_to_permille(min_fresh_ratio), Ordering::Relaxed); + } + pub fn reset_stun_state(&self) { self.nat_probe_attempts.store(0, Ordering::Relaxed); self.nat_probe_disabled.store(false, Ordering::Relaxed); @@ -177,6 +229,42 @@ impl MePool { self.writers.clone() } + fn force_close_timeout(&self) -> Option { + let secs = self.me_pool_force_close_secs.load(Ordering::Relaxed); + if secs == 0 { + None + } else { + Some(Duration::from_secs(secs)) + } + } + + fn coverage_ratio( + desired_by_dc: &HashMap>, + active_writer_addrs: &HashSet, + ) -> (f32, Vec) { + if desired_by_dc.is_empty() { + return (1.0, Vec::new()); + } + + let mut missing_dc = Vec::::new(); + let mut covered = 0usize; + for (dc, endpoints) in desired_by_dc { + if endpoints.is_empty() { + continue; + } + if endpoints.iter().any(|addr| active_writer_addrs.contains(addr)) { + covered += 1; + } else { + missing_dc.push(*dc); + } + } + + missing_dc.sort_unstable(); + let total = desired_by_dc.len().max(1); + let ratio = (covered as f32) / (total as f32); + (ratio, missing_dc) + } + pub async fn reconcile_connections(self: &Arc, rng: &SecureRandom) { let writers = self.writers.read().await; let current: HashSet = writers @@ -235,39 +323,104 @@ impl MePool { out } + async fn warmup_generation_for_all_dcs( + self: &Arc, + 
rng: &SecureRandom, + generation: u64, + desired_by_dc: &HashMap>, + ) { + for endpoints in desired_by_dc.values() { + if endpoints.is_empty() { + continue; + } + + let has_fresh = { + let ws = self.writers.read().await; + ws.iter().any(|w| { + !w.draining.load(Ordering::Relaxed) + && w.generation == generation + && endpoints.contains(&w.addr) + }) + }; + + if has_fresh { + continue; + } + + let mut shuffled: Vec = endpoints.iter().copied().collect(); + shuffled.shuffle(&mut rand::rng()); + for addr in shuffled { + if self.connect_one(addr, rng).await.is_ok() { + break; + } + } + } + } + pub async fn zero_downtime_reinit_after_map_change( self: &Arc, rng: &SecureRandom, - drain_timeout: Option, ) { - // Stage 1: prewarm writers for new endpoint maps before draining old ones. - self.reconcile_connections(rng).await; - let desired_by_dc = self.desired_dc_endpoints().await; if desired_by_dc.is_empty() { warn!("ME endpoint map is empty after update; skipping stale writer drain"); return; } + let previous_generation = self.current_generation(); + let generation = self.generation.fetch_add(1, Ordering::Relaxed) + 1; + let hardswap = self.hardswap.load(Ordering::Relaxed); + + if hardswap { + self.warmup_generation_for_all_dcs(rng, generation, &desired_by_dc) + .await; + } else { + self.reconcile_connections(rng).await; + } + let writers = self.writers.read().await; let active_writer_addrs: HashSet = writers .iter() .filter(|w| !w.draining.load(Ordering::Relaxed)) .map(|w| w.addr) .collect(); - - let mut missing_dc = Vec::::new(); - for (dc, endpoints) in &desired_by_dc { - if endpoints.is_empty() { - continue; - } - if !endpoints.iter().any(|addr| active_writer_addrs.contains(addr)) { - missing_dc.push(*dc); - } + let min_ratio = Self::permille_to_ratio( + self.me_pool_min_fresh_ratio_permille + .load(Ordering::Relaxed), + ); + let (coverage_ratio, missing_dc) = Self::coverage_ratio(&desired_by_dc, &active_writer_addrs); + if !hardswap && coverage_ratio < min_ratio { + 
warn!( + previous_generation, + generation, + coverage_ratio = format_args!("{coverage_ratio:.3}"), + min_ratio = format_args!("{min_ratio:.3}"), + missing_dc = ?missing_dc, + "ME reinit coverage below threshold; keeping stale writers" + ); + return; } - if !missing_dc.is_empty() { - missing_dc.sort_unstable(); + if hardswap { + let fresh_writer_addrs: HashSet = writers + .iter() + .filter(|w| !w.draining.load(Ordering::Relaxed)) + .filter(|w| w.generation == generation) + .map(|w| w.addr) + .collect(); + let (fresh_ratio, fresh_missing_dc) = + Self::coverage_ratio(&desired_by_dc, &fresh_writer_addrs); + if !fresh_missing_dc.is_empty() { + warn!( + previous_generation, + generation, + fresh_ratio = format_args!("{fresh_ratio:.3}"), + missing_dc = ?fresh_missing_dc, + "ME hardswap pending: fresh generation coverage incomplete" + ); + return; + } + } else if !missing_dc.is_empty() { warn!( missing_dc = ?missing_dc, // Keep stale writers alive when fresh coverage is incomplete. @@ -284,7 +437,13 @@ impl MePool { let stale_writer_ids: Vec = writers .iter() .filter(|w| !w.draining.load(Ordering::Relaxed)) - .filter(|w| !desired_addrs.contains(&w.addr)) + .filter(|w| { + if hardswap { + w.generation < generation + } else { + !desired_addrs.contains(&w.addr) + } + }) .map(|w| w.id) .collect(); drop(writers); @@ -294,14 +453,21 @@ impl MePool { return; } + let drain_timeout = self.force_close_timeout(); let drain_timeout_secs = drain_timeout.map(|d| d.as_secs()).unwrap_or(0); info!( stale_writers = stale_writer_ids.len(), + previous_generation, + generation, + hardswap, + coverage_ratio = format_args!("{coverage_ratio:.3}"), + min_ratio = format_args!("{min_ratio:.3}"), drain_timeout_secs, "ME map update covered; draining stale writers" ); + self.stats.increment_pool_swap_total(); for writer_id in stale_writer_ids { - self.mark_writer_draining_with_timeout(writer_id, drain_timeout) + self.mark_writer_draining_with_timeout(writer_id, drain_timeout, !hardswap) .await; } } @@ 
-507,9 +673,12 @@ impl MePool { let hs = self.handshake_only(stream, addr, rng).await?; let writer_id = self.next_writer_id.fetch_add(1, Ordering::Relaxed); + let generation = self.current_generation(); let cancel = CancellationToken::new(); let degraded = Arc::new(AtomicBool::new(false)); let draining = Arc::new(AtomicBool::new(false)); + let draining_started_at_epoch_secs = Arc::new(AtomicU64::new(0)); + let allow_drain_fallback = Arc::new(AtomicBool::new(false)); let (tx, mut rx) = mpsc::channel::(4096); let mut rpc_writer = RpcWriter { writer: hs.wr, @@ -540,10 +709,13 @@ impl MePool { let writer = MeWriter { id: writer_id, addr, + generation, tx: tx.clone(), cancel: cancel.clone(), degraded: degraded.clone(), draining: draining.clone(), + draining_started_at_epoch_secs: draining_started_at_epoch_secs.clone(), + allow_drain_fallback: allow_drain_fallback.clone(), }; self.writers.write().await.push(writer.clone()); self.conn_count.fetch_add(1, Ordering::Relaxed); @@ -715,6 +887,9 @@ impl MePool { let mut ws = self.writers.write().await; if let Some(pos) = ws.iter().position(|w| w.id == writer_id) { let w = ws.remove(pos); + if w.draining.load(Ordering::Relaxed) { + self.stats.decrement_pool_drain_active(); + } w.cancel.cancel(); close_tx = Some(w.tx.clone()); self.conn_count.fetch_sub(1, Ordering::Relaxed); @@ -731,11 +906,20 @@ impl MePool { self: &Arc, writer_id: u64, timeout: Option, + allow_drain_fallback: bool, ) { let timeout = timeout.filter(|d| !d.is_zero()); let found = { let mut ws = self.writers.write().await; if let Some(w) = ws.iter_mut().find(|w| w.id == writer_id) { + let already_draining = w.draining.swap(true, Ordering::Relaxed); + w.allow_drain_fallback + .store(allow_drain_fallback, Ordering::Relaxed); + w.draining_started_at_epoch_secs + .store(Self::now_epoch_secs(), Ordering::Relaxed); + if !already_draining { + self.stats.increment_pool_drain_active(); + } w.draining.store(true, Ordering::Relaxed); true } else { @@ -748,7 +932,12 @@ impl 
MePool { } let timeout_secs = timeout.map(|d| d.as_secs()).unwrap_or(0); - debug!(writer_id, timeout_secs, "ME writer marked draining"); + debug!( + writer_id, + timeout_secs, + allow_drain_fallback, + "ME writer marked draining" + ); let pool = Arc::downgrade(self); tokio::spawn(async move { @@ -758,6 +947,7 @@ impl MePool { if let Some(deadline_at) = deadline { if Instant::now() >= deadline_at { warn!(writer_id, "Drain timeout, force-closing"); + p.stats.increment_pool_force_close_total(); let _ = p.remove_writer_and_close_clients(writer_id).await; break; } @@ -775,10 +965,31 @@ impl MePool { } pub(crate) async fn mark_writer_draining(self: &Arc, writer_id: u64) { - self.mark_writer_draining_with_timeout(writer_id, Some(Duration::from_secs(300))) + self.mark_writer_draining_with_timeout(writer_id, Some(Duration::from_secs(300)), false) .await; } + pub(super) fn writer_accepts_new_binding(&self, writer: &MeWriter) -> bool { + if !writer.draining.load(Ordering::Relaxed) { + return true; + } + if !writer.allow_drain_fallback.load(Ordering::Relaxed) { + return false; + } + + let ttl_secs = self.me_pool_drain_ttl_secs.load(Ordering::Relaxed); + if ttl_secs == 0 { + return true; + } + + let started = writer.draining_started_at_epoch_secs.load(Ordering::Relaxed); + if started == 0 { + return false; + } + + Self::now_epoch_secs().saturating_sub(started) <= ttl_secs + } + } fn hex_dump(data: &[u8]) -> String { diff --git a/src/transport/middle_proxy/send.rs b/src/transport/middle_proxy/send.rs index 2ebafea..56bd17a 100644 --- a/src/transport/middle_proxy/send.rs +++ b/src/transport/middle_proxy/send.rs @@ -134,8 +134,8 @@ impl MePool { candidate_indices.sort_by_key(|idx| { let w = &writers_snapshot[*idx]; let degraded = w.degraded.load(Ordering::Relaxed); - let draining = w.draining.load(Ordering::Relaxed); - (draining as usize, degraded as usize) + let stale = (w.generation < self.current_generation()) as usize; + (stale, degraded as usize) }); let start = 
self.rr.fetch_add(1, Ordering::Relaxed) as usize % candidate_indices.len(); @@ -143,13 +143,23 @@ impl MePool { for offset in 0..candidate_indices.len() { let idx = candidate_indices[(start + offset) % candidate_indices.len()]; let w = &writers_snapshot[idx]; - if w.draining.load(Ordering::Relaxed) { + if !self.writer_accepts_new_binding(w) { continue; } if w.tx.send(WriterCommand::Data(payload.clone())).await.is_ok() { self.registry .bind_writer(conn_id, w.id, w.tx.clone(), meta.clone()) .await; + if w.generation < self.current_generation() { + self.stats.increment_pool_stale_pick_total(); + debug!( + conn_id, + writer_id = w.id, + writer_generation = w.generation, + current_generation = self.current_generation(), + "Selected stale ME writer for fallback bind" + ); + } return Ok(()); } else { warn!(writer_id = w.id, "ME writer channel closed"); @@ -159,7 +169,7 @@ impl MePool { } let w = writers_snapshot[candidate_indices[start]].clone(); - if w.draining.load(Ordering::Relaxed) { + if !self.writer_accepts_new_binding(&w) { continue; } match w.tx.send(WriterCommand::Data(payload.clone())).await { @@ -167,6 +177,9 @@ impl MePool { self.registry .bind_writer(conn_id, w.id, w.tx.clone(), meta.clone()) .await; + if w.generation < self.current_generation() { + self.stats.increment_pool_stale_pick_total(); + } return Ok(()); } Err(_) => { @@ -245,13 +258,13 @@ impl MePool { if preferred.is_empty() { return (0..writers.len()) - .filter(|i| !writers[*i].draining.load(Ordering::Relaxed)) + .filter(|i| self.writer_accepts_new_binding(&writers[*i])) .collect(); } let mut out = Vec::new(); for (idx, w) in writers.iter().enumerate() { - if w.draining.load(Ordering::Relaxed) { + if !self.writer_accepts_new_binding(w) { continue; } if preferred.iter().any(|p| *p == w.addr) { @@ -260,7 +273,7 @@ impl MePool { } if out.is_empty() { return (0..writers.len()) - .filter(|i| !writers[*i].draining.load(Ordering::Relaxed)) + .filter(|i| self.writer_accepts_new_binding(&writers[*i])) 
.collect(); } out From f710a2192aed3d9cd7538e1108b744d08daf1823 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Tue, 24 Feb 2026 00:08:03 +0300 Subject: [PATCH 18/98] Update types.rs --- src/config/types.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/config/types.rs b/src/config/types.rs index b33b5fd..166cdb6 100644 --- a/src/config/types.rs +++ b/src/config/types.rs @@ -323,7 +323,7 @@ impl Default for GeneralConfig { middle_proxy_nat_stun: None, middle_proxy_nat_stun_servers: Vec::new(), middle_proxy_pool_size: default_pool_size(), - middle_proxy_warm_standby: 8, + middle_proxy_warm_standby: 16, me_keepalive_enabled: true, me_keepalive_interval_secs: default_keepalive_interval(), me_keepalive_jitter_secs: default_keepalive_jitter(), @@ -331,7 +331,7 @@ impl Default for GeneralConfig { me_warmup_stagger_enabled: true, me_warmup_step_delay_ms: default_warmup_step_delay_ms(), me_warmup_step_jitter_ms: default_warmup_step_jitter_ms(), - me_reconnect_max_concurrent_per_dc: 4, + me_reconnect_max_concurrent_per_dc: 11, me_reconnect_backoff_base_ms: default_reconnect_backoff_base_ms(), me_reconnect_backoff_cap_ms: default_reconnect_backoff_cap_ms(), me_reconnect_fast_retry_count: 8, From 08138451d855e8d4b5450f1d41bb5ecb68267e1d Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Tue, 24 Feb 2026 00:15:37 +0300 Subject: [PATCH 19/98] Update types.rs --- src/config/types.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/config/types.rs b/src/config/types.rs index 166cdb6..8d573df 100644 --- a/src/config/types.rs +++ b/src/config/types.rs @@ -331,7 +331,7 @@ impl Default for GeneralConfig { me_warmup_stagger_enabled: true, me_warmup_step_delay_ms: default_warmup_step_delay_ms(), me_warmup_step_jitter_ms: default_warmup_step_jitter_ms(), - me_reconnect_max_concurrent_per_dc: 11, + me_reconnect_max_concurrent_per_dc: 8, 
me_reconnect_backoff_base_ms: default_reconnect_backoff_base_ms(), me_reconnect_backoff_cap_ms: default_reconnect_backoff_cap_ms(), me_reconnect_fast_retry_count: 8, From 122e4729c50fa969d542be9b5f64feb801727a54 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Tue, 24 Feb 2026 00:17:33 +0300 Subject: [PATCH 20/98] Update defaults.rs --- src/config/defaults.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/config/defaults.rs b/src/config/defaults.rs index 775692e..13496d0 100644 --- a/src/config/defaults.rs +++ b/src/config/defaults.rs @@ -82,7 +82,7 @@ pub(crate) fn default_unknown_dc_log_path() -> Option { } pub(crate) fn default_pool_size() -> usize { - 2 + 8 } pub(crate) fn default_keepalive_interval() -> u64 { @@ -163,11 +163,11 @@ pub(crate) fn default_cache_public_ip_path() -> String { } pub(crate) fn default_proxy_secret_reload_secs() -> u64 { - 12 * 60 * 60 + 2 * 60 * 60 } pub(crate) fn default_proxy_config_reload_secs() -> u64 { - 12 * 60 * 60 + 2 * 60 * 60 } pub(crate) fn default_update_every_secs() -> u64 { @@ -187,7 +187,7 @@ pub(crate) fn default_me_pool_min_fresh_ratio() -> f32 { } pub(crate) fn default_hardswap() -> bool { - false + true } pub(crate) fn default_ntp_check() -> bool { From 8b47fc3575acef0190a4cbb86b1162abd8e38b49 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Tue, 24 Feb 2026 02:12:44 +0300 Subject: [PATCH 21/98] Update defaults.rs --- src/config/defaults.rs | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/src/config/defaults.rs b/src/config/defaults.rs index 13496d0..a0443fc 100644 --- a/src/config/defaults.rs +++ b/src/config/defaults.rs @@ -143,10 +143,18 @@ pub(crate) fn default_alpn_enforce() -> bool { pub(crate) fn default_stun_servers() -> Vec { vec![ + "stun.l.google.com:5349".to_string(), + "stun1.l.google.com:3478".to_string(), + "stun.gmx.net:3478".to_string(), 
"stun.l.google.com:19302".to_string(), + "stun.1und1.de:3478".to_string(), "stun1.l.google.com:19302".to_string(), "stun2.l.google.com:19302".to_string(), + "stun3.l.google.com:19302".to_string(), + "stun4.l.google.com:19302".to_string(), + "stun.services.mozilla.com:3478".to_string(), "stun.stunprotocol.org:3478".to_string(), + "stun.nextcloud.com:3478".to_string(), "stun.voip.eutelia.it:3478".to_string(), ] } @@ -163,15 +171,15 @@ pub(crate) fn default_cache_public_ip_path() -> String { } pub(crate) fn default_proxy_secret_reload_secs() -> u64 { - 2 * 60 * 60 + 1 * 60 * 60 } pub(crate) fn default_proxy_config_reload_secs() -> u64 { - 2 * 60 * 60 + 1 * 60 * 60 } pub(crate) fn default_update_every_secs() -> u64 { - 12 * 60 * 60 + 1 * 30 * 60 } pub(crate) fn default_me_reinit_drain_timeout_secs() -> u64 { From d38d7f2bee1647ac5b6485dbcf58bd4ca562b9a5 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Tue, 24 Feb 2026 02:31:12 +0300 Subject: [PATCH 22/98] Update release.yml --- .github/workflows/release.yml | 78 +++++++++++++---------------------- 1 file changed, 28 insertions(+), 50 deletions(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 116c1d4..87a8e30 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -3,11 +3,12 @@ name: Release on: push: tags: - - '[0-9]+.[0-9]+.[0-9]+' # Matches tags like 3.0.0, 3.1.2, etc. 
- workflow_dispatch: # Manual trigger from GitHub Actions UI + - '[0-9]+.[0-9]+.[0-9]+' + workflow_dispatch: permissions: contents: read + packages: write env: CARGO_TERM_COLOR: always @@ -37,11 +38,9 @@ jobs: asset_name: telemt-aarch64-linux-musl steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@v4 - - name: Install stable Rust toolchain - uses: dtolnay/rust-toolchain@888c2e1ea69ab0d4330cbf0af1ecc7b68f368cc1 # v1 + - uses: dtolnay/rust-toolchain@v1 with: toolchain: stable targets: ${{ matrix.target }} @@ -51,8 +50,7 @@ jobs: sudo apt-get update sudo apt-get install -y gcc-aarch64-linux-gnu - - name: Cache cargo registry & build artifacts - uses: actions/cache@d4323d4df104b026a6aa633fdb11d772146be0bf # v4.2.2 + - uses: actions/cache@v4 with: path: | ~/.cargo/registry @@ -76,8 +74,7 @@ jobs: tar -czvf ${{ matrix.asset_name }}.tar.gz ${{ matrix.artifact_name }} sha256sum ${{ matrix.asset_name }}.tar.gz > ${{ matrix.asset_name }}.sha256 - - name: Upload artifact - uses: actions/upload-artifact@65c4c4a1ddee5b72f698fdd19549f0f0fb45cf08 # v4.6.0 + - uses: actions/upload-artifact@v4 with: name: ${{ matrix.asset_name }} path: | @@ -85,30 +82,37 @@ jobs: target/${{ matrix.target }}/release/${{ matrix.asset_name }}.sha256 build-docker-image: + needs: build runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: - - name: Checkout - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - - name: Set up QEMU - uses: docker/setup-qemu-action@v3 + - uses: docker/setup-qemu-action@v3 + - uses: docker/setup-buildx-action@v3 - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - - name: Login to GitHub Container Registry - uses: docker/login-action@v2 + - name: Login to GHCR + uses: docker/login-action@v3 with: registry: ghcr.io - username: ${{ github.repository_owner }} - password: ${{ secrets.TOKEN_GH_DEPLOY }} + username: ${{ github.actor }} + 
password: ${{ secrets.GITHUB_TOKEN }} + + - name: Extract version + id: vars + run: echo "VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_OUTPUT - name: Build and push uses: docker/build-push-action@v6 with: context: . push: true - tags: ${{ github.ref }} + tags: | + ghcr.io/${{ github.repository }}:${{ steps.vars.outputs.VERSION }} + ghcr.io/${{ github.repository }}:latest release: name: Create Release @@ -118,40 +122,14 @@ jobs: contents: write steps: - - name: Checkout repository - uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 + - uses: actions/checkout@v4 with: fetch-depth: 0 - token: ${{ secrets.GITHUB_TOKEN }} - - name: Download all artifacts - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + - uses: actions/download-artifact@v4 with: path: artifacts - - name: Update version in Cargo.toml and Cargo.lock - run: | - # Extract version from tag (remove 'v' prefix if present) - VERSION="${GITHUB_REF#refs/tags/}" - VERSION="${VERSION#v}" - - # Install cargo-edit for version bumping - cargo install cargo-edit - - # Update Cargo.toml version - cargo set-version "$VERSION" - - # Configure git - git config user.name "github-actions[bot]" - git config user.email "github-actions[bot]@users.noreply.github.com" - - # Commit and push changes - #git add Cargo.toml Cargo.lock - #git commit -m "chore: bump version to $VERSION" || echo "No changes to commit" - #git push origin HEAD:main - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - name: Create Release uses: softprops/action-gh-release@v2 with: From 267c8bf2f107686d7e9b8dfa57886f8990ae5f1b Mon Sep 17 00:00:00 2001 From: Dimasssss Date: Tue, 24 Feb 2026 03:03:19 +0300 Subject: [PATCH 23/98] Update config.toml --- config.toml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/config.toml b/config.toml index e146598..22490d9 100644 --- a/config.toml +++ b/config.toml @@ -49,6 +49,10 @@ desync_all_full = false # Emit full crypto-desync forensic log 
me_reinit_drain_timeout_secs = 300 # Drain timeout in seconds for stale ME writers after endpoint map changes. Set to 0 to keep stale writers draining indefinitely (no force-close). auto_degradation_enabled = true # Enable auto-degradation from ME to Direct-DC. degradation_min_unavailable_dc_groups = 2 # Minimum unavailable ME DC groups before degrading. +hardswap = false # Enable C-like hard-swap for ME pool generations. When true, Telemt prewarms a new generation and switches once full coverage is reached. +me_pool_drain_ttl_secs = 90 # Drain-TTL in seconds for stale ME writers after endpoint map changes. During TTL, stale writers may be used only as fallback for new bindings. +me_pool_min_fresh_ratio = 0.8 # Minimum desired-DC coverage ratio required before draining stale writers. Range: 0.0..=1.0. +me_reinit_drain_timeout_secs = 120 # Drain timeout in seconds for stale ME writers after endpoint map changes. Set to 0 to keep stale writers draining indefinitely (no force-close). [general.modes] classic = false From 68c3abee6cab54f09b115e96750d877ecf4ffedf Mon Sep 17 00:00:00 2001 From: Vladislav Yaroslavlev Date: Tue, 24 Feb 2026 03:40:59 +0300 Subject: [PATCH 24/98] fix: eliminate all compiler warnings - Remove unused imports across multiple modules - Add #![allow(dead_code)] for public API items preserved for future use - Add #![allow(deprecated)] for rand::Rng::gen_range usage - Add #![allow(unused_assignments)] in main.rs - Add #![allow(unreachable_code)] in network/stun.rs - Prefix unused variables with underscore (_ip_tracker, _prefer_ipv6) - Fix unused_must_use warning in tls_front/cache.rs This ensures clean compilation without warnings while preserving public API items that may be used in the future. 
--- .gitignore | 4 ++++ Cargo.lock | 2 +- proxy-secret | 1 + src/config/load.rs | 2 ++ src/crypto/aes.rs | 2 ++ src/crypto/mod.rs | 3 +-- src/crypto/random.rs | 3 +++ src/error.rs | 2 ++ src/ip_tracker.rs | 4 +++- src/main.rs | 3 +++ src/metrics.rs | 2 +- src/network/probe.rs | 2 ++ src/network/stun.rs | 3 +++ src/protocol/constants.rs | 4 +++- src/protocol/frame.rs | 2 ++ src/protocol/mod.rs | 4 ++++ src/protocol/obfuscation.rs | 3 ++- src/protocol/tls.rs | 3 ++- src/proxy/client.rs | 8 ++++---- src/proxy/handshake.rs | 2 ++ src/proxy/mod.rs | 3 +++ src/stats/mod.rs | 3 ++- src/stream/buffer_pool.rs | 2 ++ src/stream/crypto_stream.rs | 4 +++- src/stream/frame.rs | 2 ++ src/stream/frame_codec.rs | 2 ++ src/stream/frame_stream.rs | 4 +++- src/stream/mod.rs | 8 +++++++- src/stream/state.rs | 2 ++ src/stream/tls_stream.rs | 2 ++ src/stream/traits.rs | 2 ++ src/tls_front/cache.rs | 3 ++- src/tls_front/mod.rs | 1 + src/transport/middle_proxy/health.rs | 1 + src/transport/middle_proxy/mod.rs | 2 ++ src/transport/middle_proxy/ping.rs | 1 + src/transport/middle_proxy/pool.rs | 2 ++ src/transport/middle_proxy/pool_nat.rs | 5 ++++- src/transport/middle_proxy/registry.rs | 3 +++ src/transport/middle_proxy/secret.rs | 2 -- src/transport/mod.rs | 4 ++++ src/transport/pool.rs | 4 +++- src/transport/proxy_protocol.rs | 6 ++++++ src/transport/socket.rs | 6 ++++++ src/transport/socks.rs | 2 +- src/transport/upstream.rs | 8 ++++++-- src/util/ip.rs | 18 ++++++++++++++---- src/util/mod.rs | 2 ++ src/util/time.rs | 5 +++++ 49 files changed, 140 insertions(+), 28 deletions(-) create mode 100644 proxy-secret diff --git a/.gitignore b/.gitignore index ad67955..6b5f1d5 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,7 @@ target # and can be added to the global gitignore or merged into this file. For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. 
#.idea/ +*.rs +target +Cargo.lock +src diff --git a/Cargo.lock b/Cargo.lock index fb45f19..251f0b7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2087,7 +2087,7 @@ dependencies = [ [[package]] name = "telemt" -version = "3.0.10" +version = "3.0.13" dependencies = [ "aes", "anyhow", diff --git a/proxy-secret b/proxy-secret new file mode 100644 index 0000000..ef77163 --- /dev/null +++ b/proxy-secret @@ -0,0 +1 @@ +ʖxHl~,D0d]UJUAM'!FnRZD>ϳF>yZfa*ߜڋ o8zM:dq>\3w}n\TĐy'VIil&] \ No newline at end of file diff --git a/src/config/load.rs b/src/config/load.rs index 4d59e60..dce5fbc 100644 --- a/src/config/load.rs +++ b/src/config/load.rs @@ -1,3 +1,5 @@ +#![allow(deprecated)] + use std::collections::HashMap; use std::net::IpAddr; use std::path::Path; diff --git a/src/crypto/aes.rs b/src/crypto/aes.rs index 9e123bf..674e4cb 100644 --- a/src/crypto/aes.rs +++ b/src/crypto/aes.rs @@ -11,6 +11,8 @@ //! `HandshakeSuccess`, `ObfuscationParams`) are responsible for //! zeroizing their own copies. +#![allow(dead_code)] + use aes::Aes256; use ctr::{Ctr128BE, cipher::{KeyIvInit, StreamCipher}}; use zeroize::Zeroize; diff --git a/src/crypto/mod.rs b/src/crypto/mod.rs index 266a3cb..9108f34 100644 --- a/src/crypto/mod.rs +++ b/src/crypto/mod.rs @@ -6,7 +6,6 @@ pub mod random; pub use aes::{AesCtr, AesCbc}; pub use hash::{ - build_middleproxy_prekey, crc32, crc32c, derive_middleproxy_keys, md5, sha1, sha256, - sha256_hmac, + build_middleproxy_prekey, crc32, crc32c, derive_middleproxy_keys, sha256, sha256_hmac, }; pub use random::SecureRandom; diff --git a/src/crypto/random.rs b/src/crypto/random.rs index f3432e0..0dd5f1a 100644 --- a/src/crypto/random.rs +++ b/src/crypto/random.rs @@ -1,5 +1,8 @@ //! Pseudorandom +#![allow(deprecated)] +#![allow(dead_code)] + use rand::{Rng, RngCore, SeedableRng}; use rand::rngs::StdRng; use parking_lot::Mutex; diff --git a/src/error.rs b/src/error.rs index f934672..eaebd88 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,5 +1,7 @@ //! 
Error Types +#![allow(dead_code)] + use std::fmt; use std::net::SocketAddr; use thiserror::Error; diff --git a/src/ip_tracker.rs b/src/ip_tracker.rs index 17cebc7..32fcbe3 100644 --- a/src/ip_tracker.rs +++ b/src/ip_tracker.rs @@ -1,5 +1,7 @@ // src/ip_tracker.rs -// Модуль для отслеживания и ограничения уникальных IP-адресов пользователей +// IP address tracking and limiting for users + +#![allow(dead_code)] use std::collections::{HashMap, HashSet}; use std::net::IpAddr; diff --git a/src/main.rs b/src/main.rs index 0601215..0d1eccc 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,5 +1,7 @@ //! telemt — Telegram MTProto Proxy +#![allow(unused_assignments)] + use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; @@ -417,6 +419,7 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).awai if me_pool.is_some() { info!("Transport: Middle-End Proxy - all DC-over-RPC"); } else { + let _ = use_middle_proxy; use_middle_proxy = false; // Make runtime config reflect direct-only mode for handlers. 
config.general.use_middle_proxy = false; diff --git a/src/metrics.rs b/src/metrics.rs index d11c302..620840d 100644 --- a/src/metrics.rs +++ b/src/metrics.rs @@ -2,7 +2,7 @@ use std::convert::Infallible; use std::net::SocketAddr; use std::sync::Arc; -use http_body_util::{Full, BodyExt}; +use http_body_util::Full; use hyper::body::Bytes; use hyper::server::conn::http1; use hyper::service::service_fn; diff --git a/src/network/probe.rs b/src/network/probe.rs index d290ac1..eda69b8 100644 --- a/src/network/probe.rs +++ b/src/network/probe.rs @@ -1,3 +1,5 @@ +#![allow(dead_code)] + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket}; use tracing::{info, warn}; diff --git a/src/network/stun.rs b/src/network/stun.rs index 6a93339..c47aa49 100644 --- a/src/network/stun.rs +++ b/src/network/stun.rs @@ -1,3 +1,6 @@ +#![allow(unreachable_code)] +#![allow(dead_code)] + use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr}; use tokio::net::{lookup_host, UdpSocket}; diff --git a/src/protocol/constants.rs b/src/protocol/constants.rs index c930a1b..e6ddbaf 100644 --- a/src/protocol/constants.rs +++ b/src/protocol/constants.rs @@ -1,6 +1,8 @@ //! Protocol constants and datacenter addresses -use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; +#![allow(dead_code)] + +use std::net::{IpAddr, Ipv4Addr}; use crate::crypto::SecureRandom; use std::sync::LazyLock; diff --git a/src/protocol/frame.rs b/src/protocol/frame.rs index f4517b4..a332be0 100644 --- a/src/protocol/frame.rs +++ b/src/protocol/frame.rs @@ -1,5 +1,7 @@ //! 
MTProto frame types and metadata +#![allow(dead_code)] + use std::collections::HashMap; /// Extra metadata associated with a frame diff --git a/src/protocol/mod.rs b/src/protocol/mod.rs index 4081f1c..5518df2 100644 --- a/src/protocol/mod.rs +++ b/src/protocol/mod.rs @@ -5,7 +5,11 @@ pub mod frame; pub mod obfuscation; pub mod tls; +#[allow(unused_imports)] pub use constants::*; +#[allow(unused_imports)] pub use frame::*; +#[allow(unused_imports)] pub use obfuscation::*; +#[allow(unused_imports)] pub use tls::*; \ No newline at end of file diff --git a/src/protocol/obfuscation.rs b/src/protocol/obfuscation.rs index 4d2197d..d9d1c0a 100644 --- a/src/protocol/obfuscation.rs +++ b/src/protocol/obfuscation.rs @@ -1,8 +1,9 @@ //! MTProto Obfuscation +#![allow(dead_code)] + use zeroize::Zeroize; use crate::crypto::{sha256, AesCtr}; -use crate::error::Result; use super::constants::*; /// Obfuscation parameters from handshake diff --git a/src/protocol/tls.rs b/src/protocol/tls.rs index 93d111f..f124c46 100644 --- a/src/protocol/tls.rs +++ b/src/protocol/tls.rs @@ -4,8 +4,9 @@ //! for domain fronting. The handshake looks like valid TLS 1.3 but //! actually carries MTProto authentication data. 
+#![allow(dead_code)] + use crate::crypto::{sha256_hmac, SecureRandom}; -use crate::error::{ProxyError, Result}; use super::constants::*; use std::time::{SystemTime, UNIX_EPOCH}; use num_bigint::BigUint; diff --git a/src/proxy/client.rs b/src/proxy/client.rs index 14b45da..051ce9e 100644 --- a/src/proxy/client.rs +++ b/src/proxy/client.rs @@ -271,7 +271,7 @@ impl RunningClientHandler { self.peer = normalize_ip(self.peer); let peer = self.peer; - let ip_tracker = self.ip_tracker.clone(); + let _ip_tracker = self.ip_tracker.clone(); debug!(peer = %peer, "New connection"); if let Err(e) = configure_client_socket( @@ -331,7 +331,7 @@ impl RunningClientHandler { let is_tls = tls::is_tls_handshake(&first_bytes[..3]); let peer = self.peer; - let ip_tracker = self.ip_tracker.clone(); + let _ip_tracker = self.ip_tracker.clone(); debug!(peer = %peer, is_tls = is_tls, "Handshake type detected"); @@ -344,7 +344,7 @@ impl RunningClientHandler { async fn handle_tls_client(mut self, first_bytes: [u8; 5]) -> Result { let peer = self.peer; - let ip_tracker = self.ip_tracker.clone(); + let _ip_tracker = self.ip_tracker.clone(); let tls_len = u16::from_be_bytes([first_bytes[3], first_bytes[4]]) as usize; @@ -440,7 +440,7 @@ impl RunningClientHandler { async fn handle_direct_client(mut self, first_bytes: [u8; 5]) -> Result { let peer = self.peer; - let ip_tracker = self.ip_tracker.clone(); + let _ip_tracker = self.ip_tracker.clone(); if !self.config.general.modes.classic && !self.config.general.modes.secure { debug!(peer = %peer, "Non-TLS modes disabled"); diff --git a/src/proxy/handshake.rs b/src/proxy/handshake.rs index 750d839..5c63636 100644 --- a/src/proxy/handshake.rs +++ b/src/proxy/handshake.rs @@ -1,5 +1,7 @@ //! 
MTProto Handshake +#![allow(dead_code)] + use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; diff --git a/src/proxy/mod.rs b/src/proxy/mod.rs index d6243aa..bedae1a 100644 --- a/src/proxy/mod.rs +++ b/src/proxy/mod.rs @@ -8,6 +8,9 @@ pub mod middle_relay; pub mod relay; pub use client::ClientHandler; +#[allow(unused_imports)] pub use handshake::*; +#[allow(unused_imports)] pub use masking::*; +#[allow(unused_imports)] pub use relay::*; diff --git a/src/stats/mod.rs b/src/stats/mod.rs index 1994b36..3169f1d 100644 --- a/src/stats/mod.rs +++ b/src/stats/mod.rs @@ -1,7 +1,8 @@ //! Statistics and replay protection +#![allow(dead_code)] + use std::sync::atomic::{AtomicU64, Ordering}; -use std::sync::Arc; use std::time::{Instant, Duration}; use dashmap::DashMap; use parking_lot::Mutex; diff --git a/src/stream/buffer_pool.rs b/src/stream/buffer_pool.rs index 0de5532..9c46922 100644 --- a/src/stream/buffer_pool.rs +++ b/src/stream/buffer_pool.rs @@ -3,6 +3,8 @@ //! This module provides a thread-safe pool of BytesMut buffers //! that can be reused across connections to reduce allocation pressure. +#![allow(dead_code)] + use bytes::BytesMut; use crossbeam_queue::ArrayQueue; use std::ops::{Deref, DerefMut}; diff --git a/src/stream/crypto_stream.rs b/src/stream/crypto_stream.rs index ebb6f43..67d8c95 100644 --- a/src/stream/crypto_stream.rs +++ b/src/stream/crypto_stream.rs @@ -18,6 +18,8 @@ //! is either written to upstream or stored in our pending buffer //! - when upstream is pending -> ciphertext is buffered/bounded and backpressure is applied //! + +#![allow(dead_code)] //! ======================= //! Writer state machine //! 
======================= @@ -55,7 +57,7 @@ use std::io::{self, ErrorKind, Result}; use std::pin::Pin; use std::task::{Context, Poll}; use tokio::io::{AsyncRead, AsyncWrite, ReadBuf}; -use tracing::{debug, trace, warn}; +use tracing::{debug, trace}; use crate::crypto::AesCtr; use super::state::{StreamState, YieldBuffer}; diff --git a/src/stream/frame.rs b/src/stream/frame.rs index b97d4cf..5c93ea7 100644 --- a/src/stream/frame.rs +++ b/src/stream/frame.rs @@ -3,6 +3,8 @@ //! This module defines the common types and traits used by all //! frame encoding/decoding implementations. +#![allow(dead_code)] + use bytes::{Bytes, BytesMut}; use std::io::Result; use std::sync::Arc; diff --git a/src/stream/frame_codec.rs b/src/stream/frame_codec.rs index 6b90892..3de8257 100644 --- a/src/stream/frame_codec.rs +++ b/src/stream/frame_codec.rs @@ -3,6 +3,8 @@ //! This module provides Encoder/Decoder implementations compatible //! with tokio-util's Framed wrapper for easy async frame I/O. +#![allow(dead_code)] + use bytes::{Bytes, BytesMut, BufMut}; use std::io::{self, Error, ErrorKind}; use std::sync::Arc; diff --git a/src/stream/frame_stream.rs b/src/stream/frame_stream.rs index 1726a06..b66c2cd 100644 --- a/src/stream/frame_stream.rs +++ b/src/stream/frame_stream.rs @@ -1,6 +1,8 @@ //! 
MTProto frame stream wrappers -use bytes::{Bytes, BytesMut}; +#![allow(dead_code)] + +use bytes::Bytes; use std::io::{Error, ErrorKind, Result}; use tokio::io::{AsyncRead, AsyncWrite, AsyncReadExt, AsyncWriteExt}; use crate::protocol::constants::*; diff --git a/src/stream/mod.rs b/src/stream/mod.rs index ea30e5e..a1cff9a 100644 --- a/src/stream/mod.rs +++ b/src/stream/mod.rs @@ -12,28 +12,34 @@ pub mod frame_codec; pub mod frame_stream; // Re-export state machine types +#[allow(unused_imports)] pub use state::{ StreamState, Transition, PollResult, ReadBuffer, WriteBuffer, HeaderBuffer, YieldBuffer, }; // Re-export buffer pool +#[allow(unused_imports)] pub use buffer_pool::{BufferPool, PooledBuffer, PoolStats}; // Re-export stream implementations +#[allow(unused_imports)] pub use crypto_stream::{CryptoReader, CryptoWriter, PassthroughStream}; pub use tls_stream::{FakeTlsReader, FakeTlsWriter}; // Re-export frame types +#[allow(unused_imports)] pub use frame::{Frame, FrameMeta, FrameCodec as FrameCodecTrait, create_codec}; -// Re-export tokio-util compatible codecs +// Re-export tokio-util compatible codecs +#[allow(unused_imports)] pub use frame_codec::{ FrameCodec, AbridgedCodec, IntermediateCodec, SecureCodec, }; // Legacy re-exports for compatibility +#[allow(unused_imports)] pub use frame_stream::{ AbridgedFrameReader, AbridgedFrameWriter, IntermediateFrameReader, IntermediateFrameWriter, diff --git a/src/stream/state.rs b/src/stream/state.rs index c4f52e6..7c57792 100644 --- a/src/stream/state.rs +++ b/src/stream/state.rs @@ -3,6 +3,8 @@ //! This module provides core types and traits for implementing //! stateful async streams with proper partial read/write handling. +#![allow(dead_code)] + use bytes::{Bytes, BytesMut}; use std::io; diff --git a/src/stream/tls_stream.rs b/src/stream/tls_stream.rs index edf970d..fa165db 100644 --- a/src/stream/tls_stream.rs +++ b/src/stream/tls_stream.rs @@ -18,6 +18,8 @@ //! 
- Explicit state machines for all async operations //! - Never lose data on partial reads //! - Atomic TLS record formation for writes + +#![allow(dead_code)] //! - Proper handling of all TLS record types //! //! Important nuance (Telegram FakeTLS): diff --git a/src/stream/traits.rs b/src/stream/traits.rs index 6419824..f6d7c4f 100644 --- a/src/stream/traits.rs +++ b/src/stream/traits.rs @@ -1,5 +1,7 @@ //! Stream traits and common types +#![allow(dead_code)] + use bytes::Bytes; use std::io::Result; use std::pin::Pin; diff --git a/src/tls_front/cache.rs b/src/tls_front/cache.rs index 15a97af..a425a35 100644 --- a/src/tls_front/cache.rs +++ b/src/tls_front/cache.rs @@ -19,6 +19,7 @@ pub struct TlsFrontCache { disk_path: PathBuf, } +#[allow(dead_code)] impl TlsFrontCache { pub fn new(domains: &[String], default_len: usize, disk_path: impl AsRef) -> Self { let default_template = ParsedServerHello { @@ -173,7 +174,7 @@ impl TlsFrontCache { tokio::spawn(async move { loop { for domain in &domains { - fetcher(domain.clone()).await; + let _ = fetcher(domain.clone()).await; } sleep(interval).await; } diff --git a/src/tls_front/mod.rs b/src/tls_front/mod.rs index 89f3988..311920d 100644 --- a/src/tls_front/mod.rs +++ b/src/tls_front/mod.rs @@ -4,4 +4,5 @@ pub mod fetcher; pub mod emulator; pub use cache::TlsFrontCache; +#[allow(unused_imports)] pub use types::{CachedTlsData, TlsFetchResult}; diff --git a/src/transport/middle_proxy/health.rs b/src/transport/middle_proxy/health.rs index 18814cd..e73e5f1 100644 --- a/src/transport/middle_proxy/health.rs +++ b/src/transport/middle_proxy/health.rs @@ -14,6 +14,7 @@ use super::MePool; const HEALTH_INTERVAL_SECS: u64 = 1; const JITTER_FRAC_NUM: u64 = 2; // jitter up to 50% of backoff +#[allow(dead_code)] const MAX_CONCURRENT_PER_DC_DEFAULT: usize = 1; pub async fn me_health_monitor(pool: Arc, rng: Arc, _min_connections: usize) { diff --git a/src/transport/middle_proxy/mod.rs b/src/transport/middle_proxy/mod.rs index 
1027221..f9f8c85 100644 --- a/src/transport/middle_proxy/mod.rs +++ b/src/transport/middle_proxy/mod.rs @@ -17,8 +17,10 @@ mod wire; use bytes::Bytes; pub use health::me_health_monitor; +#[allow(unused_imports)] pub use ping::{run_me_ping, format_sample_line, MePingReport, MePingSample, MePingFamily}; pub use pool::MePool; +#[allow(unused_imports)] pub use pool_nat::{stun_probe, detect_public_ip}; pub use registry::ConnRegistry; pub use secret::fetch_proxy_secret; diff --git a/src/transport/middle_proxy/ping.rs b/src/transport/middle_proxy/ping.rs index 36ef4e7..a1dd1e6 100644 --- a/src/transport/middle_proxy/ping.rs +++ b/src/transport/middle_proxy/ping.rs @@ -24,6 +24,7 @@ pub struct MePingSample { } #[derive(Debug, Clone)] +#[allow(dead_code)] pub struct MePingReport { pub dc: i32, pub family: MePingFamily, diff --git a/src/transport/middle_proxy/pool.rs b/src/transport/middle_proxy/pool.rs index 8e159db..2047e80 100644 --- a/src/transport/middle_proxy/pool.rs +++ b/src/transport/middle_proxy/pool.rs @@ -36,6 +36,7 @@ pub struct MeWriter { pub allow_drain_fallback: Arc, } +#[allow(dead_code)] pub struct MePool { pub(super) registry: Arc, pub(super) writers: Arc>>, @@ -992,6 +993,7 @@ impl MePool { } +#[allow(dead_code)] fn hex_dump(data: &[u8]) -> String { const MAX: usize = 64; let mut out = String::with_capacity(data.len() * 2 + 3); diff --git a/src/transport/middle_proxy/pool_nat.rs b/src/transport/middle_proxy/pool_nat.rs index d3dec16..4d9e2a1 100644 --- a/src/transport/middle_proxy/pool_nat.rs +++ b/src/transport/middle_proxy/pool_nat.rs @@ -1,7 +1,7 @@ use std::net::{IpAddr, Ipv4Addr}; use std::time::Duration; -use tracing::{info, warn, debug}; +use tracing::{info, warn}; use crate::error::{ProxyError, Result}; use crate::network::probe::is_bogon; @@ -9,11 +9,14 @@ use crate::network::stun::{stun_probe_dual, IpFamily, StunProbeResult}; use super::MePool; use std::time::Instant; + +#[allow(dead_code)] pub async fn stun_probe(stun_addr: Option) -> Result { 
let stun_addr = stun_addr.unwrap_or_else(|| "stun.l.google.com:19302".to_string()); stun_probe_dual(&stun_addr).await } +#[allow(dead_code)] pub async fn detect_public_ip() -> Option { fetch_public_ipv4_with_retry().await.ok().flatten().map(IpAddr::V4) } diff --git a/src/transport/middle_proxy/registry.rs b/src/transport/middle_proxy/registry.rs index 6a9250d..2122ed8 100644 --- a/src/transport/middle_proxy/registry.rs +++ b/src/transport/middle_proxy/registry.rs @@ -21,6 +21,7 @@ pub enum RouteResult { } #[derive(Clone)] +#[allow(dead_code)] pub struct ConnMeta { pub target_dc: i16, pub client_addr: SocketAddr, @@ -29,6 +30,7 @@ pub struct ConnMeta { } #[derive(Clone)] +#[allow(dead_code)] pub struct BoundConn { pub conn_id: u64, pub meta: ConnMeta, @@ -167,6 +169,7 @@ impl ConnRegistry { out } + #[allow(dead_code)] pub async fn get_meta(&self, conn_id: u64) -> Option { let inner = self.inner.read().await; inner.meta.get(&conn_id).cloned() diff --git a/src/transport/middle_proxy/secret.rs b/src/transport/middle_proxy/secret.rs index a9e224d..9641143 100644 --- a/src/transport/middle_proxy/secret.rs +++ b/src/transport/middle_proxy/secret.rs @@ -1,5 +1,3 @@ -use std::time::Duration; - use tracing::{debug, info, warn}; use std::time::SystemTime; use httpdate; diff --git a/src/transport/mod.rs b/src/transport/mod.rs index 51cffa4..ead0565 100644 --- a/src/transport/mod.rs +++ b/src/transport/mod.rs @@ -6,9 +6,13 @@ pub mod socket; pub mod socks; pub mod upstream; +#[allow(unused_imports)] pub use pool::ConnectionPool; +#[allow(unused_imports)] pub use proxy_protocol::{ProxyProtocolInfo, parse_proxy_protocol}; pub use socket::*; +#[allow(unused_imports)] pub use socks::*; +#[allow(unused_imports)] pub use upstream::{DcPingResult, StartupPingResult, UpstreamManager}; pub mod middle_proxy; diff --git a/src/transport/pool.rs b/src/transport/pool.rs index 8d83321..fa9dcad 100644 --- a/src/transport/pool.rs +++ b/src/transport/pool.rs @@ -1,5 +1,7 @@ //! 
Connection Pool +#![allow(dead_code)] + use std::collections::HashMap; use std::net::SocketAddr; use std::sync::Arc; @@ -8,7 +10,7 @@ use tokio::net::TcpStream; use tokio::sync::Mutex; use tokio::time::timeout; use parking_lot::RwLock; -use tracing::{debug, warn}; +use tracing::debug; use crate::error::{ProxyError, Result}; use super::socket::configure_tcp_socket; diff --git a/src/transport/proxy_protocol.rs b/src/transport/proxy_protocol.rs index 56746c4..770be7e 100644 --- a/src/transport/proxy_protocol.rs +++ b/src/transport/proxy_protocol.rs @@ -28,6 +28,7 @@ mod address_family { /// Information extracted from PROXY protocol header #[derive(Debug, Clone)] +#[allow(dead_code)] pub struct ProxyProtocolInfo { /// Source (client) address pub src_addr: SocketAddr, @@ -37,6 +38,7 @@ pub struct ProxyProtocolInfo { pub version: u8, } +#[allow(dead_code)] impl ProxyProtocolInfo { /// Create info with just source address pub fn new(src_addr: SocketAddr) -> Self { @@ -231,12 +233,14 @@ async fn parse_v2( } /// Builder for PROXY protocol v1 header +#[allow(dead_code)] pub struct ProxyProtocolV1Builder { family: &'static str, src_addr: Option, dst_addr: Option, } +#[allow(dead_code)] impl ProxyProtocolV1Builder { pub fn new() -> Self { Self { @@ -284,11 +288,13 @@ impl Default for ProxyProtocolV1Builder { } /// Builder for PROXY protocol v2 header +#[allow(dead_code)] pub struct ProxyProtocolV2Builder { src: Option, dst: Option, } +#[allow(dead_code)] impl ProxyProtocolV2Builder { pub fn new() -> Self { Self { src: None, dst: None } diff --git a/src/transport/socket.rs b/src/transport/socket.rs index b41cfd1..0a20c3c 100644 --- a/src/transport/socket.rs +++ b/src/transport/socket.rs @@ -10,6 +10,7 @@ use socket2::{Socket, TcpKeepalive, Domain, Type, Protocol}; use tracing::debug; /// Configure TCP socket with recommended settings for proxy use +#[allow(dead_code)] pub fn configure_tcp_socket( stream: &TcpStream, keepalive: bool, @@ -82,6 +83,7 @@ pub fn 
configure_client_socket( } /// Set socket to send RST on close (for masking) +#[allow(dead_code)] pub fn set_linger_zero(stream: &TcpStream) -> Result<()> { let socket = socket2::SockRef::from(stream); socket.set_linger(Some(Duration::ZERO))?; @@ -89,6 +91,7 @@ pub fn set_linger_zero(stream: &TcpStream) -> Result<()> { } /// Create a new TCP socket for outgoing connections +#[allow(dead_code)] pub fn create_outgoing_socket(addr: SocketAddr) -> Result { create_outgoing_socket_bound(addr, None) } @@ -120,6 +123,7 @@ pub fn create_outgoing_socket_bound(addr: SocketAddr, bind_addr: Option) /// Get local address of a socket +#[allow(dead_code)] pub fn get_local_addr(stream: &TcpStream) -> Option { stream.local_addr().ok() } @@ -157,11 +161,13 @@ pub fn resolve_interface_ip(_name: &str, _want_ipv6: bool) -> Option { } /// Get peer address of a socket +#[allow(dead_code)] pub fn get_peer_addr(stream: &TcpStream) -> Option { stream.peer_addr().ok() } /// Check if address is IPv6 +#[allow(dead_code)] pub fn is_ipv6(addr: &SocketAddr) -> bool { addr.is_ipv6() } diff --git a/src/transport/socks.rs b/src/transport/socks.rs index 35268cb..188d369 100644 --- a/src/transport/socks.rs +++ b/src/transport/socks.rs @@ -1,7 +1,7 @@ //! SOCKS4/5 Client Implementation use std::net::{IpAddr, SocketAddr}; -use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; +use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::net::TcpStream; use crate::error::{ProxyError, Result}; diff --git a/src/transport/upstream.rs b/src/transport/upstream.rs index 6dcc36f..887fa99 100644 --- a/src/transport/upstream.rs +++ b/src/transport/upstream.rs @@ -1,7 +1,9 @@ //! Upstream Management with per-DC latency-weighted selection -//! +//! //! IPv6/IPv4 connectivity checks with configurable preference. 
+#![allow(deprecated)] + use std::collections::HashMap; use std::net::{SocketAddr, IpAddr}; use std::sync::Arc; @@ -549,7 +551,7 @@ impl UpstreamManager { /// Tests BOTH IPv6 and IPv4, returns separate results for each. pub async fn ping_all_dcs( &self, - prefer_ipv6: bool, + _prefer_ipv6: bool, dc_overrides: &HashMap>, ipv4_enabled: bool, ipv6_enabled: bool, @@ -907,6 +909,7 @@ impl UpstreamManager { } /// Get the preferred IP for a DC (for use by other components) + #[allow(dead_code)] pub async fn get_dc_ip_preference(&self, dc_idx: i16) -> Option { let guard = self.upstreams.read().await; if guard.is_empty() { @@ -918,6 +921,7 @@ impl UpstreamManager { } /// Get preferred DC address based on config preference + #[allow(dead_code)] pub async fn get_dc_addr(&self, dc_idx: i16, prefer_ipv6: bool) -> Option { let arr_idx = UpstreamState::dc_array_idx(dc_idx)?; diff --git a/src/util/ip.rs b/src/util/ip.rs index 9bde513..f3e774f 100644 --- a/src/util/ip.rs +++ b/src/util/ip.rs @@ -1,22 +1,24 @@ //! 
IP Addr Detect -use std::net::{IpAddr, SocketAddr, UdpSocket}; +use std::net::{IpAddr, UdpSocket}; use std::time::Duration; use tracing::{debug, warn}; /// Detected IP addresses #[derive(Debug, Clone, Default)] +#[allow(dead_code)] pub struct IpInfo { pub ipv4: Option, pub ipv6: Option, } +#[allow(dead_code)] impl IpInfo { /// Check if any IP is detected pub fn has_any(&self) -> bool { self.ipv4.is_some() || self.ipv6.is_some() } - + /// Get preferred IP (IPv6 if available and preferred) pub fn preferred(&self, prefer_ipv6: bool) -> Option { if prefer_ipv6 { @@ -28,12 +30,14 @@ impl IpInfo { } /// URLs for IP detection +#[allow(dead_code)] const IPV4_URLS: &[&str] = &[ "http://v4.ident.me/", "http://ipv4.icanhazip.com/", "http://api.ipify.org/", ]; +#[allow(dead_code)] const IPV6_URLS: &[&str] = &[ "http://v6.ident.me/", "http://ipv6.icanhazip.com/", @@ -42,12 +46,14 @@ const IPV6_URLS: &[&str] = &[ /// Detect local IP address by connecting to a public DNS /// This does not actually send any packets +#[allow(dead_code)] fn get_local_ip(target: &str) -> Option { let socket = UdpSocket::bind("0.0.0.0:0").ok()?; socket.connect(target).ok()?; socket.local_addr().ok().map(|addr| addr.ip()) } +#[allow(dead_code)] fn get_local_ipv6(target: &str) -> Option { let socket = UdpSocket::bind("[::]:0").ok()?; socket.connect(target).ok()?; @@ -55,6 +61,7 @@ fn get_local_ipv6(target: &str) -> Option { } /// Detect public IP addresses +#[allow(dead_code)] pub async fn detect_ip() -> IpInfo { let mut info = IpInfo::default(); @@ -119,6 +126,7 @@ pub async fn detect_ip() -> IpInfo { info } +#[allow(dead_code)] fn is_private_ip(ip: IpAddr) -> bool { match ip { IpAddr::V4(ipv4) => { @@ -131,19 +139,21 @@ fn is_private_ip(ip: IpAddr) -> bool { } /// Fetch IP from URL +#[allow(dead_code)] async fn fetch_ip(url: &str) -> Option { let client = reqwest::Client::builder() .timeout(Duration::from_secs(5)) .build() .ok()?; - + let response = client.get(url).send().await.ok()?; let text = 
response.text().await.ok()?; - + text.trim().parse().ok() } /// Synchronous IP detection (for startup) +#[allow(dead_code)] pub fn detect_ip_sync() -> IpInfo { tokio::runtime::Handle::current().block_on(detect_ip()) } diff --git a/src/util/mod.rs b/src/util/mod.rs index 5d293d2..8851f51 100644 --- a/src/util/mod.rs +++ b/src/util/mod.rs @@ -3,5 +3,7 @@ pub mod ip; pub mod time; +#[allow(unused_imports)] pub use ip::*; +#[allow(unused_imports)] pub use time::*; \ No newline at end of file diff --git a/src/util/time.rs b/src/util/time.rs index 7db1633..310b015 100644 --- a/src/util/time.rs +++ b/src/util/time.rs @@ -4,11 +4,14 @@ use std::time::Duration; use chrono::{DateTime, Utc}; use tracing::{debug, warn, error}; +#[allow(dead_code)] const TIME_SYNC_URL: &str = "https://core.telegram.org/getProxySecret"; +#[allow(dead_code)] const MAX_TIME_SKEW_SECS: i64 = 30; /// Time sync result #[derive(Debug, Clone)] +#[allow(dead_code)] pub struct TimeSyncResult { pub server_time: DateTime, pub local_time: DateTime, @@ -17,6 +20,7 @@ pub struct TimeSyncResult { } /// Check time synchronization with Telegram servers +#[allow(dead_code)] pub async fn check_time_sync() -> Option { let client = reqwest::Client::builder() .timeout(Duration::from_secs(10)) @@ -60,6 +64,7 @@ pub async fn check_time_sync() -> Option { } /// Background time sync task +#[allow(dead_code)] pub async fn time_sync_task(check_interval: Duration) -> ! 
{ loop { if let Some(result) = check_time_sync().await { From 3397d829241c139320d9315eb75c563479509f9d Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Tue, 24 Feb 2026 03:54:17 +0300 Subject: [PATCH 25/98] Apply suggestion from @axkurcom --- config.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config.toml b/config.toml index 22490d9..375cd7f 100644 --- a/config.toml +++ b/config.toml @@ -49,7 +49,7 @@ desync_all_full = false # Emit full crypto-desync forensic log me_reinit_drain_timeout_secs = 300 # Drain timeout in seconds for stale ME writers after endpoint map changes. Set to 0 to keep stale writers draining indefinitely (no force-close). auto_degradation_enabled = true # Enable auto-degradation from ME to Direct-DC. degradation_min_unavailable_dc_groups = 2 # Minimum unavailable ME DC groups before degrading. -hardswap = false # Enable C-like hard-swap for ME pool generations. When true, Telemt prewarms a new generation and switches once full coverage is reached. +hardswap = true # Enable C-like hard-swap for ME pool generations. When true, Telemt prewarms a new generation and switches once full coverage is reached. me_pool_drain_ttl_secs = 90 # Drain-TTL in seconds for stale ME writers after endpoint map changes. During TTL, stale writers may be used only as fallback for new bindings. me_pool_min_fresh_ratio = 0.8 # Minimum desired-DC coverage ratio required before draining stale writers. Range: 0.0..=1.0. me_reinit_drain_timeout_secs = 120 # Drain timeout in seconds for stale ME writers after endpoint map changes. Set to 0 to keep stale writers draining indefinitely (no force-close). 
From 1d71b7e90ccda4aee7730a24698413fa760c285a Mon Sep 17 00:00:00 2001 From: Vladislav Yaroslavlev Date: Tue, 24 Feb 2026 04:07:14 +0300 Subject: [PATCH 26/98] fix: add missing imports in test code - Add ProxyError import and fix Result type annotation in tls.rs - Add Arc import in stats/mod.rs test module - Add BodyExt import in metrics.rs test module These imports were missing causing compilation failures in cargo test --release with 10 errors. --- src/metrics.rs | 1 + src/protocol/tls.rs | 3 ++- src/stats/mod.rs | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/src/metrics.rs b/src/metrics.rs index 620840d..53ddd5d 100644 --- a/src/metrics.rs +++ b/src/metrics.rs @@ -229,6 +229,7 @@ fn render_metrics(stats: &Stats) -> String { #[cfg(test)] mod tests { use super::*; + use http_body_util::BodyExt; #[test] fn test_render_metrics_format() { diff --git a/src/protocol/tls.rs b/src/protocol/tls.rs index f124c46..c02c8b4 100644 --- a/src/protocol/tls.rs +++ b/src/protocol/tls.rs @@ -7,6 +7,7 @@ #![allow(dead_code)] use crate::crypto::{sha256_hmac, SecureRandom}; +use crate::error::ProxyError; use super::constants::*; use std::time::{SystemTime, UNIX_EPOCH}; use num_bigint::BigUint; @@ -614,7 +615,7 @@ pub fn parse_tls_record_header(header: &[u8; 5]) -> Option<(u8, u16)> { /// /// This is useful for testing that our ServerHello is well-formed. 
#[cfg(test)] -fn validate_server_hello_structure(data: &[u8]) -> Result<()> { +fn validate_server_hello_structure(data: &[u8]) -> Result<(), ProxyError> { if data.len() < 5 { return Err(ProxyError::InvalidTlsRecord { record_type: 0, diff --git a/src/stats/mod.rs b/src/stats/mod.rs index 3169f1d..31e9d4f 100644 --- a/src/stats/mod.rs +++ b/src/stats/mod.rs @@ -497,6 +497,7 @@ impl ReplayStats { #[cfg(test)] mod tests { use super::*; + use std::sync::Arc; #[test] fn test_stats_shared_counters() { From d6214c6bbf922d34ab304c9ef51ab282f9122de0 Mon Sep 17 00:00:00 2001 From: Vladislav Yaroslavlev Date: Tue, 24 Feb 2026 04:20:30 +0300 Subject: [PATCH 27/98] fix: add #[cfg(test)] to unused ProxyError import The ProxyError import in tls.rs is only used in test code (validate_server_hello_structure function), so guard it with #[cfg(test)] to eliminate the unused import warning. --- src/protocol/tls.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/protocol/tls.rs b/src/protocol/tls.rs index c02c8b4..091092a 100644 --- a/src/protocol/tls.rs +++ b/src/protocol/tls.rs @@ -7,6 +7,7 @@ #![allow(dead_code)] use crate::crypto::{sha256_hmac, SecureRandom}; +#[cfg(test)] use crate::error::ProxyError; use super::constants::*; use std::time::{SystemTime, UNIX_EPOCH}; From 09f56dede221b90161c72d480cb0c58b0c124f23 Mon Sep 17 00:00:00 2001 From: Vladislav Yaroslavlev Date: Tue, 24 Feb 2026 05:57:53 +0300 Subject: [PATCH 28/98] fix: resolve clippy warnings Reduce clippy warnings from 54 to 16 by fixing mechanical issues: - collapsible_if: collapse nested if-let chains with let-chains - clone_on_copy: remove unnecessary .clone() on Copy types - manual_clamp: replace .max().min() with .clamp() - unnecessary_cast: remove redundant type casts - collapsible_else_if: flatten else-if chains - contains_vs_iter_any: replace .iter().any() with .contains() - unnecessary_closure: replace .or_else(|| x) with .or(x) - useless_conversion: remove redundant .into() calls - is_none_or: replace 
.map_or(true, ...) with .is_none_or(...) - while_let_loop: convert loop with if-let-break to while-let Remaining16 warnings are design-level issues (too_many_arguments, await_holding_lock, type_complexity, new_ret_no_self) that require architectural changes to fix. --- src/config/defaults.rs | 6 +- src/config/load.rs | 26 ++++---- src/config/types.rs | 9 +-- src/crypto/aes.rs | 16 ++--- src/crypto/hash.rs | 2 + src/crypto/random.rs | 2 +- src/error.rs | 11 +--- src/main.rs | 16 +++-- src/network/probe.rs | 29 +++++---- src/network/stun.rs | 9 +-- src/protocol/constants.rs | 5 +- src/protocol/frame.rs | 2 +- src/protocol/tls.rs | 14 ++--- src/proxy/client.rs | 38 ++++++------ src/proxy/direct_relay.rs | 8 +-- src/proxy/masking.rs | 10 ++-- src/proxy/middle_relay.rs | 18 +++--- src/stats/mod.rs | 8 +-- src/stream/crypto_stream.rs | 13 ++-- src/stream/frame_codec.rs | 2 +- src/stream/frame_stream.rs | 4 +- src/stream/tls_stream.rs | 16 ++--- src/tls_front/cache.rs | 50 ++++++++-------- src/tls_front/emulator.rs | 20 +++---- src/tls_front/fetcher.rs | 2 +- src/transport/middle_proxy/codec.rs | 5 +- src/transport/middle_proxy/config_updater.rs | 44 +++++++------- src/transport/middle_proxy/handshake.rs | 24 ++++---- src/transport/middle_proxy/health.rs | 8 +-- src/transport/middle_proxy/pool.rs | 62 +++++++++----------- src/transport/middle_proxy/pool_nat.rs | 32 +++++----- src/transport/middle_proxy/secret.rs | 26 ++++---- src/transport/middle_proxy/send.rs | 10 ++-- src/transport/socket.rs | 20 +++---- src/transport/socks.rs | 24 ++++---- src/transport/upstream.rs | 25 ++++---- src/util/ip.rs | 62 ++++++++++---------- src/util/time.rs | 16 ++--- 38 files changed, 336 insertions(+), 358 deletions(-) diff --git a/src/config/defaults.rs b/src/config/defaults.rs index a0443fc..b9b0da1 100644 --- a/src/config/defaults.rs +++ b/src/config/defaults.rs @@ -171,15 +171,15 @@ pub(crate) fn default_cache_public_ip_path() -> String { } pub(crate) fn 
default_proxy_secret_reload_secs() -> u64 { - 1 * 60 * 60 + 60 * 60 } pub(crate) fn default_proxy_config_reload_secs() -> u64 { - 1 * 60 * 60 + 60 * 60 } pub(crate) fn default_update_every_secs() -> u64 { - 1 * 30 * 60 + 30 * 60 } pub(crate) fn default_me_reinit_drain_timeout_secs() -> u64 { diff --git a/src/config/load.rs b/src/config/load.rs index dce5fbc..750e0dc 100644 --- a/src/config/load.rs +++ b/src/config/load.rs @@ -278,23 +278,25 @@ impl ProxyConfig { reuse_allow: false, }); } - if let Some(ipv6_str) = &config.server.listen_addr_ipv6 { - if let Ok(ipv6) = ipv6_str.parse::() { - config.server.listeners.push(ListenerConfig { - ip: ipv6, - announce: None, - announce_ip: None, - proxy_protocol: None, - reuse_allow: false, - }); - } + if let Some(ipv6_str) = &config.server.listen_addr_ipv6 + && let Ok(ipv6) = ipv6_str.parse::() + { + config.server.listeners.push(ListenerConfig { + ip: ipv6, + announce: None, + announce_ip: None, + proxy_protocol: None, + reuse_allow: false, + }); } } // Migration: announce_ip → announce for each listener. for listener in &mut config.server.listeners { - if listener.announce.is_none() && listener.announce_ip.is_some() { - listener.announce = Some(listener.announce_ip.unwrap().to_string()); + if listener.announce.is_none() + && let Some(ip) = listener.announce_ip.take() + { + listener.announce = Some(ip.to_string()); } } diff --git a/src/config/types.rs b/src/config/types.rs index 8d573df..c9ceea4 100644 --- a/src/config/types.rs +++ b/src/config/types.rs @@ -677,9 +677,10 @@ pub struct ListenerConfig { /// - `show_link = "*"` — show links for all users /// - `show_link = ["a", "b"]` — show links for specific users /// - omitted — show no links (default) -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default)] pub enum ShowLink { /// Don't show any links (default when omitted). + #[default] None, /// Show links for all configured users. 
All, @@ -687,12 +688,6 @@ pub enum ShowLink { Specific(Vec), } -impl Default for ShowLink { - fn default() -> Self { - ShowLink::None - } -} - impl ShowLink { /// Returns true if no links should be shown. pub fn is_empty(&self) -> bool { diff --git a/src/crypto/aes.rs b/src/crypto/aes.rs index 674e4cb..deda730 100644 --- a/src/crypto/aes.rs +++ b/src/crypto/aes.rs @@ -23,13 +23,13 @@ type Aes256Ctr = Ctr128BE; // ============= AES-256-CTR ============= /// AES-256-CTR encryptor/decryptor -/// +/// /// CTR mode is symmetric — encryption and decryption are the same operation. /// /// **Zeroize note:** The inner `Aes256Ctr` cipher state (expanded key schedule -/// + counter) is opaque and cannot be zeroized. If you need to protect key -/// material, zeroize the `[u8; 32]` key and `u128` IV at the call site -/// before dropping them. +/// + counter) is opaque and cannot be zeroized. If you need to protect key +/// material, zeroize the `[u8; 32]` key and `u128` IV at the call site +/// before dropping them. 
pub struct AesCtr { cipher: Aes256Ctr, } @@ -149,7 +149,7 @@ impl AesCbc { /// /// CBC Encryption: C[i] = AES_Encrypt(P[i] XOR C[i-1]), where C[-1] = IV pub fn encrypt(&self, data: &[u8]) -> Result> { - if data.len() % Self::BLOCK_SIZE != 0 { + if !data.len().is_multiple_of(Self::BLOCK_SIZE) { return Err(ProxyError::Crypto( format!("CBC data must be aligned to 16 bytes, got {}", data.len()) )); @@ -180,7 +180,7 @@ impl AesCbc { /// /// CBC Decryption: P[i] = AES_Decrypt(C[i]) XOR C[i-1], where C[-1] = IV pub fn decrypt(&self, data: &[u8]) -> Result> { - if data.len() % Self::BLOCK_SIZE != 0 { + if !data.len().is_multiple_of(Self::BLOCK_SIZE) { return Err(ProxyError::Crypto( format!("CBC data must be aligned to 16 bytes, got {}", data.len()) )); @@ -209,7 +209,7 @@ impl AesCbc { /// Encrypt data in-place pub fn encrypt_in_place(&self, data: &mut [u8]) -> Result<()> { - if data.len() % Self::BLOCK_SIZE != 0 { + if !data.len().is_multiple_of(Self::BLOCK_SIZE) { return Err(ProxyError::Crypto( format!("CBC data must be aligned to 16 bytes, got {}", data.len()) )); @@ -242,7 +242,7 @@ impl AesCbc { /// Decrypt data in-place pub fn decrypt_in_place(&self, data: &mut [u8]) -> Result<()> { - if data.len() % Self::BLOCK_SIZE != 0 { + if !data.len().is_multiple_of(Self::BLOCK_SIZE) { return Err(ProxyError::Crypto( format!("CBC data must be aligned to 16 bytes, got {}", data.len()) )); diff --git a/src/crypto/hash.rs b/src/crypto/hash.rs index d3f6f55..fa3e441 100644 --- a/src/crypto/hash.rs +++ b/src/crypto/hash.rs @@ -64,6 +64,7 @@ pub fn crc32c(data: &[u8]) -> u32 { /// /// Returned buffer layout (IPv4): /// nonce_srv | nonce_clt | clt_ts | srv_ip | clt_port | purpose | clt_ip | srv_port | secret | nonce_srv | [clt_v6 | srv_v6] | nonce_clt +#[allow(clippy::too_many_arguments)] pub fn build_middleproxy_prekey( nonce_srv: &[u8; 16], nonce_clt: &[u8; 16], @@ -108,6 +109,7 @@ pub fn build_middleproxy_prekey( /// Uses MD5 + SHA-1 as mandated by the Telegram Middle Proxy 
protocol. /// These algorithms are NOT replaceable here — changing them would break /// interoperability with Telegram's middle proxy infrastructure. +#[allow(clippy::too_many_arguments)] pub fn derive_middleproxy_keys( nonce_srv: &[u8; 16], nonce_clt: &[u8; 16], diff --git a/src/crypto/random.rs b/src/crypto/random.rs index 0dd5f1a..6313610 100644 --- a/src/crypto/random.rs +++ b/src/crypto/random.rs @@ -95,7 +95,7 @@ impl SecureRandom { return 0; } - let bytes_needed = (k + 7) / 8; + let bytes_needed = k.div_ceil(8); let bytes = self.bytes(bytes_needed.min(8)); let mut result = 0u64; diff --git a/src/error.rs b/src/error.rs index eaebd88..e4d66b9 100644 --- a/src/error.rs +++ b/src/error.rs @@ -91,7 +91,7 @@ impl From for std::io::Error { std::io::Error::new(std::io::ErrorKind::UnexpectedEof, err) } StreamError::Poisoned { .. } => { - std::io::Error::new(std::io::ErrorKind::Other, err) + std::io::Error::other(err) } StreamError::BufferOverflow { .. } => { std::io::Error::new(std::io::ErrorKind::OutOfMemory, err) @@ -100,7 +100,7 @@ impl From for std::io::Error { std::io::Error::new(std::io::ErrorKind::InvalidData, err) } StreamError::PartialRead { .. } | StreamError::PartialWrite { .. } => { - std::io::Error::new(std::io::ErrorKind::Other, err) + std::io::Error::other(err) } } } @@ -135,12 +135,7 @@ impl Recoverable for StreamError { } fn can_continue(&self) -> bool { - match self { - Self::Poisoned { .. } => false, - Self::UnexpectedEof => false, - Self::BufferOverflow { .. } => false, - _ => true, - } + !matches!(self, Self::Poisoned { .. } | Self::UnexpectedEof | Self::BufferOverflow { .. 
}) } } diff --git a/src/main.rs b/src/main.rs index 0d1eccc..7264239 100644 --- a/src/main.rs +++ b/src/main.rs @@ -301,7 +301,7 @@ async fn main() -> std::result::Result<(), Box> { match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).await { Ok(proxy_secret) => { info!( - secret_len = proxy_secret.len() as usize, // ← ЯВНЫЙ ТИП usize + secret_len = proxy_secret.len(), key_sig = format_args!( "0x{:08x}", if proxy_secret.len() >= 4 { @@ -597,14 +597,12 @@ match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).awai } else { info!(" IPv4 in use / IPv6 is fallback"); } - } else { - if v6_works && !v4_works { - info!(" IPv6 only / IPv4 unavailable)"); - } else if v4_works && !v6_works { - info!(" IPv4 only / IPv6 unavailable)"); - } else if !v6_works && !v4_works { - info!(" No DC connectivity"); - } + } else if v6_works && !v4_works { + info!(" IPv6 only / IPv4 unavailable)"); + } else if v4_works && !v6_works { + info!(" IPv4 only / IPv6 unavailable)"); + } else if !v6_works && !v4_works { + info!(" No DC connectivity"); } info!(" via {}", upstream_result.upstream_name); diff --git a/src/network/probe.rs b/src/network/probe.rs index eda69b8..c52b340 100644 --- a/src/network/probe.rs +++ b/src/network/probe.rs @@ -95,23 +95,21 @@ pub async fn run_probe(config: &NetworkConfig, stun_addr: Option, nat_pr } pub fn decide_network_capabilities(config: &NetworkConfig, probe: &NetworkProbe) -> NetworkDecision { - let mut decision = NetworkDecision::default(); + let ipv4_dc = config.ipv4 && probe.detected_ipv4.is_some(); + let ipv6_dc = config.ipv6.unwrap_or(probe.detected_ipv6.is_some()) && probe.detected_ipv6.is_some(); - decision.ipv4_dc = config.ipv4 && probe.detected_ipv4.is_some(); - decision.ipv6_dc = config.ipv6.unwrap_or(probe.detected_ipv6.is_some()) && probe.detected_ipv6.is_some(); - - decision.ipv4_me = config.ipv4 + let ipv4_me = config.ipv4 && probe.detected_ipv4.is_some() && (!probe.ipv4_is_bogon || 
probe.reflected_ipv4.is_some()); let ipv6_enabled = config.ipv6.unwrap_or(probe.detected_ipv6.is_some()); - decision.ipv6_me = ipv6_enabled + let ipv6_me = ipv6_enabled && probe.detected_ipv6.is_some() && (!probe.ipv6_is_bogon || probe.reflected_ipv6.is_some()); - decision.effective_prefer = match config.prefer { - 6 if decision.ipv6_me || decision.ipv6_dc => 6, - 4 if decision.ipv4_me || decision.ipv4_dc => 4, + let effective_prefer = match config.prefer { + 6 if ipv6_me || ipv6_dc => 6, + 4 if ipv4_me || ipv4_dc => 4, 6 => { warn!("prefer=6 requested but IPv6 unavailable; falling back to IPv4"); 4 @@ -119,10 +117,17 @@ pub fn decide_network_capabilities(config: &NetworkConfig, probe: &NetworkProbe) _ => 4, }; - let me_families = decision.ipv4_me as u8 + decision.ipv6_me as u8; - decision.effective_multipath = config.multipath && me_families >= 2; + let me_families = ipv4_me as u8 + ipv6_me as u8; + let effective_multipath = config.multipath && me_families >= 2; - decision + NetworkDecision { + ipv4_dc, + ipv6_dc, + ipv4_me, + ipv6_me, + effective_prefer, + effective_multipath, + } } fn detect_local_ip_v4() -> Option { diff --git a/src/network/stun.rs b/src/network/stun.rs index c47aa49..5bda495 100644 --- a/src/network/stun.rs +++ b/src/network/stun.rs @@ -198,16 +198,11 @@ async fn resolve_stun_addr(stun_addr: &str, family: IpFamily) -> Result true, - (false, IpFamily::V6) => true, - _ => false, - }) - .next(); + .find(|a| matches!((a.is_ipv4(), family), (true, IpFamily::V4) | (false, IpFamily::V6))); Ok(target) } diff --git a/src/protocol/constants.rs b/src/protocol/constants.rs index e6ddbaf..9e79206 100644 --- a/src/protocol/constants.rs +++ b/src/protocol/constants.rs @@ -160,7 +160,7 @@ pub const MAX_TLS_CHUNK_SIZE: usize = 16384 + 256; /// Secure Intermediate payload is expected to be 4-byte aligned. 
pub fn is_valid_secure_payload_len(data_len: usize) -> bool { - data_len % 4 == 0 + data_len.is_multiple_of(4) } /// Compute Secure Intermediate payload length from wire length. @@ -179,7 +179,7 @@ pub fn secure_padding_len(data_len: usize, rng: &SecureRandom) -> usize { is_valid_secure_payload_len(data_len), "Secure payload must be 4-byte aligned, got {data_len}" ); - (rng.range(3) + 1) as usize + rng.range(3) + 1 } // ============= Timeouts ============= @@ -231,7 +231,6 @@ pub static RESERVED_NONCE_CONTINUES: &[[u8; 4]] = &[ // ============= RPC Constants (for Middle Proxy) ============= /// RPC Proxy Request - /// RPC Flags (from Erlang mtp_rpc.erl) pub const RPC_FLAG_NOT_ENCRYPTED: u32 = 0x2; pub const RPC_FLAG_HAS_AD_TAG: u32 = 0x8; diff --git a/src/protocol/frame.rs b/src/protocol/frame.rs index a332be0..dd59ba9 100644 --- a/src/protocol/frame.rs +++ b/src/protocol/frame.rs @@ -85,7 +85,7 @@ impl FrameMode { pub fn validate_message_length(len: usize) -> bool { use super::constants::{MIN_MSG_LEN, MAX_MSG_LEN, PADDING_FILLER}; - len >= MIN_MSG_LEN && len <= MAX_MSG_LEN && len % PADDING_FILLER.len() == 0 + (MIN_MSG_LEN..=MAX_MSG_LEN).contains(&len) && len.is_multiple_of(PADDING_FILLER.len()) } #[cfg(test)] diff --git a/src/protocol/tls.rs b/src/protocol/tls.rs index 091092a..fbe7ad5 100644 --- a/src/protocol/tls.rs +++ b/src/protocol/tls.rs @@ -335,7 +335,7 @@ pub fn validate_tls_handshake( // This is a quirk in some clients that use uptime instead of real time let is_boot_time = timestamp < 60 * 60 * 24 * 1000; // < ~2.7 years in seconds - if !is_boot_time && (time_diff < TIME_SKEW_MIN || time_diff > TIME_SKEW_MAX) { + if !is_boot_time && !(TIME_SKEW_MIN..=TIME_SKEW_MAX).contains(&time_diff) { continue; } } @@ -393,7 +393,7 @@ pub fn build_server_hello( ) -> Vec { const MIN_APP_DATA: usize = 64; const MAX_APP_DATA: usize = 16640; // RFC 8446 §5.2 upper bound - let fake_cert_len = fake_cert_len.max(MIN_APP_DATA).min(MAX_APP_DATA); + let fake_cert_len = 
fake_cert_len.clamp(MIN_APP_DATA, MAX_APP_DATA); let x25519_key = gen_fake_x25519_key(rng); // Build ServerHello @@ -525,10 +525,10 @@ pub fn extract_sni_from_client_hello(handshake: &[u8]) -> Option { if sn_pos + name_len > sn_end { break; } - if name_type == 0 && name_len > 0 { - if let Ok(host) = std::str::from_utf8(&handshake[sn_pos..sn_pos + name_len]) { - return Some(host.to_string()); - } + if name_type == 0 && name_len > 0 + && let Ok(host) = std::str::from_utf8(&handshake[sn_pos..sn_pos + name_len]) + { + return Some(host.to_string()); } sn_pos += name_len; } @@ -571,7 +571,7 @@ pub fn extract_alpn_from_client_hello(handshake: &[u8]) -> Vec> { let list_len = u16::from_be_bytes([handshake[pos], handshake[pos+1]]) as usize; let mut lp = pos + 2; let list_end = (pos + 2).saturating_add(list_len).min(pos + elen); - while lp + 1 <= list_end { + while lp < list_end { let plen = handshake[lp] as usize; lp += 1; if lp + plen > list_end { break; } diff --git a/src/proxy/client.rs b/src/proxy/client.rs index 051ce9e..483f6e0 100644 --- a/src/proxy/client.rs +++ b/src/proxy/client.rs @@ -594,18 +594,18 @@ impl RunningClientHandler { peer_addr: SocketAddr, ip_tracker: &UserIpTracker, ) -> Result<()> { - if let Some(expiration) = config.access.user_expirations.get(user) { - if chrono::Utc::now() > *expiration { - return Err(ProxyError::UserExpired { - user: user.to_string(), - }); - } + if let Some(expiration) = config.access.user_expirations.get(user) + && chrono::Utc::now() > *expiration + { + return Err(ProxyError::UserExpired { + user: user.to_string(), + }); } // IP limit check if let Err(reason) = ip_tracker.check_and_add(user, peer_addr.ip()).await { warn!( - user = %user, + user = %user, ip = %peer_addr.ip(), reason = %reason, "IP limit exceeded" @@ -615,20 +615,20 @@ impl RunningClientHandler { }); } - if let Some(limit) = config.access.user_max_tcp_conns.get(user) { - if stats.get_user_curr_connects(user) >= *limit as u64 { - return 
Err(ProxyError::ConnectionLimitExceeded { - user: user.to_string(), - }); - } + if let Some(limit) = config.access.user_max_tcp_conns.get(user) + && stats.get_user_curr_connects(user) >= *limit as u64 + { + return Err(ProxyError::ConnectionLimitExceeded { + user: user.to_string(), + }); } - if let Some(quota) = config.access.user_data_quota.get(user) { - if stats.get_user_total_octets(user) >= *quota { - return Err(ProxyError::DataQuotaExceeded { - user: user.to_string(), - }); - } + if let Some(quota) = config.access.user_data_quota.get(user) + && stats.get_user_total_octets(user) >= *quota + { + return Err(ProxyError::DataQuotaExceeded { + user: user.to_string(), + }); } Ok(()) diff --git a/src/proxy/direct_relay.rs b/src/proxy/direct_relay.rs index 630937b..e50623d 100644 --- a/src/proxy/direct_relay.rs +++ b/src/proxy/direct_relay.rs @@ -118,10 +118,10 @@ fn get_dc_addr_static(dc_idx: i16, config: &ProxyConfig) -> Result { // Unknown DC requested by client without override: log and fall back. 
if !config.dc_overrides.contains_key(&dc_key) { warn!(dc_idx = dc_idx, "Requested non-standard DC with no override; falling back to default cluster"); - if let Some(path) = &config.general.unknown_dc_log_path { - if let Ok(mut file) = OpenOptions::new().create(true).append(true).open(path) { - let _ = writeln!(file, "dc_idx={dc_idx}"); - } + if let Some(path) = &config.general.unknown_dc_log_path + && let Ok(mut file) = OpenOptions::new().create(true).append(true).open(path) + { + let _ = writeln!(file, "dc_idx={dc_idx}"); } } diff --git a/src/proxy/masking.rs b/src/proxy/masking.rs index 78ef806..72175fe 100644 --- a/src/proxy/masking.rs +++ b/src/proxy/masking.rs @@ -19,12 +19,12 @@ const MASK_BUFFER_SIZE: usize = 8192; /// Detect client type based on initial data fn detect_client_type(data: &[u8]) -> &'static str { // Check for HTTP request - if data.len() > 4 { - if data.starts_with(b"GET ") || data.starts_with(b"POST") || + if data.len() > 4 + && (data.starts_with(b"GET ") || data.starts_with(b"POST") || data.starts_with(b"HEAD") || data.starts_with(b"PUT ") || - data.starts_with(b"DELETE") || data.starts_with(b"OPTIONS") { - return "HTTP"; - } + data.starts_with(b"DELETE") || data.starts_with(b"OPTIONS")) + { + return "HTTP"; } // Check for TLS ClientHello (0x16 = handshake, 0x03 0x01-0x03 = TLS version) diff --git a/src/proxy/middle_relay.rs b/src/proxy/middle_relay.rs index a6a11e1..f089442 100644 --- a/src/proxy/middle_relay.rs +++ b/src/proxy/middle_relay.rs @@ -393,13 +393,13 @@ where .unwrap_or_else(|e| Err(ProxyError::Proxy(format!("ME writer join error: {e}")))); // When client closes, but ME channel stopped as unregistered - it isnt error - if client_closed { - if matches!( + if client_closed + && matches!( writer_result, Err(ProxyError::Proxy(ref msg)) if msg == "ME connection lost" - ) { - writer_result = Ok(()); - } + ) + { + writer_result = Ok(()); } let result = match (main_result, c2me_result, writer_result) { @@ -549,7 +549,7 @@ where match 
proto_tag { ProtoTag::Abridged => { - if data.len() % 4 != 0 { + if !data.len().is_multiple_of(4) { return Err(ProxyError::Proxy(format!( "Abridged payload must be 4-byte aligned, got {}", data.len() @@ -567,7 +567,7 @@ where frame_buf.push(first); frame_buf.extend_from_slice(data); client_writer - .write_all(&frame_buf) + .write_all(frame_buf) .await .map_err(ProxyError::Io)?; } else if len_words < (1 << 24) { @@ -581,7 +581,7 @@ where frame_buf.extend_from_slice(&[first, lw[0], lw[1], lw[2]]); frame_buf.extend_from_slice(data); client_writer - .write_all(&frame_buf) + .write_all(frame_buf) .await .map_err(ProxyError::Io)?; } else { @@ -618,7 +618,7 @@ where rng.fill(&mut frame_buf[start..]); } client_writer - .write_all(&frame_buf) + .write_all(frame_buf) .await .map_err(ProxyError::Io)?; } diff --git a/src/stats/mod.rs b/src/stats/mod.rs index 31e9d4f..a58996d 100644 --- a/src/stats/mod.rs +++ b/src/stats/mod.rs @@ -326,10 +326,10 @@ impl ReplayShard { // Use key.as_ref() to get &[u8] — avoids Borrow ambiguity // between Borrow<[u8]> and Borrow> - if let Some(entry) = self.cache.peek(key.as_ref()) { - if entry.seq == queue_seq { - self.cache.pop(key.as_ref()); - } + if let Some(entry) = self.cache.peek(key.as_ref()) + && entry.seq == queue_seq + { + self.cache.pop(key.as_ref()); } } } diff --git a/src/stream/crypto_stream.rs b/src/stream/crypto_stream.rs index 67d8c95..5303fe5 100644 --- a/src/stream/crypto_stream.rs +++ b/src/stream/crypto_stream.rs @@ -47,7 +47,7 @@ //! - when upstream is Pending but pending still has room: accept `to_accept` bytes and //! encrypt+append ciphertext directly into pending (in-place encryption of appended range) -//! Encrypted stream wrappers using AES-CTR +//! Encrypted stream wrappers using AES-CTR //! //! This module provides stateful async stream wrappers that handle //! encryption/decryption with proper partial read/write handling. 
@@ -153,9 +153,9 @@ impl CryptoReader { fn take_poison_error(&mut self) -> io::Error { match &mut self.state { CryptoReaderState::Poisoned { error } => error.take().unwrap_or_else(|| { - io::Error::new(ErrorKind::Other, "stream previously poisoned") + io::Error::other("stream previously poisoned") }), - _ => io::Error::new(ErrorKind::Other, "stream not poisoned"), + _ => io::Error::other("stream not poisoned"), } } } @@ -168,6 +168,7 @@ impl AsyncRead for CryptoReader { ) -> Poll> { let this = self.get_mut(); + #[allow(clippy::never_loop)] loop { match &mut this.state { CryptoReaderState::Poisoned { .. } => { @@ -485,14 +486,14 @@ impl CryptoWriter { fn take_poison_error(&mut self) -> io::Error { match &mut self.state { CryptoWriterState::Poisoned { error } => error.take().unwrap_or_else(|| { - io::Error::new(ErrorKind::Other, "stream previously poisoned") + io::Error::other("stream previously poisoned") }), - _ => io::Error::new(ErrorKind::Other, "stream not poisoned"), + _ => io::Error::other("stream not poisoned"), } } /// Ensure we are in Flushing state and return mutable pending buffer. 
- fn ensure_pending<'a>(state: &'a mut CryptoWriterState, max_pending: usize) -> &'a mut PendingCiphertext { + fn ensure_pending(state: &mut CryptoWriterState, max_pending: usize) -> &mut PendingCiphertext { if matches!(state, CryptoWriterState::Idle) { *state = CryptoWriterState::Flushing { pending: PendingCiphertext::new(max_pending), diff --git a/src/stream/frame_codec.rs b/src/stream/frame_codec.rs index 3de8257..2ff7de7 100644 --- a/src/stream/frame_codec.rs +++ b/src/stream/frame_codec.rs @@ -139,7 +139,7 @@ fn encode_abridged(frame: &Frame, dst: &mut BytesMut) -> io::Result<()> { let data = &frame.data; // Validate alignment - if data.len() % 4 != 0 { + if !data.len().is_multiple_of(4) { return Err(Error::new( ErrorKind::InvalidInput, format!("abridged frame must be 4-byte aligned, got {} bytes", data.len()) diff --git a/src/stream/frame_stream.rs b/src/stream/frame_stream.rs index b66c2cd..c729162 100644 --- a/src/stream/frame_stream.rs +++ b/src/stream/frame_stream.rs @@ -78,7 +78,7 @@ impl AbridgedFrameWriter { impl AbridgedFrameWriter { /// Write a frame pub async fn write_frame(&mut self, data: &[u8], meta: &FrameMeta) -> Result<()> { - if data.len() % 4 != 0 { + if !data.len().is_multiple_of(4) { return Err(Error::new( ErrorKind::InvalidInput, format!("Abridged frame must be aligned to 4 bytes, got {}", data.len()), @@ -331,7 +331,7 @@ impl MtprotoFrameReader { } // Validate length - if len < MIN_MSG_LEN || len > MAX_MSG_LEN || len % PADDING_FILLER.len() != 0 { + if !(MIN_MSG_LEN..=MAX_MSG_LEN).contains(&len) || !len.is_multiple_of(PADDING_FILLER.len()) { return Err(Error::new( ErrorKind::InvalidData, format!("Invalid message length: {}", len), diff --git a/src/stream/tls_stream.rs b/src/stream/tls_stream.rs index fa165db..fe28542 100644 --- a/src/stream/tls_stream.rs +++ b/src/stream/tls_stream.rs @@ -135,7 +135,7 @@ impl TlsRecordHeader { } /// Build header bytes - fn to_bytes(&self) -> [u8; 5] { + fn to_bytes(self) -> [u8; 5] { [ self.record_type, 
self.version[0], @@ -260,9 +260,9 @@ impl FakeTlsReader { fn take_poison_error(&mut self) -> io::Error { match &mut self.state { TlsReaderState::Poisoned { error } => error.take().unwrap_or_else(|| { - io::Error::new(ErrorKind::Other, "stream previously poisoned") + io::Error::other("stream previously poisoned") }), - _ => io::Error::new(ErrorKind::Other, "stream not poisoned"), + _ => io::Error::other("stream not poisoned"), } } } @@ -297,7 +297,7 @@ impl AsyncRead for FakeTlsReader { TlsReaderState::Poisoned { error } => { this.state = TlsReaderState::Poisoned { error: None }; let err = error.unwrap_or_else(|| { - io::Error::new(ErrorKind::Other, "stream previously poisoned") + io::Error::other("stream previously poisoned") }); return Poll::Ready(Err(err)); } @@ -616,9 +616,9 @@ impl FakeTlsWriter { fn take_poison_error(&mut self) -> io::Error { match &mut self.state { TlsWriterState::Poisoned { error } => error.take().unwrap_or_else(|| { - io::Error::new(ErrorKind::Other, "stream previously poisoned") + io::Error::other("stream previously poisoned") }), - _ => io::Error::new(ErrorKind::Other, "stream not poisoned"), + _ => io::Error::other("stream not poisoned"), } } @@ -682,7 +682,7 @@ impl AsyncWrite for FakeTlsWriter { TlsWriterState::Poisoned { error } => { this.state = TlsWriterState::Poisoned { error: None }; let err = error.unwrap_or_else(|| { - Error::new(ErrorKind::Other, "stream previously poisoned") + Error::other("stream previously poisoned") }); return Poll::Ready(Err(err)); } @@ -771,7 +771,7 @@ impl AsyncWrite for FakeTlsWriter { TlsWriterState::Poisoned { error } => { this.state = TlsWriterState::Poisoned { error: None }; let err = error.unwrap_or_else(|| { - Error::new(ErrorKind::Other, "stream previously poisoned") + Error::other("stream previously poisoned") }); return Poll::Ready(Err(err)); } diff --git a/src/tls_front/cache.rs b/src/tls_front/cache.rs index a425a35..23e60db 100644 --- a/src/tls_front/cache.rs +++ b/src/tls_front/cache.rs @@ 
-115,32 +115,32 @@ impl TlsFrontCache { if !name.ends_with(".json") { continue; } - if let Ok(data) = tokio::fs::read(entry.path()).await { - if let Ok(mut cached) = serde_json::from_slice::(&data) { - if cached.domain.is_empty() - || cached.domain.len() > 255 - || !cached.domain.chars().all(|c| c.is_ascii_alphanumeric() || c == '.' || c == '-') - { - warn!(file = %name, "Skipping TLS cache entry with invalid domain"); - continue; - } - // fetched_at is skipped during deserialization; approximate with file mtime if available. - if let Ok(meta) = entry.metadata().await { - if let Ok(modified) = meta.modified() { - cached.fetched_at = modified; - } - } - // Drop entries older than 72h - if let Ok(age) = cached.fetched_at.elapsed() { - if age > Duration::from_secs(72 * 3600) { - warn!(domain = %cached.domain, "Skipping stale TLS cache entry (>72h)"); - continue; - } - } - let domain = cached.domain.clone(); - self.set(&domain, cached).await; - loaded += 1; + if let Ok(data) = tokio::fs::read(entry.path()).await + && let Ok(mut cached) = serde_json::from_slice::(&data) + { + if cached.domain.is_empty() + || cached.domain.len() > 255 + || !cached.domain.chars().all(|c| c.is_ascii_alphanumeric() || c == '.' || c == '-') + { + warn!(file = %name, "Skipping TLS cache entry with invalid domain"); + continue; } + // fetched_at is skipped during deserialization; approximate with file mtime if available. 
+ if let Ok(meta) = entry.metadata().await + && let Ok(modified) = meta.modified() + { + cached.fetched_at = modified; + } + // Drop entries older than 72h + if let Ok(age) = cached.fetched_at.elapsed() + && age > Duration::from_secs(72 * 3600) + { + warn!(domain = %cached.domain, "Skipping stale TLS cache entry (>72h)"); + continue; + } + let domain = cached.domain.clone(); + self.set(&domain, cached).await; + loaded += 1; } } } diff --git a/src/tls_front/emulator.rs b/src/tls_front/emulator.rs index 25d2a8c..c8c18ac 100644 --- a/src/tls_front/emulator.rs +++ b/src/tls_front/emulator.rs @@ -12,7 +12,7 @@ fn jitter_and_clamp_sizes(sizes: &[usize], rng: &SecureRandom) -> Vec { sizes .iter() .map(|&size| { - let base = size.max(MIN_APP_DATA).min(MAX_APP_DATA); + let base = size.clamp(MIN_APP_DATA, MAX_APP_DATA); let jitter_range = ((base as f64) * 0.03).round() as i64; if jitter_range == 0 { return base; @@ -50,7 +50,7 @@ fn ensure_payload_capacity(mut sizes: Vec, payload_len: usize) -> Vec 17 { + let body_len = size - 17; + rec.extend_from_slice(&rng.bytes(body_len)); + rec.push(0x16); // inner content type marker (handshake) + rec.extend_from_slice(&rng.bytes(16)); // AEAD-like tag } else { - if size > 17 { - let body_len = size - 17; - rec.extend_from_slice(&rng.bytes(body_len)); - rec.push(0x16); // inner content type marker (handshake) - rec.extend_from_slice(&rng.bytes(16)); // AEAD-like tag - } else { - rec.extend_from_slice(&rng.bytes(size)); - } + rec.extend_from_slice(&rng.bytes(size)); } app_data.extend_from_slice(&rec); } diff --git a/src/tls_front/fetcher.rs b/src/tls_front/fetcher.rs index 4678ea3..7ac4b42 100644 --- a/src/tls_front/fetcher.rs +++ b/src/tls_front/fetcher.rs @@ -384,7 +384,7 @@ async fn fetch_via_raw_tls( for _ in 0..4 { match timeout(connect_timeout, read_tls_record(&mut stream)).await { Ok(Ok(rec)) => records.push(rec), - Ok(Err(e)) => return Err(e.into()), + Ok(Err(e)) => return Err(e), Err(_) => break, } if records.len() >= 3 && 
records.iter().any(|(t, _)| *t == TLS_RECORD_APPLICATION) { diff --git a/src/transport/middle_proxy/codec.rs b/src/transport/middle_proxy/codec.rs index 6d83761..6df0466 100644 --- a/src/transport/middle_proxy/codec.rs +++ b/src/transport/middle_proxy/codec.rs @@ -165,11 +165,10 @@ fn process_pid16() -> u16 { } fn process_utime() -> u32 { - let utime = std::time::SystemTime::now() + std::time::SystemTime::now() .duration_since(std::time::UNIX_EPOCH) .unwrap_or_default() - .as_secs() as u32; - utime + .as_secs() as u32 } pub(crate) fn cbc_encrypt_padded( diff --git a/src/transport/middle_proxy/config_updater.rs b/src/transport/middle_proxy/config_updater.rs index 96d5f91..56d5b81 100644 --- a/src/transport/middle_proxy/config_updater.rs +++ b/src/transport/middle_proxy/config_updater.rs @@ -40,14 +40,16 @@ pub struct ProxyConfigData { } fn parse_host_port(s: &str) -> Option<(IpAddr, u16)> { - if let Some(bracket_end) = s.rfind(']') { - if s.starts_with('[') && bracket_end + 1 < s.len() && s.as_bytes().get(bracket_end + 1) == Some(&b':') { - let host = &s[1..bracket_end]; - let port_str = &s[bracket_end + 2..]; - let ip = host.parse::().ok()?; - let port = port_str.parse::().ok()?; - return Some((ip, port)); - } + if let Some(bracket_end) = s.rfind(']') + && s.starts_with('[') + && bracket_end + 1 < s.len() + && s.as_bytes().get(bracket_end + 1) == Some(&b':') + { + let host = &s[1..bracket_end]; + let port_str = &s[bracket_end + 2..]; + let ip = host.parse::().ok()?; + let port = port_str.parse::().ok()?; + return Some((ip, port)); } let idx = s.rfind(':')?; @@ -84,20 +86,18 @@ pub async fn fetch_proxy_config(url: &str) -> Result { .map_err(|e| crate::error::ProxyError::Proxy(format!("fetch_proxy_config GET failed: {e}")))? 
; - if let Some(date) = resp.headers().get(reqwest::header::DATE) { - if let Ok(date_str) = date.to_str() { - if let Ok(server_time) = httpdate::parse_http_date(date_str) { - if let Ok(skew) = SystemTime::now().duration_since(server_time).or_else(|e| { - server_time.duration_since(SystemTime::now()).map_err(|_| e) - }) { - let skew_secs = skew.as_secs(); - if skew_secs > 60 { - warn!(skew_secs, "Time skew >60s detected from fetch_proxy_config Date header"); - } else if skew_secs > 30 { - warn!(skew_secs, "Time skew >30s detected from fetch_proxy_config Date header"); - } - } - } + if let Some(date) = resp.headers().get(reqwest::header::DATE) + && let Ok(date_str) = date.to_str() + && let Ok(server_time) = httpdate::parse_http_date(date_str) + && let Ok(skew) = SystemTime::now().duration_since(server_time).or_else(|e| { + server_time.duration_since(SystemTime::now()).map_err(|_| e) + }) + { + let skew_secs = skew.as_secs(); + if skew_secs > 60 { + warn!(skew_secs, "Time skew >60s detected from fetch_proxy_config Date header"); + } else if skew_secs > 30 { + warn!(skew_secs, "Time skew >30s detected from fetch_proxy_config Date header"); } } diff --git a/src/transport/middle_proxy/handshake.rs b/src/transport/middle_proxy/handshake.rs index 95a9d6e..d9bcdde 100644 --- a/src/transport/middle_proxy/handshake.rs +++ b/src/transport/middle_proxy/handshake.rs @@ -47,21 +47,21 @@ impl MePool { pub(crate) async fn connect_tcp(&self, addr: SocketAddr) -> Result<(TcpStream, f64)> { let start = Instant::now(); let connect_fut = async { - if addr.is_ipv6() { - if let Some(v6) = self.detected_ipv6 { - match TcpSocket::new_v6() { - Ok(sock) => { - if let Err(e) = sock.bind(SocketAddr::new(IpAddr::V6(v6), 0)) { - debug!(error = %e, bind_ip = %v6, "ME IPv6 bind failed, falling back to default bind"); - } else { - match sock.connect(addr).await { - Ok(stream) => return Ok(stream), - Err(e) => debug!(error = %e, target = %addr, "ME IPv6 bound connect failed, retrying default 
connect"), - } + if addr.is_ipv6() + && let Some(v6) = self.detected_ipv6 + { + match TcpSocket::new_v6() { + Ok(sock) => { + if let Err(e) = sock.bind(SocketAddr::new(IpAddr::V6(v6), 0)) { + debug!(error = %e, bind_ip = %v6, "ME IPv6 bind failed, falling back to default bind"); + } else { + match sock.connect(addr).await { + Ok(stream) => return Ok(stream), + Err(e) => debug!(error = %e, target = %addr, "ME IPv6 bound connect failed, retrying default connect"), } } - Err(e) => debug!(error = %e, "ME IPv6 socket creation failed, falling back to default connect"), } + Err(e) => debug!(error = %e, "ME IPv6 socket creation failed, falling back to default connect"), } } TcpStream::connect(addr).await diff --git a/src/transport/middle_proxy/health.rs b/src/transport/middle_proxy/health.rs index e73e5f1..4bb7e64 100644 --- a/src/transport/middle_proxy/health.rs +++ b/src/transport/middle_proxy/health.rs @@ -92,10 +92,10 @@ async fn check_family( let key = (dc, family); let now = Instant::now(); - if let Some(ts) = next_attempt.get(&key) { - if now < *ts { - continue; - } + if let Some(ts) = next_attempt.get(&key) + && now < *ts + { + continue; } let max_concurrent = pool.me_reconnect_max_concurrent_per_dc.max(1) as usize; diff --git a/src/transport/middle_proxy/pool.rs b/src/transport/middle_proxy/pool.rs index 2047e80..06fdc96 100644 --- a/src/transport/middle_proxy/pool.rs +++ b/src/transport/middle_proxy/pool.rs @@ -498,10 +498,10 @@ impl MePool { let mut guard = self.proxy_map_v4.write().await; let keys: Vec = guard.keys().cloned().collect(); for k in keys.iter().cloned().filter(|k| *k > 0) { - if !guard.contains_key(&-k) { - if let Some(addrs) = guard.get(&k).cloned() { - guard.insert(-k, addrs); - } + if !guard.contains_key(&-k) + && let Some(addrs) = guard.get(&k).cloned() + { + guard.insert(-k, addrs); } } } @@ -509,10 +509,10 @@ impl MePool { let mut guard = self.proxy_map_v6.write().await; let keys: Vec = guard.keys().cloned().collect(); for k in 
keys.iter().cloned().filter(|k| *k > 0) { - if !guard.contains_key(&-k) { - if let Some(addrs) = guard.get(&k).cloned() { - guard.insert(-k, addrs); - } + if !guard.contains_key(&-k) + && let Some(addrs) = guard.get(&k).cloned() + { + guard.insert(-k, addrs); } } } @@ -760,13 +760,12 @@ impl MePool { cancel_reader_token.clone(), ) .await; - if let Some(pool) = pool.upgrade() { - if cleanup_for_reader + if let Some(pool) = pool.upgrade() + && cleanup_for_reader .compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed) .is_ok() - { - pool.remove_writer_and_close_clients(writer_id).await; - } + { + pool.remove_writer_and_close_clients(writer_id).await; } if let Err(e) = res { warn!(error = %e, "ME reader ended"); @@ -834,13 +833,12 @@ impl MePool { stats_ping.increment_me_keepalive_failed(); debug!("ME ping failed, removing dead writer"); cancel_ping.cancel(); - if let Some(pool) = pool_ping.upgrade() { - if cleanup_for_ping + if let Some(pool) = pool_ping.upgrade() + && cleanup_for_ping .compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed) .is_ok() - { - pool.remove_writer_and_close_clients(writer_id).await; - } + { + pool.remove_writer_and_close_clients(writer_id).await; } break; } @@ -943,24 +941,20 @@ impl MePool { let pool = Arc::downgrade(self); tokio::spawn(async move { let deadline = timeout.map(|t| Instant::now() + t); - loop { - if let Some(p) = pool.upgrade() { - if let Some(deadline_at) = deadline { - if Instant::now() >= deadline_at { - warn!(writer_id, "Drain timeout, force-closing"); - p.stats.increment_pool_force_close_total(); - let _ = p.remove_writer_and_close_clients(writer_id).await; - break; - } - } - if p.registry.is_writer_empty(writer_id).await { - let _ = p.remove_writer_only(writer_id).await; - break; - } - tokio::time::sleep(Duration::from_secs(1)).await; - } else { + while let Some(p) = pool.upgrade() { + if let Some(deadline_at) = deadline + && Instant::now() >= deadline_at + { + warn!(writer_id, "Drain timeout, 
force-closing"); + p.stats.increment_pool_force_close_total(); + let _ = p.remove_writer_and_close_clients(writer_id).await; break; } + if p.registry.is_writer_empty(writer_id).await { + let _ = p.remove_writer_only(writer_id).await; + break; + } + tokio::time::sleep(Duration::from_secs(1)).await; } }); } diff --git a/src/transport/middle_proxy/pool_nat.rs b/src/transport/middle_proxy/pool_nat.rs index 4d9e2a1..9936707 100644 --- a/src/transport/middle_proxy/pool_nat.rs +++ b/src/transport/middle_proxy/pool_nat.rs @@ -25,7 +25,7 @@ impl MePool { pub(super) fn translate_ip_for_nat(&self, ip: IpAddr) -> IpAddr { let nat_ip = self .nat_ip_cfg - .or_else(|| self.nat_ip_detected.try_read().ok().and_then(|g| (*g).clone())); + .or_else(|| self.nat_ip_detected.try_read().ok().and_then(|g| *g)); let Some(nat_ip) = nat_ip else { return ip; @@ -75,7 +75,7 @@ impl MePool { return None; } - if let Some(ip) = self.nat_ip_detected.read().await.clone() { + if let Some(ip) = *self.nat_ip_detected.read().await { return Some(ip); } @@ -102,17 +102,17 @@ impl MePool { ) -> Option { const STUN_CACHE_TTL: Duration = Duration::from_secs(600); // Backoff window - if let Some(until) = *self.stun_backoff_until.read().await { - if Instant::now() < until { - if let Ok(cache) = self.nat_reflection_cache.try_lock() { - let slot = match family { - IpFamily::V4 => cache.v4, - IpFamily::V6 => cache.v6, - }; - return slot.map(|(_, addr)| addr); - } - return None; + if let Some(until) = *self.stun_backoff_until.read().await + && Instant::now() < until + { + if let Ok(cache) = self.nat_reflection_cache.try_lock() { + let slot = match family { + IpFamily::V4 => cache.v4, + IpFamily::V6 => cache.v6, + }; + return slot.map(|(_, addr)| addr); } + return None; } if let Ok(mut cache) = self.nat_reflection_cache.try_lock() { @@ -120,10 +120,10 @@ impl MePool { IpFamily::V4 => &mut cache.v4, IpFamily::V6 => &mut cache.v6, }; - if let Some((ts, addr)) = slot { - if ts.elapsed() < STUN_CACHE_TTL { - return 
Some(*addr); - } + if let Some((ts, addr)) = slot + && ts.elapsed() < STUN_CACHE_TTL + { + return Some(*addr); } } diff --git a/src/transport/middle_proxy/secret.rs b/src/transport/middle_proxy/secret.rs index 9641143..69a3198 100644 --- a/src/transport/middle_proxy/secret.rs +++ b/src/transport/middle_proxy/secret.rs @@ -63,20 +63,18 @@ pub async fn download_proxy_secret() -> Result> { ))); } - if let Some(date) = resp.headers().get(reqwest::header::DATE) { - if let Ok(date_str) = date.to_str() { - if let Ok(server_time) = httpdate::parse_http_date(date_str) { - if let Ok(skew) = SystemTime::now().duration_since(server_time).or_else(|e| { - server_time.duration_since(SystemTime::now()).map_err(|_| e) - }) { - let skew_secs = skew.as_secs(); - if skew_secs > 60 { - warn!(skew_secs, "Time skew >60s detected from proxy-secret Date header"); - } else if skew_secs > 30 { - warn!(skew_secs, "Time skew >30s detected from proxy-secret Date header"); - } - } - } + if let Some(date) = resp.headers().get(reqwest::header::DATE) + && let Ok(date_str) = date.to_str() + && let Ok(server_time) = httpdate::parse_http_date(date_str) + && let Ok(skew) = SystemTime::now().duration_since(server_time).or_else(|e| { + server_time.duration_since(SystemTime::now()).map_err(|_| e) + }) + { + let skew_secs = skew.as_secs(); + if skew_secs > 60 { + warn!(skew_secs, "Time skew >60s detected from proxy-secret Date header"); + } else if skew_secs > 30 { + warn!(skew_secs, "Time skew >30s detected from proxy-secret Date header"); } } diff --git a/src/transport/middle_proxy/send.rs b/src/transport/middle_proxy/send.rs index 56bd17a..8867212 100644 --- a/src/transport/middle_proxy/send.rs +++ b/src/transport/middle_proxy/send.rs @@ -242,10 +242,10 @@ impl MePool { } if preferred.is_empty() { let def = self.default_dc.load(Ordering::Relaxed); - if def != 0 { - if let Some(v) = map_guard.get(&def) { - preferred.extend(v.iter().map(|(ip, port)| SocketAddr::new(*ip, *port))); - } + if def != 0 + && 
let Some(v) = map_guard.get(&def) + { + preferred.extend(v.iter().map(|(ip, port)| SocketAddr::new(*ip, *port))); } } @@ -267,7 +267,7 @@ impl MePool { if !self.writer_accepts_new_binding(w) { continue; } - if preferred.iter().any(|p| *p == w.addr) { + if preferred.contains(&w.addr) { out.push(idx); } } diff --git a/src/transport/socket.rs b/src/transport/socket.rs index 0a20c3c..f1f8d5c 100644 --- a/src/transport/socket.rs +++ b/src/transport/socket.rs @@ -136,17 +136,17 @@ pub fn resolve_interface_ip(name: &str, want_ipv6: bool) -> Option { if let Ok(addrs) = getifaddrs() { for iface in addrs { - if iface.interface_name == name { - if let Some(address) = iface.address { - if let Some(v4) = address.as_sockaddr_in() { - if !want_ipv6 { - return Some(IpAddr::V4(v4.ip())); - } - } else if let Some(v6) = address.as_sockaddr_in6() { - if want_ipv6 { - return Some(IpAddr::V6(v6.ip().clone())); - } + if iface.interface_name == name + && let Some(address) = iface.address + { + if let Some(v4) = address.as_sockaddr_in() { + if !want_ipv6 { + return Some(IpAddr::V4(v4.ip())); } + } else if let Some(v6) = address.as_sockaddr_in6() + && want_ipv6 + { + return Some(IpAddr::V6(v6.ip())); } } } diff --git a/src/transport/socks.rs b/src/transport/socks.rs index 188d369..8196b52 100644 --- a/src/transport/socks.rs +++ b/src/transport/socks.rs @@ -27,11 +27,11 @@ pub async fn connect_socks4( buf.extend_from_slice(user); buf.push(0); // NULL - stream.write_all(&buf).await.map_err(|e| ProxyError::Io(e))?; + stream.write_all(&buf).await.map_err(ProxyError::Io)?; // Response: VN (1) | CD (1) | DSTPORT (2) | DSTIP (4) let mut resp = [0u8; 8]; - stream.read_exact(&mut resp).await.map_err(|e| ProxyError::Io(e))?; + stream.read_exact(&mut resp).await.map_err(ProxyError::Io)?; if resp[1] != 90 { return Err(ProxyError::Proxy(format!("SOCKS4 request rejected: code {}", resp[1]))); @@ -56,10 +56,10 @@ pub async fn connect_socks5( let mut buf = vec![5u8, methods.len() as u8]; 
buf.extend_from_slice(&methods); - stream.write_all(&buf).await.map_err(|e| ProxyError::Io(e))?; + stream.write_all(&buf).await.map_err(ProxyError::Io)?; let mut resp = [0u8; 2]; - stream.read_exact(&mut resp).await.map_err(|e| ProxyError::Io(e))?; + stream.read_exact(&mut resp).await.map_err(ProxyError::Io)?; if resp[0] != 5 { return Err(ProxyError::Proxy("Invalid SOCKS5 version".to_string())); @@ -80,10 +80,10 @@ pub async fn connect_socks5( auth_buf.push(p_bytes.len() as u8); auth_buf.extend_from_slice(p_bytes); - stream.write_all(&auth_buf).await.map_err(|e| ProxyError::Io(e))?; + stream.write_all(&auth_buf).await.map_err(ProxyError::Io)?; let mut auth_resp = [0u8; 2]; - stream.read_exact(&mut auth_resp).await.map_err(|e| ProxyError::Io(e))?; + stream.read_exact(&mut auth_resp).await.map_err(ProxyError::Io)?; if auth_resp[1] != 0 { return Err(ProxyError::Proxy("SOCKS5 authentication failed".to_string())); @@ -112,11 +112,11 @@ pub async fn connect_socks5( req.extend_from_slice(&target.port().to_be_bytes()); - stream.write_all(&req).await.map_err(|e| ProxyError::Io(e))?; + stream.write_all(&req).await.map_err(ProxyError::Io)?; // Response let mut head = [0u8; 4]; - stream.read_exact(&mut head).await.map_err(|e| ProxyError::Io(e))?; + stream.read_exact(&mut head).await.map_err(ProxyError::Io)?; if head[1] != 0 { return Err(ProxyError::Proxy(format!("SOCKS5 request failed: code {}", head[1]))); @@ -126,17 +126,17 @@ pub async fn connect_socks5( match head[3] { 1 => { // IPv4 let mut addr = [0u8; 4 + 2]; - stream.read_exact(&mut addr).await.map_err(|e| ProxyError::Io(e))?; + stream.read_exact(&mut addr).await.map_err(ProxyError::Io)?; }, 3 => { // Domain let mut len = [0u8; 1]; - stream.read_exact(&mut len).await.map_err(|e| ProxyError::Io(e))?; + stream.read_exact(&mut len).await.map_err(ProxyError::Io)?; let mut addr = vec![0u8; len[0] as usize + 2]; - stream.read_exact(&mut addr).await.map_err(|e| ProxyError::Io(e))?; + stream.read_exact(&mut 
addr).await.map_err(ProxyError::Io)?; }, 4 => { // IPv6 let mut addr = [0u8; 16 + 2]; - stream.read_exact(&mut addr).await.map_err(|e| ProxyError::Io(e))?; + stream.read_exact(&mut addr).await.map_err(ProxyError::Io)?; }, _ => return Err(ProxyError::Proxy("Invalid address type in SOCKS5 response".to_string())), } diff --git a/src/transport/upstream.rs b/src/transport/upstream.rs index 887fa99..e2198a8 100644 --- a/src/transport/upstream.rs +++ b/src/transport/upstream.rs @@ -57,9 +57,10 @@ impl LatencyEma { // ============= Per-DC IP Preference Tracking ============= /// Tracks which IP version works for each DC -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] pub enum IpPreference { /// Not yet tested + #[default] Unknown, /// IPv6 works PreferV6, @@ -71,12 +72,6 @@ pub enum IpPreference { Unavailable, } -impl Default for IpPreference { - fn default() -> Self { - Self::Unknown - } -} - // ============= Upstream State ============= #[derive(Debug)] @@ -112,7 +107,7 @@ impl UpstreamState { if abs_dc == 0 { return None; } - if abs_dc >= 1 && abs_dc <= NUM_DCS { + if (1..=NUM_DCS).contains(&abs_dc) { Some(abs_dc - 1) } else { // Unknown DC → default cluster (DC 2, index 1) @@ -122,10 +117,10 @@ impl UpstreamState { /// Get latency for a specific DC, falling back to average across all known DCs fn effective_latency(&self, dc_idx: Option) -> Option { - if let Some(di) = dc_idx.and_then(Self::dc_array_idx) { - if let Some(ms) = self.dc_latency[di].get() { - return Some(ms); - } + if let Some(di) = dc_idx.and_then(Self::dc_array_idx) + && let Some(ms) = self.dc_latency[di].get() + { + return Some(ms); } let (sum, count) = self.dc_latency.iter() @@ -582,7 +577,7 @@ impl UpstreamManager { let result = tokio::time::timeout( Duration::from_secs(DC_PING_TIMEOUT_SECS), - self.ping_single_dc(&upstream_config, Some(bind_rr.clone()), addr_v6) + self.ping_single_dc(upstream_config, Some(bind_rr.clone()), addr_v6) ).await; let 
ping_result = match result { @@ -633,7 +628,7 @@ impl UpstreamManager { let result = tokio::time::timeout( Duration::from_secs(DC_PING_TIMEOUT_SECS), - self.ping_single_dc(&upstream_config, Some(bind_rr.clone()), addr_v4) + self.ping_single_dc(upstream_config, Some(bind_rr.clone()), addr_v4) ).await; let ping_result = match result { @@ -696,7 +691,7 @@ impl UpstreamManager { } let result = tokio::time::timeout( Duration::from_secs(DC_PING_TIMEOUT_SECS), - self.ping_single_dc(&upstream_config, Some(bind_rr.clone()), addr) + self.ping_single_dc(upstream_config, Some(bind_rr.clone()), addr) ).await; let ping_result = match result { diff --git a/src/util/ip.rs b/src/util/ip.rs index f3e774f..36a5759 100644 --- a/src/util/ip.rs +++ b/src/util/ip.rs @@ -67,54 +67,56 @@ pub async fn detect_ip() -> IpInfo { // Try to get local interface IP first (default gateway interface) // We connect to Google DNS to find out which interface is used for routing - if let Some(ip) = get_local_ip("8.8.8.8:80") { - if ip.is_ipv4() && !ip.is_loopback() { - info.ipv4 = Some(ip); - debug!(ip = %ip, "Detected local IPv4 address via routing"); - } + if let Some(ip) = get_local_ip("8.8.8.8:80") + && ip.is_ipv4() + && !ip.is_loopback() + { + info.ipv4 = Some(ip); + debug!(ip = %ip, "Detected local IPv4 address via routing"); } - if let Some(ip) = get_local_ipv6("[2001:4860:4860::8888]:80") { - if ip.is_ipv6() && !ip.is_loopback() { - info.ipv6 = Some(ip); - debug!(ip = %ip, "Detected local IPv6 address via routing"); - } + if let Some(ip) = get_local_ipv6("[2001:4860:4860::8888]:80") + && ip.is_ipv6() + && !ip.is_loopback() + { + info.ipv6 = Some(ip); + debug!(ip = %ip, "Detected local IPv6 address via routing"); } - - // If local detection failed or returned private IP (and we want public), + + // If local detection failed or returned private IP (and we want public), // or just as a fallback/verification, we might want to check external services. 
- // However, the requirement is: "if IP for listening is not set... it should be IP from interface... + // However, the requirement is: "if IP for listening is not set... it should be IP from interface... // if impossible - request external resources". - + // So if we found a local IP, we might be good. But often servers are behind NAT. // If the local IP is private, we probably want the public IP for the tg:// link. // Let's check if the detected IPs are private. - - let need_external_v4 = info.ipv4.map_or(true, |ip| is_private_ip(ip)); - let need_external_v6 = info.ipv6.map_or(true, |ip| is_private_ip(ip)); + + let need_external_v4 = info.ipv4.is_none_or(is_private_ip); + let need_external_v6 = info.ipv6.is_none_or(is_private_ip); if need_external_v4 { debug!("Local IPv4 is private or missing, checking external services..."); for url in IPV4_URLS { - if let Some(ip) = fetch_ip(url).await { - if ip.is_ipv4() { - info.ipv4 = Some(ip); - debug!(ip = %ip, "Detected public IPv4 address"); - break; - } + if let Some(ip) = fetch_ip(url).await + && ip.is_ipv4() + { + info.ipv4 = Some(ip); + debug!(ip = %ip, "Detected public IPv4 address"); + break; } } } - + if need_external_v6 { debug!("Local IPv6 is private or missing, checking external services..."); for url in IPV6_URLS { - if let Some(ip) = fetch_ip(url).await { - if ip.is_ipv6() { - info.ipv6 = Some(ip); - debug!(ip = %ip, "Detected public IPv6 address"); - break; - } + if let Some(ip) = fetch_ip(url).await + && ip.is_ipv6() + { + info.ipv6 = Some(ip); + debug!(ip = %ip, "Detected public IPv6 address"); + break; } } } diff --git a/src/util/time.rs b/src/util/time.rs index 310b015..07ea0ba 100644 --- a/src/util/time.rs +++ b/src/util/time.rs @@ -67,15 +67,15 @@ pub async fn check_time_sync() -> Option { #[allow(dead_code)] pub async fn time_sync_task(check_interval: Duration) -> ! { loop { - if let Some(result) = check_time_sync().await { - if result.is_skewed { - error!( - "System clock is off by {} seconds. 
Please sync your clock.", - result.skew_secs - ); - } + if let Some(result) = check_time_sync().await + && result.is_skewed + { + error!( + "System clock is off by {} seconds. Please sync your clock.", + result.skew_secs + ); } - + tokio::time::sleep(check_interval).await; } } \ No newline at end of file From 50e15896b33cd5f7fb5ee09c83e91f9f5d9c224f Mon Sep 17 00:00:00 2001 From: Dimasssss Date: Tue, 24 Feb 2026 09:02:47 +0300 Subject: [PATCH 29/98] Update config.toml MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 2 раза добавил параметр me_reinit_drain_timeout_secs --- config.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/config.toml b/config.toml index 375cd7f..fd76fed 100644 --- a/config.toml +++ b/config.toml @@ -46,7 +46,6 @@ update_every = 7200 # Resolve the active updater interval crypto_pending_buffer = 262144 # Max pending ciphertext buffer per client writer (bytes). Controls FakeTLS backpressure vs throughput. max_client_frame = 16777216 # Maximum allowed client MTProto frame size (bytes). desync_all_full = false # Emit full crypto-desync forensic logs for every event. When false, full forensic details are emitted once per key window. -me_reinit_drain_timeout_secs = 300 # Drain timeout in seconds for stale ME writers after endpoint map changes. Set to 0 to keep stale writers draining indefinitely (no force-close). auto_degradation_enabled = true # Enable auto-degradation from ME to Direct-DC. degradation_min_unavailable_dc_groups = 2 # Minimum unavailable ME DC groups before degrading. hardswap = true # Enable C-like hard-swap for ME pool generations. When true, Telemt prewarms a new generation and switches once full coverage is reached. 
From d2f08fb70791a4297ab96f22b6471524a545ac54 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Tue, 24 Feb 2026 18:19:39 +0300 Subject: [PATCH 30/98] ME Soft Reinit tuning Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com> --- src/config/defaults.rs | 20 ++ src/config/load.rs | 78 +++++++ src/config/types.rs | 25 ++ src/main.rs | 43 ++-- src/transport/middle_proxy/config_updater.rs | 228 ++++++++++++++++--- src/transport/middle_proxy/secret.rs | 52 +++-- 6 files changed, 386 insertions(+), 60 deletions(-) diff --git a/src/config/defaults.rs b/src/config/defaults.rs index b9b0da1..6b80ede 100644 --- a/src/config/defaults.rs +++ b/src/config/defaults.rs @@ -182,6 +182,26 @@ pub(crate) fn default_update_every_secs() -> u64 { 30 * 60 } +pub(crate) fn default_me_config_stable_snapshots() -> u8 { + 2 +} + +pub(crate) fn default_me_config_apply_cooldown_secs() -> u64 { + 300 +} + +pub(crate) fn default_proxy_secret_stable_snapshots() -> u8 { + 2 +} + +pub(crate) fn default_proxy_secret_rotate_runtime() -> bool { + true +} + +pub(crate) fn default_proxy_secret_len_max() -> usize { + 256 +} + pub(crate) fn default_me_reinit_drain_timeout_secs() -> u64 { 120 } diff --git a/src/config/load.rs b/src/config/load.rs index 750e0dc..be34efa 100644 --- a/src/config/load.rs +++ b/src/config/load.rs @@ -147,6 +147,24 @@ impl ProxyConfig { } } + if config.general.me_config_stable_snapshots == 0 { + return Err(ProxyError::Config( + "general.me_config_stable_snapshots must be > 0".to_string(), + )); + } + + if config.general.proxy_secret_stable_snapshots == 0 { + return Err(ProxyError::Config( + "general.proxy_secret_stable_snapshots must be > 0".to_string(), + )); + } + + if !(32..=4096).contains(&config.general.proxy_secret_len_max) { + return Err(ProxyError::Config( + "general.proxy_secret_len_max must be within [32, 4096]".to_string(), + )); + } + if !(0.0..=1.0).contains(&config.general.me_pool_min_fresh_ratio) { return 
Err(ProxyError::Config( "general.me_pool_min_fresh_ratio must be within [0.0, 1.0]".to_string(), @@ -462,6 +480,66 @@ mod tests { let _ = std::fs::remove_file(path); } + #[test] + fn me_config_stable_snapshots_zero_is_rejected() { + let toml = r#" + [general] + me_config_stable_snapshots = 0 + + [censorship] + tls_domain = "example.com" + + [access.users] + user = "00000000000000000000000000000000" + "#; + let dir = std::env::temp_dir(); + let path = dir.join("telemt_me_config_stable_snapshots_zero_test.toml"); + std::fs::write(&path, toml).unwrap(); + let err = ProxyConfig::load(&path).unwrap_err().to_string(); + assert!(err.contains("general.me_config_stable_snapshots must be > 0")); + let _ = std::fs::remove_file(path); + } + + #[test] + fn proxy_secret_stable_snapshots_zero_is_rejected() { + let toml = r#" + [general] + proxy_secret_stable_snapshots = 0 + + [censorship] + tls_domain = "example.com" + + [access.users] + user = "00000000000000000000000000000000" + "#; + let dir = std::env::temp_dir(); + let path = dir.join("telemt_proxy_secret_stable_snapshots_zero_test.toml"); + std::fs::write(&path, toml).unwrap(); + let err = ProxyConfig::load(&path).unwrap_err().to_string(); + assert!(err.contains("general.proxy_secret_stable_snapshots must be > 0")); + let _ = std::fs::remove_file(path); + } + + #[test] + fn proxy_secret_len_max_out_of_range_is_rejected() { + let toml = r#" + [general] + proxy_secret_len_max = 16 + + [censorship] + tls_domain = "example.com" + + [access.users] + user = "00000000000000000000000000000000" + "#; + let dir = std::env::temp_dir(); + let path = dir.join("telemt_proxy_secret_len_max_out_of_range_test.toml"); + std::fs::write(&path, toml).unwrap(); + let err = ProxyConfig::load(&path).unwrap_err().to_string(); + assert!(err.contains("general.proxy_secret_len_max must be within [32, 4096]")); + let _ = std::fs::remove_file(path); + } + #[test] fn me_pool_min_fresh_ratio_out_of_range_is_rejected() { let toml = r#" diff --git 
a/src/config/types.rs b/src/config/types.rs index c9ceea4..bd9697e 100644 --- a/src/config/types.rs +++ b/src/config/types.rs @@ -267,6 +267,26 @@ pub struct GeneralConfig { #[serde(default)] pub update_every: Option, + /// Number of identical getProxyConfig snapshots required before applying ME map updates. + #[serde(default = "default_me_config_stable_snapshots")] + pub me_config_stable_snapshots: u8, + + /// Cooldown in seconds between applied ME map updates. + #[serde(default = "default_me_config_apply_cooldown_secs")] + pub me_config_apply_cooldown_secs: u64, + + /// Number of identical getProxySecret snapshots required before runtime secret rotation. + #[serde(default = "default_proxy_secret_stable_snapshots")] + pub proxy_secret_stable_snapshots: u8, + + /// Enable runtime proxy-secret rotation from getProxySecret. + #[serde(default = "default_proxy_secret_rotate_runtime")] + pub proxy_secret_rotate_runtime: bool, + + /// Maximum allowed proxy-secret length in bytes for startup and runtime refresh. + #[serde(default = "default_proxy_secret_len_max")] + pub proxy_secret_len_max: usize, + /// Drain-TTL in seconds for stale ME writers after endpoint map changes. /// During TTL, stale writers may be used only as fallback for new bindings. 
#[serde(default = "default_me_pool_drain_ttl_secs")] @@ -346,6 +366,11 @@ impl Default for GeneralConfig { hardswap: default_hardswap(), fast_mode_min_tls_record: default_fast_mode_min_tls_record(), update_every: Some(default_update_every_secs()), + me_config_stable_snapshots: default_me_config_stable_snapshots(), + me_config_apply_cooldown_secs: default_me_config_apply_cooldown_secs(), + proxy_secret_stable_snapshots: default_proxy_secret_stable_snapshots(), + proxy_secret_rotate_runtime: default_proxy_secret_rotate_runtime(), + proxy_secret_len_max: default_proxy_secret_len_max(), me_pool_drain_ttl_secs: default_me_pool_drain_ttl_secs(), me_pool_min_fresh_ratio: default_me_pool_min_fresh_ratio(), me_reinit_drain_timeout_secs: default_me_reinit_drain_timeout_secs(), diff --git a/src/main.rs b/src/main.rs index 7264239..1c7b39c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -298,25 +298,30 @@ async fn main() -> std::result::Result<(), Box> { // proxy-secret is from: https://core.telegram.org/getProxySecret // ============================================================= let proxy_secret_path = config.general.proxy_secret_path.as_deref(); -match crate::transport::middle_proxy::fetch_proxy_secret(proxy_secret_path).await { - Ok(proxy_secret) => { - info!( - secret_len = proxy_secret.len(), - key_sig = format_args!( - "0x{:08x}", - if proxy_secret.len() >= 4 { - u32::from_le_bytes([ - proxy_secret[0], - proxy_secret[1], - proxy_secret[2], - proxy_secret[3], - ]) - } else { - 0 - } - ), - "Proxy-secret loaded" - ); + match crate::transport::middle_proxy::fetch_proxy_secret( + proxy_secret_path, + config.general.proxy_secret_len_max, + ) + .await + { + Ok(proxy_secret) => { + info!( + secret_len = proxy_secret.len(), + key_sig = format_args!( + "0x{:08x}", + if proxy_secret.len() >= 4 { + u32::from_le_bytes([ + proxy_secret[0], + proxy_secret[1], + proxy_secret[2], + proxy_secret[3], + ]) + } else { + 0 + } + ), + "Proxy-secret loaded" + ); // Load ME config (v4/v6) + 
default DC let mut cfg_v4 = fetch_proxy_config( diff --git a/src/transport/middle_proxy/config_updater.rs b/src/transport/middle_proxy/config_updater.rs index 56d5b81..fc9ed3d 100644 --- a/src/transport/middle_proxy/config_updater.rs +++ b/src/transport/middle_proxy/config_updater.rs @@ -1,4 +1,5 @@ use std::collections::HashMap; +use std::hash::{DefaultHasher, Hash, Hasher}; use std::net::IpAddr; use std::sync::Arc; use std::time::Duration; @@ -11,7 +12,7 @@ use crate::config::ProxyConfig; use crate::error::Result; use super::MePool; -use super::secret::download_proxy_secret; +use super::secret::download_proxy_secret_with_max_len; use crate::crypto::SecureRandom; use std::time::SystemTime; @@ -39,6 +40,92 @@ pub struct ProxyConfigData { pub default_dc: Option, } +#[derive(Debug, Default)] +struct StableSnapshot { + candidate_hash: Option, + candidate_hits: u8, + applied_hash: Option, +} + +impl StableSnapshot { + fn observe(&mut self, hash: u64) -> u8 { + if self.candidate_hash == Some(hash) { + self.candidate_hits = self.candidate_hits.saturating_add(1); + } else { + self.candidate_hash = Some(hash); + self.candidate_hits = 1; + } + self.candidate_hits + } + + fn is_applied(&self, hash: u64) -> bool { + self.applied_hash == Some(hash) + } + + fn mark_applied(&mut self, hash: u64) { + self.applied_hash = Some(hash); + } +} + +#[derive(Debug, Default)] +struct UpdaterState { + config_v4: StableSnapshot, + config_v6: StableSnapshot, + secret: StableSnapshot, + last_map_apply_at: Option, +} + +fn hash_proxy_config(cfg: &ProxyConfigData) -> u64 { + let mut hasher = DefaultHasher::new(); + cfg.default_dc.hash(&mut hasher); + + let mut by_dc: Vec<(i32, Vec<(IpAddr, u16)>)> = + cfg.map.iter().map(|(dc, addrs)| (*dc, addrs.clone())).collect(); + by_dc.sort_by_key(|(dc, _)| *dc); + for (dc, mut addrs) in by_dc { + dc.hash(&mut hasher); + addrs.sort_unstable(); + for (ip, port) in addrs { + ip.hash(&mut hasher); + port.hash(&mut hasher); + } + } + + hasher.finish() +} + +fn 
hash_secret(secret: &[u8]) -> u64 { + let mut hasher = DefaultHasher::new(); + secret.hash(&mut hasher); + hasher.finish() +} + +fn map_apply_cooldown_ready( + last_applied: Option, + cooldown: Duration, +) -> bool { + if cooldown.is_zero() { + return true; + } + match last_applied { + Some(ts) => ts.elapsed() >= cooldown, + None => true, + } +} + +fn map_apply_cooldown_remaining_secs( + last_applied: tokio::time::Instant, + cooldown: Duration, +) -> u64 { + if cooldown.is_zero() { + return 0; + } + cooldown + .checked_sub(last_applied.elapsed()) + .map(|d| d.as_secs()) + .unwrap_or(0) +} + fn parse_host_port(s: &str) -> Option<(IpAddr, u16)> { if let Some(bracket_end) = s.rfind(']') && s.starts_with('[') @@ -130,7 +217,12 @@ pub async fn fetch_proxy_config(url: &str) -> Result { Ok(ProxyConfigData { map, default_dc }) } -async fn run_update_cycle(pool: &Arc, rng: &Arc, cfg: &ProxyConfig) { +async fn run_update_cycle( + pool: &Arc, + rng: &Arc, + cfg: &ProxyConfig, + state: &mut UpdaterState, +) { pool.update_runtime_reinit_policy( cfg.general.hardswap, cfg.general.me_pool_drain_ttl_secs, @@ -138,33 +230,93 @@ async fn run_update_cycle(pool: &Arc, rng: &Arc, cfg: &Pro cfg.general.me_pool_min_fresh_ratio, ); + let required_cfg_snapshots = cfg.general.me_config_stable_snapshots.max(1); + let required_secret_snapshots = cfg.general.proxy_secret_stable_snapshots.max(1); + let apply_cooldown = Duration::from_secs(cfg.general.me_config_apply_cooldown_secs); let mut maps_changed = false; - // Update proxy config v4 + let mut ready_v4: Option<(ProxyConfigData, u64)> = None; let cfg_v4 = retry_fetch("https://core.telegram.org/getProxyConfig").await; if let Some(cfg_v4) = cfg_v4 { - let changed = pool.update_proxy_maps(cfg_v4.map.clone(), None).await; - if let Some(dc) = cfg_v4.default_dc { - pool.default_dc - .store(dc, std::sync::atomic::Ordering::Relaxed); - } - if changed { - maps_changed = true; - info!("ME config updated (v4)"); + let cfg_v4_hash = 
hash_proxy_config(&cfg_v4); + let stable_hits = state.config_v4.observe(cfg_v4_hash); + if stable_hits < required_cfg_snapshots { + debug!( + stable_hits, + required_cfg_snapshots, + snapshot = format_args!("0x{cfg_v4_hash:016x}"), + "ME config v4 candidate observed" + ); + } else if state.config_v4.is_applied(cfg_v4_hash) { + debug!( + snapshot = format_args!("0x{cfg_v4_hash:016x}"), + "ME config v4 stable snapshot already applied" + ); } else { - debug!("ME config v4 unchanged"); + ready_v4 = Some((cfg_v4, cfg_v4_hash)); } } - // Update proxy config v6 (optional) + let mut ready_v6: Option<(ProxyConfigData, u64)> = None; let cfg_v6 = retry_fetch("https://core.telegram.org/getProxyConfigV6").await; if let Some(cfg_v6) = cfg_v6 { - let changed = pool.update_proxy_maps(HashMap::new(), Some(cfg_v6.map)).await; - if changed { - maps_changed = true; - info!("ME config updated (v6)"); + let cfg_v6_hash = hash_proxy_config(&cfg_v6); + let stable_hits = state.config_v6.observe(cfg_v6_hash); + if stable_hits < required_cfg_snapshots { + debug!( + stable_hits, + required_cfg_snapshots, + snapshot = format_args!("0x{cfg_v6_hash:016x}"), + "ME config v6 candidate observed" + ); + } else if state.config_v6.is_applied(cfg_v6_hash) { + debug!( + snapshot = format_args!("0x{cfg_v6_hash:016x}"), + "ME config v6 stable snapshot already applied" + ); } else { - debug!("ME config v6 unchanged"); + ready_v6 = Some((cfg_v6, cfg_v6_hash)); + } + } + + if ready_v4.is_some() || ready_v6.is_some() { + if map_apply_cooldown_ready(state.last_map_apply_at, apply_cooldown) { + let update_v4 = ready_v4 + .as_ref() + .map(|(snapshot, _)| snapshot.map.clone()) + .unwrap_or_default(); + let update_v6 = ready_v6 + .as_ref() + .map(|(snapshot, _)| snapshot.map.clone()); + + let changed = pool.update_proxy_maps(update_v4, update_v6).await; + + if let Some((snapshot, hash)) = ready_v4 { + if let Some(dc) = snapshot.default_dc { + pool.default_dc + .store(dc, std::sync::atomic::Ordering::Relaxed); + } 
+ state.config_v4.mark_applied(hash); + } + + if let Some((_snapshot, hash)) = ready_v6 { + state.config_v6.mark_applied(hash); + } + + state.last_map_apply_at = Some(tokio::time::Instant::now()); + + if changed { + maps_changed = true; + info!("ME config update applied after stable-gate"); + } else { + debug!("ME config stable-gate applied with no map delta"); + } + } else if let Some(last) = state.last_map_apply_at { + let wait_secs = map_apply_cooldown_remaining_secs(last, apply_cooldown); + debug!( + wait_secs, + "ME config stable snapshot deferred by cooldown" + ); } } @@ -175,14 +327,37 @@ async fn run_update_cycle(pool: &Arc, rng: &Arc, cfg: &Pro pool.reset_stun_state(); - // Update proxy-secret - match download_proxy_secret().await { - Ok(secret) => { - if pool.update_secret(secret).await { - info!("proxy-secret updated and pool reconnect scheduled"); + if cfg.general.proxy_secret_rotate_runtime { + match download_proxy_secret_with_max_len(cfg.general.proxy_secret_len_max).await { + Ok(secret) => { + let secret_hash = hash_secret(&secret); + let stable_hits = state.secret.observe(secret_hash); + if stable_hits < required_secret_snapshots { + debug!( + stable_hits, + required_secret_snapshots, + snapshot = format_args!("0x{secret_hash:016x}"), + "proxy-secret candidate observed" + ); + } else if state.secret.is_applied(secret_hash) { + debug!( + snapshot = format_args!("0x{secret_hash:016x}"), + "proxy-secret stable snapshot already applied" + ); + } else { + let rotated = pool.update_secret(secret).await; + state.secret.mark_applied(secret_hash); + if rotated { + info!("proxy-secret rotated after stable-gate"); + } else { + debug!("proxy-secret stable snapshot confirmed as unchanged"); + } + } } + Err(e) => warn!(error = %e, "proxy-secret update failed"), } - Err(e) => warn!(error = %e, "proxy-secret update failed"), + } else { + debug!("proxy-secret runtime rotation disabled by config"); } } @@ -191,6 +366,7 @@ pub async fn me_config_updater( rng: Arc, mut 
config_rx: watch::Receiver>, ) { + let mut state = UpdaterState::default(); let mut update_every_secs = config_rx .borrow() .general @@ -207,7 +383,7 @@ pub async fn me_config_updater( tokio::select! { _ = &mut sleep => { let cfg = config_rx.borrow().clone(); - run_update_cycle(&pool, &rng, cfg.as_ref()).await; + run_update_cycle(&pool, &rng, cfg.as_ref(), &mut state).await; let refreshed_secs = cfg.general.effective_update_every_secs().max(1); if refreshed_secs != update_every_secs { info!( @@ -245,7 +421,7 @@ pub async fn me_config_updater( ); update_every_secs = new_secs; update_every = Duration::from_secs(update_every_secs); - run_update_cycle(&pool, &rng, cfg.as_ref()).await; + run_update_cycle(&pool, &rng, cfg.as_ref(), &mut state).await; next_tick = tokio::time::Instant::now() + update_every; } else { info!( diff --git a/src/transport/middle_proxy/secret.rs b/src/transport/middle_proxy/secret.rs index 69a3198..4991d32 100644 --- a/src/transport/middle_proxy/secret.rs +++ b/src/transport/middle_proxy/secret.rs @@ -4,12 +4,42 @@ use httpdate; use crate::error::{ProxyError, Result}; +pub const PROXY_SECRET_MIN_LEN: usize = 32; + +pub(super) fn validate_proxy_secret_len(data_len: usize, max_len: usize) -> Result<()> { + if max_len < PROXY_SECRET_MIN_LEN { + return Err(ProxyError::Proxy(format!( + "proxy-secret max length is invalid: {} bytes (must be >= {})", + max_len, + PROXY_SECRET_MIN_LEN + ))); + } + + if data_len < PROXY_SECRET_MIN_LEN { + return Err(ProxyError::Proxy(format!( + "proxy-secret too short: {} bytes (need >= {})", + data_len, + PROXY_SECRET_MIN_LEN + ))); + } + + if data_len > max_len { + return Err(ProxyError::Proxy(format!( + "proxy-secret too long: {} bytes (limit = {})", + data_len, + max_len + ))); + } + + Ok(()) +} + /// Fetch Telegram proxy-secret binary. 
-pub async fn fetch_proxy_secret(cache_path: Option<&str>) -> Result> { +pub async fn fetch_proxy_secret(cache_path: Option<&str>, max_len: usize) -> Result> { let cache = cache_path.unwrap_or("proxy-secret"); // 1) Try fresh download first. - match download_proxy_secret().await { + match download_proxy_secret_with_max_len(max_len).await { Ok(data) => { if let Err(e) = tokio::fs::write(cache, &data).await { warn!(error = %e, "Failed to cache proxy-secret (non-fatal)"); @@ -24,9 +54,9 @@ pub async fn fetch_proxy_secret(cache_path: Option<&str>) -> Result> { } } - // 2) Fallback to cache/file regardless of age; require len>=32. + // 2) Fallback to cache/file regardless of age; require len in bounds. match tokio::fs::read(cache).await { - Ok(data) if data.len() >= 32 => { + Ok(data) if validate_proxy_secret_len(data.len(), max_len).is_ok() => { let age_hours = tokio::fs::metadata(cache) .await .ok() @@ -41,17 +71,14 @@ pub async fn fetch_proxy_secret(cache_path: Option<&str>) -> Result> { ); Ok(data) } - Ok(data) => Err(ProxyError::Proxy(format!( - "Cached proxy-secret too short: {} bytes (need >= 32)", - data.len() - ))), + Ok(data) => validate_proxy_secret_len(data.len(), max_len).map(|_| data), Err(e) => Err(ProxyError::Proxy(format!( "Failed to read proxy-secret cache after download failure: {e}" ))), } } -pub async fn download_proxy_secret() -> Result> { +pub async fn download_proxy_secret_with_max_len(max_len: usize) -> Result> { let resp = reqwest::get("https://core.telegram.org/getProxySecret") .await .map_err(|e| ProxyError::Proxy(format!("Failed to download proxy-secret: {e}")))?; @@ -84,12 +111,7 @@ pub async fn download_proxy_secret() -> Result> { .map_err(|e| ProxyError::Proxy(format!("Read proxy-secret body: {e}")))? 
.to_vec(); - if data.len() < 32 { - return Err(ProxyError::Proxy(format!( - "proxy-secret too short: {} bytes (need >= 32)", - data.len() - ))); - } + validate_proxy_secret_len(data.len(), max_len)?; info!(len = data.len(), "Downloaded proxy-secret OK"); Ok(data) From c13c1cf7e3944a9393bdd0e86f51094c15354ff1 Mon Sep 17 00:00:00 2001 From: Dimasssss Date: Tue, 24 Feb 2026 18:39:46 +0300 Subject: [PATCH 31/98] Update config.toml --- config.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config.toml b/config.toml index fd76fed..48a9047 100644 --- a/config.toml +++ b/config.toml @@ -23,7 +23,7 @@ middle_proxy_nat_stun = "stun.l.google.com:19302" # Optional fallback STUN servers list. middle_proxy_nat_stun_servers = ["stun1.l.google.com:19302", "stun2.l.google.com:19302"] # Desired number of concurrent ME writers in pool. -middle_proxy_pool_size = 16 +middle_proxy_pool_size = 8 # Pre-initialized warm-standby ME connections kept idle. middle_proxy_warm_standby = 8 # Ignore STUN/interface mismatch and keep ME enabled even if IP differs. 
From b1cd7f97273b17da8b7429578d5e264c18c1fa76 Mon Sep 17 00:00:00 2001 From: badcdd <114914117+badcdd@users.noreply.github.com> Date: Tue, 24 Feb 2026 18:59:37 +0300 Subject: [PATCH 32/98] fix similar username in discovered items --- tools/zbx_telemt_template.yaml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/zbx_telemt_template.yaml b/tools/zbx_telemt_template.yaml index 3f3b5bc..493f3f2 100644 --- a/tools/zbx_telemt_template.yaml +++ b/tools/zbx_telemt_template.yaml @@ -172,7 +172,7 @@ zabbix_export: preprocessing: - type: PROMETHEUS_PATTERN parameters: - - 'telemt_user_connections_current{user=~"{#TELEMT_USER}"}' + - 'telemt_user_connections_current{user="{#TELEMT_USER}"}' - value - '' master_item: @@ -188,7 +188,7 @@ zabbix_export: preprocessing: - type: PROMETHEUS_PATTERN parameters: - - 'telemt_user_msgs_from_client{user=~"{#TELEMT_USER}"}' + - 'telemt_user_msgs_from_client{user="{#TELEMT_USER}"}' - value - '' master_item: @@ -204,7 +204,7 @@ zabbix_export: preprocessing: - type: PROMETHEUS_PATTERN parameters: - - 'telemt_user_msgs_to_client{user=~"{#TELEMT_USER}"}' + - 'telemt_user_msgs_to_client{user="{#TELEMT_USER}"}' - value - '' master_item: @@ -221,7 +221,7 @@ zabbix_export: preprocessing: - type: PROMETHEUS_PATTERN parameters: - - 'telemt_user_octets_from_client{user=~"{#TELEMT_USER}"}' + - 'telemt_user_octets_from_client{user="{#TELEMT_USER}"}' - value - '' master_item: @@ -238,7 +238,7 @@ zabbix_export: preprocessing: - type: PROMETHEUS_PATTERN parameters: - - 'telemt_user_octets_to_client{user=~"{#TELEMT_USER}"}' + - 'telemt_user_octets_to_client{user="{#TELEMT_USER}"}' - value - '' master_item: @@ -254,7 +254,7 @@ zabbix_export: preprocessing: - type: PROMETHEUS_PATTERN parameters: - - 'telemt_user_connections_total{user=~"{#TELEMT_USER}"}' + - 'telemt_user_connections_total{user="{#TELEMT_USER}"}' - value - '' master_item: From ee07325eba54d7f7132517f83c371861c0227e6d Mon Sep 17 00:00:00 2001 From: Alexey 
<247128645+axkurcom@users.noreply.github.com> Date: Tue, 24 Feb 2026 21:12:44 +0300 Subject: [PATCH 33/98] Update Cargo.toml --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index fd1d892..6bce323 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "telemt" -version = "3.0.13" +version = "3.0.14" edition = "2024" [dependencies] From b00b87032b989fdc323714b4721a9a44507a386f Mon Sep 17 00:00:00 2001 From: Dimasssss Date: Tue, 24 Feb 2026 22:10:49 +0300 Subject: [PATCH 34/98] Update config.toml --- config.toml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/config.toml b/config.toml index 48a9047..0bebe1d 100644 --- a/config.toml +++ b/config.toml @@ -52,6 +52,12 @@ hardswap = true # Enable C-like hard-swap for ME pool me_pool_drain_ttl_secs = 90 # Drain-TTL in seconds for stale ME writers after endpoint map changes. During TTL, stale writers may be used only as fallback for new bindings. me_pool_min_fresh_ratio = 0.8 # Minimum desired-DC coverage ratio required before draining stale writers. Range: 0.0..=1.0. me_reinit_drain_timeout_secs = 120 # Drain timeout in seconds for stale ME writers after endpoint map changes. Set to 0 to keep stale writers draining indefinitely (no force-close). +me_config_stable_snapshots = 2 # Number of identical getProxyConfig snapshots required before applying ME map updates. +me_config_apply_cooldown_secs = 300 # Cooldown in seconds between applied ME map updates. +proxy_secret_rotate_runtime = true # Enable runtime proxy-secret rotation from getProxySecret. +proxy_secret_stable_snapshots = 2 # Number of identical getProxySecret snapshots required before runtime secret rotation. +proxy_secret_len_max = 256 # Maximum allowed proxy-secret length in bytes for startup and runtime refresh. 
+ [general.modes] classic = false From 692d9476b99d09fb3ed5014a31dd4a10f286e52d Mon Sep 17 00:00:00 2001 From: Dimasssss Date: Tue, 24 Feb 2026 22:11:15 +0300 Subject: [PATCH 35/98] Update config.toml --- config.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/config.toml b/config.toml index 0bebe1d..c1dc73f 100644 --- a/config.toml +++ b/config.toml @@ -58,7 +58,6 @@ proxy_secret_rotate_runtime = true # Enable runtime proxy-secret rotation proxy_secret_stable_snapshots = 2 # Number of identical getProxySecret snapshots required before runtime secret rotation. proxy_secret_len_max = 256 # Maximum allowed proxy-secret length in bytes for startup and runtime refresh. - [general.modes] classic = false secure = false From 4a95f6d1959dca06f5078fe8fba4c6cf377fe3fe Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Tue, 24 Feb 2026 22:59:59 +0300 Subject: [PATCH 36/98] ME Pool Health + Rotation Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com> --- src/config/defaults.rs | 4 + src/config/load.rs | 46 +++++ src/config/types.rs | 10 + src/main.rs | 24 +-- src/transport/middle_proxy/health.rs | 110 +++++++---- src/transport/middle_proxy/pool.rs | 241 +++++++++++++++++++++---- src/transport/middle_proxy/rotation.rs | 105 +++++++---- 7 files changed, 424 insertions(+), 116 deletions(-) diff --git a/src/config/defaults.rs b/src/config/defaults.rs index 6b80ede..4f563ba 100644 --- a/src/config/defaults.rs +++ b/src/config/defaults.rs @@ -182,6 +182,10 @@ pub(crate) fn default_update_every_secs() -> u64 { 30 * 60 } +pub(crate) fn default_me_reinit_every_secs() -> u64 { + 15 * 60 +} + pub(crate) fn default_me_config_stable_snapshots() -> u8 { 2 } diff --git a/src/config/load.rs b/src/config/load.rs index be34efa..c18c84f 100644 --- a/src/config/load.rs +++ b/src/config/load.rs @@ -147,6 +147,12 @@ impl ProxyConfig { } } + if config.general.me_reinit_every_secs == 0 { + return Err(ProxyError::Config( + 
"general.me_reinit_every_secs must be > 0".to_string(), + )); + } + if config.general.me_config_stable_snapshots == 0 { return Err(ProxyError::Config( "general.me_config_stable_snapshots must be > 0".to_string(), @@ -480,6 +486,46 @@ mod tests { let _ = std::fs::remove_file(path); } + #[test] + fn me_reinit_every_default_is_set() { + let toml = r#" + [censorship] + tls_domain = "example.com" + + [access.users] + user = "00000000000000000000000000000000" + "#; + let dir = std::env::temp_dir(); + let path = dir.join("telemt_me_reinit_every_default_test.toml"); + std::fs::write(&path, toml).unwrap(); + let cfg = ProxyConfig::load(&path).unwrap(); + assert_eq!( + cfg.general.me_reinit_every_secs, + default_me_reinit_every_secs() + ); + let _ = std::fs::remove_file(path); + } + + #[test] + fn me_reinit_every_zero_is_rejected() { + let toml = r#" + [general] + me_reinit_every_secs = 0 + + [censorship] + tls_domain = "example.com" + + [access.users] + user = "00000000000000000000000000000000" + "#; + let dir = std::env::temp_dir(); + let path = dir.join("telemt_me_reinit_every_zero_test.toml"); + std::fs::write(&path, toml).unwrap(); + let err = ProxyConfig::load(&path).unwrap_err().to_string(); + assert!(err.contains("general.me_reinit_every_secs must be > 0")); + let _ = std::fs::remove_file(path); + } + #[test] fn me_config_stable_snapshots_zero_is_rejected() { let toml = r#" diff --git a/src/config/types.rs b/src/config/types.rs index bd9697e..03417c5 100644 --- a/src/config/types.rs +++ b/src/config/types.rs @@ -267,6 +267,10 @@ pub struct GeneralConfig { #[serde(default)] pub update_every: Option, + /// Periodic ME pool reinitialization interval in seconds. + #[serde(default = "default_me_reinit_every_secs")] + pub me_reinit_every_secs: u64, + /// Number of identical getProxyConfig snapshots required before applying ME map updates. 
#[serde(default = "default_me_config_stable_snapshots")] pub me_config_stable_snapshots: u8, @@ -366,6 +370,7 @@ impl Default for GeneralConfig { hardswap: default_hardswap(), fast_mode_min_tls_record: default_fast_mode_min_tls_record(), update_every: Some(default_update_every_secs()), + me_reinit_every_secs: default_me_reinit_every_secs(), me_config_stable_snapshots: default_me_config_stable_snapshots(), me_config_apply_cooldown_secs: default_me_config_apply_cooldown_secs(), proxy_secret_stable_snapshots: default_proxy_secret_stable_snapshots(), @@ -392,6 +397,11 @@ impl GeneralConfig { .unwrap_or_else(|| self.proxy_secret_auto_reload_secs.min(self.proxy_config_auto_reload_secs)) } + /// Resolve periodic zero-downtime reinit interval for ME writers. + pub fn effective_me_reinit_every_secs(&self) -> u64 { + self.me_reinit_every_secs + } + /// Resolve force-close timeout for stale writers. /// `me_reinit_drain_timeout_secs` remains backward-compatible alias. pub fn effective_me_pool_force_close_secs(&self) -> u64 { diff --git a/src/main.rs b/src/main.rs index 1c7b39c..d9a692d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -391,18 +391,6 @@ async fn main() -> std::result::Result<(), Box> { .await; }); - // Periodic ME connection rotation - let pool_clone_rot = pool.clone(); - let rng_clone_rot = rng.clone(); - tokio::spawn(async move { - crate::transport::middle_proxy::me_rotation_task( - pool_clone_rot, - rng_clone_rot, - std::time::Duration::from_secs(1800), - ) - .await; - }); - Some(pool) } Err(e) => { @@ -712,6 +700,18 @@ async fn main() -> std::result::Result<(), Box> { ) .await; }); + + let pool_clone_rot = pool.clone(); + let rng_clone_rot = rng.clone(); + let config_rx_clone_rot = config_rx.clone(); + tokio::spawn(async move { + crate::transport::middle_proxy::me_rotation_task( + pool_clone_rot, + rng_clone_rot, + config_rx_clone_rot, + ) + .await; + }); } let mut listeners = Vec::new(); diff --git a/src/transport/middle_proxy/health.rs 
b/src/transport/middle_proxy/health.rs index 4bb7e64..dde3354 100644 --- a/src/transport/middle_proxy/health.rs +++ b/src/transport/middle_proxy/health.rs @@ -1,10 +1,9 @@ -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use std::net::SocketAddr; use std::sync::Arc; use std::time::{Duration, Instant}; use tracing::{debug, info, warn}; -use rand::seq::SliceRandom; use rand::Rng; use crate::crypto::SecureRandom; @@ -64,31 +63,43 @@ async fn check_family( IpFamily::V4 => pool.proxy_map_v4.read().await.clone(), IpFamily::V6 => pool.proxy_map_v6.read().await.clone(), }; - let writer_addrs: HashSet = pool + + let mut dc_endpoints = HashMap::>::new(); + for (dc, addrs) in map { + let entry = dc_endpoints.entry(dc.abs()).or_default(); + for (ip, port) in addrs { + entry.push(SocketAddr::new(ip, port)); + } + } + for endpoints in dc_endpoints.values_mut() { + endpoints.sort_unstable(); + endpoints.dedup(); + } + + let mut live_addr_counts = HashMap::::new(); + for writer in pool .writers .read() .await .iter() .filter(|w| !w.draining.load(std::sync::atomic::Ordering::Relaxed)) - .map(|w| w.addr) - .collect(); + { + *live_addr_counts.entry(writer.addr).or_insert(0) += 1; + } - let entries: Vec<(i32, Vec)> = map - .iter() - .map(|(dc, addrs)| { - let list = addrs - .iter() - .map(|(ip, port)| SocketAddr::new(*ip, *port)) - .collect::>(); - (*dc, list) - }) - .collect(); - - for (dc, dc_addrs) in entries { - let has_coverage = dc_addrs.iter().any(|a| writer_addrs.contains(a)); - if has_coverage { + for (dc, endpoints) in dc_endpoints { + if endpoints.is_empty() { continue; } + let required = MePool::required_writers_for_dc(endpoints.len()); + let alive = endpoints + .iter() + .map(|addr| *live_addr_counts.get(addr).unwrap_or(&0)) + .sum::(); + if alive >= required { + continue; + } + let missing = required - alive; let key = (dc, family); let now = Instant::now(); @@ -104,32 +115,45 @@ async fn check_family( } *inflight.entry(key).or_insert(0) += 1; - 
let mut shuffled = dc_addrs.clone(); - shuffled.shuffle(&mut rand::rng()); - let mut success = false; - for addr in shuffled { - let res = tokio::time::timeout(pool.me_one_timeout, pool.connect_one(addr, rng.as_ref())).await; + let mut restored = 0usize; + for _ in 0..missing { + let res = tokio::time::timeout( + pool.me_one_timeout, + pool.connect_endpoints_round_robin(&endpoints, rng.as_ref()), + ) + .await; match res { - Ok(Ok(())) => { - info!(%addr, dc = %dc, ?family, "ME reconnected for DC coverage"); + Ok(true) => { + restored += 1; pool.stats.increment_me_reconnect_success(); - backoff.insert(key, pool.me_reconnect_backoff_base.as_millis() as u64); - let jitter = pool.me_reconnect_backoff_base.as_millis() as u64 / JITTER_FRAC_NUM; - let wait = pool.me_reconnect_backoff_base - + Duration::from_millis(rand::rng().random_range(0..=jitter.max(1))); - next_attempt.insert(key, now + wait); - success = true; - break; } - Ok(Err(e)) => { + Ok(false) => { pool.stats.increment_me_reconnect_attempt(); - debug!(%addr, dc = %dc, error = %e, ?family, "ME reconnect failed") + debug!(dc = %dc, ?family, "ME round-robin reconnect failed") + } + Err(_) => { + pool.stats.increment_me_reconnect_attempt(); + debug!(dc = %dc, ?family, "ME reconnect timed out"); } - Err(_) => debug!(%addr, dc = %dc, ?family, "ME reconnect timed out"), } } - if !success { - pool.stats.increment_me_reconnect_attempt(); + + let now_alive = alive + restored; + if now_alive >= required { + info!( + dc = %dc, + ?family, + alive = now_alive, + required, + endpoint_count = endpoints.len(), + "ME writer floor restored for DC" + ); + backoff.insert(key, pool.me_reconnect_backoff_base.as_millis() as u64); + let jitter = pool.me_reconnect_backoff_base.as_millis() as u64 / JITTER_FRAC_NUM; + let wait = pool.me_reconnect_backoff_base + + Duration::from_millis(rand::rng().random_range(0..=jitter.max(1))); + next_attempt.insert(key, now + wait); + } else { let curr = 
*backoff.get(&key).unwrap_or(&(pool.me_reconnect_backoff_base.as_millis() as u64)); let next_ms = (curr.saturating_mul(2)).min(pool.me_reconnect_backoff_cap.as_millis() as u64); backoff.insert(key, next_ms); @@ -137,7 +161,15 @@ async fn check_family( let wait = Duration::from_millis(next_ms) + Duration::from_millis(rand::rng().random_range(0..=jitter.max(1))); next_attempt.insert(key, now + wait); - warn!(dc = %dc, backoff_ms = next_ms, ?family, "DC has no ME coverage, scheduled reconnect"); + warn!( + dc = %dc, + ?family, + alive = now_alive, + required, + endpoint_count = endpoints.len(), + backoff_ms = next_ms, + "DC writer floor is below required level, scheduled reconnect" + ); } if let Some(v) = inflight.get_mut(&key) { *v = v.saturating_sub(1); diff --git a/src/transport/middle_proxy/pool.rs b/src/transport/middle_proxy/pool.rs index 06fdc96..223d488 100644 --- a/src/transport/middle_proxy/pool.rs +++ b/src/transport/middle_proxy/pool.rs @@ -75,6 +75,7 @@ pub struct MePool { pub(super) rtt_stats: Arc>>, pub(super) nat_reflection_cache: Arc>, pub(super) writer_available: Arc, + pub(super) refill_inflight: Arc>>, pub(super) conn_count: AtomicUsize, pub(super) stats: Arc, pub(super) generation: AtomicU64, @@ -180,6 +181,7 @@ impl MePool { rtt_stats: Arc::new(Mutex::new(HashMap::new())), nat_reflection_cache: Arc::new(Mutex::new(NatReflectionCache::default())), writer_available: Arc::new(Notify::new()), + refill_inflight: Arc::new(Mutex::new(HashSet::new())), conn_count: AtomicUsize::new(0), generation: AtomicU64::new(1), hardswap: AtomicBool::new(hardswap), @@ -324,34 +326,66 @@ impl MePool { out } + pub(super) fn required_writers_for_dc(endpoint_count: usize) -> usize { + endpoint_count.max(3) + } + + pub(super) async fn connect_endpoints_round_robin( + self: &Arc, + endpoints: &[SocketAddr], + rng: &SecureRandom, + ) -> bool { + if endpoints.is_empty() { + return false; + } + let start = (self.rr.fetch_add(1, Ordering::Relaxed) as usize) % endpoints.len(); + 
for offset in 0..endpoints.len() { + let idx = (start + offset) % endpoints.len(); + let addr = endpoints[idx]; + match self.connect_one(addr, rng).await { + Ok(()) => return true, + Err(e) => debug!(%addr, error = %e, "ME connect failed during round-robin warmup"), + } + } + false + } + async fn warmup_generation_for_all_dcs( self: &Arc, rng: &SecureRandom, generation: u64, desired_by_dc: &HashMap>, ) { - for endpoints in desired_by_dc.values() { + for (dc, endpoints) in desired_by_dc { if endpoints.is_empty() { continue; } - let has_fresh = { - let ws = self.writers.read().await; - ws.iter().any(|w| { - !w.draining.load(Ordering::Relaxed) - && w.generation == generation - && endpoints.contains(&w.addr) - }) - }; + let mut endpoint_list: Vec = endpoints.iter().copied().collect(); + endpoint_list.sort_unstable(); + let required = Self::required_writers_for_dc(endpoint_list.len()); - if has_fresh { - continue; - } + loop { + let fresh_count = { + let ws = self.writers.read().await; + ws.iter() + .filter(|w| !w.draining.load(Ordering::Relaxed)) + .filter(|w| w.generation == generation) + .filter(|w| endpoints.contains(&w.addr)) + .count() + }; + if fresh_count >= required { + break; + } - let mut shuffled: Vec = endpoints.iter().copied().collect(); - shuffled.shuffle(&mut rand::rng()); - for addr in shuffled { - if self.connect_one(addr, rng).await.is_ok() { + if !self.connect_endpoints_round_robin(&endpoint_list, rng).await { + warn!( + dc = *dc, + fresh_count, + required, + endpoint_count = endpoint_list.len(), + "ME warmup stopped: unable to reach required writer floor for DC" + ); break; } } @@ -364,7 +398,7 @@ impl MePool { ) { let desired_by_dc = self.desired_dc_endpoints().await; if desired_by_dc.is_empty() { - warn!("ME endpoint map is empty after update; skipping stale writer drain"); + warn!("ME endpoint map is empty; skipping stale writer drain"); return; } @@ -403,19 +437,26 @@ impl MePool { } if hardswap { - let fresh_writer_addrs: HashSet = writers - 
.iter() - .filter(|w| !w.draining.load(Ordering::Relaxed)) - .filter(|w| w.generation == generation) - .map(|w| w.addr) - .collect(); - let (fresh_ratio, fresh_missing_dc) = - Self::coverage_ratio(&desired_by_dc, &fresh_writer_addrs); + let mut fresh_missing_dc = Vec::<(i32, usize, usize)>::new(); + for (dc, endpoints) in &desired_by_dc { + if endpoints.is_empty() { + continue; + } + let required = Self::required_writers_for_dc(endpoints.len()); + let fresh_count = writers + .iter() + .filter(|w| !w.draining.load(Ordering::Relaxed)) + .filter(|w| w.generation == generation) + .filter(|w| endpoints.contains(&w.addr)) + .count(); + if fresh_count < required { + fresh_missing_dc.push((*dc, fresh_count, required)); + } + } if !fresh_missing_dc.is_empty() { warn!( previous_generation, generation, - fresh_ratio = format_args!("{fresh_ratio:.3}"), missing_dc = ?fresh_missing_dc, "ME hardswap pending: fresh generation coverage incomplete" ); @@ -425,7 +466,7 @@ impl MePool { warn!( missing_dc = ?missing_dc, // Keep stale writers alive when fresh coverage is incomplete. 
- "ME reinit coverage incomplete after map update; keeping stale writers" + "ME reinit coverage incomplete; keeping stale writers" ); return; } @@ -450,7 +491,7 @@ impl MePool { drop(writers); if stale_writer_ids.is_empty() { - debug!("ME map update completed with no stale writers"); + debug!("ME reinit cycle completed with no stale writers"); return; } @@ -464,7 +505,7 @@ impl MePool { coverage_ratio = format_args!("{coverage_ratio:.3}"), min_ratio = format_args!("{min_ratio:.3}"), drain_timeout_secs, - "ME map update covered; draining stale writers" + "ME reinit cycle covered; draining stale writers" ); self.stats.increment_pool_swap_total(); for writer_id in stale_writer_ids { @@ -473,6 +514,134 @@ impl MePool { } } + pub async fn zero_downtime_reinit_periodic( + self: &Arc, + rng: &SecureRandom, + ) { + self.zero_downtime_reinit_after_map_change(rng).await; + } + + async fn endpoints_for_same_dc(&self, addr: SocketAddr) -> Vec { + let mut target_dc = HashSet::::new(); + let mut endpoints = HashSet::::new(); + + if self.decision.ipv4_me { + let map = self.proxy_map_v4.read().await.clone(); + for (dc, addrs) in &map { + if addrs + .iter() + .any(|(ip, port)| SocketAddr::new(*ip, *port) == addr) + { + target_dc.insert(dc.abs()); + } + } + for dc in &target_dc { + for key in [*dc, -*dc] { + if let Some(addrs) = map.get(&key) { + for (ip, port) in addrs { + endpoints.insert(SocketAddr::new(*ip, *port)); + } + } + } + } + } + + if self.decision.ipv6_me { + let map = self.proxy_map_v6.read().await.clone(); + for (dc, addrs) in &map { + if addrs + .iter() + .any(|(ip, port)| SocketAddr::new(*ip, *port) == addr) + { + target_dc.insert(dc.abs()); + } + } + for dc in &target_dc { + for key in [*dc, -*dc] { + if let Some(addrs) = map.get(&key) { + for (ip, port) in addrs { + endpoints.insert(SocketAddr::new(*ip, *port)); + } + } + } + } + } + + let mut sorted: Vec = endpoints.into_iter().collect(); + sorted.sort_unstable(); + sorted + } + + async fn 
refill_writer_after_loss(self: &Arc, addr: SocketAddr) -> bool { + let fast_retries = self.me_reconnect_fast_retry_count.max(1); + + for attempt in 0..fast_retries { + self.stats.increment_me_reconnect_attempt(); + match self.connect_one(addr, self.rng.as_ref()).await { + Ok(()) => { + self.stats.increment_me_reconnect_success(); + info!( + %addr, + attempt = attempt + 1, + "ME writer restored on the same endpoint" + ); + return true; + } + Err(e) => { + debug!( + %addr, + attempt = attempt + 1, + error = %e, + "ME immediate same-endpoint reconnect failed" + ); + } + } + } + + let dc_endpoints = self.endpoints_for_same_dc(addr).await; + if dc_endpoints.is_empty() { + return false; + } + + for attempt in 0..fast_retries { + self.stats.increment_me_reconnect_attempt(); + if self + .connect_endpoints_round_robin(&dc_endpoints, self.rng.as_ref()) + .await + { + self.stats.increment_me_reconnect_success(); + info!( + %addr, + attempt = attempt + 1, + "ME writer restored via DC fallback endpoint" + ); + return true; + } + } + + false + } + + pub(crate) fn trigger_immediate_refill(self: &Arc, addr: SocketAddr) { + let pool = Arc::clone(self); + tokio::spawn(async move { + { + let mut guard = pool.refill_inflight.lock().await; + if !guard.insert(addr) { + return; + } + } + + let restored = pool.refill_writer_after_loss(addr).await; + if !restored { + warn!(%addr, "ME immediate refill failed"); + } + + let mut guard = pool.refill_inflight.lock().await; + guard.remove(&addr); + }); + } + pub async fn update_proxy_maps( &self, new_v4: HashMap>, @@ -880,16 +1049,21 @@ impl MePool { } } - async fn remove_writer_only(&self, writer_id: u64) -> Vec { + async fn remove_writer_only(self: &Arc, writer_id: u64) -> Vec { let mut close_tx: Option> = None; + let mut removed_addr: Option = None; + let mut trigger_refill = false; { let mut ws = self.writers.write().await; if let Some(pos) = ws.iter().position(|w| w.id == writer_id) { let w = ws.remove(pos); - if 
w.draining.load(Ordering::Relaxed) { + let was_draining = w.draining.load(Ordering::Relaxed); + if was_draining { self.stats.decrement_pool_drain_active(); } w.cancel.cancel(); + removed_addr = Some(w.addr); + trigger_refill = !was_draining; close_tx = Some(w.tx.clone()); self.conn_count.fetch_sub(1, Ordering::Relaxed); } @@ -897,6 +1071,11 @@ impl MePool { if let Some(tx) = close_tx { let _ = tx.send(WriterCommand::Close).await; } + if trigger_refill + && let Some(addr) = removed_addr + { + self.trigger_immediate_refill(addr); + } self.rtt_stats.lock().await.remove(&writer_id); self.registry.writer_lost(writer_id).await } diff --git a/src/transport/middle_proxy/rotation.rs b/src/transport/middle_proxy/rotation.rs index e141fc4..cf5f70d 100644 --- a/src/transport/middle_proxy/rotation.rs +++ b/src/transport/middle_proxy/rotation.rs @@ -1,50 +1,87 @@ use std::sync::Arc; -use std::sync::atomic::Ordering; use std::time::Duration; +use tokio::sync::watch; use tracing::{info, warn}; +use crate::config::ProxyConfig; use crate::crypto::SecureRandom; use super::MePool; -/// Periodically refresh ME connections to avoid long-lived degradation. -pub async fn me_rotation_task(pool: Arc, rng: Arc, interval: Duration) { - let interval = interval.max(Duration::from_secs(600)); +/// Periodically reinitialize ME generations and swap them after full warmup. 
+pub async fn me_rotation_task( + pool: Arc, + rng: Arc, + mut config_rx: watch::Receiver>, +) { + let mut interval_secs = config_rx + .borrow() + .general + .effective_me_reinit_every_secs() + .max(1); + let mut interval = Duration::from_secs(interval_secs); + let mut next_tick = tokio::time::Instant::now() + interval; + + info!(interval_secs, "ME periodic reinit task started"); + loop { - tokio::time::sleep(interval).await; + let sleep = tokio::time::sleep_until(next_tick); + tokio::pin!(sleep); - let candidate = { - let ws = pool.writers.read().await; - if ws.is_empty() { - None - } else { - let idx = (pool.rr.load(std::sync::atomic::Ordering::Relaxed) as usize) % ws.len(); - ws.get(idx).cloned() - } - }; - - let Some(w) = candidate else { - continue; - }; - - info!(addr = %w.addr, writer_id = w.id, "Rotating ME connection"); - match pool.connect_one(w.addr, rng.as_ref()).await { - Ok(()) => { - tokio::time::sleep(Duration::from_secs(2)).await; - let ws = pool.writers.read().await; - let new_alive = ws.iter().any(|nw| - nw.id != w.id && nw.addr == w.addr && !nw.degraded.load(Ordering::Relaxed) && !nw.draining.load(Ordering::Relaxed) - ); - drop(ws); - if new_alive { - pool.mark_writer_draining(w.id).await; - } else { - warn!(addr = %w.addr, writer_id = w.id, "New writer died, keeping old"); + tokio::select! 
{ + _ = &mut sleep => { + pool.zero_downtime_reinit_periodic(rng.as_ref()).await; + let refreshed_secs = config_rx + .borrow() + .general + .effective_me_reinit_every_secs() + .max(1); + if refreshed_secs != interval_secs { + info!( + old_me_reinit_every_secs = interval_secs, + new_me_reinit_every_secs = refreshed_secs, + "ME periodic reinit interval changed" + ); + interval_secs = refreshed_secs; + interval = Duration::from_secs(interval_secs); } + next_tick = tokio::time::Instant::now() + interval; } - Err(e) => { - warn!(addr = %w.addr, writer_id = w.id, error = %e, "ME rotation connect failed"); + changed = config_rx.changed() => { + if changed.is_err() { + warn!("ME periodic reinit task stopped: config channel closed"); + break; + } + let new_secs = config_rx + .borrow() + .general + .effective_me_reinit_every_secs() + .max(1); + if new_secs == interval_secs { + continue; + } + + if new_secs < interval_secs { + info!( + old_me_reinit_every_secs = interval_secs, + new_me_reinit_every_secs = new_secs, + "ME periodic reinit interval decreased, running immediate reinit" + ); + interval_secs = new_secs; + interval = Duration::from_secs(interval_secs); + pool.zero_downtime_reinit_periodic(rng.as_ref()).await; + next_tick = tokio::time::Instant::now() + interval; + } else { + info!( + old_me_reinit_every_secs = interval_secs, + new_me_reinit_every_secs = new_secs, + "ME periodic reinit interval increased" + ); + interval_secs = new_secs; + interval = Duration::from_secs(interval_secs); + next_tick = tokio::time::Instant::now() + interval; + } } } } From 7538967d3c4b56b4d122c7839e35d8cb25b0d4d0 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Tue, 24 Feb 2026 23:36:33 +0300 Subject: [PATCH 37/98] ME Hardswap being softer Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com> --- src/config/defaults.rs | 16 ++ src/config/load.rs | 141 +++++++++++++++++ src/config/types.rs | 20 +++ src/main.rs | 4 + 
src/transport/middle_proxy/config_updater.rs | 8 + src/transport/middle_proxy/pool.rs | 158 +++++++++++++++++-- 6 files changed, 332 insertions(+), 15 deletions(-) diff --git a/src/config/defaults.rs b/src/config/defaults.rs index 4f563ba..d43ace9 100644 --- a/src/config/defaults.rs +++ b/src/config/defaults.rs @@ -186,6 +186,22 @@ pub(crate) fn default_me_reinit_every_secs() -> u64 { 15 * 60 } +pub(crate) fn default_me_hardswap_warmup_delay_min_ms() -> u64 { + 1000 +} + +pub(crate) fn default_me_hardswap_warmup_delay_max_ms() -> u64 { + 2000 +} + +pub(crate) fn default_me_hardswap_warmup_extra_passes() -> u8 { + 3 +} + +pub(crate) fn default_me_hardswap_warmup_pass_backoff_base_ms() -> u64 { + 500 +} + pub(crate) fn default_me_config_stable_snapshots() -> u8 { 2 } diff --git a/src/config/load.rs b/src/config/load.rs index c18c84f..5698a71 100644 --- a/src/config/load.rs +++ b/src/config/load.rs @@ -153,6 +153,32 @@ impl ProxyConfig { )); } + if config.general.me_hardswap_warmup_delay_max_ms == 0 { + return Err(ProxyError::Config( + "general.me_hardswap_warmup_delay_max_ms must be > 0".to_string(), + )); + } + + if config.general.me_hardswap_warmup_delay_min_ms + > config.general.me_hardswap_warmup_delay_max_ms + { + return Err(ProxyError::Config( + "general.me_hardswap_warmup_delay_min_ms must be <= general.me_hardswap_warmup_delay_max_ms".to_string(), + )); + } + + if config.general.me_hardswap_warmup_extra_passes > 10 { + return Err(ProxyError::Config( + "general.me_hardswap_warmup_extra_passes must be within [0, 10]".to_string(), + )); + } + + if config.general.me_hardswap_warmup_pass_backoff_base_ms == 0 { + return Err(ProxyError::Config( + "general.me_hardswap_warmup_pass_backoff_base_ms must be > 0".to_string(), + )); + } + if config.general.me_config_stable_snapshots == 0 { return Err(ProxyError::Config( "general.me_config_stable_snapshots must be > 0".to_string(), @@ -526,6 +552,121 @@ mod tests { let _ = std::fs::remove_file(path); } + #[test] + fn 
me_hardswap_warmup_defaults_are_set() { + let toml = r#" + [censorship] + tls_domain = "example.com" + + [access.users] + user = "00000000000000000000000000000000" + "#; + let dir = std::env::temp_dir(); + let path = dir.join("telemt_me_hardswap_warmup_defaults_test.toml"); + std::fs::write(&path, toml).unwrap(); + let cfg = ProxyConfig::load(&path).unwrap(); + assert_eq!( + cfg.general.me_hardswap_warmup_delay_min_ms, + default_me_hardswap_warmup_delay_min_ms() + ); + assert_eq!( + cfg.general.me_hardswap_warmup_delay_max_ms, + default_me_hardswap_warmup_delay_max_ms() + ); + assert_eq!( + cfg.general.me_hardswap_warmup_extra_passes, + default_me_hardswap_warmup_extra_passes() + ); + assert_eq!( + cfg.general.me_hardswap_warmup_pass_backoff_base_ms, + default_me_hardswap_warmup_pass_backoff_base_ms() + ); + let _ = std::fs::remove_file(path); + } + + #[test] + fn me_hardswap_warmup_delay_range_is_validated() { + let toml = r#" + [general] + me_hardswap_warmup_delay_min_ms = 2001 + me_hardswap_warmup_delay_max_ms = 2000 + + [censorship] + tls_domain = "example.com" + + [access.users] + user = "00000000000000000000000000000000" + "#; + let dir = std::env::temp_dir(); + let path = dir.join("telemt_me_hardswap_warmup_delay_range_test.toml"); + std::fs::write(&path, toml).unwrap(); + let err = ProxyConfig::load(&path).unwrap_err().to_string(); + assert!(err.contains( + "general.me_hardswap_warmup_delay_min_ms must be <= general.me_hardswap_warmup_delay_max_ms" + )); + let _ = std::fs::remove_file(path); + } + + #[test] + fn me_hardswap_warmup_delay_max_zero_is_rejected() { + let toml = r#" + [general] + me_hardswap_warmup_delay_max_ms = 0 + + [censorship] + tls_domain = "example.com" + + [access.users] + user = "00000000000000000000000000000000" + "#; + let dir = std::env::temp_dir(); + let path = dir.join("telemt_me_hardswap_warmup_delay_max_zero_test.toml"); + std::fs::write(&path, toml).unwrap(); + let err = ProxyConfig::load(&path).unwrap_err().to_string(); + 
assert!(err.contains("general.me_hardswap_warmup_delay_max_ms must be > 0")); + let _ = std::fs::remove_file(path); + } + + #[test] + fn me_hardswap_warmup_extra_passes_out_of_range_is_rejected() { + let toml = r#" + [general] + me_hardswap_warmup_extra_passes = 11 + + [censorship] + tls_domain = "example.com" + + [access.users] + user = "00000000000000000000000000000000" + "#; + let dir = std::env::temp_dir(); + let path = dir.join("telemt_me_hardswap_warmup_extra_passes_test.toml"); + std::fs::write(&path, toml).unwrap(); + let err = ProxyConfig::load(&path).unwrap_err().to_string(); + assert!(err.contains("general.me_hardswap_warmup_extra_passes must be within [0, 10]")); + let _ = std::fs::remove_file(path); + } + + #[test] + fn me_hardswap_warmup_pass_backoff_zero_is_rejected() { + let toml = r#" + [general] + me_hardswap_warmup_pass_backoff_base_ms = 0 + + [censorship] + tls_domain = "example.com" + + [access.users] + user = "00000000000000000000000000000000" + "#; + let dir = std::env::temp_dir(); + let path = dir.join("telemt_me_hardswap_warmup_backoff_zero_test.toml"); + std::fs::write(&path, toml).unwrap(); + let err = ProxyConfig::load(&path).unwrap_err().to_string(); + assert!(err.contains("general.me_hardswap_warmup_pass_backoff_base_ms must be > 0")); + let _ = std::fs::remove_file(path); + } + #[test] fn me_config_stable_snapshots_zero_is_rejected() { let toml = r#" diff --git a/src/config/types.rs b/src/config/types.rs index 03417c5..0cda9f4 100644 --- a/src/config/types.rs +++ b/src/config/types.rs @@ -271,6 +271,22 @@ pub struct GeneralConfig { #[serde(default = "default_me_reinit_every_secs")] pub me_reinit_every_secs: u64, + /// Minimum delay in ms between hardswap warmup connect attempts. + #[serde(default = "default_me_hardswap_warmup_delay_min_ms")] + pub me_hardswap_warmup_delay_min_ms: u64, + + /// Maximum delay in ms between hardswap warmup connect attempts. 
+ #[serde(default = "default_me_hardswap_warmup_delay_max_ms")] + pub me_hardswap_warmup_delay_max_ms: u64, + + /// Additional warmup passes in the same hardswap cycle after the base pass. + #[serde(default = "default_me_hardswap_warmup_extra_passes")] + pub me_hardswap_warmup_extra_passes: u8, + + /// Base backoff in ms between hardswap warmup passes when floor is still incomplete. + #[serde(default = "default_me_hardswap_warmup_pass_backoff_base_ms")] + pub me_hardswap_warmup_pass_backoff_base_ms: u64, + /// Number of identical getProxyConfig snapshots required before applying ME map updates. #[serde(default = "default_me_config_stable_snapshots")] pub me_config_stable_snapshots: u8, @@ -371,6 +387,10 @@ impl Default for GeneralConfig { fast_mode_min_tls_record: default_fast_mode_min_tls_record(), update_every: Some(default_update_every_secs()), me_reinit_every_secs: default_me_reinit_every_secs(), + me_hardswap_warmup_delay_min_ms: default_me_hardswap_warmup_delay_min_ms(), + me_hardswap_warmup_delay_max_ms: default_me_hardswap_warmup_delay_max_ms(), + me_hardswap_warmup_extra_passes: default_me_hardswap_warmup_extra_passes(), + me_hardswap_warmup_pass_backoff_base_ms: default_me_hardswap_warmup_pass_backoff_base_ms(), me_config_stable_snapshots: default_me_config_stable_snapshots(), me_config_apply_cooldown_secs: default_me_config_apply_cooldown_secs(), proxy_secret_stable_snapshots: default_proxy_secret_stable_snapshots(), diff --git a/src/main.rs b/src/main.rs index d9a692d..3bcbf3e 100644 --- a/src/main.rs +++ b/src/main.rs @@ -373,6 +373,10 @@ async fn main() -> std::result::Result<(), Box> { config.general.me_pool_drain_ttl_secs, config.general.effective_me_pool_force_close_secs(), config.general.me_pool_min_fresh_ratio, + config.general.me_hardswap_warmup_delay_min_ms, + config.general.me_hardswap_warmup_delay_max_ms, + config.general.me_hardswap_warmup_extra_passes, + config.general.me_hardswap_warmup_pass_backoff_base_ms, ); let pool_size = 
config.general.middle_proxy_pool_size.max(1); diff --git a/src/transport/middle_proxy/config_updater.rs b/src/transport/middle_proxy/config_updater.rs index fc9ed3d..4e8e63f 100644 --- a/src/transport/middle_proxy/config_updater.rs +++ b/src/transport/middle_proxy/config_updater.rs @@ -228,6 +228,10 @@ async fn run_update_cycle( cfg.general.me_pool_drain_ttl_secs, cfg.general.effective_me_pool_force_close_secs(), cfg.general.me_pool_min_fresh_ratio, + cfg.general.me_hardswap_warmup_delay_min_ms, + cfg.general.me_hardswap_warmup_delay_max_ms, + cfg.general.me_hardswap_warmup_extra_passes, + cfg.general.me_hardswap_warmup_pass_backoff_base_ms, ); let required_cfg_snapshots = cfg.general.me_config_stable_snapshots.max(1); @@ -407,6 +411,10 @@ pub async fn me_config_updater( cfg.general.me_pool_drain_ttl_secs, cfg.general.effective_me_pool_force_close_secs(), cfg.general.me_pool_min_fresh_ratio, + cfg.general.me_hardswap_warmup_delay_min_ms, + cfg.general.me_hardswap_warmup_delay_max_ms, + cfg.general.me_hardswap_warmup_extra_passes, + cfg.general.me_hardswap_warmup_pass_backoff_base_ms, ); let new_secs = cfg.general.effective_update_every_secs().max(1); if new_secs == update_every_secs { diff --git a/src/transport/middle_proxy/pool.rs b/src/transport/middle_proxy/pool.rs index 223d488..aa14e5b 100644 --- a/src/transport/middle_proxy/pool.rs +++ b/src/transport/middle_proxy/pool.rs @@ -83,6 +83,10 @@ pub struct MePool { pub(super) me_pool_drain_ttl_secs: AtomicU64, pub(super) me_pool_force_close_secs: AtomicU64, pub(super) me_pool_min_fresh_ratio_permille: AtomicU32, + pub(super) me_hardswap_warmup_delay_min_ms: AtomicU64, + pub(super) me_hardswap_warmup_delay_max_ms: AtomicU64, + pub(super) me_hardswap_warmup_extra_passes: AtomicU32, + pub(super) me_hardswap_warmup_pass_backoff_base_ms: AtomicU64, pool_size: usize, } @@ -140,6 +144,10 @@ impl MePool { me_pool_drain_ttl_secs: u64, me_pool_force_close_secs: u64, me_pool_min_fresh_ratio: f32, + 
me_hardswap_warmup_delay_min_ms: u64, + me_hardswap_warmup_delay_max_ms: u64, + me_hardswap_warmup_extra_passes: u8, + me_hardswap_warmup_pass_backoff_base_ms: u64, ) -> Arc { Arc::new(Self { registry: Arc::new(ConnRegistry::new()), @@ -188,6 +196,10 @@ impl MePool { me_pool_drain_ttl_secs: AtomicU64::new(me_pool_drain_ttl_secs), me_pool_force_close_secs: AtomicU64::new(me_pool_force_close_secs), me_pool_min_fresh_ratio_permille: AtomicU32::new(Self::ratio_to_permille(me_pool_min_fresh_ratio)), + me_hardswap_warmup_delay_min_ms: AtomicU64::new(me_hardswap_warmup_delay_min_ms), + me_hardswap_warmup_delay_max_ms: AtomicU64::new(me_hardswap_warmup_delay_max_ms), + me_hardswap_warmup_extra_passes: AtomicU32::new(me_hardswap_warmup_extra_passes as u32), + me_hardswap_warmup_pass_backoff_base_ms: AtomicU64::new(me_hardswap_warmup_pass_backoff_base_ms), }) } @@ -205,6 +217,10 @@ impl MePool { drain_ttl_secs: u64, force_close_secs: u64, min_fresh_ratio: f32, + hardswap_warmup_delay_min_ms: u64, + hardswap_warmup_delay_max_ms: u64, + hardswap_warmup_extra_passes: u8, + hardswap_warmup_pass_backoff_base_ms: u64, ) { self.hardswap.store(hardswap, Ordering::Relaxed); self.me_pool_drain_ttl_secs.store(drain_ttl_secs, Ordering::Relaxed); @@ -212,6 +228,14 @@ impl MePool { .store(force_close_secs, Ordering::Relaxed); self.me_pool_min_fresh_ratio_permille .store(Self::ratio_to_permille(min_fresh_ratio), Ordering::Relaxed); + self.me_hardswap_warmup_delay_min_ms + .store(hardswap_warmup_delay_min_ms, Ordering::Relaxed); + self.me_hardswap_warmup_delay_max_ms + .store(hardswap_warmup_delay_max_ms, Ordering::Relaxed); + self.me_hardswap_warmup_extra_passes + .store(hardswap_warmup_extra_passes as u32, Ordering::Relaxed); + self.me_hardswap_warmup_pass_backoff_base_ms + .store(hardswap_warmup_pass_backoff_base_ms, Ordering::Relaxed); } pub fn reset_stun_state(&self) { @@ -330,6 +354,49 @@ impl MePool { endpoint_count.max(3) } + fn hardswap_warmup_connect_delay_ms(&self) -> u64 { + let 
min_ms = self + .me_hardswap_warmup_delay_min_ms + .load(Ordering::Relaxed); + let max_ms = self + .me_hardswap_warmup_delay_max_ms + .load(Ordering::Relaxed); + let (min_ms, max_ms) = if min_ms <= max_ms { + (min_ms, max_ms) + } else { + (max_ms, min_ms) + }; + if min_ms == max_ms { + return min_ms; + } + rand::rng().random_range(min_ms..=max_ms) + } + + fn hardswap_warmup_backoff_ms(&self, pass_idx: usize) -> u64 { + let base_ms = self + .me_hardswap_warmup_pass_backoff_base_ms + .load(Ordering::Relaxed); + let cap_ms = (self.me_reconnect_backoff_cap.as_millis() as u64).max(base_ms); + let shift = (pass_idx as u32).min(20); + let scaled = base_ms.saturating_mul(1u64 << shift); + let core = scaled.min(cap_ms); + let jitter = (core / 2).max(1); + core.saturating_add(rand::rng().random_range(0..=jitter)) + } + + async fn fresh_writer_count_for_endpoints( + &self, + generation: u64, + endpoints: &HashSet, + ) -> usize { + let ws = self.writers.read().await; + ws.iter() + .filter(|w| !w.draining.load(Ordering::Relaxed)) + .filter(|w| w.generation == generation) + .filter(|w| endpoints.contains(&w.addr)) + .count() + } + pub(super) async fn connect_endpoints_round_robin( self: &Arc, endpoints: &[SocketAddr], @@ -356,6 +423,12 @@ impl MePool { generation: u64, desired_by_dc: &HashMap>, ) { + let extra_passes = self + .me_hardswap_warmup_extra_passes + .load(Ordering::Relaxed) + .min(10) as usize; + let total_passes = 1 + extra_passes; + for (dc, endpoints) in desired_by_dc { if endpoints.is_empty() { continue; @@ -364,30 +437,85 @@ impl MePool { let mut endpoint_list: Vec = endpoints.iter().copied().collect(); endpoint_list.sort_unstable(); let required = Self::required_writers_for_dc(endpoint_list.len()); + let mut completed = false; + let mut last_fresh_count = self + .fresh_writer_count_for_endpoints(generation, endpoints) + .await; - loop { - let fresh_count = { - let ws = self.writers.read().await; - ws.iter() - .filter(|w| !w.draining.load(Ordering::Relaxed)) - 
.filter(|w| w.generation == generation) - .filter(|w| endpoints.contains(&w.addr)) - .count() - }; - if fresh_count >= required { + for pass_idx in 0..total_passes { + if last_fresh_count >= required { + completed = true; break; } - if !self.connect_endpoints_round_robin(&endpoint_list, rng).await { - warn!( + let missing = required.saturating_sub(last_fresh_count); + debug!( + dc = *dc, + pass = pass_idx + 1, + total_passes, + fresh_count = last_fresh_count, + required, + missing, + endpoint_count = endpoint_list.len(), + "ME hardswap warmup pass started" + ); + + for attempt_idx in 0..missing { + let delay_ms = self.hardswap_warmup_connect_delay_ms(); + tokio::time::sleep(Duration::from_millis(delay_ms)).await; + + let connected = self.connect_endpoints_round_robin(&endpoint_list, rng).await; + debug!( dc = *dc, - fresh_count, + pass = pass_idx + 1, + total_passes, + attempt = attempt_idx + 1, + delay_ms, + connected, + "ME hardswap warmup connect attempt finished" + ); + } + + last_fresh_count = self + .fresh_writer_count_for_endpoints(generation, endpoints) + .await; + if last_fresh_count >= required { + completed = true; + info!( + dc = *dc, + pass = pass_idx + 1, + total_passes, + fresh_count = last_fresh_count, required, - endpoint_count = endpoint_list.len(), - "ME warmup stopped: unable to reach required writer floor for DC" + "ME hardswap warmup floor reached for DC" ); break; } + + if pass_idx + 1 < total_passes { + let backoff_ms = self.hardswap_warmup_backoff_ms(pass_idx); + debug!( + dc = *dc, + pass = pass_idx + 1, + total_passes, + fresh_count = last_fresh_count, + required, + backoff_ms, + "ME hardswap warmup pass incomplete, delaying next pass" + ); + tokio::time::sleep(Duration::from_millis(backoff_ms)).await; + } + } + + if !completed { + warn!( + dc = *dc, + fresh_count = last_fresh_count, + required, + endpoint_count = endpoint_list.len(), + total_passes, + "ME warmup stopped: unable to reach required writer floor for DC" + ); } } } From 
25ab79406ffd629961806a17ca6a48626d1f6b36 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Wed, 25 Feb 2026 00:28:26 +0300 Subject: [PATCH 38/98] Update Cargo.toml --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 6bce323..994e11f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "telemt" -version = "3.0.14" +version = "3.1.0" edition = "2024" [dependencies] From 866c2fbd96b048ead4dde3688270d0c8785963ea Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Wed, 25 Feb 2026 00:29:58 +0300 Subject: [PATCH 39/98] Update Cargo.toml --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 994e11f..b6ef28d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "telemt" -version = "3.1.0" +version = "3.0.15" edition = "2024" [dependencies] From e9a42810157f5d84456ab45e3f0000ec0d0f0c4e Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Wed, 25 Feb 2026 00:31:12 +0300 Subject: [PATCH 40/98] Delete proxy-secret Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com> --- proxy-secret | 1 - 1 file changed, 1 deletion(-) delete mode 100644 proxy-secret diff --git a/proxy-secret b/proxy-secret deleted file mode 100644 index ef77163..0000000 --- a/proxy-secret +++ /dev/null @@ -1 +0,0 @@ -ʖxHl~,D0d]UJUAM'!FnRZD>ϳF>yZfa*ߜڋ o8zM:dq>\3w}n\TĐy'VIil&] \ No newline at end of file From c6c3d71b08954cc0279fd23d40a7fec08a01c072 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Wed, 25 Feb 2026 01:26:01 +0300 Subject: [PATCH 41/98] ME Pool Flap-Detect in statistics --- src/metrics.rs | 93 ++++++++++++++++++++++++++++++ src/stats/mod.rs | 49 ++++++++++++++++ src/transport/middle_proxy/pool.rs | 10 ++++ 3 files changed, 152 insertions(+) diff --git a/src/metrics.rs 
b/src/metrics.rs index 53ddd5d..0051858 100644 --- a/src/metrics.rs +++ b/src/metrics.rs @@ -199,6 +199,95 @@ fn render_metrics(stats: &Stats) -> String { stats.get_pool_stale_pick_total() ); + let _ = writeln!(out, "# HELP telemt_me_writer_removed_total Total ME writer removals"); + let _ = writeln!(out, "# TYPE telemt_me_writer_removed_total counter"); + let _ = writeln!( + out, + "telemt_me_writer_removed_total {}", + stats.get_me_writer_removed_total() + ); + + let _ = writeln!( + out, + "# HELP telemt_me_writer_removed_unexpected_total Unexpected ME writer removals that triggered refill" + ); + let _ = writeln!(out, "# TYPE telemt_me_writer_removed_unexpected_total counter"); + let _ = writeln!( + out, + "telemt_me_writer_removed_unexpected_total {}", + stats.get_me_writer_removed_unexpected_total() + ); + + let _ = writeln!(out, "# HELP telemt_me_refill_triggered_total Immediate ME refill runs started"); + let _ = writeln!(out, "# TYPE telemt_me_refill_triggered_total counter"); + let _ = writeln!( + out, + "telemt_me_refill_triggered_total {}", + stats.get_me_refill_triggered_total() + ); + + let _ = writeln!( + out, + "# HELP telemt_me_refill_skipped_inflight_total Immediate ME refill skips due to inflight dedup" + ); + let _ = writeln!(out, "# TYPE telemt_me_refill_skipped_inflight_total counter"); + let _ = writeln!( + out, + "telemt_me_refill_skipped_inflight_total {}", + stats.get_me_refill_skipped_inflight_total() + ); + + let _ = writeln!(out, "# HELP telemt_me_refill_failed_total Immediate ME refill failures"); + let _ = writeln!(out, "# TYPE telemt_me_refill_failed_total counter"); + let _ = writeln!( + out, + "telemt_me_refill_failed_total {}", + stats.get_me_refill_failed_total() + ); + + let _ = writeln!( + out, + "# HELP telemt_me_writer_restored_same_endpoint_total Refilled ME writer restored on the same endpoint" + ); + let _ = writeln!(out, "# TYPE telemt_me_writer_restored_same_endpoint_total counter"); + let _ = writeln!( + out, + 
"telemt_me_writer_restored_same_endpoint_total {}", + stats.get_me_writer_restored_same_endpoint_total() + ); + + let _ = writeln!( + out, + "# HELP telemt_me_writer_restored_fallback_total Refilled ME writer restored via fallback endpoint" + ); + let _ = writeln!(out, "# TYPE telemt_me_writer_restored_fallback_total counter"); + let _ = writeln!( + out, + "telemt_me_writer_restored_fallback_total {}", + stats.get_me_writer_restored_fallback_total() + ); + + let unresolved_writer_losses = stats + .get_me_writer_removed_unexpected_total() + .saturating_sub( + stats + .get_me_writer_restored_same_endpoint_total() + .saturating_add(stats.get_me_writer_restored_fallback_total()), + ); + let _ = writeln!( + out, + "# HELP telemt_me_writer_removed_unexpected_minus_restored_total Unexpected writer removals not yet compensated by restore" + ); + let _ = writeln!( + out, + "# TYPE telemt_me_writer_removed_unexpected_minus_restored_total gauge" + ); + let _ = writeln!( + out, + "telemt_me_writer_removed_unexpected_minus_restored_total {}", + unresolved_writer_losses + ); + let _ = writeln!(out, "# HELP telemt_user_connections_total Per-user total connections"); let _ = writeln!(out, "# TYPE telemt_user_connections_total counter"); let _ = writeln!(out, "# HELP telemt_user_connections_current Per-user active connections"); @@ -277,6 +366,10 @@ mod tests { assert!(output.contains("# TYPE telemt_connections_total counter")); assert!(output.contains("# TYPE telemt_connections_bad_total counter")); assert!(output.contains("# TYPE telemt_handshake_timeouts_total counter")); + assert!(output.contains("# TYPE telemt_me_writer_removed_total counter")); + assert!(output.contains( + "# TYPE telemt_me_writer_removed_unexpected_minus_restored_total gauge" + )); } #[tokio::test] diff --git a/src/stats/mod.rs b/src/stats/mod.rs index a58996d..5f4c98e 100644 --- a/src/stats/mod.rs +++ b/src/stats/mod.rs @@ -43,6 +43,13 @@ pub struct Stats { pool_drain_active: AtomicU64, 
pool_force_close_total: AtomicU64, pool_stale_pick_total: AtomicU64, + me_writer_removed_total: AtomicU64, + me_writer_removed_unexpected_total: AtomicU64, + me_refill_triggered_total: AtomicU64, + me_refill_skipped_inflight_total: AtomicU64, + me_refill_failed_total: AtomicU64, + me_writer_restored_same_endpoint_total: AtomicU64, + me_writer_restored_fallback_total: AtomicU64, user_stats: DashMap, start_time: parking_lot::RwLock>, } @@ -142,6 +149,27 @@ impl Stats { pub fn increment_pool_stale_pick_total(&self) { self.pool_stale_pick_total.fetch_add(1, Ordering::Relaxed); } + pub fn increment_me_writer_removed_total(&self) { + self.me_writer_removed_total.fetch_add(1, Ordering::Relaxed); + } + pub fn increment_me_writer_removed_unexpected_total(&self) { + self.me_writer_removed_unexpected_total.fetch_add(1, Ordering::Relaxed); + } + pub fn increment_me_refill_triggered_total(&self) { + self.me_refill_triggered_total.fetch_add(1, Ordering::Relaxed); + } + pub fn increment_me_refill_skipped_inflight_total(&self) { + self.me_refill_skipped_inflight_total.fetch_add(1, Ordering::Relaxed); + } + pub fn increment_me_refill_failed_total(&self) { + self.me_refill_failed_total.fetch_add(1, Ordering::Relaxed); + } + pub fn increment_me_writer_restored_same_endpoint_total(&self) { + self.me_writer_restored_same_endpoint_total.fetch_add(1, Ordering::Relaxed); + } + pub fn increment_me_writer_restored_fallback_total(&self) { + self.me_writer_restored_fallback_total.fetch_add(1, Ordering::Relaxed); + } pub fn get_connects_all(&self) -> u64 { self.connects_all.load(Ordering::Relaxed) } pub fn get_connects_bad(&self) -> u64 { self.connects_bad.load(Ordering::Relaxed) } pub fn get_me_keepalive_sent(&self) -> u64 { self.me_keepalive_sent.load(Ordering::Relaxed) } @@ -195,6 +223,27 @@ impl Stats { pub fn get_pool_stale_pick_total(&self) -> u64 { self.pool_stale_pick_total.load(Ordering::Relaxed) } + pub fn get_me_writer_removed_total(&self) -> u64 { + 
self.me_writer_removed_total.load(Ordering::Relaxed) + } + pub fn get_me_writer_removed_unexpected_total(&self) -> u64 { + self.me_writer_removed_unexpected_total.load(Ordering::Relaxed) + } + pub fn get_me_refill_triggered_total(&self) -> u64 { + self.me_refill_triggered_total.load(Ordering::Relaxed) + } + pub fn get_me_refill_skipped_inflight_total(&self) -> u64 { + self.me_refill_skipped_inflight_total.load(Ordering::Relaxed) + } + pub fn get_me_refill_failed_total(&self) -> u64 { + self.me_refill_failed_total.load(Ordering::Relaxed) + } + pub fn get_me_writer_restored_same_endpoint_total(&self) -> u64 { + self.me_writer_restored_same_endpoint_total.load(Ordering::Relaxed) + } + pub fn get_me_writer_restored_fallback_total(&self) -> u64 { + self.me_writer_restored_fallback_total.load(Ordering::Relaxed) + } pub fn increment_user_connects(&self, user: &str) { self.user_stats.entry(user.to_string()).or_default() diff --git a/src/transport/middle_proxy/pool.rs b/src/transport/middle_proxy/pool.rs index aa14e5b..e5aebe4 100644 --- a/src/transport/middle_proxy/pool.rs +++ b/src/transport/middle_proxy/pool.rs @@ -708,6 +708,7 @@ impl MePool { match self.connect_one(addr, self.rng.as_ref()).await { Ok(()) => { self.stats.increment_me_reconnect_success(); + self.stats.increment_me_writer_restored_same_endpoint_total(); info!( %addr, attempt = attempt + 1, @@ -728,6 +729,7 @@ impl MePool { let dc_endpoints = self.endpoints_for_same_dc(addr).await; if dc_endpoints.is_empty() { + self.stats.increment_me_refill_failed_total(); return false; } @@ -738,6 +740,7 @@ impl MePool { .await { self.stats.increment_me_reconnect_success(); + self.stats.increment_me_writer_restored_fallback_total(); info!( %addr, attempt = attempt + 1, @@ -747,6 +750,7 @@ impl MePool { } } + self.stats.increment_me_refill_failed_total(); false } @@ -756,9 +760,11 @@ impl MePool { { let mut guard = pool.refill_inflight.lock().await; if !guard.insert(addr) { + 
pool.stats.increment_me_refill_skipped_inflight_total(); return; } } + pool.stats.increment_me_refill_triggered_total(); let restored = pool.refill_writer_after_loss(addr).await; if !restored { @@ -1189,9 +1195,13 @@ impl MePool { if was_draining { self.stats.decrement_pool_drain_active(); } + self.stats.increment_me_writer_removed_total(); w.cancel.cancel(); removed_addr = Some(w.addr); trigger_refill = !was_draining; + if trigger_refill { + self.stats.increment_me_writer_removed_unexpected_total(); + } close_tx = Some(w.tx.clone()); self.conn_count.fetch_sub(1, Ordering::Relaxed); } From 53ec96b04029c7e2406e52095ab5317662a2ad80 Mon Sep 17 00:00:00 2001 From: Dimasssss Date: Wed, 25 Feb 2026 01:37:55 +0300 Subject: [PATCH 42/98] Update config.toml --- config.toml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/config.toml b/config.toml index c1dc73f..f3df049 100644 --- a/config.toml +++ b/config.toml @@ -49,6 +49,10 @@ desync_all_full = false # Emit full crypto-desync forensic log auto_degradation_enabled = true # Enable auto-degradation from ME to Direct-DC. degradation_min_unavailable_dc_groups = 2 # Minimum unavailable ME DC groups before degrading. hardswap = true # Enable C-like hard-swap for ME pool generations. When true, Telemt prewarms a new generation and switches once full coverage is reached. +default_me_hardswap_warmup_delay_min_ms = 1000 # Minimum delay in ms between hardswap warmup connect attempts. +default_me_hardswap_warmup_delay_max_ms = 2000 # Maximum delay in ms between hardswap warmup connect attempts. +default_me_hardswap_warmup_extra_passes = 3 # Additional warmup passes in the same hardswap cycle after the base pass. +default_me_hardswap_warmup_pass_backoff_base_ms = 500 # Base backoff in ms between hardswap warmup passes when floor is still incomplete. me_pool_drain_ttl_secs = 90 # Drain-TTL in seconds for stale ME writers after endpoint map changes. During TTL, stale writers may be used only as fallback for new bindings. 
me_pool_min_fresh_ratio = 0.8 # Minimum desired-DC coverage ratio required before draining stale writers. Range: 0.0..=1.0. me_reinit_drain_timeout_secs = 120 # Drain timeout in seconds for stale ME writers after endpoint map changes. Set to 0 to keep stale writers draining indefinitely (no force-close). @@ -57,6 +61,7 @@ me_config_apply_cooldown_secs = 300 # Cooldown in seconds between applied proxy_secret_rotate_runtime = true # Enable runtime proxy-secret rotation from getProxySecret. proxy_secret_stable_snapshots = 2 # Number of identical getProxySecret snapshots required before runtime secret rotation. proxy_secret_len_max = 256 # Maximum allowed proxy-secret length in bytes for startup and runtime refresh. +default_me_reinit_every_secs = 900 # Periodic ME pool reinitialization interval in seconds. [general.modes] classic = false From 6efcbe9bbf1da8ce5da903aac15c85663d2c5bbf Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Wed, 25 Feb 2026 02:05:32 +0300 Subject: [PATCH 43/98] Update README.md --- README.md | 45 ++++++++++++--------------------------------- 1 file changed, 12 insertions(+), 33 deletions(-) diff --git a/README.md b/README.md index 8d0c41a..cba2a9d 100644 --- a/README.md +++ b/README.md @@ -10,28 +10,18 @@ ### 🇷🇺 RU -#### Драфтинг LTS и текущие улучшения +#### Релиз 3.0.15 — 25 февраля -С 21 февраля мы начали подготовку LTS-версии. +25 февраля мы выпустили версию **3.0.15**. -Мы внимательно анализируем весь доступный фидбек. -Наша цель — сделать LTS-кандидаты максимально стабильными, тщательно отлаженными и готовыми к long-run и highload production-сценариям. +Мы предполагаем, что она станет завершающей версией поколения 3.0 и уже сейчас мы рассматриваем её как **LTS-кандидата** для версии **3.1.0**! ---- +После нескольких дней детального анализа особенностей работы Middle-End мы спроектировали и реализовали продуманный режим **ротации ME Writer**. 
Данный режим позволяет поддерживать стабильно высокую производительность в long-run сценариях без возникновения ошибок, связанных с некорректной конфигурацией прокси. -#### Улучшения от 23 февраля - -23 февраля были внесены улучшения производительности в режимах **DC** и **Middle-End (ME)**, с акцентом на обратный канал (путь клиент → DC / ME). - -Дополнительно реализован ряд изменений, направленных на повышение устойчивости системы: - -- Смягчение сетевой нестабильности -- Повышение устойчивости к десинхронизации криптографии -- Снижение дрейфа сессий при неблагоприятных условиях -- Улучшение обработки ошибок в edge-case транспортных сценариях +Будем рады вашему фидбеку и предложениям по улучшению — особенно в части **статистики** и **UX**. Релиз: -[3.0.12](https://github.com/telemt/telemt/releases/tag/3.0.12) +[3.0.15](https://github.com/telemt/telemt/releases/tag/3.0.15) --- @@ -48,28 +38,17 @@ ### 🇬🇧 EN -#### LTS Drafting and Ongoing Improvements +#### Release 3.0.15 — February 25 -Starting February 21, we began drafting the upcoming LTS version. +On February 25, we released version **3.0.15**. -We are carefully reviewing and analyzing all available feedback. -The goal is to ensure that LTS candidates are максимально stable, thoroughly debugged, and ready for long-run and high-load production scenarios. +We expect this to become the final release of the 3.0 generation and at this point, we already see it as a strong **LTS candidate** for the upcoming **3.1.0** release! ---- - -#### February 23 Improvements - -On February 23, we introduced performance improvements for both **DC** and **Middle-End (ME)** modes, specifically optimizing the reverse channel (client → DC / ME data path). 
- -Additionally, we implemented a set of robustness enhancements designed to: - -- Mitigate network-related instability -- Improve resilience against cryptographic desynchronization -- Reduce session drift under adverse conditions -- Improve error handling in edge-case transport scenarios +After several days of deep analysis of Middle-End behavior, we designed and implemented a well-engineered **ME Writer rotation mode**. This mode enables sustained high throughput in long-run scenarios while preventing proxy misconfiguration errors. +We are looking forward to your feedback and improvement proposals — especially regarding **statistics** and **UX**. Release: -[3.0.12](https://github.com/telemt/telemt/releases/tag/3.0.12) +[3.0.15](https://github.com/telemt/telemt/releases/tag/3.0.15) --- From 16f166cec8285df6552becadd20f24b6090b7b86 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Wed, 25 Feb 2026 02:07:58 +0300 Subject: [PATCH 44/98] Update README.md --- README.md | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index cba2a9d..192cd00 100644 --- a/README.md +++ b/README.md @@ -12,13 +12,13 @@ #### Релиз 3.0.15 — 25 февраля -25 февраля мы выпустили версию **3.0.15**. +25 февраля мы выпустили версию **3.0.15** Мы предполагаем, что она станет завершающей версией поколения 3.0 и уже сейчас мы рассматриваем её как **LTS-кандидата** для версии **3.1.0**! -После нескольких дней детального анализа особенностей работы Middle-End мы спроектировали и реализовали продуманный режим **ротации ME Writer**. Данный режим позволяет поддерживать стабильно высокую производительность в long-run сценариях без возникновения ошибок, связанных с некорректной конфигурацией прокси. +После нескольких дней детального анализа особенностей работы Middle-End мы спроектировали и реализовали продуманный режим **ротации ME Writer**. 
Данный режим позволяет поддерживать стабильно высокую производительность в long-run сценариях без возникновения ошибок, связанных с некорректной конфигурацией прокси -Будем рады вашему фидбеку и предложениям по улучшению — особенно в части **статистики** и **UX**. +Будем рады вашему фидбеку и предложениям по улучшению — особенно в части **статистики** и **UX** Релиз: [3.0.15](https://github.com/telemt/telemt/releases/tag/3.0.15) @@ -40,13 +40,14 @@ #### Release 3.0.15 — February 25 -On February 25, we released version **3.0.15**. +On February 25, we released version **3.0.15** We expect this to become the final release of the 3.0 generation and at this point, we already see it as a strong **LTS candidate** for the upcoming **3.1.0** release! -After several days of deep analysis of Middle-End behavior, we designed and implemented a well-engineered **ME Writer rotation mode**. This mode enables sustained high throughput in long-run scenarios while preventing proxy misconfiguration errors. +After several days of deep analysis of Middle-End behavior, we designed and implemented a well-engineered **ME Writer rotation mode**. This mode enables sustained high throughput in long-run scenarios while preventing proxy misconfiguration errors + +We are looking forward to your feedback and improvement proposals — especially regarding **statistics** and **UX** -We are looking forward to your feedback and improvement proposals — especially regarding **statistics** and **UX**. 
Release: [3.0.15](https://github.com/telemt/telemt/releases/tag/3.0.15) From 618b7a183797978c555482f82bcb2e77be5be08a Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Wed, 25 Feb 2026 02:10:14 +0300 Subject: [PATCH 45/98] ME Pool Beobachter --- src/config/defaults.rs | 12 ++++ src/config/load.rs | 18 +++++ src/config/types.rs | 20 ++++++ src/main.rs | 42 ++++++++++- src/metrics.rs | 84 ++++++++++++++++++---- src/proxy/client.rs | 154 ++++++++++++++++++++++++++++++++++++++--- src/proxy/masking.rs | 12 +++- src/stats/mod.rs | 2 + 8 files changed, 318 insertions(+), 26 deletions(-) diff --git a/src/config/defaults.rs b/src/config/defaults.rs index d43ace9..80fcc07 100644 --- a/src/config/defaults.rs +++ b/src/config/defaults.rs @@ -121,6 +121,18 @@ pub(crate) fn default_desync_all_full() -> bool { false } +pub(crate) fn default_beobachten_minutes() -> u64 { + 10 +} + +pub(crate) fn default_beobachten_flush_secs() -> u64 { + 15 +} + +pub(crate) fn default_beobachten_file() -> String { + "cache/beobachten.txt".to_string() +} + pub(crate) fn default_tls_new_session_tickets() -> u8 { 0 } diff --git a/src/config/load.rs b/src/config/load.rs index 5698a71..aab553f 100644 --- a/src/config/load.rs +++ b/src/config/load.rs @@ -153,6 +153,24 @@ impl ProxyConfig { )); } + if config.general.beobachten_minutes == 0 { + return Err(ProxyError::Config( + "general.beobachten_minutes must be > 0".to_string(), + )); + } + + if config.general.beobachten_flush_secs == 0 { + return Err(ProxyError::Config( + "general.beobachten_flush_secs must be > 0".to_string(), + )); + } + + if config.general.beobachten_file.trim().is_empty() { + return Err(ProxyError::Config( + "general.beobachten_file cannot be empty".to_string(), + )); + } + if config.general.me_hardswap_warmup_delay_max_ms == 0 { return Err(ProxyError::Config( "general.me_hardswap_warmup_delay_max_ms must be > 0".to_string(), diff --git a/src/config/types.rs b/src/config/types.rs index 
0cda9f4..cfa8d31 100644 --- a/src/config/types.rs +++ b/src/config/types.rs @@ -206,6 +206,22 @@ pub struct GeneralConfig { #[serde(default = "default_desync_all_full")] pub desync_all_full: bool, + /// Enable per-IP forensic observation buckets for scanners and handshake failures. + #[serde(default)] + pub beobachten: bool, + + /// Observation retention window in minutes for per-IP forensic buckets. + #[serde(default = "default_beobachten_minutes")] + pub beobachten_minutes: u64, + + /// Snapshot flush interval in seconds for beob output file. + #[serde(default = "default_beobachten_flush_secs")] + pub beobachten_flush_secs: u64, + + /// Snapshot file path for beob output. + #[serde(default = "default_beobachten_file")] + pub beobachten_file: String, + /// Enable C-like hard-swap for ME pool generations. /// When true, Telemt prewarms a new generation and switches once full coverage is reached. #[serde(default = "default_hardswap")] @@ -383,6 +399,10 @@ impl Default for GeneralConfig { crypto_pending_buffer: default_crypto_pending_buffer(), max_client_frame: default_max_client_frame(), desync_all_full: default_desync_all_full(), + beobachten: false, + beobachten_minutes: default_beobachten_minutes(), + beobachten_flush_secs: default_beobachten_flush_secs(), + beobachten_file: default_beobachten_file(), hardswap: default_hardswap(), fast_mode_min_tls_record: default_fast_mode_min_tls_record(), update_every: Some(default_update_every_secs()), diff --git a/src/main.rs b/src/main.rs index 3bcbf3e..ab524a4 100644 --- a/src/main.rs +++ b/src/main.rs @@ -35,6 +35,7 @@ use crate::crypto::SecureRandom; use crate::ip_tracker::UserIpTracker; use crate::network::probe::{decide_network_capabilities, log_probe_result, run_probe}; use crate::proxy::ClientHandler; +use crate::stats::beobachten::BeobachtenStore; use crate::stats::{ReplayChecker, Stats}; use crate::stream::BufferPool; use crate::transport::middle_proxy::{ @@ -159,6 +160,15 @@ fn print_proxy_links(host: &str, port: 
u16, config: &ProxyConfig) { info!(target: "telemt::links", "------------------------"); } +async fn write_beobachten_snapshot(path: &str, payload: &str) -> std::io::Result<()> { + if let Some(parent) = std::path::Path::new(path).parent() + && !parent.as_os_str().is_empty() + { + tokio::fs::create_dir_all(parent).await?; + } + tokio::fs::write(path, payload).await +} + #[tokio::main] async fn main() -> std::result::Result<(), Box> { let (config_path, cli_silent, cli_log_level) = parse_cli(); @@ -256,6 +266,7 @@ async fn main() -> std::result::Result<(), Box> { let prefer_ipv6 = decision.prefer_ipv6(); let mut use_middle_proxy = config.general.use_middle_proxy && (decision.ipv4_me || decision.ipv6_me); let stats = Arc::new(Stats::new()); + let beobachten = Arc::new(BeobachtenStore::new()); let rng = Arc::new(SecureRandom::new()); // IP Tracker initialization @@ -692,6 +703,26 @@ async fn main() -> std::result::Result<(), Box> { detected_ip_v6, ); + let beobachten_writer = beobachten.clone(); + let config_rx_beobachten = config_rx.clone(); + tokio::spawn(async move { + loop { + let cfg = config_rx_beobachten.borrow().clone(); + let sleep_secs = cfg.general.beobachten_flush_secs.max(1); + + if cfg.general.beobachten { + let ttl = Duration::from_secs(cfg.general.beobachten_minutes.saturating_mul(60)); + let path = cfg.general.beobachten_file.clone(); + let snapshot = beobachten_writer.snapshot_text(ttl); + if let Err(e) = write_beobachten_snapshot(&path, &snapshot).await { + warn!(error = %e, path = %path, "Failed to flush beobachten snapshot"); + } + } + + tokio::time::sleep(Duration::from_secs(sleep_secs)).await; + } + }); + if let Some(ref pool) = me_pool { let pool_clone = pool.clone(); let rng_clone = rng.clone(); @@ -860,6 +891,7 @@ async fn main() -> std::result::Result<(), Box> { let me_pool = me_pool.clone(); let tls_cache = tls_cache.clone(); let ip_tracker = ip_tracker.clone(); + let beobachten = beobachten.clone(); let max_connections_unix = 
max_connections.clone(); tokio::spawn(async move { @@ -887,6 +919,7 @@ async fn main() -> std::result::Result<(), Box> { let me_pool = me_pool.clone(); let tls_cache = tls_cache.clone(); let ip_tracker = ip_tracker.clone(); + let beobachten = beobachten.clone(); let proxy_protocol_enabled = config.server.proxy_protocol; tokio::spawn(async move { @@ -894,7 +927,7 @@ async fn main() -> std::result::Result<(), Box> { if let Err(e) = crate::proxy::client::handle_client_stream( stream, fake_peer, config, stats, upstream_manager, replay_checker, buffer_pool, rng, - me_pool, tls_cache, ip_tracker, proxy_protocol_enabled, + me_pool, tls_cache, ip_tracker, beobachten, proxy_protocol_enabled, ).await { debug!(error = %e, "Unix socket connection error"); } @@ -942,9 +975,11 @@ async fn main() -> std::result::Result<(), Box> { if let Some(port) = config.server.metrics_port { let stats = stats.clone(); + let beobachten = beobachten.clone(); + let config_rx_metrics = config_rx.clone(); let whitelist = config.server.metrics_whitelist.clone(); tokio::spawn(async move { - metrics::serve(port, stats, whitelist).await; + metrics::serve(port, stats, beobachten, config_rx_metrics, whitelist).await; }); } @@ -958,6 +993,7 @@ async fn main() -> std::result::Result<(), Box> { let me_pool = me_pool.clone(); let tls_cache = tls_cache.clone(); let ip_tracker = ip_tracker.clone(); + let beobachten = beobachten.clone(); let max_connections_tcp = max_connections.clone(); tokio::spawn(async move { @@ -980,6 +1016,7 @@ async fn main() -> std::result::Result<(), Box> { let me_pool = me_pool.clone(); let tls_cache = tls_cache.clone(); let ip_tracker = ip_tracker.clone(); + let beobachten = beobachten.clone(); let proxy_protocol_enabled = listener_proxy_protocol; tokio::spawn(async move { @@ -996,6 +1033,7 @@ async fn main() -> std::result::Result<(), Box> { me_pool, tls_cache, ip_tracker, + beobachten, proxy_protocol_enabled, ) .run() diff --git a/src/metrics.rs b/src/metrics.rs index 
0051858..08abb2d 100644 --- a/src/metrics.rs +++ b/src/metrics.rs @@ -1,6 +1,7 @@ use std::convert::Infallible; use std::net::SocketAddr; use std::sync::Arc; +use std::time::Duration; use http_body_util::Full; use hyper::body::Bytes; @@ -11,9 +12,17 @@ use ipnetwork::IpNetwork; use tokio::net::TcpListener; use tracing::{info, warn, debug}; +use crate::config::ProxyConfig; +use crate::stats::beobachten::BeobachtenStore; use crate::stats::Stats; -pub async fn serve(port: u16, stats: Arc, whitelist: Vec) { +pub async fn serve( + port: u16, + stats: Arc, + beobachten: Arc, + config_rx: tokio::sync::watch::Receiver>, + whitelist: Vec, +) { let addr = SocketAddr::from(([0, 0, 0, 0], port)); let listener = match TcpListener::bind(addr).await { Ok(l) => l, @@ -22,7 +31,7 @@ pub async fn serve(port: u16, stats: Arc, whitelist: Vec) { return; } }; - info!("Metrics endpoint: http://{}/metrics", addr); + info!("Metrics endpoint: http://{}/metrics and /beobachten", addr); loop { let (stream, peer) = match listener.accept().await { @@ -39,10 +48,14 @@ pub async fn serve(port: u16, stats: Arc, whitelist: Vec) { } let stats = stats.clone(); + let beobachten = beobachten.clone(); + let config_rx_conn = config_rx.clone(); tokio::spawn(async move { let svc = service_fn(move |req| { let stats = stats.clone(); - async move { handle(req, &stats) } + let beobachten = beobachten.clone(); + let config = config_rx_conn.borrow().clone(); + async move { handle(req, &stats, &beobachten, &config) } }); if let Err(e) = http1::Builder::new() .serve_connection(hyper_util::rt::TokioIo::new(stream), svc) @@ -54,24 +67,48 @@ pub async fn serve(port: u16, stats: Arc, whitelist: Vec) { } } -fn handle(req: Request, stats: &Stats) -> Result>, Infallible> { - if req.uri().path() != "/metrics" { +fn handle( + req: Request, + stats: &Stats, + beobachten: &BeobachtenStore, + config: &ProxyConfig, +) -> Result>, Infallible> { + if req.uri().path() == "/metrics" { + let body = render_metrics(stats); let resp = 
Response::builder() - .status(StatusCode::NOT_FOUND) - .body(Full::new(Bytes::from("Not Found\n"))) + .status(StatusCode::OK) + .header("content-type", "text/plain; version=0.0.4; charset=utf-8") + .body(Full::new(Bytes::from(body))) + .unwrap(); + return Ok(resp); + } + + if req.uri().path() == "/beobachten" { + let body = render_beobachten(beobachten, config); + let resp = Response::builder() + .status(StatusCode::OK) + .header("content-type", "text/plain; charset=utf-8") + .body(Full::new(Bytes::from(body))) .unwrap(); return Ok(resp); } - let body = render_metrics(stats); let resp = Response::builder() - .status(StatusCode::OK) - .header("content-type", "text/plain; version=0.0.4; charset=utf-8") - .body(Full::new(Bytes::from(body))) + .status(StatusCode::NOT_FOUND) + .body(Full::new(Bytes::from("Not Found\n"))) .unwrap(); Ok(resp) } +fn render_beobachten(beobachten: &BeobachtenStore, config: &ProxyConfig) -> String { + if !config.general.beobachten { + return "beobachten disabled\n".to_string(); + } + + let ttl = Duration::from_secs(config.general.beobachten_minutes.saturating_mul(60)); + beobachten.snapshot_text(ttl) +} + fn render_metrics(stats: &Stats) -> String { use std::fmt::Write; let mut out = String::with_capacity(4096); @@ -318,6 +355,7 @@ fn render_metrics(stats: &Stats) -> String { #[cfg(test)] mod tests { use super::*; + use std::net::IpAddr; use http_body_util::BodyExt; #[test] @@ -375,6 +413,8 @@ mod tests { #[tokio::test] async fn test_endpoint_integration() { let stats = Arc::new(Stats::new()); + let beobachten = Arc::new(BeobachtenStore::new()); + let mut config = ProxyConfig::default(); stats.increment_connects_all(); stats.increment_connects_all(); stats.increment_connects_all(); @@ -383,16 +423,34 @@ mod tests { .uri("/metrics") .body(()) .unwrap(); - let resp = handle(req, &stats).unwrap(); + let resp = handle(req, &stats, &beobachten, &config).unwrap(); assert_eq!(resp.status(), StatusCode::OK); let body = 
resp.into_body().collect().await.unwrap().to_bytes(); assert!(std::str::from_utf8(body.as_ref()).unwrap().contains("telemt_connections_total 3")); + config.general.beobachten = true; + config.general.beobachten_minutes = 10; + beobachten.record( + "TLS-scanner", + "203.0.113.10".parse::().unwrap(), + Duration::from_secs(600), + ); + let req_beob = Request::builder() + .uri("/beobachten") + .body(()) + .unwrap(); + let resp_beob = handle(req_beob, &stats, &beobachten, &config).unwrap(); + assert_eq!(resp_beob.status(), StatusCode::OK); + let body_beob = resp_beob.into_body().collect().await.unwrap().to_bytes(); + let beob_text = std::str::from_utf8(body_beob.as_ref()).unwrap(); + assert!(beob_text.contains("[TLS-scanner]")); + assert!(beob_text.contains("203.0.113.10-1")); + let req404 = Request::builder() .uri("/other") .body(()) .unwrap(); - let resp404 = handle(req404, &stats).unwrap(); + let resp404 = handle(req404, &stats, &beobachten, &config).unwrap(); assert_eq!(resp404.status(), StatusCode::NOT_FOUND); } } diff --git a/src/proxy/client.rs b/src/proxy/client.rs index 483f6e0..c598023 100644 --- a/src/proxy/client.rs +++ b/src/proxy/client.rs @@ -1,7 +1,7 @@ //! 
Client Handler use std::future::Future; -use std::net::SocketAddr; +use std::net::{IpAddr, SocketAddr}; use std::pin::Pin; use std::sync::Arc; use std::time::Duration; @@ -27,6 +27,7 @@ use crate::error::{HandshakeResult, ProxyError, Result}; use crate::ip_tracker::UserIpTracker; use crate::protocol::constants::*; use crate::protocol::tls; +use crate::stats::beobachten::BeobachtenStore; use crate::stats::{ReplayChecker, Stats}; use crate::stream::{BufferPool, CryptoReader, CryptoWriter}; use crate::transport::middle_proxy::MePool; @@ -39,6 +40,36 @@ use crate::proxy::handshake::{HandshakeSuccess, handle_mtproto_handshake, handle use crate::proxy::masking::handle_bad_client; use crate::proxy::middle_relay::handle_via_middle_proxy; +fn beobachten_ttl(config: &ProxyConfig) -> Duration { + Duration::from_secs(config.general.beobachten_minutes.saturating_mul(60)) +} + +fn record_beobachten_class( + beobachten: &BeobachtenStore, + config: &ProxyConfig, + peer_ip: IpAddr, + class: &str, +) { + if !config.general.beobachten { + return; + } + beobachten.record(class, peer_ip, beobachten_ttl(config)); +} + +fn record_handshake_failure_class( + beobachten: &BeobachtenStore, + config: &ProxyConfig, + peer_ip: IpAddr, + error: &ProxyError, +) { + let class = if error.to_string().contains("expected 64 bytes, got 0") { + "expected_64_got_0" + } else { + "other" + }; + record_beobachten_class(beobachten, config, peer_ip, class); +} + pub async fn handle_client_stream( mut stream: S, peer: SocketAddr, @@ -51,6 +82,7 @@ pub async fn handle_client_stream( me_pool: Option>, tls_cache: Option>, ip_tracker: Arc, + beobachten: Arc, proxy_protocol_enabled: bool, ) -> Result<()> where @@ -73,6 +105,7 @@ where Err(e) => { stats.increment_connects_bad(); warn!(peer = %peer, error = %e, "Invalid PROXY protocol header"); + record_beobachten_class(&beobachten, &config, peer.ip(), "other"); return Err(e); } } @@ -82,6 +115,9 @@ where let handshake_timeout = 
Duration::from_secs(config.timeouts.client_handshake); let stats_for_timeout = stats.clone(); + let config_for_timeout = config.clone(); + let beobachten_for_timeout = beobachten.clone(); + let peer_for_timeout = real_peer.ip(); // For non-TCP streams, use a synthetic local address let local_addr: SocketAddr = format!("0.0.0.0:{}", config.server.port) @@ -103,7 +139,15 @@ where debug!(peer = %real_peer, tls_len = tls_len, "TLS handshake too short"); stats.increment_connects_bad(); let (reader, writer) = tokio::io::split(stream); - handle_bad_client(reader, writer, &first_bytes, &config).await; + handle_bad_client( + reader, + writer, + &first_bytes, + real_peer.ip(), + &config, + &beobachten, + ) + .await; return Ok(HandshakeOutcome::Handled); } @@ -120,7 +164,15 @@ where HandshakeResult::Success(result) => result, HandshakeResult::BadClient { reader, writer } => { stats.increment_connects_bad(); - handle_bad_client(reader, writer, &handshake, &config).await; + handle_bad_client( + reader, + writer, + &handshake, + real_peer.ip(), + &config, + &beobachten, + ) + .await; return Ok(HandshakeOutcome::Handled); } HandshakeResult::Error(e) => return Err(e), @@ -156,7 +208,15 @@ where debug!(peer = %real_peer, "Non-TLS modes disabled"); stats.increment_connects_bad(); let (reader, writer) = tokio::io::split(stream); - handle_bad_client(reader, writer, &first_bytes, &config).await; + handle_bad_client( + reader, + writer, + &first_bytes, + real_peer.ip(), + &config, + &beobachten, + ) + .await; return Ok(HandshakeOutcome::Handled); } @@ -173,7 +233,15 @@ where HandshakeResult::Success(result) => result, HandshakeResult::BadClient { reader, writer } => { stats.increment_connects_bad(); - handle_bad_client(reader, writer, &handshake, &config).await; + handle_bad_client( + reader, + writer, + &handshake, + real_peer.ip(), + &config, + &beobachten, + ) + .await; return Ok(HandshakeOutcome::Handled); } HandshakeResult::Error(e) => return Err(e), @@ -200,11 +268,23 @@ where 
Ok(Ok(outcome)) => outcome, Ok(Err(e)) => { debug!(peer = %peer, error = %e, "Handshake failed"); + record_handshake_failure_class( + &beobachten_for_timeout, + &config_for_timeout, + peer_for_timeout, + &e, + ); return Err(e); } Err(_) => { stats_for_timeout.increment_handshake_timeouts(); debug!(peer = %peer, "Handshake timeout"); + record_beobachten_class( + &beobachten_for_timeout, + &config_for_timeout, + peer_for_timeout, + "other", + ); return Err(ProxyError::TgHandshakeTimeout); } }; @@ -230,6 +310,7 @@ pub struct RunningClientHandler { me_pool: Option>, tls_cache: Option>, ip_tracker: Arc, + beobachten: Arc, proxy_protocol_enabled: bool, } @@ -246,6 +327,7 @@ impl ClientHandler { me_pool: Option>, tls_cache: Option>, ip_tracker: Arc, + beobachten: Arc, proxy_protocol_enabled: bool, ) -> RunningClientHandler { RunningClientHandler { @@ -260,6 +342,7 @@ impl ClientHandler { me_pool, tls_cache, ip_tracker, + beobachten, proxy_protocol_enabled, } } @@ -284,17 +367,32 @@ impl RunningClientHandler { let handshake_timeout = Duration::from_secs(self.config.timeouts.client_handshake); let stats = self.stats.clone(); + let config_for_timeout = self.config.clone(); + let beobachten_for_timeout = self.beobachten.clone(); + let peer_for_timeout = peer.ip(); // Phase 1: handshake (with timeout) let outcome = match timeout(handshake_timeout, self.do_handshake()).await { Ok(Ok(outcome)) => outcome, Ok(Err(e)) => { debug!(peer = %peer, error = %e, "Handshake failed"); + record_handshake_failure_class( + &beobachten_for_timeout, + &config_for_timeout, + peer_for_timeout, + &e, + ); return Err(e); } Err(_) => { stats.increment_handshake_timeouts(); debug!(peer = %peer, "Handshake timeout"); + record_beobachten_class( + &beobachten_for_timeout, + &config_for_timeout, + peer_for_timeout, + "other", + ); return Err(ProxyError::TgHandshakeTimeout); } }; @@ -321,6 +419,12 @@ impl RunningClientHandler { Err(e) => { self.stats.increment_connects_bad(); warn!(peer = %self.peer, 
error = %e, "Invalid PROXY protocol header"); + record_beobachten_class( + &self.beobachten, + &self.config, + self.peer.ip(), + "other", + ); return Err(e); } } @@ -354,7 +458,15 @@ impl RunningClientHandler { debug!(peer = %peer, tls_len = tls_len, "TLS handshake too short"); self.stats.increment_connects_bad(); let (reader, writer) = self.stream.into_split(); - handle_bad_client(reader, writer, &first_bytes, &self.config).await; + handle_bad_client( + reader, + writer, + &first_bytes, + peer.ip(), + &self.config, + &self.beobachten, + ) + .await; return Ok(HandshakeOutcome::Handled); } @@ -385,7 +497,15 @@ impl RunningClientHandler { HandshakeResult::Success(result) => result, HandshakeResult::BadClient { reader, writer } => { stats.increment_connects_bad(); - handle_bad_client(reader, writer, &handshake, &config).await; + handle_bad_client( + reader, + writer, + &handshake, + peer.ip(), + &config, + &self.beobachten, + ) + .await; return Ok(HandshakeOutcome::Handled); } HandshakeResult::Error(e) => return Err(e), @@ -446,7 +566,15 @@ impl RunningClientHandler { debug!(peer = %peer, "Non-TLS modes disabled"); self.stats.increment_connects_bad(); let (reader, writer) = self.stream.into_split(); - handle_bad_client(reader, writer, &first_bytes, &self.config).await; + handle_bad_client( + reader, + writer, + &first_bytes, + peer.ip(), + &self.config, + &self.beobachten, + ) + .await; return Ok(HandshakeOutcome::Handled); } @@ -476,7 +604,15 @@ impl RunningClientHandler { HandshakeResult::Success(result) => result, HandshakeResult::BadClient { reader, writer } => { stats.increment_connects_bad(); - handle_bad_client(reader, writer, &handshake, &config).await; + handle_bad_client( + reader, + writer, + &handshake, + peer.ip(), + &config, + &self.beobachten, + ) + .await; return Ok(HandshakeOutcome::Handled); } HandshakeResult::Error(e) => return Err(e), diff --git a/src/proxy/masking.rs b/src/proxy/masking.rs index 72175fe..cdb6cf9 100644 --- a/src/proxy/masking.rs 
+++ b/src/proxy/masking.rs @@ -1,6 +1,7 @@ //! Masking - forward unrecognized traffic to mask host use std::str; +use std::net::IpAddr; use std::time::Duration; use tokio::net::TcpStream; #[cfg(unix)] @@ -9,6 +10,7 @@ use tokio::io::{AsyncRead, AsyncWrite, AsyncReadExt, AsyncWriteExt}; use tokio::time::timeout; use tracing::debug; use crate::config::ProxyConfig; +use crate::stats::beobachten::BeobachtenStore; const MASK_TIMEOUT: Duration = Duration::from_secs(5); /// Maximum duration for the entire masking relay. @@ -50,20 +52,26 @@ pub async fn handle_bad_client( reader: R, writer: W, initial_data: &[u8], + peer_ip: IpAddr, config: &ProxyConfig, + beobachten: &BeobachtenStore, ) where R: AsyncRead + Unpin + Send + 'static, W: AsyncWrite + Unpin + Send + 'static, { + let client_type = detect_client_type(initial_data); + if config.general.beobachten { + let ttl = Duration::from_secs(config.general.beobachten_minutes.saturating_mul(60)); + beobachten.record(client_type, peer_ip, ttl); + } + if !config.censorship.mask { // Masking disabled, just consume data consume_client_data(reader).await; return; } - let client_type = detect_client_type(initial_data); - // Connect via Unix socket or TCP #[cfg(unix)] if let Some(ref sock_path) = config.censorship.mask_unix_sock { diff --git a/src/stats/mod.rs b/src/stats/mod.rs index 5f4c98e..1e32bb7 100644 --- a/src/stats/mod.rs +++ b/src/stats/mod.rs @@ -2,6 +2,8 @@ #![allow(dead_code)] +pub mod beobachten; + use std::sync::atomic::{AtomicU64, Ordering}; use std::time::{Instant, Duration}; use dashmap::DashMap; From 6b8619d3c91e309897032243d55da9e5120b0fef Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Wed, 25 Feb 2026 02:17:48 +0300 Subject: [PATCH 46/98] Create beobachten.rs --- src/stats/beobachten.rs | 117 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 117 insertions(+) create mode 100644 src/stats/beobachten.rs diff --git a/src/stats/beobachten.rs b/src/stats/beobachten.rs 
new file mode 100644 index 0000000..2e87fcc --- /dev/null +++ b/src/stats/beobachten.rs @@ -0,0 +1,117 @@ +//! Per-IP forensic buckets for scanner and handshake failure observation. + +use std::collections::{BTreeMap, HashMap}; +use std::net::IpAddr; +use std::time::{Duration, Instant}; + +use parking_lot::Mutex; + +const CLEANUP_INTERVAL: Duration = Duration::from_secs(30); + +#[derive(Default)] +struct BeobachtenInner { + entries: HashMap<(String, IpAddr), BeobachtenEntry>, + last_cleanup: Option, +} + +#[derive(Clone, Copy)] +struct BeobachtenEntry { + tries: u64, + last_seen: Instant, +} + +/// In-memory, TTL-scoped per-IP counters keyed by source class. +pub struct BeobachtenStore { + inner: Mutex, +} + +impl Default for BeobachtenStore { + fn default() -> Self { + Self::new() + } +} + +impl BeobachtenStore { + pub fn new() -> Self { + Self { + inner: Mutex::new(BeobachtenInner::default()), + } + } + + pub fn record(&self, class: &str, ip: IpAddr, ttl: Duration) { + if class.is_empty() || ttl.is_zero() { + return; + } + + let now = Instant::now(); + let mut guard = self.inner.lock(); + Self::cleanup_if_needed(&mut guard, now, ttl); + + let key = (class.to_string(), ip); + let entry = guard.entries.entry(key).or_insert(BeobachtenEntry { + tries: 0, + last_seen: now, + }); + entry.tries = entry.tries.saturating_add(1); + entry.last_seen = now; + } + + pub fn snapshot_text(&self, ttl: Duration) -> String { + if ttl.is_zero() { + return "beobachten disabled\n".to_string(); + } + + let now = Instant::now(); + let mut guard = self.inner.lock(); + Self::cleanup(&mut guard, now, ttl); + guard.last_cleanup = Some(now); + + let mut grouped = BTreeMap::>::new(); + for ((class, ip), entry) in &guard.entries { + grouped + .entry(class.clone()) + .or_default() + .push((*ip, entry.tries)); + } + + if grouped.is_empty() { + return "empty\n".to_string(); + } + + let mut out = String::with_capacity(grouped.len() * 64); + for (class, entries) in &mut grouped { + out.push('['); + 
out.push_str(class); + out.push_str("]\n"); + + entries.sort_by(|(ip_a, tries_a), (ip_b, tries_b)| { + tries_b + .cmp(tries_a) + .then_with(|| ip_a.to_string().cmp(&ip_b.to_string())) + }); + + for (ip, tries) in entries { + out.push_str(&format!("{ip}-{tries}\n")); + } + } + + out + } + + fn cleanup_if_needed(inner: &mut BeobachtenInner, now: Instant, ttl: Duration) { + let should_cleanup = match inner.last_cleanup { + Some(last) => now.saturating_duration_since(last) >= CLEANUP_INTERVAL, + None => true, + }; + if should_cleanup { + Self::cleanup(inner, now, ttl); + inner.last_cleanup = Some(now); + } + } + + fn cleanup(inner: &mut BeobachtenInner, now: Instant, ttl: Duration) { + inner.entries.retain(|_, entry| { + now.saturating_duration_since(entry.last_seen) <= ttl + }); + } +} From f83e23c521570f76c393fdaaec2dadcb778a34cd Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Wed, 25 Feb 2026 03:08:34 +0300 Subject: [PATCH 47/98] Update defaults.rs --- src/config/defaults.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/config/defaults.rs b/src/config/defaults.rs index 80fcc07..2ce7bac 100644 --- a/src/config/defaults.rs +++ b/src/config/defaults.rs @@ -199,11 +199,11 @@ pub(crate) fn default_me_reinit_every_secs() -> u64 { } pub(crate) fn default_me_hardswap_warmup_delay_min_ms() -> u64 { - 1000 + 2000 } pub(crate) fn default_me_hardswap_warmup_delay_max_ms() -> u64 { - 2000 + 3500 } pub(crate) fn default_me_hardswap_warmup_extra_passes() -> u8 { From 5a09d30e1cd866f0a4a0681aaa94ca170c7a2501 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Wed, 25 Feb 2026 03:09:02 +0300 Subject: [PATCH 48/98] Update Cargo.toml --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index b6ef28d..994e11f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "telemt" -version = "3.0.15" +version = "3.1.0" 
edition = "2024" [dependencies] From 206f87fe648dd123436f42bbdb5969c2fc680e28 Mon Sep 17 00:00:00 2001 From: D Date: Wed, 25 Feb 2026 09:22:26 +0300 Subject: [PATCH 49/98] fix: remove bracket in info --- src/main.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/main.rs b/src/main.rs index ab524a4..fbb50aa 100644 --- a/src/main.rs +++ b/src/main.rs @@ -203,14 +203,14 @@ async fn main() -> std::result::Result<(), Box> { }; let (filter_layer, filter_handle) = reload::Layer::new(EnvFilter::new("info")); - + // Configure color output based on config let fmt_layer = if config.general.disable_colors { fmt::Layer::default().with_ansi(false) } else { fmt::Layer::default().with_ansi(true) }; - + tracing_subscriber::registry() .with(filter_layer) .with(fmt_layer) @@ -272,7 +272,7 @@ async fn main() -> std::result::Result<(), Box> { // IP Tracker initialization let ip_tracker = Arc::new(UserIpTracker::new()); ip_tracker.load_limits(&config.access.user_max_unique_ips).await; - + if !config.access.user_max_unique_ips.is_empty() { info!("IP limits configured for {} users", config.access.user_max_unique_ips.len()); } @@ -598,7 +598,7 @@ async fn main() -> std::result::Result<(), Box> { .v4_results .iter() .any(|r| r.rtt_ms.is_some()); - + if upstream_result.both_available { if prefer_ipv6 { info!(" IPv6 in use / IPv4 is fallback"); @@ -606,9 +606,9 @@ async fn main() -> std::result::Result<(), Box> { info!(" IPv4 in use / IPv6 is fallback"); } } else if v6_works && !v4_works { - info!(" IPv6 only / IPv4 unavailable)"); + info!(" IPv6 only / IPv4 unavailable"); } else if v4_works && !v6_works { - info!(" IPv4 only / IPv6 unavailable)"); + info!(" IPv4 only / IPv6 unavailable"); } else if !v6_works && !v4_works { info!(" No DC connectivity"); } From 5558900c44235b026c723b68bf533c52512720e0 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Wed, 25 Feb 2026 13:29:46 +0300 Subject: [PATCH 50/98] Update main.rs 
--- src/main.rs | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/src/main.rs b/src/main.rs index fbb50aa..c2b8c34 100644 --- a/src/main.rs +++ b/src/main.rs @@ -272,7 +272,7 @@ async fn main() -> std::result::Result<(), Box> { // IP Tracker initialization let ip_tracker = Arc::new(UserIpTracker::new()); ip_tracker.load_limits(&config.access.user_max_unique_ips).await; - + if !config.access.user_max_unique_ips.is_empty() { info!("IP limits configured for {} users", config.access.user_max_unique_ips.len()); } @@ -598,7 +598,7 @@ async fn main() -> std::result::Result<(), Box> { .v4_results .iter() .any(|r| r.rtt_ms.is_some()); - + if upstream_result.both_available { if prefer_ipv6 { info!(" IPv6 in use / IPv4 is fallback"); @@ -677,14 +677,8 @@ async fn main() -> std::result::Result<(), Box> { rc_clone.run_periodic_cleanup().await; }); - let detected_ip_v4: Option = probe - .reflected_ipv4 - .map(|s| s.ip()) - .or_else(|| probe.detected_ipv4.map(std::net::IpAddr::V4)); - let detected_ip_v6: Option = probe - .reflected_ipv6 - .map(|s| s.ip()) - .or_else(|| probe.detected_ipv6.map(std::net::IpAddr::V6)); + let detected_ip_v4: Option = probe.detected_ipv4.map(std::net::IpAddr::V4); + let detected_ip_v6: Option = probe.detected_ipv6.map(std::net::IpAddr::V6); debug!( "Detected IPs: v4={:?} v6={:?}", detected_ip_v4, detected_ip_v6 From 1b1bdfe99a33eabf4ccdfafae611d9d10683a438 Mon Sep 17 00:00:00 2001 From: Vladislav Yaroslavlev Date: Wed, 25 Feb 2026 14:00:50 +0300 Subject: [PATCH 51/98] Add proxy-secret to .gitignore The proxy-secret file contains sensitive authentication data that should never be committed to version control. --- .gitignore | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.gitignore b/.gitignore index 6b5f1d5..3a45e41 100644 --- a/.gitignore +++ b/.gitignore @@ -19,7 +19,5 @@ target # and can be added to the global gitignore or merged into this file. 
For a more nuclear # option (not recommended) you can uncomment the following to ignore the entire idea folder. #.idea/ -*.rs -target -Cargo.lock -src + +proxy-secret From f40b645c0530b3b870c22c556546f20b933ca627 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Wed, 25 Feb 2026 17:28:06 +0300 Subject: [PATCH 52/98] Defaults in-place --- src/config/defaults.rs | 50 +++++++++++++++++++++++++++++++++++++++--- src/config/load.rs | 49 +++++++++++++++++++++++++++++++++++++++++ src/config/types.rs | 43 ++++++++++++++++-------------------- 3 files changed, 115 insertions(+), 27 deletions(-) diff --git a/src/config/defaults.rs b/src/config/defaults.rs index 2ce7bac..51abe65 100644 --- a/src/config/defaults.rs +++ b/src/config/defaults.rs @@ -3,6 +3,15 @@ use ipnetwork::IpNetwork; use serde::Deserialize; // Helper defaults kept private to the config module. +const DEFAULT_NETWORK_IPV6: Option = Some(false); +const DEFAULT_STUN_TCP_FALLBACK: bool = true; +const DEFAULT_MIDDLE_PROXY_WARM_STANDBY: usize = 16; +const DEFAULT_ME_RECONNECT_MAX_CONCURRENT_PER_DC: u32 = 8; +const DEFAULT_ME_RECONNECT_FAST_RETRY_COUNT: u32 = 12; +const DEFAULT_LISTEN_ADDR_IPV6: &str = "::"; +const DEFAULT_ACCESS_USER: &str = "default"; +const DEFAULT_ACCESS_SECRET: &str = "00000000000000000000000000000000"; + pub(crate) fn default_true() -> bool { true } @@ -77,6 +86,14 @@ pub(crate) fn default_prefer_4() -> u8 { 4 } +pub(crate) fn default_network_ipv6() -> Option { + DEFAULT_NETWORK_IPV6 +} + +pub(crate) fn default_stun_tcp_fallback() -> bool { + DEFAULT_STUN_TCP_FALLBACK +} + pub(crate) fn default_unknown_dc_log_path() -> Option { Some("unknown-dc.txt".to_string()) } @@ -85,6 +102,10 @@ pub(crate) fn default_pool_size() -> usize { 8 } +pub(crate) fn default_middle_proxy_warm_standby() -> usize { + DEFAULT_MIDDLE_PROXY_WARM_STANDBY +} + pub(crate) fn default_keepalive_interval() -> u64 { 25 } @@ -109,6 +130,14 @@ pub(crate) fn 
default_reconnect_backoff_cap_ms() -> u64 { 30_000 } +pub(crate) fn default_me_reconnect_max_concurrent_per_dc() -> u32 { + DEFAULT_ME_RECONNECT_MAX_CONCURRENT_PER_DC +} + +pub(crate) fn default_me_reconnect_fast_retry_count() -> u32 { + DEFAULT_ME_RECONNECT_FAST_RETRY_COUNT +} + pub(crate) fn default_crypto_pending_buffer() -> usize { 256 * 1024 } @@ -191,7 +220,11 @@ pub(crate) fn default_proxy_config_reload_secs() -> u64 { } pub(crate) fn default_update_every_secs() -> u64 { - 30 * 60 + 5 * 60 +} + +pub(crate) fn default_update_every() -> Option { + Some(default_update_every_secs()) } pub(crate) fn default_me_reinit_every_secs() -> u64 { @@ -199,11 +232,11 @@ pub(crate) fn default_me_reinit_every_secs() -> u64 { } pub(crate) fn default_me_hardswap_warmup_delay_min_ms() -> u64 { - 2000 + 1000 } pub(crate) fn default_me_hardswap_warmup_delay_max_ms() -> u64 { - 3500 + 2000 } pub(crate) fn default_me_hardswap_warmup_extra_passes() -> u8 { @@ -266,6 +299,17 @@ pub(crate) fn default_degradation_min_unavailable_dc_groups() -> u8 { 2 } +pub(crate) fn default_listen_addr_ipv6() -> String { + DEFAULT_LISTEN_ADDR_IPV6.to_string() +} + +pub(crate) fn default_access_users() -> HashMap { + HashMap::from([( + DEFAULT_ACCESS_USER.to_string(), + DEFAULT_ACCESS_SECRET.to_string(), + )]) +} + // Custom deserializer helpers #[derive(Deserialize)] diff --git a/src/config/load.rs b/src/config/load.rs index aab553f..be6759e 100644 --- a/src/config/load.rs +++ b/src/config/load.rs @@ -427,6 +427,55 @@ impl ProxyConfig { mod tests { use super::*; + #[test] + fn serde_defaults_remain_unchanged_for_present_sections() { + let toml = r#" + [network] + [general] + [server] + [access] + "#; + let cfg: ProxyConfig = toml::from_str(toml).unwrap(); + + assert_eq!(cfg.network.ipv6, None); + assert!(!cfg.network.stun_tcp_fallback); + assert_eq!(cfg.general.middle_proxy_warm_standby, 0); + assert_eq!(cfg.general.me_reconnect_max_concurrent_per_dc, 0); + 
assert_eq!(cfg.general.me_reconnect_fast_retry_count, 0); + assert_eq!(cfg.general.update_every, None); + assert_eq!(cfg.server.listen_addr_ipv4, None); + assert_eq!(cfg.server.listen_addr_ipv6, None); + assert!(cfg.access.users.is_empty()); + } + + #[test] + fn impl_defaults_are_sourced_from_default_helpers() { + let network = NetworkConfig::default(); + assert_eq!(network.ipv6, default_network_ipv6()); + assert_eq!(network.stun_tcp_fallback, default_stun_tcp_fallback()); + + let general = GeneralConfig::default(); + assert_eq!( + general.middle_proxy_warm_standby, + default_middle_proxy_warm_standby() + ); + assert_eq!( + general.me_reconnect_max_concurrent_per_dc, + default_me_reconnect_max_concurrent_per_dc() + ); + assert_eq!( + general.me_reconnect_fast_retry_count, + default_me_reconnect_fast_retry_count() + ); + assert_eq!(general.update_every, default_update_every()); + + let server = ServerConfig::default(); + assert_eq!(server.listen_addr_ipv6, Some(default_listen_addr_ipv6())); + + let access = AccessConfig::default(); + assert_eq!(access.users, default_access_users()); + } + #[test] fn dc_overrides_allow_string_and_array() { let toml = r#" diff --git a/src/config/types.rs b/src/config/types.rs index cfa8d31..ad22c93 100644 --- a/src/config/types.rs +++ b/src/config/types.rs @@ -76,7 +76,7 @@ impl Default for ProxyModes { Self { classic: false, secure: false, - tls: true, + tls: default_true(), } } } @@ -117,12 +117,12 @@ pub struct NetworkConfig { impl Default for NetworkConfig { fn default() -> Self { Self { - ipv4: true, - ipv6: Some(false), - prefer: 4, + ipv4: default_true(), + ipv6: default_network_ipv6(), + prefer: default_prefer_4(), multipath: false, stun_servers: default_stun_servers(), - stun_tcp_fallback: true, + stun_tcp_fallback: default_stun_tcp_fallback(), http_ip_detect_urls: default_http_ip_detect_urls(), cache_public_ip_path: default_cache_public_ip_path(), } @@ -370,27 +370,27 @@ impl Default for GeneralConfig { Self { modes: 
ProxyModes::default(), prefer_ipv6: false, - fast_mode: true, + fast_mode: default_true(), use_middle_proxy: false, ad_tag: None, proxy_secret_path: None, middle_proxy_nat_ip: None, - middle_proxy_nat_probe: false, + middle_proxy_nat_probe: true, middle_proxy_nat_stun: None, middle_proxy_nat_stun_servers: Vec::new(), middle_proxy_pool_size: default_pool_size(), - middle_proxy_warm_standby: 16, - me_keepalive_enabled: true, + middle_proxy_warm_standby: default_middle_proxy_warm_standby(), + me_keepalive_enabled: default_true(), me_keepalive_interval_secs: default_keepalive_interval(), me_keepalive_jitter_secs: default_keepalive_jitter(), - me_keepalive_payload_random: true, - me_warmup_stagger_enabled: true, + me_keepalive_payload_random: default_true(), + me_warmup_stagger_enabled: default_true(), me_warmup_step_delay_ms: default_warmup_step_delay_ms(), me_warmup_step_jitter_ms: default_warmup_step_jitter_ms(), - me_reconnect_max_concurrent_per_dc: 8, + me_reconnect_max_concurrent_per_dc: default_me_reconnect_max_concurrent_per_dc(), me_reconnect_backoff_base_ms: default_reconnect_backoff_base_ms(), me_reconnect_backoff_cap_ms: default_reconnect_backoff_cap_ms(), - me_reconnect_fast_retry_count: 8, + me_reconnect_fast_retry_count: default_me_reconnect_fast_retry_count(), stun_iface_mismatch_ignore: false, unknown_dc_log_path: default_unknown_dc_log_path(), log_level: LogLevel::Normal, @@ -399,13 +399,13 @@ impl Default for GeneralConfig { crypto_pending_buffer: default_crypto_pending_buffer(), max_client_frame: default_max_client_frame(), desync_all_full: default_desync_all_full(), - beobachten: false, + beobachten: true, beobachten_minutes: default_beobachten_minutes(), beobachten_flush_secs: default_beobachten_flush_secs(), beobachten_file: default_beobachten_file(), hardswap: default_hardswap(), fast_mode_min_tls_record: default_fast_mode_min_tls_record(), - update_every: Some(default_update_every_secs()), + update_every: default_update_every(), 
me_reinit_every_secs: default_me_reinit_every_secs(), me_hardswap_warmup_delay_min_ms: default_me_hardswap_warmup_delay_min_ms(), me_hardswap_warmup_delay_max_ms: default_me_hardswap_warmup_delay_max_ms(), @@ -423,7 +423,7 @@ impl Default for GeneralConfig { proxy_config_auto_reload_secs: default_proxy_config_reload_secs(), ntp_check: default_ntp_check(), ntp_servers: default_ntp_servers(), - auto_degradation_enabled: true, + auto_degradation_enabled: default_true(), degradation_min_unavailable_dc_groups: default_degradation_min_unavailable_dc_groups(), } } @@ -510,7 +510,7 @@ impl Default for ServerConfig { Self { port: default_port(), listen_addr_ipv4: Some(default_listen_addr()), - listen_addr_ipv6: Some("::".to_string()), + listen_addr_ipv6: Some(default_listen_addr_ipv6()), listen_unix_sock: None, listen_unix_sock_perm: None, listen_tcp: None, @@ -618,7 +618,7 @@ impl Default for AntiCensorshipConfig { Self { tls_domain: default_tls_domain(), tls_domains: Vec::new(), - mask: true, + mask: default_true(), mask_host: None, mask_port: default_mask_port(), mask_unix_sock: None, @@ -663,13 +663,8 @@ pub struct AccessConfig { impl Default for AccessConfig { fn default() -> Self { - let mut users = HashMap::new(); - users.insert( - "default".to_string(), - "00000000000000000000000000000000".to_string(), - ); Self { - users, + users: default_access_users(), user_max_tcp_conns: HashMap::new(), user_expirations: HashMap::new(), user_data_quota: HashMap::new(), From fed93464445eb6b32af08b393bc828198522206c Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Wed, 25 Feb 2026 17:49:54 +0300 Subject: [PATCH 53/98] New config.toml + tls_emulation enabled by default --- config.toml | 266 +++++++++++++++++++++++++------------------- src/config/types.rs | 2 +- 2 files changed, 152 insertions(+), 116 deletions(-) diff --git a/config.toml b/config.toml index f3df049..e82d97c 100644 --- a/config.toml +++ b/config.toml @@ -1,67 +1,66 @@ -# === 
General Settings === +# Telemt full config with default values. +# Examples are kept in comments after '#'. + +# Top-level legacy field. +show_link = [] # example: "*" or ["alice", "bob"] +# default_dc = 2 # example: default DC for unmapped non-standard DCs + [general] fast_mode = true -use_middle_proxy = true -# ad_tag = "00000000000000000000000000000000" -# Path to proxy-secret binary (auto-downloaded if missing). -proxy_secret_path = "proxy-secret" -# disable_colors = false # Disable colored output in logs (useful for files/systemd) - -# === Log Level === -# Log level: debug | verbose | normal | silent -# Can be overridden with --silent or --log-level CLI flags -# RUST_LOG env var takes absolute priority over all of these -log_level = "normal" - -# === Middle Proxy - ME === -# Public IP override for ME KDF when behind NAT; leave unset to auto-detect. -# middle_proxy_nat_ip = "203.0.113.10" -# Enable STUN probing to discover public IP:port for ME. +use_middle_proxy = false +# ad_tag = "0123456789abcdef0123456789abcdef" # example +# proxy_secret_path = "proxy-secret" # example custom path +# middle_proxy_nat_ip = "203.0.113.10" # example public NAT IP override middle_proxy_nat_probe = true -# Primary STUN server (host:port); defaults to Telegram STUN when empty. -middle_proxy_nat_stun = "stun.l.google.com:19302" -# Optional fallback STUN servers list. -middle_proxy_nat_stun_servers = ["stun1.l.google.com:19302", "stun2.l.google.com:19302"] -# Desired number of concurrent ME writers in pool. +# middle_proxy_nat_stun = "stun.l.google.com:19302" # example +middle_proxy_nat_stun_servers = [] # example: ["stun1.l.google.com:19302", "stun2.l.google.com:19302"] middle_proxy_pool_size = 8 -# Pre-initialized warm-standby ME connections kept idle. -middle_proxy_warm_standby = 8 -# Ignore STUN/interface mismatch and keep ME enabled even if IP differs. 
-stun_iface_mismatch_ignore = false -# Keepalive padding frames - fl==4 +middle_proxy_warm_standby = 16 me_keepalive_enabled = true -me_keepalive_interval_secs = 25 # Period between keepalives -me_keepalive_jitter_secs = 5 # Jitter added to interval -me_keepalive_payload_random = true # Randomize 4-byte payload (vs zeros) -# Stagger extra ME connections on warmup to de-phase lifecycles. +me_keepalive_interval_secs = 25 +me_keepalive_jitter_secs = 5 +me_keepalive_payload_random = true +crypto_pending_buffer = 262144 +max_client_frame = 16777216 +desync_all_full = false +beobachten = true +beobachten_minutes = 10 +beobachten_flush_secs = 15 +beobachten_file = "cache/beobachten.txt" +hardswap = true me_warmup_stagger_enabled = true -me_warmup_step_delay_ms = 500 # Base delay between extra connects -me_warmup_step_jitter_ms = 300 # Jitter for warmup delay -# Reconnect policy knobs. -me_reconnect_max_concurrent_per_dc = 4 # Parallel reconnects per DC - EXPERIMENTAL! UNSTABLE! -me_reconnect_backoff_base_ms = 500 # Backoff start -me_reconnect_backoff_cap_ms = 30000 # Backoff cap -me_reconnect_fast_retry_count = 11 # Quick retries before backoff -update_every = 7200 # Resolve the active updater interval for ME infrastructure refresh tasks. -crypto_pending_buffer = 262144 # Max pending ciphertext buffer per client writer (bytes). Controls FakeTLS backpressure vs throughput. -max_client_frame = 16777216 # Maximum allowed client MTProto frame size (bytes). -desync_all_full = false # Emit full crypto-desync forensic logs for every event. When false, full forensic details are emitted once per key window. -auto_degradation_enabled = true # Enable auto-degradation from ME to Direct-DC. -degradation_min_unavailable_dc_groups = 2 # Minimum unavailable ME DC groups before degrading. -hardswap = true # Enable C-like hard-swap for ME pool generations. When true, Telemt prewarms a new generation and switches once full coverage is reached. 
-default_me_hardswap_warmup_delay_min_ms = 1000 # Minimum delay in ms between hardswap warmup connect attempts. -default_me_hardswap_warmup_delay_max_ms = 2000 # Maximum delay in ms between hardswap warmup connect attempts. -default_me_hardswap_warmup_extra_passes = 3 # Additional warmup passes in the same hardswap cycle after the base pass. -default_me_hardswap_warmup_pass_backoff_base_ms = 500 # Base backoff in ms between hardswap warmup passes when floor is still incomplete. -me_pool_drain_ttl_secs = 90 # Drain-TTL in seconds for stale ME writers after endpoint map changes. During TTL, stale writers may be used only as fallback for new bindings. -me_pool_min_fresh_ratio = 0.8 # Minimum desired-DC coverage ratio required before draining stale writers. Range: 0.0..=1.0. -me_reinit_drain_timeout_secs = 120 # Drain timeout in seconds for stale ME writers after endpoint map changes. Set to 0 to keep stale writers draining indefinitely (no force-close). -me_config_stable_snapshots = 2 # Number of identical getProxyConfig snapshots required before applying ME map updates. -me_config_apply_cooldown_secs = 300 # Cooldown in seconds between applied ME map updates. -proxy_secret_rotate_runtime = true # Enable runtime proxy-secret rotation from getProxySecret. -proxy_secret_stable_snapshots = 2 # Number of identical getProxySecret snapshots required before runtime secret rotation. -proxy_secret_len_max = 256 # Maximum allowed proxy-secret length in bytes for startup and runtime refresh. -default_me_reinit_every_secs = 900 # Periodic ME pool reinitialization interval in seconds. 
+me_warmup_step_delay_ms = 500 +me_warmup_step_jitter_ms = 300 +me_reconnect_max_concurrent_per_dc = 8 +me_reconnect_backoff_base_ms = 500 +me_reconnect_backoff_cap_ms = 30000 +me_reconnect_fast_retry_count = 12 +stun_iface_mismatch_ignore = false +unknown_dc_log_path = "unknown-dc.txt" # to disable: set to null +log_level = "normal" # debug | verbose | normal | silent +disable_colors = false +fast_mode_min_tls_record = 0 +update_every = 300 +me_reinit_every_secs = 900 +me_hardswap_warmup_delay_min_ms = 1000 +me_hardswap_warmup_delay_max_ms = 2000 +me_hardswap_warmup_extra_passes = 3 +me_hardswap_warmup_pass_backoff_base_ms = 500 +me_config_stable_snapshots = 2 +me_config_apply_cooldown_secs = 300 +proxy_secret_stable_snapshots = 2 +proxy_secret_rotate_runtime = true +proxy_secret_len_max = 256 +me_pool_drain_ttl_secs = 90 +me_pool_min_fresh_ratio = 0.8 +me_reinit_drain_timeout_secs = 120 +# Legacy compatibility fields used when update_every is omitted. +proxy_secret_auto_reload_secs = 3600 +proxy_config_auto_reload_secs = 3600 +ntp_check = true +ntp_servers = ["pool.ntp.org"] # example: ["pool.ntp.org", "time.cloudflare.com"] +auto_degradation_enabled = true +degradation_min_unavailable_dc_groups = 2 [general.modes] classic = false @@ -69,63 +68,82 @@ secure = false tls = true [general.links] -show = "*" -# show = ["alice", "bob"] # Only show links for alice and bob -# show = "*" # Show links for all users -# public_host = "proxy.example.com" # Host (IP or domain) for tg:// links -# public_port = 443 # Port for tg:// links (default: server.port) +show = [] # example: "*" or ["alice", "bob"] +# public_host = "proxy.example.com" # example explicit host/IP for tg:// links +# public_port = 443 # example explicit port for tg:// links -# === Network Parameters === [network] -# Enable/disable families: true/false/auto(None) ipv4 = true -ipv6 = false # UNSTABLE WITH ME -# prefer = 4 or 6 -prefer = 4 -multipath = false # EXPERIMENTAL! 
+ipv6 = false # set true to enable IPv6 +prefer = 4 # 4 or 6 +multipath = false +stun_servers = [ + "stun.l.google.com:5349", + "stun1.l.google.com:3478", + "stun.gmx.net:3478", + "stun.l.google.com:19302", + "stun.1und1.de:3478", + "stun1.l.google.com:19302", + "stun2.l.google.com:19302", + "stun3.l.google.com:19302", + "stun4.l.google.com:19302", + "stun.services.mozilla.com:3478", + "stun.stunprotocol.org:3478", + "stun.nextcloud.com:3478", + "stun.voip.eutelia.it:3478", +] +stun_tcp_fallback = true +http_ip_detect_urls = ["https://ifconfig.me/ip", "https://api.ipify.org"] +cache_public_ip_path = "cache/public_ip.txt" -# === Server Binding === [server] port = 443 listen_addr_ipv4 = "0.0.0.0" listen_addr_ipv6 = "::" -# listen_unix_sock = "/var/run/telemt.sock" # Unix socket -# listen_unix_sock_perm = "0666" # Socket file permissions -# proxy_protocol = false # Enable if behind HAProxy/nginx with PROXY protocol -# metrics_port = 9090 -# metrics_whitelist = ["127.0.0.1", "::1"] +# listen_unix_sock = "/var/run/telemt.sock" # example +# listen_unix_sock_perm = "0660" # example unix socket mode +# listen_tcp = true # example explicit override (auto-detected when omitted) +proxy_protocol = false +# metrics_port = 9090 # example +metrics_whitelist = ["127.0.0.1/32", "::1/128"] +# Example explicit listeners (default: omitted, auto-generated from listen_addr_*): +# [[server.listeners]] +# ip = "0.0.0.0" +# announce = "proxy-v4.example.com" +# # announce_ip = "203.0.113.10" # deprecated alias +# proxy_protocol = false +# reuse_allow = false +# +# [[server.listeners]] +# ip = "::" +# announce = "proxy-v6.example.com" +# proxy_protocol = false +# reuse_allow = false -# Listen on multiple interfaces/IPs - IPv4 -[[server.listeners]] -ip = "0.0.0.0" - -# Listen on multiple interfaces/IPs - IPv6 -[[server.listeners]] -ip = "::" - -# === Timeouts (in seconds) === [timeouts] -client_handshake = 30 +client_handshake = 15 tg_connect = 10 client_keepalive = 60 client_ack = 300 -# 
Quick ME reconnects for single-address DCs (count and per-attempt timeout, ms). -me_one_retry = 12 -me_one_timeout_ms = 1200 +me_one_retry = 3 +me_one_timeout_ms = 1500 -# === Anti-Censorship & Masking === [censorship] tls_domain = "petrovich.ru" # tls_domains = ["example.com", "cdn.example.net"] # Additional domains for EE links mask = true +# mask_host = "www.google.com" # example, defaults to tls_domain when both mask_host/mask_unix_sock are unset +# mask_unix_sock = "/var/run/nginx.sock" # example, mutually exclusive with mask_host mask_port = 443 -# mask_host = "petrovich.ru" # Defaults to tls_domain if not set -# mask_unix_sock = "/var/run/nginx.sock" # Unix socket (mutually exclusive with mask_host) -fake_cert_len = 2048 -# tls_emulation = false # Fetch real cert lengths and emulate TLS records -# tls_front_dir = "tlsfront" # Cache directory for TLS emulation +fake_cert_len = 2048 # if tls_emulation=false and default value is used, loader may randomize this value at runtime +tls_emulation = true +tls_front_dir = "tlsfront" +server_hello_delay_min_ms = 0 +server_hello_delay_max_ms = 0 +tls_new_session_tickets = 0 +tls_full_cert_ttl_secs = 90 +alpn_enforce = true -# === Access Control & Users === [access] replay_check_len = 65536 replay_window_secs = 1800 @@ -134,34 +152,52 @@ ignore_time_skew = false [access.users] # format: "username" = "32_hex_chars_secret" hello = "00000000000000000000000000000000" +default = "00000000000000000000000000000000" +# alice = "11111111111111111111111111111111" # example -# [access.user_max_tcp_conns] -# hello = 50 +[access.user_max_tcp_conns] +# alice = 100 # example -# [access.user_max_unique_ips] -# hello = 5 +[access.user_expirations] +# alice = "2027-01-01T00:00:00Z" # example -# [access.user_data_quota] -# hello = 1073741824 # 1 GB +[access.user_data_quota] +# alice = 10737418240 # example bytes -# [access.user_expirations] -# format: username = "[year]-[month]-[day]T[hour]:[minute]:[second]Z" UTC -# hello = 
"2027-01-01T00:00:00Z" - -# === Upstreams & Routing === -[[upstreams]] -type = "direct" -enabled = true -weight = 10 -# interface = "192.168.1.100" # Bind outgoing to specific IP or iface name -# bind_addresses = ["192.168.1.100"] # List for round-robin binding (family must match target) +[access.user_max_unique_ips] +# alice = 10 # example +# Default behavior if [[upstreams]] is omitted: loader injects one direct upstream. +# Example explicit upstreams: +# [[upstreams]] +# type = "direct" +# interface = "eth0" +# bind_addresses = ["192.0.2.10"] +# weight = 1 +# enabled = true +# scopes = "*" +# +# [[upstreams]] +# type = "socks4" +# address = "198.51.100.20:1080" +# interface = "eth0" +# user_id = "telemt" +# weight = 1 +# enabled = true +# scopes = "*" +# # [[upstreams]] # type = "socks5" -# address = "127.0.0.1:1080" -# enabled = false +# address = "198.51.100.30:1080" +# interface = "eth0" +# username = "proxy-user" +# password = "proxy-pass" # weight = 1 +# enabled = true +# scopes = "*" # === DC Address Overrides === # [dc_overrides] -# "203" = "91.105.192.100:443" +# "201" = "149.154.175.50:443" # example +# "202" = ["149.154.167.51:443", "149.154.175.100:443"] # example +# "203" = "91.105.192.100:443" # loader auto-adds this one when omitted diff --git a/src/config/types.rs b/src/config/types.rs index ad22c93..1302a97 100644 --- a/src/config/types.rs +++ b/src/config/types.rs @@ -623,7 +623,7 @@ impl Default for AntiCensorshipConfig { mask_port: default_mask_port(), mask_unix_sock: None, fake_cert_len: default_fake_cert_len(), - tls_emulation: false, + tls_emulation: true, tls_front_dir: default_tls_front_dir(), server_hello_delay_min_ms: default_server_hello_delay_min_ms(), server_hello_delay_max_ms: default_server_hello_delay_max_ms(), From 06292ff8335b67d606b2b1eb490af504b996dd77 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Wed, 25 Feb 2026 21:33:06 +0300 Subject: [PATCH 54/98] Update config.toml --- 
config.toml | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/config.toml b/config.toml index e82d97c..44db620 100644 --- a/config.toml +++ b/config.toml @@ -8,12 +8,12 @@ show_link = [] # example: "*" or ["alice", "bob"] [general] fast_mode = true use_middle_proxy = false -# ad_tag = "0123456789abcdef0123456789abcdef" # example +# ad_tag = "00000000000000000000000000000000" # example # proxy_secret_path = "proxy-secret" # example custom path # middle_proxy_nat_ip = "203.0.113.10" # example public NAT IP override middle_proxy_nat_probe = true # middle_proxy_nat_stun = "stun.l.google.com:19302" # example -middle_proxy_nat_stun_servers = [] # example: ["stun1.l.google.com:19302", "stun2.l.google.com:19302"] +# middle_proxy_nat_stun_servers = [] # example: ["stun1.l.google.com:19302", "stun2.l.google.com:19302"] middle_proxy_pool_size = 8 middle_proxy_warm_standby = 16 me_keepalive_enabled = true @@ -68,7 +68,7 @@ secure = false tls = true [general.links] -show = [] # example: "*" or ["alice", "bob"] +show ="*" # example: "*" or ["alice", "bob"] # public_host = "proxy.example.com" # example explicit host/IP for tg:// links # public_port = 443 # example explicit port for tg:// links @@ -152,20 +152,21 @@ ignore_time_skew = false [access.users] # format: "username" = "32_hex_chars_secret" hello = "00000000000000000000000000000000" -default = "00000000000000000000000000000000" # alice = "11111111111111111111111111111111" # example [access.user_max_tcp_conns] # alice = 100 # example [access.user_expirations] -# alice = "2027-01-01T00:00:00Z" # example +# alice = "2078-01-01T00:00:00Z" # example [access.user_data_quota] +# hello = 10737418240 # example bytes # alice = 10737418240 # example bytes [access.user_max_unique_ips] -# alice = 10 # example +# hello = 10 # example +# alice = 100 # example # Default behavior if [[upstreams]] is omitted: loader injects one direct upstream. 
# Example explicit upstreams: From 79a3720fd577e2da69518a15acc0107b5587f457 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Wed, 25 Feb 2026 22:22:04 +0300 Subject: [PATCH 55/98] Rename config.toml to config.full.toml --- config.toml => config.full.toml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename config.toml => config.full.toml (100%) diff --git a/config.toml b/config.full.toml similarity index 100% rename from config.toml rename to config.full.toml From a6bfa3309e2713d8ecc4d4dd10d58119094fe464 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Wed, 25 Feb 2026 22:32:02 +0300 Subject: [PATCH 56/98] Create config.toml --- config.toml | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100644 config.toml diff --git a/config.toml b/config.toml new file mode 100644 index 0000000..d21b8f7 --- /dev/null +++ b/config.toml @@ -0,0 +1,37 @@ +# === General Settings === +[general] +use_middle_proxy = true +# ad_tag = "00000000000000000000000000000000" + +# === Log Level === +# Log level: debug | verbose | normal | silent +# Can be overridden with --silent or --log-level CLI flags +# RUST_LOG env var takes absolute priority over all of these +log_level = "normal" + +[general.modes] +classic = false +secure = false +tls = true + +# === Server Binding === +[server] +port = 9999 +# proxy_protocol = false # Enable if behind HAProxy/nginx with PROXY protocol +metrics_port = 9090 +metrics_whitelist = ["127.0.0.1", "::1", "0.0.0.0/0"] + +# Listen on multiple interfaces/IPs - IPv4 +[[server.listeners]] +ip = "0.0.0.0" + +# === Anti-Censorship & Masking === +[censorship] +tls_domain = "petrovich.ru" +mask = true +tls_emulation = true # Fetch real cert lengths and emulate TLS records +tls_front_dir = "tlsfront" # Cache directory for TLS emulation + +[access.users] +# format: "username" = "32_hex_chars_secret" +hello = "00000000000000000000000000000000" From 
03ce2678650f7f3ddf839286f1a77c8723904ad3 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Wed, 25 Feb 2026 22:33:38 +0300 Subject: [PATCH 57/98] Update config.toml --- config.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config.toml b/config.toml index d21b8f7..28a0087 100644 --- a/config.toml +++ b/config.toml @@ -16,7 +16,7 @@ tls = true # === Server Binding === [server] -port = 9999 +port = 443 # proxy_protocol = false # Enable if behind HAProxy/nginx with PROXY protocol metrics_port = 9090 metrics_whitelist = ["127.0.0.1", "::1", "0.0.0.0/0"] From 76f1b5101828b0cbdb25daa9b636596401342773 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Wed, 25 Feb 2026 22:44:38 +0300 Subject: [PATCH 58/98] Update config.toml --- config.toml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/config.toml b/config.toml index 28a0087..cc49bb3 100644 --- a/config.toml +++ b/config.toml @@ -1,3 +1,7 @@ +### Telemt Based Config.toml +# We believe that these settings are sufficient for most scenarios +# where cutting-egde methods and parameters or special solutions are not needed + # === General Settings === [general] use_middle_proxy = true From 1e4ba2eb56d0ff8dc9127f6a056e9945bbe8934c Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Thu, 26 Feb 2026 10:45:47 +0300 Subject: [PATCH 59/98] Update config.toml --- config.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config.toml b/config.toml index cc49bb3..9da6a5d 100644 --- a/config.toml +++ b/config.toml @@ -4,7 +4,7 @@ # === General Settings === [general] -use_middle_proxy = true +use_middle_proxy = false # ad_tag = "00000000000000000000000000000000" # === Log Level === From 4af40f71215cb5e19c6f3ad1580b17db19d989ff Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Thu, 26 Feb 2026 11:13:58 +0300 Subject: [PATCH 60/98] 
Update config.toml --- config.toml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/config.toml b/config.toml index 9da6a5d..3460d37 100644 --- a/config.toml +++ b/config.toml @@ -18,6 +18,13 @@ classic = false secure = false tls = true +[general.links] +show = "*" +# show = ["alice", "bob"] # Only show links for alice and bob +# show = "*" # Show links for all users +# public_host = "proxy.example.com" # Host (IP or domain) for tg:// links +# public_port = 443 # Port for tg:// links (default: server.port) + # === Server Binding === [server] port = 443 From 4e30a4999ccd62b65bdebdfa933b8dc180693c41 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Thu, 26 Feb 2026 11:14:52 +0300 Subject: [PATCH 61/98] Update config.toml --- config.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/config.toml b/config.toml index 3460d37..b280234 100644 --- a/config.toml +++ b/config.toml @@ -29,8 +29,8 @@ show = "*" [server] port = 443 # proxy_protocol = false # Enable if behind HAProxy/nginx with PROXY protocol -metrics_port = 9090 -metrics_whitelist = ["127.0.0.1", "::1", "0.0.0.0/0"] +# metrics_port = 9090 +# metrics_whitelist = ["127.0.0.1", "::1", "0.0.0.0/0"] # Listen on multiple interfaces/IPs - IPv4 [[server.listeners]] From 6cf9687dd6e6fdd5d7839ad06e9a98441332c8c3 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Thu, 26 Feb 2026 11:43:27 +0300 Subject: [PATCH 62/98] Update README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 192cd00..9bba0cb 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,8 @@ **Telemt** is a fast, secure, and feature-rich server written in Rust: it fully implements the official Telegram proxy algo and adds many production-ready improvements such as connection pooling, replay protection, detailed statistics, masking from "prying" eyes +{**Telemt Chat in Telegram**](https://t.me/telemtrs) + ## 
NEWS and EMERGENCY ### ✈️ Telemt 3 is released! From 7ead0cd753a42bcfb179ad4dc758225a53bd107a Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Thu, 26 Feb 2026 11:45:50 +0300 Subject: [PATCH 63/98] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9bba0cb..e2a898f 100644 --- a/README.md +++ b/README.md @@ -2,7 +2,7 @@ **Telemt** is a fast, secure, and feature-rich server written in Rust: it fully implements the official Telegram proxy algo and adds many production-ready improvements such as connection pooling, replay protection, detailed statistics, masking from "prying" eyes -{**Telemt Chat in Telegram**](https://t.me/telemtrs) +[**Telemt Chat in Telegram**](https://t.me/telemtrs) ## NEWS and EMERGENCY ### ✈️ Telemt 3 is released! From 896e129155684a5547ab5c5a0de15c3cca803528 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Thu, 26 Feb 2026 12:48:22 +0300 Subject: [PATCH 64/98] Checked defaults --- src/config/defaults.rs | 33 ++++++++++++++++---- src/config/types.rs | 68 +++++++++++++++++++++++++----------------- 2 files changed, 69 insertions(+), 32 deletions(-) diff --git a/src/config/defaults.rs b/src/config/defaults.rs index 51abe65..a2b2fc7 100644 --- a/src/config/defaults.rs +++ b/src/config/defaults.rs @@ -7,7 +7,7 @@ const DEFAULT_NETWORK_IPV6: Option = Some(false); const DEFAULT_STUN_TCP_FALLBACK: bool = true; const DEFAULT_MIDDLE_PROXY_WARM_STANDBY: usize = 16; const DEFAULT_ME_RECONNECT_MAX_CONCURRENT_PER_DC: u32 = 8; -const DEFAULT_ME_RECONNECT_FAST_RETRY_COUNT: u32 = 12; +const DEFAULT_ME_RECONNECT_FAST_RETRY_COUNT: u32 = 11; const DEFAULT_LISTEN_ADDR_IPV6: &str = "::"; const DEFAULT_ACCESS_USER: &str = "default"; const DEFAULT_ACCESS_SECRET: &str = "00000000000000000000000000000000"; @@ -21,7 +21,7 @@ pub(crate) fn default_port() -> u16 { } pub(crate) fn default_tls_domain() -> String { - 
"www.google.com".to_string() + "petrovich.ru".to_string() } pub(crate) fn default_mask_port() -> u16 { @@ -45,7 +45,7 @@ pub(crate) fn default_replay_window_secs() -> u64 { } pub(crate) fn default_handshake_timeout() -> u64 { - 15 + 30 } pub(crate) fn default_connect_timeout() -> u64 { @@ -60,17 +60,21 @@ pub(crate) fn default_ack_timeout() -> u64 { 300 } pub(crate) fn default_me_one_retry() -> u8 { - 3 + 12 } pub(crate) fn default_me_one_timeout() -> u64 { - 1500 + 1200 } pub(crate) fn default_listen_addr() -> String { "0.0.0.0".to_string() } +pub(crate) fn default_listen_addr_ipv4() -> Option { + Some(default_listen_addr()) +} + pub(crate) fn default_weight() -> u16 { 1 } @@ -102,6 +106,21 @@ pub(crate) fn default_pool_size() -> usize { 8 } +pub(crate) fn default_proxy_secret_path() -> Option { + Some("proxy-secret".to_string()) +} + +pub(crate) fn default_middle_proxy_nat_stun() -> Option { + Some("stun.l.google.com:19302".to_string()) +} + +pub(crate) fn default_middle_proxy_nat_stun_servers() -> Vec { + vec![ + "stun1.l.google.com:19302".to_string(), + "stun2.l.google.com:19302".to_string(), + ] +} + pub(crate) fn default_middle_proxy_warm_standby() -> usize { DEFAULT_MIDDLE_PROXY_WARM_STANDBY } @@ -303,6 +322,10 @@ pub(crate) fn default_listen_addr_ipv6() -> String { DEFAULT_LISTEN_ADDR_IPV6.to_string() } +pub(crate) fn default_listen_addr_ipv6_opt() -> Option { + Some(default_listen_addr_ipv6()) +} + pub(crate) fn default_access_users() -> HashMap { HashMap::from([( DEFAULT_ACCESS_USER.to_string(), diff --git a/src/config/types.rs b/src/config/types.rs index 1302a97..f42a94a 100644 --- a/src/config/types.rs +++ b/src/config/types.rs @@ -87,7 +87,7 @@ pub struct NetworkConfig { pub ipv4: bool, /// None = auto-detect IPv6 availability. - #[serde(default)] + #[serde(default = "default_network_ipv6")] pub ipv6: Option, /// 4 or 6. @@ -102,7 +102,7 @@ pub struct NetworkConfig { pub stun_servers: Vec, /// Enable TCP STUN fallback when UDP is blocked. 
- #[serde(default)] + #[serde(default = "default_stun_tcp_fallback")] pub stun_tcp_fallback: bool, /// HTTP-based public IP detection endpoints (fallback after STUN). @@ -140,7 +140,7 @@ pub struct GeneralConfig { #[serde(default = "default_true")] pub fast_mode: bool, - #[serde(default)] + #[serde(default = "default_true")] pub use_middle_proxy: bool, #[serde(default)] @@ -148,7 +148,7 @@ pub struct GeneralConfig { /// Path to proxy-secret binary file (auto-downloaded if absent). /// Infrastructure secret from https://core.telegram.org/getProxySecret. - #[serde(default)] + #[serde(default = "default_proxy_secret_path")] pub proxy_secret_path: Option, /// Public IP override for middle-proxy NAT environments. @@ -157,15 +157,15 @@ pub struct GeneralConfig { pub middle_proxy_nat_ip: Option, /// Enable STUN-based NAT probing to discover public IP:port for ME KDF. - #[serde(default)] + #[serde(default = "default_true")] pub middle_proxy_nat_probe: bool, /// Optional STUN server address (host:port) for NAT probing. - #[serde(default)] + #[serde(default = "default_middle_proxy_nat_stun")] pub middle_proxy_nat_stun: Option, /// Optional list of STUN servers for NAT probing fallback. - #[serde(default)] + #[serde(default = "default_middle_proxy_nat_stun_servers")] pub middle_proxy_nat_stun_servers: Vec, /// Desired size of active Middle-Proxy writer pool. @@ -173,7 +173,7 @@ pub struct GeneralConfig { pub middle_proxy_pool_size: usize, /// Number of warm standby ME connections kept pre-initialized. - #[serde(default)] + #[serde(default = "default_middle_proxy_warm_standby")] pub middle_proxy_warm_standby: usize, /// Enable ME keepalive padding frames. @@ -207,7 +207,7 @@ pub struct GeneralConfig { pub desync_all_full: bool, /// Enable per-IP forensic observation buckets for scanners and handshake failures. - #[serde(default)] + #[serde(default = "default_true")] pub beobachten: bool, /// Observation retention window in minutes for per-IP forensic buckets. 
@@ -240,7 +240,7 @@ pub struct GeneralConfig { pub me_warmup_step_jitter_ms: u64, /// Max concurrent reconnect attempts per DC. - #[serde(default)] + #[serde(default = "default_me_reconnect_max_concurrent_per_dc")] pub me_reconnect_max_concurrent_per_dc: u32, /// Base backoff in ms for reconnect. @@ -252,7 +252,7 @@ pub struct GeneralConfig { pub me_reconnect_backoff_cap_ms: u64, /// Fast retry attempts before backoff. - #[serde(default)] + #[serde(default = "default_me_reconnect_fast_retry_count")] pub me_reconnect_fast_retry_count: u32, /// Ignore STUN/interface IP mismatch (keep using Middle Proxy even if NAT detected). @@ -280,7 +280,7 @@ pub struct GeneralConfig { /// Unified ME updater interval in seconds for getProxyConfig/getProxyConfigV6/getProxySecret. /// When omitted, effective value falls back to legacy proxy_*_auto_reload_secs fields. - #[serde(default)] + #[serde(default = "default_update_every")] pub update_every: Option, /// Periodic ME pool reinitialization interval in seconds. 
@@ -371,13 +371,13 @@ impl Default for GeneralConfig { modes: ProxyModes::default(), prefer_ipv6: false, fast_mode: default_true(), - use_middle_proxy: false, + use_middle_proxy: default_true(), ad_tag: None, - proxy_secret_path: None, + proxy_secret_path: default_proxy_secret_path(), middle_proxy_nat_ip: None, - middle_proxy_nat_probe: true, - middle_proxy_nat_stun: None, - middle_proxy_nat_stun_servers: Vec::new(), + middle_proxy_nat_probe: default_true(), + middle_proxy_nat_stun: default_middle_proxy_nat_stun(), + middle_proxy_nat_stun_servers: default_middle_proxy_nat_stun_servers(), middle_proxy_pool_size: default_pool_size(), middle_proxy_warm_standby: default_middle_proxy_warm_standby(), me_keepalive_enabled: default_true(), @@ -399,7 +399,7 @@ impl Default for GeneralConfig { crypto_pending_buffer: default_crypto_pending_buffer(), max_client_frame: default_max_client_frame(), desync_all_full: default_desync_all_full(), - beobachten: true, + beobachten: default_true(), beobachten_minutes: default_beobachten_minutes(), beobachten_flush_secs: default_beobachten_flush_secs(), beobachten_file: default_beobachten_file(), @@ -450,11 +450,11 @@ impl GeneralConfig { } /// `[general.links]` — proxy link generation settings. -#[derive(Debug, Clone, Serialize, Deserialize, Default)] +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct LinksConfig { /// List of usernames whose tg:// links to display at startup. /// `"*"` = all users, `["alice", "bob"]` = specific users. - #[serde(default)] + #[serde(default = "default_links_show")] pub show: ShowLink, /// Public hostname/IP for tg:// link generation (overrides detected IP). 
@@ -466,15 +466,25 @@ pub struct LinksConfig { pub public_port: Option, } +impl Default for LinksConfig { + fn default() -> Self { + Self { + show: default_links_show(), + public_host: None, + public_port: None, + } + } +} + #[derive(Debug, Clone, Serialize, Deserialize)] pub struct ServerConfig { #[serde(default = "default_port")] pub port: u16, - #[serde(default)] + #[serde(default = "default_listen_addr_ipv4")] pub listen_addr_ipv4: Option, - #[serde(default)] + #[serde(default = "default_listen_addr_ipv6_opt")] pub listen_addr_ipv6: Option, #[serde(default)] @@ -509,8 +519,8 @@ impl Default for ServerConfig { fn default() -> Self { Self { port: default_port(), - listen_addr_ipv4: Some(default_listen_addr()), - listen_addr_ipv6: Some(default_listen_addr_ipv6()), + listen_addr_ipv4: default_listen_addr_ipv4(), + listen_addr_ipv6: default_listen_addr_ipv6_opt(), listen_unix_sock: None, listen_unix_sock_perm: None, listen_tcp: None, @@ -583,7 +593,7 @@ pub struct AntiCensorshipConfig { pub fake_cert_len: usize, /// Enable TLS certificate emulation using cached real certificates. - #[serde(default)] + #[serde(default = "default_true")] pub tls_emulation: bool, /// Directory to store TLS front cache (on disk). @@ -636,7 +646,7 @@ impl Default for AntiCensorshipConfig { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct AccessConfig { - #[serde(default)] + #[serde(default = "default_access_users")] pub users: HashMap, #[serde(default)] @@ -746,7 +756,7 @@ pub struct ListenerConfig { /// In TOML, this can be: /// - `show_link = "*"` — show links for all users /// - `show_link = ["a", "b"]` — show links for specific users -/// - omitted — show no links (default) +/// - omitted — default depends on the owning config field #[derive(Debug, Clone, Default)] pub enum ShowLink { /// Don't show any links (default when omitted). 
@@ -758,6 +768,10 @@ pub enum ShowLink { Specific(Vec), } +fn default_links_show() -> ShowLink { + ShowLink::All +} + impl ShowLink { /// Returns true if no links should be shown. pub fn is_empty(&self) -> bool { From da684b11fe86190e3cf055f10ef90b164dd59438 Mon Sep 17 00:00:00 2001 From: ivulit Date: Thu, 26 Feb 2026 13:36:33 +0300 Subject: [PATCH 65/98] feat: add mask_proxy_protocol option for PROXY protocol to mask_host MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds mask_proxy_protocol config option (0 = off, 1 = v1 text, 2 = v2 binary) that sends a PROXY protocol header when connecting to mask_host. This lets the backend see the real client IP address. Particularly useful when the masking site (nginx/HAProxy) runs on the same host as telemt and listens on a local port — without this, the backend loses the original client IP entirely. PROXY protocol header is also sent during TLS emulation fetches so that backends with proxy_protocol required don't reject the connection. 
--- config.full.toml | 1 + src/config/types.rs | 7 ++++++ src/main.rs | 3 +++ src/proxy/client.rs | 16 +++++++------- src/proxy/masking.rs | 39 +++++++++++++++++++++++++++++---- src/tls_front/fetcher.rs | 26 +++++++++++++++++++--- src/transport/proxy_protocol.rs | 10 +++++---- 7 files changed, 83 insertions(+), 19 deletions(-) diff --git a/config.full.toml b/config.full.toml index 44db620..ac55167 100644 --- a/config.full.toml +++ b/config.full.toml @@ -135,6 +135,7 @@ mask = true # mask_host = "www.google.com" # example, defaults to tls_domain when both mask_host/mask_unix_sock are unset # mask_unix_sock = "/var/run/nginx.sock" # example, mutually exclusive with mask_host mask_port = 443 +# mask_proxy_protocol = 0 # Send PROXY protocol header to mask_host: 0 = off, 1 = v1 (text), 2 = v2 (binary) fake_cert_len = 2048 # if tls_emulation=false and default value is used, loader may randomize this value at runtime tls_emulation = true tls_front_dir = "tlsfront" diff --git a/src/config/types.rs b/src/config/types.rs index 1302a97..7703fe4 100644 --- a/src/config/types.rs +++ b/src/config/types.rs @@ -611,6 +611,12 @@ pub struct AntiCensorshipConfig { /// Enforce ALPN echo of client preference. #[serde(default = "default_alpn_enforce")] pub alpn_enforce: bool, + + /// Send PROXY protocol header when connecting to mask_host. + /// 0 = disabled, 1 = v1 (text), 2 = v2 (binary). + /// Allows the backend to see the real client IP. 
+ #[serde(default)] + pub mask_proxy_protocol: u8, } impl Default for AntiCensorshipConfig { @@ -630,6 +636,7 @@ impl Default for AntiCensorshipConfig { tls_new_session_tickets: default_tls_new_session_tickets(), tls_full_cert_ttl_secs: default_tls_full_cert_ttl_secs(), alpn_enforce: default_alpn_enforce(), + mask_proxy_protocol: 0, } } } diff --git a/src/main.rs b/src/main.rs index c2b8c34..e4f7a79 100644 --- a/src/main.rs +++ b/src/main.rs @@ -474,6 +474,7 @@ async fn main() -> std::result::Result<(), Box> { &domain, Duration::from_secs(5), Some(upstream_manager.clone()), + config.censorship.mask_proxy_protocol, ) .await { @@ -486,6 +487,7 @@ async fn main() -> std::result::Result<(), Box> { let cache_clone = cache.clone(); let domains = tls_domains.clone(); let upstream_for_task = upstream_manager.clone(); + let proxy_protocol = config.censorship.mask_proxy_protocol; tokio::spawn(async move { loop { let base_secs = rand::rng().random_range(4 * 3600..=6 * 3600); @@ -498,6 +500,7 @@ async fn main() -> std::result::Result<(), Box> { domain, Duration::from_secs(5), Some(upstream_for_task.clone()), + proxy_protocol, ) .await { diff --git a/src/proxy/client.rs b/src/proxy/client.rs index c598023..d8bbc48 100644 --- a/src/proxy/client.rs +++ b/src/proxy/client.rs @@ -143,7 +143,7 @@ where reader, writer, &first_bytes, - real_peer.ip(), + real_peer, &config, &beobachten, ) @@ -168,7 +168,7 @@ where reader, writer, &handshake, - real_peer.ip(), + real_peer, &config, &beobachten, ) @@ -212,7 +212,7 @@ where reader, writer, &first_bytes, - real_peer.ip(), + real_peer, &config, &beobachten, ) @@ -237,7 +237,7 @@ where reader, writer, &handshake, - real_peer.ip(), + real_peer, &config, &beobachten, ) @@ -462,7 +462,7 @@ impl RunningClientHandler { reader, writer, &first_bytes, - peer.ip(), + peer, &self.config, &self.beobachten, ) @@ -501,7 +501,7 @@ impl RunningClientHandler { reader, writer, &handshake, - peer.ip(), + peer, &config, &self.beobachten, ) @@ -570,7 +570,7 @@ 
impl RunningClientHandler { reader, writer, &first_bytes, - peer.ip(), + peer, &self.config, &self.beobachten, ) @@ -608,7 +608,7 @@ impl RunningClientHandler { reader, writer, &handshake, - peer.ip(), + peer, &config, &self.beobachten, ) diff --git a/src/proxy/masking.rs b/src/proxy/masking.rs index cdb6cf9..d12cf41 100644 --- a/src/proxy/masking.rs +++ b/src/proxy/masking.rs @@ -1,7 +1,7 @@ //! Masking - forward unrecognized traffic to mask host use std::str; -use std::net::IpAddr; +use std::net::SocketAddr; use std::time::Duration; use tokio::net::TcpStream; #[cfg(unix)] @@ -11,6 +11,7 @@ use tokio::time::timeout; use tracing::debug; use crate::config::ProxyConfig; use crate::stats::beobachten::BeobachtenStore; +use crate::transport::proxy_protocol::{ProxyProtocolV1Builder, ProxyProtocolV2Builder}; const MASK_TIMEOUT: Duration = Duration::from_secs(5); /// Maximum duration for the entire masking relay. @@ -52,7 +53,7 @@ pub async fn handle_bad_client( reader: R, writer: W, initial_data: &[u8], - peer_ip: IpAddr, + peer: SocketAddr, config: &ProxyConfig, beobachten: &BeobachtenStore, ) @@ -63,7 +64,7 @@ where let client_type = detect_client_type(initial_data); if config.general.beobachten { let ttl = Duration::from_secs(config.general.beobachten_minutes.saturating_mul(60)); - beobachten.record(client_type, peer_ip, ttl); + beobachten.record(client_type, peer.ip(), ttl); } if !config.censorship.mask { @@ -119,7 +120,37 @@ where let connect_result = timeout(MASK_TIMEOUT, TcpStream::connect(&mask_addr)).await; match connect_result { Ok(Ok(stream)) => { - let (mask_read, mask_write) = stream.into_split(); + let proxy_header: Option> = match config.censorship.mask_proxy_protocol { + 0 => None, + version => { + let header = if let Ok(local_addr) = stream.local_addr() { + match version { + 2 => ProxyProtocolV2Builder::new().with_addrs(peer, local_addr).build(), + _ => match (peer, local_addr) { + (SocketAddr::V4(src), SocketAddr::V4(dst)) => + 
ProxyProtocolV1Builder::new().tcp4(src.into(), dst.into()).build(), + (SocketAddr::V6(src), SocketAddr::V6(dst)) => + ProxyProtocolV1Builder::new().tcp6(src.into(), dst.into()).build(), + _ => + ProxyProtocolV1Builder::new().build(), + }, + } + } else { + match version { + 2 => ProxyProtocolV2Builder::new().build(), + _ => ProxyProtocolV1Builder::new().build(), + } + }; + Some(header) + } + }; + + let (mask_read, mut mask_write) = stream.into_split(); + if let Some(header) = proxy_header { + if mask_write.write_all(&header).await.is_err() { + return; + } + } if timeout(MASK_RELAY_TIMEOUT, relay_to_mask(reader, writer, mask_read, mask_write, initial_data)).await.is_err() { debug!("Mask relay timed out"); } diff --git a/src/tls_front/fetcher.rs b/src/tls_front/fetcher.rs index 7ac4b42..561d4cc 100644 --- a/src/tls_front/fetcher.rs +++ b/src/tls_front/fetcher.rs @@ -19,6 +19,7 @@ use x509_parser::certificate::X509Certificate; use crate::crypto::SecureRandom; use crate::protocol::constants::{TLS_RECORD_APPLICATION, TLS_RECORD_HANDSHAKE}; +use crate::transport::proxy_protocol::{ProxyProtocolV1Builder, ProxyProtocolV2Builder}; use crate::tls_front::types::{ ParsedCertificateInfo, ParsedServerHello, @@ -366,6 +367,7 @@ async fn fetch_via_raw_tls( port: u16, sni: &str, connect_timeout: Duration, + proxy_protocol: u8, ) -> Result { let addr = format!("{host}:{port}"); let mut stream = timeout(connect_timeout, TcpStream::connect(addr)).await??; @@ -373,6 +375,13 @@ async fn fetch_via_raw_tls( let rng = SecureRandom::new(); let client_hello = build_client_hello(sni, &rng); timeout(connect_timeout, async { + if proxy_protocol > 0 { + let header = match proxy_protocol { + 2 => ProxyProtocolV2Builder::new().build(), + _ => ProxyProtocolV1Builder::new().build(), + }; + stream.write_all(&header).await?; + } stream.write_all(&client_hello).await?; stream.flush().await?; Ok::<(), std::io::Error>(()) @@ -424,9 +433,10 @@ async fn fetch_via_rustls( sni: &str, connect_timeout: 
Duration, upstream: Option>, + proxy_protocol: u8, ) -> Result { // rustls handshake path for certificate and basic negotiated metadata. - let stream = if let Some(manager) = upstream { + let mut stream = if let Some(manager) = upstream { // Resolve host to SocketAddr if let Ok(mut addrs) = tokio::net::lookup_host((host, port)).await { if let Some(addr) = addrs.find(|a| a.is_ipv4()) { @@ -447,6 +457,15 @@ async fn fetch_via_rustls( timeout(connect_timeout, TcpStream::connect((host, port))).await?? }; + if proxy_protocol > 0 { + let header = match proxy_protocol { + 2 => ProxyProtocolV2Builder::new().build(), + _ => ProxyProtocolV1Builder::new().build(), + }; + stream.write_all(&header).await?; + stream.flush().await?; + } + let config = build_client_config(); let connector = TlsConnector::from(config); @@ -527,8 +546,9 @@ pub async fn fetch_real_tls( sni: &str, connect_timeout: Duration, upstream: Option>, + proxy_protocol: u8, ) -> Result { - let raw_result = match fetch_via_raw_tls(host, port, sni, connect_timeout).await { + let raw_result = match fetch_via_raw_tls(host, port, sni, connect_timeout, proxy_protocol).await { Ok(res) => Some(res), Err(e) => { warn!(sni = %sni, error = %e, "Raw TLS fetch failed"); @@ -536,7 +556,7 @@ pub async fn fetch_real_tls( } }; - match fetch_via_rustls(host, port, sni, connect_timeout, upstream).await { + match fetch_via_rustls(host, port, sni, connect_timeout, upstream, proxy_protocol).await { Ok(rustls_result) => { if let Some(mut raw) = raw_result { raw.cert_info = rustls_result.cert_info; diff --git a/src/transport/proxy_protocol.rs b/src/transport/proxy_protocol.rs index 770be7e..96f4ffb 100644 --- a/src/transport/proxy_protocol.rs +++ b/src/transport/proxy_protocol.rs @@ -233,14 +233,12 @@ async fn parse_v2( } /// Builder for PROXY protocol v1 header -#[allow(dead_code)] pub struct ProxyProtocolV1Builder { family: &'static str, src_addr: Option, dst_addr: Option, } -#[allow(dead_code)] impl ProxyProtocolV1Builder { pub fn 
new() -> Self { Self { @@ -288,13 +286,17 @@ impl Default for ProxyProtocolV1Builder { } /// Builder for PROXY protocol v2 header -#[allow(dead_code)] pub struct ProxyProtocolV2Builder { src: Option, dst: Option, } -#[allow(dead_code)] +impl Default for ProxyProtocolV2Builder { + fn default() -> Self { + Self::new() + } +} + impl ProxyProtocolV2Builder { pub fn new() -> Self { Self { src: None, dst: None } From fb1f85559ccb3edc1b9679217e14abde57bfcf61 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Thu, 26 Feb 2026 14:57:28 +0300 Subject: [PATCH 66/98] Update load.rs --- src/config/load.rs | 48 ++++++++++++++++++++++++++++++++++++---------- 1 file changed, 38 insertions(+), 10 deletions(-) diff --git a/src/config/load.rs b/src/config/load.rs index be6759e..35099be 100644 --- a/src/config/load.rs +++ b/src/config/load.rs @@ -116,8 +116,27 @@ impl ProxyConfig { let base_dir = path.as_ref().parent().unwrap_or(Path::new(".")); let processed = preprocess_includes(&content, base_dir, 0)?; - let mut config: ProxyConfig = + let parsed_toml: toml::Value = toml::from_str(&processed).map_err(|e| ProxyError::Config(e.to_string()))?; + let general_table = parsed_toml + .get("general") + .and_then(|value| value.as_table()); + let update_every_is_explicit = general_table + .map(|table| table.contains_key("update_every")) + .unwrap_or(false); + let legacy_secret_is_explicit = general_table + .map(|table| table.contains_key("proxy_secret_auto_reload_secs")) + .unwrap_or(false); + let legacy_config_is_explicit = general_table + .map(|table| table.contains_key("proxy_config_auto_reload_secs")) + .unwrap_or(false); + + let mut config: ProxyConfig = + parsed_toml.try_into().map_err(|e| ProxyError::Config(e.to_string()))?; + + if !update_every_is_explicit && (legacy_secret_is_explicit || legacy_config_is_explicit) { + config.general.update_every = None; + } if let Some(update_every) = config.general.update_every { if update_every == 0 { @@ 
-437,15 +456,24 @@ mod tests { "#; let cfg: ProxyConfig = toml::from_str(toml).unwrap(); - assert_eq!(cfg.network.ipv6, None); - assert!(!cfg.network.stun_tcp_fallback); - assert_eq!(cfg.general.middle_proxy_warm_standby, 0); - assert_eq!(cfg.general.me_reconnect_max_concurrent_per_dc, 0); - assert_eq!(cfg.general.me_reconnect_fast_retry_count, 0); - assert_eq!(cfg.general.update_every, None); - assert_eq!(cfg.server.listen_addr_ipv4, None); - assert_eq!(cfg.server.listen_addr_ipv6, None); - assert!(cfg.access.users.is_empty()); + assert_eq!(cfg.network.ipv6, default_network_ipv6()); + assert_eq!(cfg.network.stun_tcp_fallback, default_stun_tcp_fallback()); + assert_eq!( + cfg.general.middle_proxy_warm_standby, + default_middle_proxy_warm_standby() + ); + assert_eq!( + cfg.general.me_reconnect_max_concurrent_per_dc, + default_me_reconnect_max_concurrent_per_dc() + ); + assert_eq!( + cfg.general.me_reconnect_fast_retry_count, + default_me_reconnect_fast_retry_count() + ); + assert_eq!(cfg.general.update_every, default_update_every()); + assert_eq!(cfg.server.listen_addr_ipv4, default_listen_addr_ipv4()); + assert_eq!(cfg.server.listen_addr_ipv6, default_listen_addr_ipv6_opt()); + assert_eq!(cfg.access.users, default_access_users()); } #[test] From d7182ae817674b32563a329640256b8b23765396 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Thu, 26 Feb 2026 15:07:04 +0300 Subject: [PATCH 67/98] Update defaults.rs --- src/config/defaults.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/config/defaults.rs b/src/config/defaults.rs index a2b2fc7..04755cb 100644 --- a/src/config/defaults.rs +++ b/src/config/defaults.rs @@ -7,7 +7,7 @@ const DEFAULT_NETWORK_IPV6: Option = Some(false); const DEFAULT_STUN_TCP_FALLBACK: bool = true; const DEFAULT_MIDDLE_PROXY_WARM_STANDBY: usize = 16; const DEFAULT_ME_RECONNECT_MAX_CONCURRENT_PER_DC: u32 = 8; -const DEFAULT_ME_RECONNECT_FAST_RETRY_COUNT: u32 = 11; +const 
DEFAULT_ME_RECONNECT_FAST_RETRY_COUNT: u32 = 16; const DEFAULT_LISTEN_ADDR_IPV6: &str = "::"; const DEFAULT_ACCESS_USER: &str = "default"; const DEFAULT_ACCESS_SECRET: &str = "00000000000000000000000000000000"; From e25b7f5ff8d99e527f9a9553b27fefdcfd258726 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Thu, 26 Feb 2026 15:10:21 +0300 Subject: [PATCH 68/98] STUN List --- src/config/defaults.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/config/defaults.rs b/src/config/defaults.rs index 04755cb..3fb8c3d 100644 --- a/src/config/defaults.rs +++ b/src/config/defaults.rs @@ -116,8 +116,19 @@ pub(crate) fn default_middle_proxy_nat_stun() -> Option { pub(crate) fn default_middle_proxy_nat_stun_servers() -> Vec { vec![ + "stun.l.google.com:5349".to_string(), + "stun1.l.google.com:3478".to_string(), + "stun.gmx.net:3478".to_string(), + "stun.l.google.com:19302".to_string(), + "stun.1und1.de:3478".to_string(), "stun1.l.google.com:19302".to_string(), "stun2.l.google.com:19302".to_string(), + "stun3.l.google.com:19302".to_string(), + "stun4.l.google.com:19302".to_string(), + "stun.services.mozilla.com:3478".to_string(), + "stun.stunprotocol.org:3478".to_string(), + "stun.nextcloud.com:3478".to_string(), + "stun.voip.eutelia.it:3478".to_string(), ] } From 7782336264368cbcab383e03a286cc689bbf445b Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Thu, 26 Feb 2026 17:56:22 +0300 Subject: [PATCH 69/98] ME Probe parallelized --- src/config/defaults.rs | 4 + src/config/hot_reload.rs | 3 + src/config/load.rs | 26 ++++ src/config/types.rs | 5 + src/main.rs | 3 + src/network/probe.rs | 171 +++++++++++++++++++++++-- src/transport/middle_proxy/pool.rs | 102 ++++++++++----- src/transport/middle_proxy/pool_nat.rs | 170 +++++++++++++++++++----- 8 files changed, 411 insertions(+), 73 deletions(-) diff --git a/src/config/defaults.rs b/src/config/defaults.rs index 3fb8c3d..4f0a53d 100644 --- 
a/src/config/defaults.rs +++ b/src/config/defaults.rs @@ -132,6 +132,10 @@ pub(crate) fn default_middle_proxy_nat_stun_servers() -> Vec { ] } +pub(crate) fn default_stun_nat_probe_concurrency() -> usize { + 8 +} + pub(crate) fn default_middle_proxy_warm_standby() -> usize { DEFAULT_MIDDLE_PROXY_WARM_STANDBY } diff --git a/src/config/hot_reload.rs b/src/config/hot_reload.rs index 7f121f6..c949104 100644 --- a/src/config/hot_reload.rs +++ b/src/config/hot_reload.rs @@ -96,6 +96,9 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig) { if old.general.use_middle_proxy != new.general.use_middle_proxy { warn!("config reload: use_middle_proxy changed; restart required"); } + if old.general.stun_nat_probe_concurrency != new.general.stun_nat_probe_concurrency { + warn!("config reload: general.stun_nat_probe_concurrency changed; restart required"); + } } /// Resolve the public host for link generation — mirrors the logic in main.rs. diff --git a/src/config/load.rs b/src/config/load.rs index 35099be..31a8b5d 100644 --- a/src/config/load.rs +++ b/src/config/load.rs @@ -166,6 +166,12 @@ impl ProxyConfig { } } + if config.general.stun_nat_probe_concurrency == 0 { + return Err(ProxyError::Config( + "general.stun_nat_probe_concurrency must be > 0".to_string(), + )); + } + if config.general.me_reinit_every_secs == 0 { return Err(ProxyError::Config( "general.me_reinit_every_secs must be > 0".to_string(), @@ -607,6 +613,26 @@ mod tests { let _ = std::fs::remove_file(path); } + #[test] + fn stun_nat_probe_concurrency_zero_is_rejected() { + let toml = r#" + [general] + stun_nat_probe_concurrency = 0 + + [censorship] + tls_domain = "example.com" + + [access.users] + user = "00000000000000000000000000000000" + "#; + let dir = std::env::temp_dir(); + let path = dir.join("telemt_stun_nat_probe_concurrency_zero_test.toml"); + std::fs::write(&path, toml).unwrap(); + let err = ProxyConfig::load(&path).unwrap_err().to_string(); + 
assert!(err.contains("general.stun_nat_probe_concurrency must be > 0")); + let _ = std::fs::remove_file(path); + } + #[test] fn me_reinit_every_default_is_set() { let toml = r#" diff --git a/src/config/types.rs b/src/config/types.rs index e827088..58a3a3e 100644 --- a/src/config/types.rs +++ b/src/config/types.rs @@ -168,6 +168,10 @@ pub struct GeneralConfig { #[serde(default = "default_middle_proxy_nat_stun_servers")] pub middle_proxy_nat_stun_servers: Vec, + /// Maximum number of concurrent STUN probes during NAT detection. + #[serde(default = "default_stun_nat_probe_concurrency")] + pub stun_nat_probe_concurrency: usize, + /// Desired size of active Middle-Proxy writer pool. #[serde(default = "default_pool_size")] pub middle_proxy_pool_size: usize, @@ -378,6 +382,7 @@ impl Default for GeneralConfig { middle_proxy_nat_probe: default_true(), middle_proxy_nat_stun: default_middle_proxy_nat_stun(), middle_proxy_nat_stun_servers: default_middle_proxy_nat_stun_servers(), + stun_nat_probe_concurrency: default_stun_nat_probe_concurrency(), middle_proxy_pool_size: default_pool_size(), middle_proxy_warm_standby: default_middle_proxy_warm_standby(), me_keepalive_enabled: default_true(), diff --git a/src/main.rs b/src/main.rs index e4f7a79..dd4feb8 100644 --- a/src/main.rs +++ b/src/main.rs @@ -257,7 +257,9 @@ async fn main() -> std::result::Result<(), Box> { let probe = run_probe( &config.network, config.general.middle_proxy_nat_stun.clone(), + config.general.middle_proxy_nat_stun_servers.clone(), config.general.middle_proxy_nat_probe, + config.general.stun_nat_probe_concurrency, ) .await?; let decision = decide_network_capabilities(&config.network, &probe); @@ -360,6 +362,7 @@ async fn main() -> std::result::Result<(), Box> { config.general.middle_proxy_nat_probe, config.general.middle_proxy_nat_stun.clone(), config.general.middle_proxy_nat_stun_servers.clone(), + config.general.stun_nat_probe_concurrency, probe.detected_ipv6, config.timeouts.me_one_retry, 
config.timeouts.me_one_timeout_ms, diff --git a/src/network/probe.rs b/src/network/probe.rs index c52b340..378faa5 100644 --- a/src/network/probe.rs +++ b/src/network/probe.rs @@ -1,12 +1,16 @@ #![allow(dead_code)] +use std::collections::HashMap; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, UdpSocket}; +use std::time::Duration; -use tracing::{info, warn}; +use tokio::task::JoinSet; +use tokio::time::timeout; +use tracing::{debug, info, warn}; use crate::config::NetworkConfig; use crate::error::Result; -use crate::network::stun::{stun_probe_dual, DualStunResult, IpFamily}; +use crate::network::stun::{stun_probe_dual, DualStunResult, IpFamily, StunProbeResult}; #[derive(Debug, Clone, Default)] pub struct NetworkProbe { @@ -49,7 +53,15 @@ impl NetworkDecision { } } -pub async fn run_probe(config: &NetworkConfig, stun_addr: Option, nat_probe: bool) -> Result { +const STUN_BATCH_TIMEOUT: Duration = Duration::from_secs(5); + +pub async fn run_probe( + config: &NetworkConfig, + stun_addr: Option, + stun_servers: Vec, + nat_probe: bool, + stun_nat_probe_concurrency: usize, +) -> Result { let mut probe = NetworkProbe::default(); probe.detected_ipv4 = detect_local_ip_v4(); @@ -58,21 +70,30 @@ pub async fn run_probe(config: &NetworkConfig, stun_addr: Option, nat_pr probe.ipv4_is_bogon = probe.detected_ipv4.map(is_bogon_v4).unwrap_or(false); probe.ipv6_is_bogon = probe.detected_ipv6.map(is_bogon_v6).unwrap_or(false); - let stun_server = stun_addr.unwrap_or_else(|| "stun.l.google.com:19302".to_string()); let stun_res = if nat_probe { - match stun_probe_dual(&stun_server).await { - Ok(res) => res, - Err(e) => { - warn!(error = %e, "STUN probe failed, continuing without reflection"); - DualStunResult::default() - } - } + let servers = collect_stun_servers(config, stun_addr, stun_servers); + probe_stun_servers_parallel( + &servers, + stun_nat_probe_concurrency.max(1), + ) + .await } else { DualStunResult::default() }; probe.reflected_ipv4 = stun_res.v4.map(|r| 
r.reflected_addr); probe.reflected_ipv6 = stun_res.v6.map(|r| r.reflected_addr); + // If STUN is blocked but IPv4 is private, try HTTP public-IP fallback. + if nat_probe + && probe.reflected_ipv4.is_none() + && probe.detected_ipv4.map(is_bogon_v4).unwrap_or(false) + { + if let Some(public_ip) = detect_public_ipv4_http(&config.http_ip_detect_urls).await { + probe.reflected_ipv4 = Some(SocketAddr::new(IpAddr::V4(public_ip), 0)); + info!(public_ip = %public_ip, "STUN unavailable, using HTTP public IPv4 fallback"); + } + } + probe.ipv4_nat_detected = match (probe.detected_ipv4, probe.reflected_ipv4) { (Some(det), Some(reflected)) => det != reflected.ip(), _ => false, @@ -94,6 +115,134 @@ pub async fn run_probe(config: &NetworkConfig, stun_addr: Option, nat_pr Ok(probe) } +async fn detect_public_ipv4_http(urls: &[String]) -> Option { + let client = reqwest::Client::builder() + .timeout(Duration::from_secs(3)) + .build() + .ok()?; + + for url in urls { + let response = match client.get(url).send().await { + Ok(response) => response, + Err(_) => continue, + }; + + let body = match response.text().await { + Ok(body) => body, + Err(_) => continue, + }; + + let Ok(ip) = body.trim().parse::() else { + continue; + }; + if !is_bogon_v4(ip) { + return Some(ip); + } + } + + None +} + +fn collect_stun_servers( + config: &NetworkConfig, + stun_addr: Option, + stun_servers: Vec, +) -> Vec { + let mut out = Vec::new(); + if !stun_servers.is_empty() { + for s in stun_servers { + if !s.is_empty() && !out.contains(&s) { + out.push(s); + } + } + } else if let Some(s) = stun_addr + && !s.is_empty() + { + out.push(s); + } + + if out.is_empty() { + for s in &config.stun_servers { + if !s.is_empty() && !out.contains(s) { + out.push(s.clone()); + } + } + } + + if out.is_empty() { + out.push("stun.l.google.com:19302".to_string()); + } + + out +} + +async fn probe_stun_servers_parallel( + servers: &[String], + concurrency: usize, +) -> DualStunResult { + let mut join_set = JoinSet::new(); + let 
mut next_idx = 0usize; + let mut best_v4_by_ip: HashMap = HashMap::new(); + let mut best_v6_by_ip: HashMap = HashMap::new(); + + while next_idx < servers.len() || !join_set.is_empty() { + while next_idx < servers.len() && join_set.len() < concurrency { + let stun_addr = servers[next_idx].clone(); + next_idx += 1; + join_set.spawn(async move { + let res = timeout(STUN_BATCH_TIMEOUT, stun_probe_dual(&stun_addr)).await; + (stun_addr, res) + }); + } + + let Some(task) = join_set.join_next().await else { + break; + }; + + match task { + Ok((stun_addr, Ok(Ok(result)))) => { + if let Some(v4) = result.v4 { + let entry = best_v4_by_ip.entry(v4.reflected_addr.ip()).or_insert((0, v4)); + entry.0 += 1; + } + if let Some(v6) = result.v6 { + let entry = best_v6_by_ip.entry(v6.reflected_addr.ip()).or_insert((0, v6)); + entry.0 += 1; + } + if result.v4.is_some() || result.v6.is_some() { + debug!(stun = %stun_addr, "STUN server responded within probe timeout"); + } + } + Ok((stun_addr, Ok(Err(e)))) => { + debug!(error = %e, stun = %stun_addr, "STUN probe failed"); + } + Ok((stun_addr, Err(_))) => { + debug!(stun = %stun_addr, "STUN probe timeout"); + } + Err(e) => { + debug!(error = %e, "STUN probe task join failed"); + } + } + } + + let mut out = DualStunResult::default(); + if let Some((_, best)) = best_v4_by_ip + .into_values() + .max_by_key(|(count, _)| *count) + { + info!("STUN-Quorum reached, IP: {}", best.reflected_addr.ip()); + out.v4 = Some(best); + } + if let Some((_, best)) = best_v6_by_ip + .into_values() + .max_by_key(|(count, _)| *count) + { + info!("STUN-Quorum reached, IP: {}", best.reflected_addr.ip()); + out.v6 = Some(best); + } + out +} + pub fn decide_network_capabilities(config: &NetworkConfig, probe: &NetworkProbe) -> NetworkDecision { let ipv4_dc = config.ipv4 && probe.detected_ipv4.is_some(); let ipv6_dc = config.ipv6.unwrap_or(probe.detected_ipv6.is_some()) && probe.detected_ipv6.is_some(); diff --git a/src/transport/middle_proxy/pool.rs 
b/src/transport/middle_proxy/pool.rs index e5aebe4..c95457b 100644 --- a/src/transport/middle_proxy/pool.rs +++ b/src/transport/middle_proxy/pool.rs @@ -50,6 +50,8 @@ pub struct MePool { pub(super) nat_probe: bool, pub(super) nat_stun: Option, pub(super) nat_stun_servers: Vec, + pub(super) nat_stun_live_servers: Arc>>, + pub(super) nat_probe_concurrency: usize, pub(super) detected_ipv6: Option, pub(super) nat_probe_attempts: std::sync::atomic::AtomicU8, pub(super) nat_probe_disabled: std::sync::atomic::AtomicBool, @@ -120,6 +122,7 @@ impl MePool { nat_probe: bool, nat_stun: Option, nat_stun_servers: Vec, + nat_probe_concurrency: usize, detected_ipv6: Option, me_one_retry: u8, me_one_timeout_ms: u64, @@ -162,6 +165,8 @@ impl MePool { nat_probe, nat_stun, nat_stun_servers, + nat_stun_live_servers: Arc::new(RwLock::new(Vec::new())), + nat_probe_concurrency: nat_probe_concurrency.max(1), detected_ipv6, nat_probe_attempts: std::sync::atomic::AtomicU8::new(0), nat_probe_disabled: std::sync::atomic::AtomicBool::new(false), @@ -241,6 +246,9 @@ impl MePool { pub fn reset_stun_state(&self) { self.nat_probe_attempts.store(0, Ordering::Relaxed); self.nat_probe_disabled.store(false, Ordering::Relaxed); + if let Ok(mut live) = self.nat_stun_live_servers.try_write() { + live.clear(); + } } pub fn translate_our_addr(&self, addr: SocketAddr) -> SocketAddr { @@ -896,10 +904,25 @@ impl MePool { for family in family_order { let map = self.proxy_map_for_family(family).await; - let dc_addrs: Vec<(i32, Vec<(IpAddr, u16)>)> = map - .iter() - .map(|(dc, addrs)| (*dc, addrs.clone())) + let mut grouped_dc_addrs: HashMap> = HashMap::new(); + for (dc, addrs) in map { + if addrs.is_empty() { + continue; + } + grouped_dc_addrs + .entry(dc.abs()) + .or_default() + .extend(addrs); + } + let mut dc_addrs: Vec<(i32, Vec<(IpAddr, u16)>)> = grouped_dc_addrs + .into_iter() + .map(|(dc, mut addrs)| { + addrs.sort_unstable(); + addrs.dedup(); + (dc, addrs) + }) .collect(); + 
dc_addrs.sort_unstable_by_key(|(dc, _)| *dc); // Ensure at least one connection per DC; run DCs in parallel. let mut join = tokio::task::JoinSet::new(); @@ -923,38 +946,49 @@ impl MePool { return Err(ProxyError::Proxy("Too many ME DC init failures, falling back to direct".into())); } - // Additional connections up to pool_size total (round-robin across DCs), staggered to de-phase lifecycles. - if self.me_warmup_stagger_enabled { - for (dc, addrs) in dc_addrs.iter() { - for (ip, port) in addrs { - if self.connection_count() >= pool_size { - break; + // Warm reserve writers asynchronously so startup does not block after first working pool is ready. + let pool = Arc::clone(self); + let rng_clone = Arc::clone(rng); + let dc_addrs_bg = dc_addrs.clone(); + tokio::spawn(async move { + if pool.me_warmup_stagger_enabled { + for (dc, addrs) in dc_addrs_bg.iter() { + for (ip, port) in addrs { + if pool.connection_count() >= pool_size { + break; + } + let addr = SocketAddr::new(*ip, *port); + let jitter = rand::rng() + .random_range(0..=pool.me_warmup_step_jitter.as_millis() as u64); + let delay_ms = pool.me_warmup_step_delay.as_millis() as u64 + jitter; + tokio::time::sleep(Duration::from_millis(delay_ms)).await; + if let Err(e) = pool.connect_one(addr, rng_clone.as_ref()).await { + debug!(%addr, dc = %dc, error = %e, "Extra ME connect failed (staggered)"); + } + } } - let addr = SocketAddr::new(*ip, *port); - let jitter = rand::rng().random_range(0..=self.me_warmup_step_jitter.as_millis() as u64); - let delay_ms = self.me_warmup_step_delay.as_millis() as u64 + jitter; - tokio::time::sleep(Duration::from_millis(delay_ms)).await; - if let Err(e) = self.connect_one(addr, rng.as_ref()).await { - debug!(%addr, dc = %dc, error = %e, "Extra ME connect failed (staggered)"); + } else { + for (dc, addrs) in dc_addrs_bg.iter() { + for (ip, port) in addrs { + if pool.connection_count() >= pool_size { + break; + } + let addr = SocketAddr::new(*ip, *port); + if let Err(e) = 
pool.connect_one(addr, rng_clone.as_ref()).await { + debug!(%addr, dc = %dc, error = %e, "Extra ME connect failed"); + } + } + if pool.connection_count() >= pool_size { + break; + } } } - } - } else { - for (dc, addrs) in dc_addrs.iter() { - for (ip, port) in addrs { - if self.connection_count() >= pool_size { - break; - } - let addr = SocketAddr::new(*ip, *port); - if let Err(e) = self.connect_one(addr, rng.as_ref()).await { - debug!(%addr, dc = %dc, error = %e, "Extra ME connect failed"); - } - } - if self.connection_count() >= pool_size { - break; - } - } - } + debug!( + target_pool_size = pool_size, + current_pool_size = pool.connection_count(), + "Background ME reserve warmup finished" + ); + }); if !self.decision.effective_multipath && self.connection_count() > 0 { break; @@ -964,6 +998,10 @@ impl MePool { if self.writers.read().await.is_empty() { return Err(ProxyError::Proxy("No ME connections".into())); } + info!( + active_writers = self.connection_count(), + "ME primary pool ready; reserve warmup continues in background" + ); Ok(()) } diff --git a/src/transport/middle_proxy/pool_nat.rs b/src/transport/middle_proxy/pool_nat.rs index 9936707..37c0d5b 100644 --- a/src/transport/middle_proxy/pool_nat.rs +++ b/src/transport/middle_proxy/pool_nat.rs @@ -1,7 +1,10 @@ +use std::collections::HashMap; use std::net::{IpAddr, Ipv4Addr}; use std::time::Duration; -use tracing::{info, warn}; +use tokio::task::JoinSet; +use tokio::time::timeout; +use tracing::{debug, info, warn}; use crate::error::{ProxyError, Result}; use crate::network::probe::is_bogon; @@ -10,6 +13,8 @@ use crate::network::stun::{stun_probe_dual, IpFamily, StunProbeResult}; use super::MePool; use std::time::Instant; +const STUN_BATCH_TIMEOUT: Duration = Duration::from_secs(5); + #[allow(dead_code)] pub async fn stun_probe(stun_addr: Option) -> Result { let stun_addr = stun_addr.unwrap_or_else(|| "stun.l.google.com:19302".to_string()); @@ -22,6 +27,99 @@ pub async fn detect_public_ip() -> Option { } 
impl MePool { + fn configured_stun_servers(&self) -> Vec { + if !self.nat_stun_servers.is_empty() { + return self.nat_stun_servers.clone(); + } + if let Some(s) = &self.nat_stun { + return vec![s.clone()]; + } + vec!["stun.l.google.com:19302".to_string()] + } + + async fn probe_stun_batch_for_family( + &self, + servers: &[String], + family: IpFamily, + attempt: u8, + ) -> (Vec, Option) { + let mut join_set = JoinSet::new(); + let mut next_idx = 0usize; + let mut live_servers = Vec::new(); + let mut best_by_ip: HashMap = HashMap::new(); + let concurrency = self.nat_probe_concurrency.max(1); + + while next_idx < servers.len() || !join_set.is_empty() { + while next_idx < servers.len() && join_set.len() < concurrency { + let stun_addr = servers[next_idx].clone(); + next_idx += 1; + join_set.spawn(async move { + let res = timeout(STUN_BATCH_TIMEOUT, stun_probe_dual(&stun_addr)).await; + (stun_addr, res) + }); + } + + let Some(task) = join_set.join_next().await else { + break; + }; + + match task { + Ok((stun_addr, Ok(Ok(res)))) => { + let picked: Option = match family { + IpFamily::V4 => res.v4, + IpFamily::V6 => res.v6, + }; + + if let Some(result) = picked { + live_servers.push(stun_addr.clone()); + let entry = best_by_ip + .entry(result.reflected_addr.ip()) + .or_insert((0, result.reflected_addr)); + entry.0 += 1; + debug!( + local = %result.local_addr, + reflected = %result.reflected_addr, + family = ?family, + stun = %stun_addr, + "NAT probe: reflected address" + ); + } + } + Ok((stun_addr, Ok(Err(e)))) => { + debug!( + error = %e, + stun = %stun_addr, + attempt = attempt + 1, + "NAT probe failed, trying next server" + ); + } + Ok((stun_addr, Err(_))) => { + debug!( + stun = %stun_addr, + attempt = attempt + 1, + "NAT probe timeout, trying next server" + ); + } + Err(e) => { + debug!( + error = %e, + attempt = attempt + 1, + "NAT probe task join failed" + ); + } + } + } + + live_servers.sort_unstable(); + live_servers.dedup(); + let best_reflected = best_by_ip + 
.into_values() + .max_by_key(|(count, _)| *count) + .map(|(_, addr)| addr); + + (live_servers, best_reflected) + } + pub(super) fn translate_ip_for_nat(&self, ip: IpAddr) -> IpAddr { let nat_ip = self .nat_ip_cfg @@ -128,39 +226,51 @@ impl MePool { } let attempt = self.nat_probe_attempts.fetch_add(1, std::sync::atomic::Ordering::Relaxed); - let servers = if !self.nat_stun_servers.is_empty() { - self.nat_stun_servers.clone() - } else if let Some(s) = &self.nat_stun { - vec![s.clone()] + let configured_servers = self.configured_stun_servers(); + let live_snapshot = self.nat_stun_live_servers.read().await.clone(); + let primary_servers = if live_snapshot.is_empty() { + configured_servers.clone() } else { - vec!["stun.l.google.com:19302".to_string()] + live_snapshot }; - for stun_addr in servers { - match stun_probe_dual(&stun_addr).await { - Ok(res) => { - let picked: Option = match family { - IpFamily::V4 => res.v4, - IpFamily::V6 => res.v6, - }; - if let Some(result) = picked { - info!(local = %result.local_addr, reflected = %result.reflected_addr, family = ?family, stun = %stun_addr, "NAT probe: reflected address"); - self.nat_probe_attempts.store(0, std::sync::atomic::Ordering::Relaxed); - if let Ok(mut cache) = self.nat_reflection_cache.try_lock() { - let slot = match family { - IpFamily::V4 => &mut cache.v4, - IpFamily::V6 => &mut cache.v6, - }; - *slot = Some((Instant::now(), result.reflected_addr)); - } - return Some(result.reflected_addr); - } - } - Err(e) => { - warn!(error = %e, stun = %stun_addr, attempt = attempt + 1, "NAT probe failed, trying next server"); - } - } + let (mut live_servers, mut selected_reflected) = self + .probe_stun_batch_for_family(&primary_servers, family, attempt) + .await; + + if selected_reflected.is_none() && !configured_servers.is_empty() && primary_servers != configured_servers { + let (rediscovered_live, rediscovered_reflected) = self + .probe_stun_batch_for_family(&configured_servers, family, attempt) + .await; + live_servers 
= rediscovered_live; + selected_reflected = rediscovered_reflected; } + + let live_server_count = live_servers.len(); + if !live_servers.is_empty() { + *self.nat_stun_live_servers.write().await = live_servers; + } else { + self.nat_stun_live_servers.write().await.clear(); + } + + if let Some(reflected_addr) = selected_reflected { + self.nat_probe_attempts.store(0, std::sync::atomic::Ordering::Relaxed); + info!( + family = ?family, + live_servers = live_server_count, + "STUN-Quorum reached, IP: {}", + reflected_addr.ip() + ); + if let Ok(mut cache) = self.nat_reflection_cache.try_lock() { + let slot = match family { + IpFamily::V4 => &mut cache.v4, + IpFamily::V6 => &mut cache.v6, + }; + *slot = Some((Instant::now(), reflected_addr)); + } + return Some(reflected_addr); + } + let backoff = Duration::from_secs(60 * 2u64.pow((attempt as u32).min(6))); *self.stun_backoff_until.write().await = Some(Instant::now() + backoff); None From 9d2ff25bf577cd328dca4686226b39d2a620aeba Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Thu, 26 Feb 2026 18:18:24 +0300 Subject: [PATCH 70/98] Unified STUN + ME Primary parallelized - Unified STUN server source-of-truth - parallelize per-DC primary ME init for multi-endpoint DCs --- src/config/defaults.rs | 18 ++-------- src/config/load.rs | 34 ++++++++++++++++++ src/config/types.rs | 6 ++-- src/main.rs | 6 ++-- src/network/probe.rs | 50 ++++++++------------------ src/transport/middle_proxy/pool.rs | 29 +++++++++++++++ src/transport/middle_proxy/pool_nat.rs | 16 +++++++-- 7 files changed, 99 insertions(+), 60 deletions(-) diff --git a/src/config/defaults.rs b/src/config/defaults.rs index 4f0a53d..d82f8ed 100644 --- a/src/config/defaults.rs +++ b/src/config/defaults.rs @@ -111,25 +111,11 @@ pub(crate) fn default_proxy_secret_path() -> Option { } pub(crate) fn default_middle_proxy_nat_stun() -> Option { - Some("stun.l.google.com:19302".to_string()) + None } pub(crate) fn 
default_middle_proxy_nat_stun_servers() -> Vec { - vec![ - "stun.l.google.com:5349".to_string(), - "stun1.l.google.com:3478".to_string(), - "stun.gmx.net:3478".to_string(), - "stun.l.google.com:19302".to_string(), - "stun.1und1.de:3478".to_string(), - "stun1.l.google.com:19302".to_string(), - "stun2.l.google.com:19302".to_string(), - "stun3.l.google.com:19302".to_string(), - "stun4.l.google.com:19302".to_string(), - "stun.services.mozilla.com:3478".to_string(), - "stun.stunprotocol.org:3478".to_string(), - "stun.nextcloud.com:3478".to_string(), - "stun.voip.eutelia.it:3478".to_string(), - ] + Vec::new() } pub(crate) fn default_stun_nat_probe_concurrency() -> usize { diff --git a/src/config/load.rs b/src/config/load.rs index 31a8b5d..0c1e629 100644 --- a/src/config/load.rs +++ b/src/config/load.rs @@ -65,6 +65,16 @@ fn validate_network_cfg(net: &mut NetworkConfig) -> Result<()> { Ok(()) } +fn push_unique_nonempty(target: &mut Vec, value: String) { + let trimmed = value.trim(); + if trimmed.is_empty() { + return; + } + if !target.iter().any(|existing| existing == trimmed) { + target.push(trimmed.to_string()); + } +} + // ============= Main Config ============= #[derive(Debug, Clone, Serialize, Deserialize, Default)] @@ -138,6 +148,30 @@ impl ProxyConfig { config.general.update_every = None; } + let legacy_nat_stun = config.general.middle_proxy_nat_stun.take(); + let legacy_nat_stun_servers = std::mem::take(&mut config.general.middle_proxy_nat_stun_servers); + let legacy_nat_stun_used = legacy_nat_stun.is_some() || !legacy_nat_stun_servers.is_empty(); + + let mut unified_stun_servers = Vec::new(); + for stun in std::mem::take(&mut config.network.stun_servers) { + push_unique_nonempty(&mut unified_stun_servers, stun); + } + if let Some(stun) = legacy_nat_stun { + push_unique_nonempty(&mut unified_stun_servers, stun); + } + for stun in legacy_nat_stun_servers { + push_unique_nonempty(&mut unified_stun_servers, stun); + } + + if unified_stun_servers.is_empty() { + 
unified_stun_servers = default_stun_servers(); + } + config.network.stun_servers = unified_stun_servers; + + if legacy_nat_stun_used { + warn!("general.middle_proxy_nat_stun and general.middle_proxy_nat_stun_servers are deprecated; use network.stun_servers"); + } + if let Some(update_every) = config.general.update_every { if update_every == 0 { return Err(ProxyError::Config( diff --git a/src/config/types.rs b/src/config/types.rs index 58a3a3e..68086be 100644 --- a/src/config/types.rs +++ b/src/config/types.rs @@ -160,11 +160,13 @@ pub struct GeneralConfig { #[serde(default = "default_true")] pub middle_proxy_nat_probe: bool, - /// Optional STUN server address (host:port) for NAT probing. + /// Deprecated legacy single STUN server for NAT probing. + /// Use `network.stun_servers` instead. #[serde(default = "default_middle_proxy_nat_stun")] pub middle_proxy_nat_stun: Option, - /// Optional list of STUN servers for NAT probing fallback. + /// Deprecated legacy STUN list for NAT probing fallback. + /// Use `network.stun_servers` instead. 
#[serde(default = "default_middle_proxy_nat_stun_servers")] pub middle_proxy_nat_stun_servers: Vec, diff --git a/src/main.rs b/src/main.rs index dd4feb8..db46a6d 100644 --- a/src/main.rs +++ b/src/main.rs @@ -256,8 +256,6 @@ async fn main() -> std::result::Result<(), Box> { let probe = run_probe( &config.network, - config.general.middle_proxy_nat_stun.clone(), - config.general.middle_proxy_nat_stun_servers.clone(), config.general.middle_proxy_nat_probe, config.general.stun_nat_probe_concurrency, ) @@ -360,8 +358,8 @@ async fn main() -> std::result::Result<(), Box> { proxy_secret, config.general.middle_proxy_nat_ip, config.general.middle_proxy_nat_probe, - config.general.middle_proxy_nat_stun.clone(), - config.general.middle_proxy_nat_stun_servers.clone(), + None, + config.network.stun_servers.clone(), config.general.stun_nat_probe_concurrency, probe.detected_ipv6, config.timeouts.me_one_retry, diff --git a/src/network/probe.rs b/src/network/probe.rs index 378faa5..6e84682 100644 --- a/src/network/probe.rs +++ b/src/network/probe.rs @@ -57,8 +57,6 @@ const STUN_BATCH_TIMEOUT: Duration = Duration::from_secs(5); pub async fn run_probe( config: &NetworkConfig, - stun_addr: Option, - stun_servers: Vec, nat_probe: bool, stun_nat_probe_concurrency: usize, ) -> Result { @@ -71,12 +69,17 @@ pub async fn run_probe( probe.ipv6_is_bogon = probe.detected_ipv6.map(is_bogon_v6).unwrap_or(false); let stun_res = if nat_probe { - let servers = collect_stun_servers(config, stun_addr, stun_servers); - probe_stun_servers_parallel( - &servers, - stun_nat_probe_concurrency.max(1), - ) - .await + let servers = collect_stun_servers(config); + if servers.is_empty() { + warn!("STUN probe is enabled but network.stun_servers is empty"); + DualStunResult::default() + } else { + probe_stun_servers_parallel( + &servers, + stun_nat_probe_concurrency.max(1), + ) + .await + } } else { DualStunResult::default() }; @@ -143,36 +146,13 @@ async fn detect_public_ipv4_http(urls: &[String]) -> Option { 
None } -fn collect_stun_servers( - config: &NetworkConfig, - stun_addr: Option, - stun_servers: Vec, -) -> Vec { +fn collect_stun_servers(config: &NetworkConfig) -> Vec { let mut out = Vec::new(); - if !stun_servers.is_empty() { - for s in stun_servers { - if !s.is_empty() && !out.contains(&s) { - out.push(s); - } - } - } else if let Some(s) = stun_addr - && !s.is_empty() - { - out.push(s); - } - - if out.is_empty() { - for s in &config.stun_servers { - if !s.is_empty() && !out.contains(s) { - out.push(s.clone()); - } + for s in &config.stun_servers { + if !s.is_empty() && !out.contains(s) { + out.push(s.clone()); } } - - if out.is_empty() { - out.push("stun.l.google.com:19302".to_string()); - } - out } diff --git a/src/transport/middle_proxy/pool.rs b/src/transport/middle_proxy/pool.rs index c95457b..21c2b87 100644 --- a/src/transport/middle_proxy/pool.rs +++ b/src/transport/middle_proxy/pool.rs @@ -1199,6 +1199,35 @@ impl MePool { return false; } addrs.shuffle(&mut rand::rng()); + if addrs.len() > 1 { + let mut join = tokio::task::JoinSet::new(); + for (ip, port) in addrs { + let addr = SocketAddr::new(ip, port); + let pool = Arc::clone(&self); + let rng_clone = Arc::clone(&rng); + join.spawn(async move { (addr, pool.connect_one(addr, rng_clone.as_ref()).await) }); + } + + while let Some(res) = join.join_next().await { + match res { + Ok((addr, Ok(()))) => { + info!(%addr, dc = %dc, "ME connected"); + join.abort_all(); + while join.join_next().await.is_some() {} + return true; + } + Ok((addr, Err(e))) => { + warn!(%addr, dc = %dc, error = %e, "ME connect failed, trying next"); + } + Err(e) => { + warn!(dc = %dc, error = %e, "ME connect task failed"); + } + } + } + warn!(dc = %dc, "All ME servers for DC failed at init"); + return false; + } + for (ip, port) in addrs { let addr = SocketAddr::new(ip, port); match self.connect_one(addr, rng.as_ref()).await { diff --git a/src/transport/middle_proxy/pool_nat.rs b/src/transport/middle_proxy/pool_nat.rs index 
37c0d5b..7141236 100644 --- a/src/transport/middle_proxy/pool_nat.rs +++ b/src/transport/middle_proxy/pool_nat.rs @@ -17,7 +17,15 @@ const STUN_BATCH_TIMEOUT: Duration = Duration::from_secs(5); #[allow(dead_code)] pub async fn stun_probe(stun_addr: Option) -> Result { - let stun_addr = stun_addr.unwrap_or_else(|| "stun.l.google.com:19302".to_string()); + let stun_addr = stun_addr.unwrap_or_else(|| { + crate::config::defaults::default_stun_servers() + .into_iter() + .next() + .unwrap_or_default() + }); + if stun_addr.is_empty() { + return Err(ProxyError::Proxy("STUN server is not configured".to_string())); + } stun_probe_dual(&stun_addr).await } @@ -31,10 +39,12 @@ impl MePool { if !self.nat_stun_servers.is_empty() { return self.nat_stun_servers.clone(); } - if let Some(s) = &self.nat_stun { + if let Some(s) = &self.nat_stun + && !s.trim().is_empty() + { return vec![s.clone()]; } - vec!["stun.l.google.com:19302".to_string()] + Vec::new() } async fn probe_stun_batch_for_family( From 1f255d0aa493460d7a1d68041bf40fa2ae3a7316 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Thu, 26 Feb 2026 18:41:11 +0300 Subject: [PATCH 71/98] ME Probe + STUN Legacy --- src/config/load.rs | 44 +++++++++++++++++++----------- src/transport/middle_proxy/pool.rs | 22 ++++++++++----- 2 files changed, 43 insertions(+), 23 deletions(-) diff --git a/src/config/load.rs b/src/config/load.rs index 0c1e629..4e0e104 100644 --- a/src/config/load.rs +++ b/src/config/load.rs @@ -131,6 +131,9 @@ impl ProxyConfig { let general_table = parsed_toml .get("general") .and_then(|value| value.as_table()); + let network_table = parsed_toml + .get("network") + .and_then(|value| value.as_table()); let update_every_is_explicit = general_table .map(|table| table.contains_key("update_every")) .unwrap_or(false); @@ -140,6 +143,9 @@ impl ProxyConfig { let legacy_config_is_explicit = general_table .map(|table| table.contains_key("proxy_config_auto_reload_secs")) 
.unwrap_or(false); + let stun_servers_is_explicit = network_table + .map(|table| table.contains_key("stun_servers")) + .unwrap_or(false); let mut config: ProxyConfig = parsed_toml.try_into().map_err(|e| ProxyError::Config(e.to_string()))?; @@ -151,25 +157,31 @@ impl ProxyConfig { let legacy_nat_stun = config.general.middle_proxy_nat_stun.take(); let legacy_nat_stun_servers = std::mem::take(&mut config.general.middle_proxy_nat_stun_servers); let legacy_nat_stun_used = legacy_nat_stun.is_some() || !legacy_nat_stun_servers.is_empty(); + if stun_servers_is_explicit { + let mut explicit_stun_servers = Vec::new(); + for stun in std::mem::take(&mut config.network.stun_servers) { + push_unique_nonempty(&mut explicit_stun_servers, stun); + } + config.network.stun_servers = explicit_stun_servers; - let mut unified_stun_servers = Vec::new(); - for stun in std::mem::take(&mut config.network.stun_servers) { - push_unique_nonempty(&mut unified_stun_servers, stun); - } - if let Some(stun) = legacy_nat_stun { - push_unique_nonempty(&mut unified_stun_servers, stun); - } - for stun in legacy_nat_stun_servers { - push_unique_nonempty(&mut unified_stun_servers, stun); - } + if legacy_nat_stun_used { + warn!("general.middle_proxy_nat_stun and general.middle_proxy_nat_stun_servers are ignored because network.stun_servers is explicitly set"); + } + } else { + // Keep the default STUN pool unless network.stun_servers is explicitly overridden. 
+ let mut unified_stun_servers = default_stun_servers(); + if let Some(stun) = legacy_nat_stun { + push_unique_nonempty(&mut unified_stun_servers, stun); + } + for stun in legacy_nat_stun_servers { + push_unique_nonempty(&mut unified_stun_servers, stun); + } - if unified_stun_servers.is_empty() { - unified_stun_servers = default_stun_servers(); - } - config.network.stun_servers = unified_stun_servers; + config.network.stun_servers = unified_stun_servers; - if legacy_nat_stun_used { - warn!("general.middle_proxy_nat_stun and general.middle_proxy_nat_stun_servers are deprecated; use network.stun_servers"); + if legacy_nat_stun_used { + warn!("general.middle_proxy_nat_stun and general.middle_proxy_nat_stun_servers are deprecated; use network.stun_servers"); + } } if let Some(update_every) = config.general.update_every { diff --git a/src/transport/middle_proxy/pool.rs b/src/transport/middle_proxy/pool.rs index 21c2b87..a90899d 100644 --- a/src/transport/middle_proxy/pool.rs +++ b/src/transport/middle_proxy/pool.rs @@ -1200,15 +1200,23 @@ impl MePool { } addrs.shuffle(&mut rand::rng()); if addrs.len() > 1 { + let concurrency = 2usize; let mut join = tokio::task::JoinSet::new(); - for (ip, port) in addrs { - let addr = SocketAddr::new(ip, port); - let pool = Arc::clone(&self); - let rng_clone = Arc::clone(&rng); - join.spawn(async move { (addr, pool.connect_one(addr, rng_clone.as_ref()).await) }); - } + let mut next_idx = 0usize; - while let Some(res) = join.join_next().await { + while next_idx < addrs.len() || !join.is_empty() { + while next_idx < addrs.len() && join.len() < concurrency { + let (ip, port) = addrs[next_idx]; + next_idx += 1; + let addr = SocketAddr::new(ip, port); + let pool = Arc::clone(&self); + let rng_clone = Arc::clone(&rng); + join.spawn(async move { (addr, pool.connect_one(addr, rng_clone.as_ref()).await) }); + } + + let Some(res) = join.join_next().await else { + break; + }; match res { Ok((addr, Ok(()))) => { info!(%addr, dc = %dc, "ME 
connected"); From 4eebb4feb2342e27d0b52df1fc92065aa22aa19a Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Thu, 26 Feb 2026 19:01:24 +0300 Subject: [PATCH 72/98] ME Pool Refactoring --- src/main.rs | 43 +- src/transport/middle_proxy/health.rs | 1 + src/transport/middle_proxy/mod.rs | 9 +- src/transport/middle_proxy/pool.rs | 1128 +-------------------- src/transport/middle_proxy/pool_config.rs | 81 ++ src/transport/middle_proxy/pool_init.rs | 201 ++++ src/transport/middle_proxy/pool_refill.rs | 159 +++ src/transport/middle_proxy/pool_reinit.rs | 383 +++++++ src/transport/middle_proxy/pool_writer.rs | 348 +++++++ 9 files changed, 1226 insertions(+), 1127 deletions(-) create mode 100644 src/transport/middle_proxy/pool_config.rs create mode 100644 src/transport/middle_proxy/pool_init.rs create mode 100644 src/transport/middle_proxy/pool_refill.rs create mode 100644 src/transport/middle_proxy/pool_reinit.rs create mode 100644 src/transport/middle_proxy/pool_writer.rs diff --git a/src/main.rs b/src/main.rs index db46a6d..da88fe3 100644 --- a/src/main.rs +++ b/src/main.rs @@ -392,26 +392,33 @@ async fn main() -> std::result::Result<(), Box> { ); let pool_size = config.general.middle_proxy_pool_size.max(1); - match pool.init(pool_size, &rng).await { - Ok(()) => { - info!("Middle-End pool initialized successfully"); + loop { + match pool.init(pool_size, &rng).await { + Ok(()) => { + info!("Middle-End pool initialized successfully"); - // Phase 4: Start health monitor - let pool_clone = pool.clone(); - let rng_clone = rng.clone(); - let min_conns = pool_size; - tokio::spawn(async move { - crate::transport::middle_proxy::me_health_monitor( - pool_clone, rng_clone, min_conns, - ) - .await; - }); + // Phase 4: Start health monitor + let pool_clone = pool.clone(); + let rng_clone = rng.clone(); + let min_conns = pool_size; + tokio::spawn(async move { + crate::transport::middle_proxy::me_health_monitor( + pool_clone, rng_clone, min_conns, 
+ ) + .await; + }); - Some(pool) - } - Err(e) => { - error!(error = %e, "Failed to initialize ME pool. Falling back to direct mode."); - None + break Some(pool); + } + Err(e) => { + warn!( + error = %e, + retry_in_secs = 2, + "ME pool is not ready yet; retrying startup initialization" + ); + pool.reset_stun_state(); + tokio::time::sleep(Duration::from_secs(2)).await; + } } } } diff --git a/src/transport/middle_proxy/health.rs b/src/transport/middle_proxy/health.rs index dde3354..06cca03 100644 --- a/src/transport/middle_proxy/health.rs +++ b/src/transport/middle_proxy/health.rs @@ -22,6 +22,7 @@ pub async fn me_health_monitor(pool: Arc, rng: Arc, _min_c let mut inflight: HashMap<(i32, IpFamily), usize> = HashMap::new(); loop { tokio::time::sleep(Duration::from_secs(HEALTH_INTERVAL_SECS)).await; + pool.prune_closed_writers().await; check_family( IpFamily::V4, &pool, diff --git a/src/transport/middle_proxy/mod.rs b/src/transport/middle_proxy/mod.rs index f9f8c85..3a4ff16 100644 --- a/src/transport/middle_proxy/mod.rs +++ b/src/transport/middle_proxy/mod.rs @@ -1,17 +1,22 @@ //! Middle Proxy RPC transport. 
mod codec; +mod config_updater; mod handshake; mod health; mod pool; +mod pool_config; +mod pool_init; mod pool_nat; +mod pool_refill; +mod pool_reinit; +mod pool_writer; mod ping; mod reader; mod registry; +mod rotation; mod send; mod secret; -mod rotation; -mod config_updater; mod wire; use bytes::Bytes; diff --git a/src/transport/middle_proxy/pool.rs b/src/transport/middle_proxy/pool.rs index a90899d..1e43628 100644 --- a/src/transport/middle_proxy/pool.rs +++ b/src/transport/middle_proxy/pool.rs @@ -2,26 +2,17 @@ use std::collections::{HashMap, HashSet}; use std::net::{IpAddr, Ipv6Addr, SocketAddr}; use std::sync::Arc; use std::sync::atomic::{AtomicBool, AtomicI32, AtomicU32, AtomicU64, AtomicUsize, Ordering}; -use bytes::BytesMut; -use rand::Rng; -use rand::seq::SliceRandom; -use tokio::sync::{Mutex, RwLock, mpsc, Notify}; -use tokio_util::sync::CancellationToken; -use tracing::{debug, info, warn}; use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; +use tokio::sync::{Mutex, Notify, RwLock, mpsc}; +use tokio_util::sync::CancellationToken; + use crate::crypto::SecureRandom; -use crate::error::{ProxyError, Result}; -use crate::network::probe::NetworkDecision; use crate::network::IpFamily; -use crate::protocol::constants::*; +use crate::network::probe::NetworkDecision; use super::ConnRegistry; -use super::registry::BoundConn; -use super::codec::{RpcWriter, WriterCommand}; -use super::reader::reader_loop; -const ME_ACTIVE_PING_SECS: u64 = 25; -const ME_ACTIVE_PING_JITTER_SECS: i64 = 5; +use super::codec::WriterCommand; #[derive(Clone)] pub struct MeWriter { @@ -104,11 +95,11 @@ impl MePool { (clamped * 1000.0).round() as u32 } - fn permille_to_ratio(permille: u32) -> f32 { + pub(super) fn permille_to_ratio(permille: u32) -> f32 { (permille.min(1000) as f32) / 1000.0 } - fn now_epoch_secs() -> u64 { + pub(super) fn now_epoch_secs() -> u64 { SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap_or_default() @@ -200,11 +191,15 @@ impl MePool { hardswap: 
AtomicBool::new(hardswap), me_pool_drain_ttl_secs: AtomicU64::new(me_pool_drain_ttl_secs), me_pool_force_close_secs: AtomicU64::new(me_pool_force_close_secs), - me_pool_min_fresh_ratio_permille: AtomicU32::new(Self::ratio_to_permille(me_pool_min_fresh_ratio)), + me_pool_min_fresh_ratio_permille: AtomicU32::new(Self::ratio_to_permille( + me_pool_min_fresh_ratio, + )), me_hardswap_warmup_delay_min_ms: AtomicU64::new(me_hardswap_warmup_delay_min_ms), me_hardswap_warmup_delay_max_ms: AtomicU64::new(me_hardswap_warmup_delay_max_ms), me_hardswap_warmup_extra_passes: AtomicU32::new(me_hardswap_warmup_extra_passes as u32), - me_hardswap_warmup_pass_backoff_base_ms: AtomicU64::new(me_hardswap_warmup_pass_backoff_base_ms), + me_hardswap_warmup_pass_backoff_base_ms: AtomicU64::new( + me_hardswap_warmup_pass_backoff_base_ms, + ), }) } @@ -228,7 +223,8 @@ impl MePool { hardswap_warmup_pass_backoff_base_ms: u64, ) { self.hardswap.store(hardswap, Ordering::Relaxed); - self.me_pool_drain_ttl_secs.store(drain_ttl_secs, Ordering::Relaxed); + self.me_pool_drain_ttl_secs + .store(drain_ttl_secs, Ordering::Relaxed); self.me_pool_force_close_secs .store(force_close_secs, Ordering::Relaxed); self.me_pool_min_fresh_ratio_permille @@ -260,11 +256,11 @@ impl MePool { &self.registry } - fn writers_arc(&self) -> Arc>> { + pub(super) fn writers_arc(&self) -> Arc>> { self.writers.clone() } - fn force_close_timeout(&self) -> Option { + pub(super) fn force_close_timeout(&self) -> Option { let secs = self.me_pool_force_close_secs.load(Ordering::Relaxed); if secs == 0 { None @@ -273,588 +269,6 @@ impl MePool { } } - fn coverage_ratio( - desired_by_dc: &HashMap>, - active_writer_addrs: &HashSet, - ) -> (f32, Vec) { - if desired_by_dc.is_empty() { - return (1.0, Vec::new()); - } - - let mut missing_dc = Vec::::new(); - let mut covered = 0usize; - for (dc, endpoints) in desired_by_dc { - if endpoints.is_empty() { - continue; - } - if endpoints.iter().any(|addr| active_writer_addrs.contains(addr)) { - 
covered += 1; - } else { - missing_dc.push(*dc); - } - } - - missing_dc.sort_unstable(); - let total = desired_by_dc.len().max(1); - let ratio = (covered as f32) / (total as f32); - (ratio, missing_dc) - } - - pub async fn reconcile_connections(self: &Arc, rng: &SecureRandom) { - let writers = self.writers.read().await; - let current: HashSet = writers - .iter() - .filter(|w| !w.draining.load(Ordering::Relaxed)) - .map(|w| w.addr) - .collect(); - drop(writers); - - for family in self.family_order() { - let map = self.proxy_map_for_family(family).await; - for (_dc, addrs) in map.iter() { - let dc_addrs: Vec = addrs - .iter() - .map(|(ip, port)| SocketAddr::new(*ip, *port)) - .collect(); - if !dc_addrs.iter().any(|a| current.contains(a)) { - let mut shuffled = dc_addrs.clone(); - shuffled.shuffle(&mut rand::rng()); - for addr in shuffled { - if self.connect_one(addr, rng).await.is_ok() { - break; - } - } - } - } - if !self.decision.effective_multipath && !current.is_empty() { - break; - } - } - } - - async fn desired_dc_endpoints(&self) -> HashMap> { - let mut out: HashMap> = HashMap::new(); - - if self.decision.ipv4_me { - let map_v4 = self.proxy_map_v4.read().await.clone(); - for (dc, addrs) in map_v4 { - let entry = out.entry(dc.abs()).or_default(); - for (ip, port) in addrs { - entry.insert(SocketAddr::new(ip, port)); - } - } - } - - if self.decision.ipv6_me { - let map_v6 = self.proxy_map_v6.read().await.clone(); - for (dc, addrs) in map_v6 { - let entry = out.entry(dc.abs()).or_default(); - for (ip, port) in addrs { - entry.insert(SocketAddr::new(ip, port)); - } - } - } - - out - } - - pub(super) fn required_writers_for_dc(endpoint_count: usize) -> usize { - endpoint_count.max(3) - } - - fn hardswap_warmup_connect_delay_ms(&self) -> u64 { - let min_ms = self - .me_hardswap_warmup_delay_min_ms - .load(Ordering::Relaxed); - let max_ms = self - .me_hardswap_warmup_delay_max_ms - .load(Ordering::Relaxed); - let (min_ms, max_ms) = if min_ms <= max_ms { - (min_ms, 
max_ms) - } else { - (max_ms, min_ms) - }; - if min_ms == max_ms { - return min_ms; - } - rand::rng().random_range(min_ms..=max_ms) - } - - fn hardswap_warmup_backoff_ms(&self, pass_idx: usize) -> u64 { - let base_ms = self - .me_hardswap_warmup_pass_backoff_base_ms - .load(Ordering::Relaxed); - let cap_ms = (self.me_reconnect_backoff_cap.as_millis() as u64).max(base_ms); - let shift = (pass_idx as u32).min(20); - let scaled = base_ms.saturating_mul(1u64 << shift); - let core = scaled.min(cap_ms); - let jitter = (core / 2).max(1); - core.saturating_add(rand::rng().random_range(0..=jitter)) - } - - async fn fresh_writer_count_for_endpoints( - &self, - generation: u64, - endpoints: &HashSet, - ) -> usize { - let ws = self.writers.read().await; - ws.iter() - .filter(|w| !w.draining.load(Ordering::Relaxed)) - .filter(|w| w.generation == generation) - .filter(|w| endpoints.contains(&w.addr)) - .count() - } - - pub(super) async fn connect_endpoints_round_robin( - self: &Arc, - endpoints: &[SocketAddr], - rng: &SecureRandom, - ) -> bool { - if endpoints.is_empty() { - return false; - } - let start = (self.rr.fetch_add(1, Ordering::Relaxed) as usize) % endpoints.len(); - for offset in 0..endpoints.len() { - let idx = (start + offset) % endpoints.len(); - let addr = endpoints[idx]; - match self.connect_one(addr, rng).await { - Ok(()) => return true, - Err(e) => debug!(%addr, error = %e, "ME connect failed during round-robin warmup"), - } - } - false - } - - async fn warmup_generation_for_all_dcs( - self: &Arc, - rng: &SecureRandom, - generation: u64, - desired_by_dc: &HashMap>, - ) { - let extra_passes = self - .me_hardswap_warmup_extra_passes - .load(Ordering::Relaxed) - .min(10) as usize; - let total_passes = 1 + extra_passes; - - for (dc, endpoints) in desired_by_dc { - if endpoints.is_empty() { - continue; - } - - let mut endpoint_list: Vec = endpoints.iter().copied().collect(); - endpoint_list.sort_unstable(); - let required = 
Self::required_writers_for_dc(endpoint_list.len()); - let mut completed = false; - let mut last_fresh_count = self - .fresh_writer_count_for_endpoints(generation, endpoints) - .await; - - for pass_idx in 0..total_passes { - if last_fresh_count >= required { - completed = true; - break; - } - - let missing = required.saturating_sub(last_fresh_count); - debug!( - dc = *dc, - pass = pass_idx + 1, - total_passes, - fresh_count = last_fresh_count, - required, - missing, - endpoint_count = endpoint_list.len(), - "ME hardswap warmup pass started" - ); - - for attempt_idx in 0..missing { - let delay_ms = self.hardswap_warmup_connect_delay_ms(); - tokio::time::sleep(Duration::from_millis(delay_ms)).await; - - let connected = self.connect_endpoints_round_robin(&endpoint_list, rng).await; - debug!( - dc = *dc, - pass = pass_idx + 1, - total_passes, - attempt = attempt_idx + 1, - delay_ms, - connected, - "ME hardswap warmup connect attempt finished" - ); - } - - last_fresh_count = self - .fresh_writer_count_for_endpoints(generation, endpoints) - .await; - if last_fresh_count >= required { - completed = true; - info!( - dc = *dc, - pass = pass_idx + 1, - total_passes, - fresh_count = last_fresh_count, - required, - "ME hardswap warmup floor reached for DC" - ); - break; - } - - if pass_idx + 1 < total_passes { - let backoff_ms = self.hardswap_warmup_backoff_ms(pass_idx); - debug!( - dc = *dc, - pass = pass_idx + 1, - total_passes, - fresh_count = last_fresh_count, - required, - backoff_ms, - "ME hardswap warmup pass incomplete, delaying next pass" - ); - tokio::time::sleep(Duration::from_millis(backoff_ms)).await; - } - } - - if !completed { - warn!( - dc = *dc, - fresh_count = last_fresh_count, - required, - endpoint_count = endpoint_list.len(), - total_passes, - "ME warmup stopped: unable to reach required writer floor for DC" - ); - } - } - } - - pub async fn zero_downtime_reinit_after_map_change( - self: &Arc, - rng: &SecureRandom, - ) { - let desired_by_dc = 
self.desired_dc_endpoints().await; - if desired_by_dc.is_empty() { - warn!("ME endpoint map is empty; skipping stale writer drain"); - return; - } - - let previous_generation = self.current_generation(); - let generation = self.generation.fetch_add(1, Ordering::Relaxed) + 1; - let hardswap = self.hardswap.load(Ordering::Relaxed); - - if hardswap { - self.warmup_generation_for_all_dcs(rng, generation, &desired_by_dc) - .await; - } else { - self.reconcile_connections(rng).await; - } - - let writers = self.writers.read().await; - let active_writer_addrs: HashSet = writers - .iter() - .filter(|w| !w.draining.load(Ordering::Relaxed)) - .map(|w| w.addr) - .collect(); - let min_ratio = Self::permille_to_ratio( - self.me_pool_min_fresh_ratio_permille - .load(Ordering::Relaxed), - ); - let (coverage_ratio, missing_dc) = Self::coverage_ratio(&desired_by_dc, &active_writer_addrs); - if !hardswap && coverage_ratio < min_ratio { - warn!( - previous_generation, - generation, - coverage_ratio = format_args!("{coverage_ratio:.3}"), - min_ratio = format_args!("{min_ratio:.3}"), - missing_dc = ?missing_dc, - "ME reinit coverage below threshold; keeping stale writers" - ); - return; - } - - if hardswap { - let mut fresh_missing_dc = Vec::<(i32, usize, usize)>::new(); - for (dc, endpoints) in &desired_by_dc { - if endpoints.is_empty() { - continue; - } - let required = Self::required_writers_for_dc(endpoints.len()); - let fresh_count = writers - .iter() - .filter(|w| !w.draining.load(Ordering::Relaxed)) - .filter(|w| w.generation == generation) - .filter(|w| endpoints.contains(&w.addr)) - .count(); - if fresh_count < required { - fresh_missing_dc.push((*dc, fresh_count, required)); - } - } - if !fresh_missing_dc.is_empty() { - warn!( - previous_generation, - generation, - missing_dc = ?fresh_missing_dc, - "ME hardswap pending: fresh generation coverage incomplete" - ); - return; - } - } else if !missing_dc.is_empty() { - warn!( - missing_dc = ?missing_dc, - // Keep stale writers alive 
when fresh coverage is incomplete. - "ME reinit coverage incomplete; keeping stale writers" - ); - return; - } - - let desired_addrs: HashSet = desired_by_dc - .values() - .flat_map(|set| set.iter().copied()) - .collect(); - - let stale_writer_ids: Vec = writers - .iter() - .filter(|w| !w.draining.load(Ordering::Relaxed)) - .filter(|w| { - if hardswap { - w.generation < generation - } else { - !desired_addrs.contains(&w.addr) - } - }) - .map(|w| w.id) - .collect(); - drop(writers); - - if stale_writer_ids.is_empty() { - debug!("ME reinit cycle completed with no stale writers"); - return; - } - - let drain_timeout = self.force_close_timeout(); - let drain_timeout_secs = drain_timeout.map(|d| d.as_secs()).unwrap_or(0); - info!( - stale_writers = stale_writer_ids.len(), - previous_generation, - generation, - hardswap, - coverage_ratio = format_args!("{coverage_ratio:.3}"), - min_ratio = format_args!("{min_ratio:.3}"), - drain_timeout_secs, - "ME reinit cycle covered; draining stale writers" - ); - self.stats.increment_pool_swap_total(); - for writer_id in stale_writer_ids { - self.mark_writer_draining_with_timeout(writer_id, drain_timeout, !hardswap) - .await; - } - } - - pub async fn zero_downtime_reinit_periodic( - self: &Arc, - rng: &SecureRandom, - ) { - self.zero_downtime_reinit_after_map_change(rng).await; - } - - async fn endpoints_for_same_dc(&self, addr: SocketAddr) -> Vec { - let mut target_dc = HashSet::::new(); - let mut endpoints = HashSet::::new(); - - if self.decision.ipv4_me { - let map = self.proxy_map_v4.read().await.clone(); - for (dc, addrs) in &map { - if addrs - .iter() - .any(|(ip, port)| SocketAddr::new(*ip, *port) == addr) - { - target_dc.insert(dc.abs()); - } - } - for dc in &target_dc { - for key in [*dc, -*dc] { - if let Some(addrs) = map.get(&key) { - for (ip, port) in addrs { - endpoints.insert(SocketAddr::new(*ip, *port)); - } - } - } - } - } - - if self.decision.ipv6_me { - let map = self.proxy_map_v6.read().await.clone(); - for (dc, 
addrs) in &map { - if addrs - .iter() - .any(|(ip, port)| SocketAddr::new(*ip, *port) == addr) - { - target_dc.insert(dc.abs()); - } - } - for dc in &target_dc { - for key in [*dc, -*dc] { - if let Some(addrs) = map.get(&key) { - for (ip, port) in addrs { - endpoints.insert(SocketAddr::new(*ip, *port)); - } - } - } - } - } - - let mut sorted: Vec = endpoints.into_iter().collect(); - sorted.sort_unstable(); - sorted - } - - async fn refill_writer_after_loss(self: &Arc, addr: SocketAddr) -> bool { - let fast_retries = self.me_reconnect_fast_retry_count.max(1); - - for attempt in 0..fast_retries { - self.stats.increment_me_reconnect_attempt(); - match self.connect_one(addr, self.rng.as_ref()).await { - Ok(()) => { - self.stats.increment_me_reconnect_success(); - self.stats.increment_me_writer_restored_same_endpoint_total(); - info!( - %addr, - attempt = attempt + 1, - "ME writer restored on the same endpoint" - ); - return true; - } - Err(e) => { - debug!( - %addr, - attempt = attempt + 1, - error = %e, - "ME immediate same-endpoint reconnect failed" - ); - } - } - } - - let dc_endpoints = self.endpoints_for_same_dc(addr).await; - if dc_endpoints.is_empty() { - self.stats.increment_me_refill_failed_total(); - return false; - } - - for attempt in 0..fast_retries { - self.stats.increment_me_reconnect_attempt(); - if self - .connect_endpoints_round_robin(&dc_endpoints, self.rng.as_ref()) - .await - { - self.stats.increment_me_reconnect_success(); - self.stats.increment_me_writer_restored_fallback_total(); - info!( - %addr, - attempt = attempt + 1, - "ME writer restored via DC fallback endpoint" - ); - return true; - } - } - - self.stats.increment_me_refill_failed_total(); - false - } - - pub(crate) fn trigger_immediate_refill(self: &Arc, addr: SocketAddr) { - let pool = Arc::clone(self); - tokio::spawn(async move { - { - let mut guard = pool.refill_inflight.lock().await; - if !guard.insert(addr) { - pool.stats.increment_me_refill_skipped_inflight_total(); - return; - } - 
} - pool.stats.increment_me_refill_triggered_total(); - - let restored = pool.refill_writer_after_loss(addr).await; - if !restored { - warn!(%addr, "ME immediate refill failed"); - } - - let mut guard = pool.refill_inflight.lock().await; - guard.remove(&addr); - }); - } - - pub async fn update_proxy_maps( - &self, - new_v4: HashMap>, - new_v6: Option>>, - ) -> bool { - let mut changed = false; - { - let mut guard = self.proxy_map_v4.write().await; - if !new_v4.is_empty() && *guard != new_v4 { - *guard = new_v4; - changed = true; - } - } - if let Some(v6) = new_v6 { - let mut guard = self.proxy_map_v6.write().await; - if !v6.is_empty() && *guard != v6 { - *guard = v6; - changed = true; - } - } - // Ensure negative DC entries mirror positives when absent (Telegram convention). - { - let mut guard = self.proxy_map_v4.write().await; - let keys: Vec = guard.keys().cloned().collect(); - for k in keys.iter().cloned().filter(|k| *k > 0) { - if !guard.contains_key(&-k) - && let Some(addrs) = guard.get(&k).cloned() - { - guard.insert(-k, addrs); - } - } - } - { - let mut guard = self.proxy_map_v6.write().await; - let keys: Vec = guard.keys().cloned().collect(); - for k in keys.iter().cloned().filter(|k| *k > 0) { - if !guard.contains_key(&-k) - && let Some(addrs) = guard.get(&k).cloned() - { - guard.insert(-k, addrs); - } - } - } - changed - } - - pub async fn update_secret(self: &Arc, new_secret: Vec) -> bool { - if new_secret.len() < 32 { - warn!(len = new_secret.len(), "proxy-secret update ignored (too short)"); - return false; - } - let mut guard = self.proxy_secret.write().await; - if *guard != new_secret { - *guard = new_secret; - drop(guard); - self.reconnect_all().await; - return true; - } - false - } - - pub async fn reconnect_all(self: &Arc) { - let ws = self.writers.read().await.clone(); - for w in ws { - if let Ok(()) = self.connect_one(w.addr, self.rng.as_ref()).await { - self.mark_writer_draining(w.id).await; - tokio::time::sleep(Duration::from_secs(2)).await; 
- } - } - } - pub(super) async fn key_selector(&self) -> u32 { let secret = self.proxy_secret.read().await; if secret.len() >= 4 { @@ -884,513 +298,13 @@ impl MePool { order } - async fn proxy_map_for_family(&self, family: IpFamily) -> HashMap> { + pub(super) async fn proxy_map_for_family( + &self, + family: IpFamily, + ) -> HashMap> { match family { IpFamily::V4 => self.proxy_map_v4.read().await.clone(), IpFamily::V6 => self.proxy_map_v6.read().await.clone(), } } - - pub async fn init(self: &Arc, pool_size: usize, rng: &Arc) -> Result<()> { - let family_order = self.family_order(); - let ks = self.key_selector().await; - info!( - me_servers = self.proxy_map_v4.read().await.len(), - pool_size, - key_selector = format_args!("0x{ks:08x}"), - secret_len = self.proxy_secret.read().await.len(), - "Initializing ME pool" - ); - - for family in family_order { - let map = self.proxy_map_for_family(family).await; - let mut grouped_dc_addrs: HashMap> = HashMap::new(); - for (dc, addrs) in map { - if addrs.is_empty() { - continue; - } - grouped_dc_addrs - .entry(dc.abs()) - .or_default() - .extend(addrs); - } - let mut dc_addrs: Vec<(i32, Vec<(IpAddr, u16)>)> = grouped_dc_addrs - .into_iter() - .map(|(dc, mut addrs)| { - addrs.sort_unstable(); - addrs.dedup(); - (dc, addrs) - }) - .collect(); - dc_addrs.sort_unstable_by_key(|(dc, _)| *dc); - - // Ensure at least one connection per DC; run DCs in parallel. 
- let mut join = tokio::task::JoinSet::new(); - let mut dc_failures = 0usize; - for (dc, addrs) in dc_addrs.iter().cloned() { - if addrs.is_empty() { - continue; - } - let pool = Arc::clone(self); - let rng_clone = Arc::clone(rng); - join.spawn(async move { - pool.connect_primary_for_dc(dc, addrs, rng_clone).await - }); - } - while let Some(res) = join.join_next().await { - if let Ok(false) = res { - dc_failures += 1; - } - } - if dc_failures > 2 { - return Err(ProxyError::Proxy("Too many ME DC init failures, falling back to direct".into())); - } - - // Warm reserve writers asynchronously so startup does not block after first working pool is ready. - let pool = Arc::clone(self); - let rng_clone = Arc::clone(rng); - let dc_addrs_bg = dc_addrs.clone(); - tokio::spawn(async move { - if pool.me_warmup_stagger_enabled { - for (dc, addrs) in dc_addrs_bg.iter() { - for (ip, port) in addrs { - if pool.connection_count() >= pool_size { - break; - } - let addr = SocketAddr::new(*ip, *port); - let jitter = rand::rng() - .random_range(0..=pool.me_warmup_step_jitter.as_millis() as u64); - let delay_ms = pool.me_warmup_step_delay.as_millis() as u64 + jitter; - tokio::time::sleep(Duration::from_millis(delay_ms)).await; - if let Err(e) = pool.connect_one(addr, rng_clone.as_ref()).await { - debug!(%addr, dc = %dc, error = %e, "Extra ME connect failed (staggered)"); - } - } - } - } else { - for (dc, addrs) in dc_addrs_bg.iter() { - for (ip, port) in addrs { - if pool.connection_count() >= pool_size { - break; - } - let addr = SocketAddr::new(*ip, *port); - if let Err(e) = pool.connect_one(addr, rng_clone.as_ref()).await { - debug!(%addr, dc = %dc, error = %e, "Extra ME connect failed"); - } - } - if pool.connection_count() >= pool_size { - break; - } - } - } - debug!( - target_pool_size = pool_size, - current_pool_size = pool.connection_count(), - "Background ME reserve warmup finished" - ); - }); - - if !self.decision.effective_multipath && self.connection_count() > 0 { - break; - 
} - } - - if self.writers.read().await.is_empty() { - return Err(ProxyError::Proxy("No ME connections".into())); - } - info!( - active_writers = self.connection_count(), - "ME primary pool ready; reserve warmup continues in background" - ); - Ok(()) - } - - pub(crate) async fn connect_one(self: &Arc, addr: SocketAddr, rng: &SecureRandom) -> Result<()> { - let secret_len = self.proxy_secret.read().await.len(); - if secret_len < 32 { - return Err(ProxyError::Proxy("proxy-secret too short for ME auth".into())); - } - - let (stream, _connect_ms) = self.connect_tcp(addr).await?; - let hs = self.handshake_only(stream, addr, rng).await?; - - let writer_id = self.next_writer_id.fetch_add(1, Ordering::Relaxed); - let generation = self.current_generation(); - let cancel = CancellationToken::new(); - let degraded = Arc::new(AtomicBool::new(false)); - let draining = Arc::new(AtomicBool::new(false)); - let draining_started_at_epoch_secs = Arc::new(AtomicU64::new(0)); - let allow_drain_fallback = Arc::new(AtomicBool::new(false)); - let (tx, mut rx) = mpsc::channel::(4096); - let mut rpc_writer = RpcWriter { - writer: hs.wr, - key: hs.write_key, - iv: hs.write_iv, - seq_no: 0, - crc_mode: hs.crc_mode, - }; - let cancel_wr = cancel.clone(); - tokio::spawn(async move { - loop { - tokio::select! 
{ - cmd = rx.recv() => { - match cmd { - Some(WriterCommand::Data(payload)) => { - if rpc_writer.send(&payload).await.is_err() { break; } - } - Some(WriterCommand::DataAndFlush(payload)) => { - if rpc_writer.send_and_flush(&payload).await.is_err() { break; } - } - Some(WriterCommand::Close) | None => break, - } - } - _ = cancel_wr.cancelled() => break, - } - } - }); - let writer = MeWriter { - id: writer_id, - addr, - generation, - tx: tx.clone(), - cancel: cancel.clone(), - degraded: degraded.clone(), - draining: draining.clone(), - draining_started_at_epoch_secs: draining_started_at_epoch_secs.clone(), - allow_drain_fallback: allow_drain_fallback.clone(), - }; - self.writers.write().await.push(writer.clone()); - self.conn_count.fetch_add(1, Ordering::Relaxed); - self.writer_available.notify_one(); - - let reg = self.registry.clone(); - let writers_arc = self.writers_arc(); - let ping_tracker = self.ping_tracker.clone(); - let ping_tracker_reader = ping_tracker.clone(); - let rtt_stats = self.rtt_stats.clone(); - let stats_reader = self.stats.clone(); - let stats_ping = self.stats.clone(); - let pool = Arc::downgrade(self); - let cancel_ping = cancel.clone(); - let tx_ping = tx.clone(); - let ping_tracker_ping = ping_tracker.clone(); - let cleanup_done = Arc::new(AtomicBool::new(false)); - let cleanup_for_reader = cleanup_done.clone(); - let cleanup_for_ping = cleanup_done.clone(); - let keepalive_enabled = self.me_keepalive_enabled; - let keepalive_interval = self.me_keepalive_interval; - let keepalive_jitter = self.me_keepalive_jitter; - let cancel_reader_token = cancel.clone(); - let cancel_ping_token = cancel_ping.clone(); - - tokio::spawn(async move { - let res = reader_loop( - hs.rd, - hs.read_key, - hs.read_iv, - hs.crc_mode, - reg.clone(), - BytesMut::new(), - BytesMut::new(), - tx.clone(), - ping_tracker_reader, - rtt_stats.clone(), - stats_reader, - writer_id, - degraded.clone(), - cancel_reader_token.clone(), - ) - .await; - if let Some(pool) = 
pool.upgrade() - && cleanup_for_reader - .compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed) - .is_ok() - { - pool.remove_writer_and_close_clients(writer_id).await; - } - if let Err(e) = res { - warn!(error = %e, "ME reader ended"); - } - let mut ws = writers_arc.write().await; - ws.retain(|w| w.id != writer_id); - info!(remaining = ws.len(), "Dead ME writer removed from pool"); - }); - - let pool_ping = Arc::downgrade(self); - tokio::spawn(async move { - let mut ping_id: i64 = rand::random::(); - // Per-writer jittered start to avoid phase sync. - let startup_jitter = if keepalive_enabled { - let jitter_cap_ms = keepalive_interval.as_millis() / 2; - let effective_jitter_ms = keepalive_jitter.as_millis().min(jitter_cap_ms).max(1); - Duration::from_millis(rand::rng().random_range(0..=effective_jitter_ms as u64)) - } else { - let jitter = rand::rng() - .random_range(-ME_ACTIVE_PING_JITTER_SECS..=ME_ACTIVE_PING_JITTER_SECS); - let wait = (ME_ACTIVE_PING_SECS as i64 + jitter).max(5) as u64; - Duration::from_secs(wait) - }; - tokio::select! { - _ = cancel_ping_token.cancelled() => return, - _ = tokio::time::sleep(startup_jitter) => {} - } - loop { - let wait = if keepalive_enabled { - let jitter_cap_ms = keepalive_interval.as_millis() / 2; - let effective_jitter_ms = keepalive_jitter.as_millis().min(jitter_cap_ms).max(1); - keepalive_interval - + Duration::from_millis( - rand::rng().random_range(0..=effective_jitter_ms as u64) - ) - } else { - let jitter = rand::rng() - .random_range(-ME_ACTIVE_PING_JITTER_SECS..=ME_ACTIVE_PING_JITTER_SECS); - let secs = (ME_ACTIVE_PING_SECS as i64 + jitter).max(5) as u64; - Duration::from_secs(secs) - }; - tokio::select! 
{ - _ = cancel_ping_token.cancelled() => { - break; - } - _ = tokio::time::sleep(wait) => {} - } - let sent_id = ping_id; - let mut p = Vec::with_capacity(12); - p.extend_from_slice(&RPC_PING_U32.to_le_bytes()); - p.extend_from_slice(&sent_id.to_le_bytes()); - { - let mut tracker = ping_tracker_ping.lock().await; - let before = tracker.len(); - tracker.retain(|_, (ts, _)| ts.elapsed() < Duration::from_secs(120)); - let expired = before.saturating_sub(tracker.len()); - if expired > 0 { - stats_ping.increment_me_keepalive_timeout_by(expired as u64); - } - tracker.insert(sent_id, (std::time::Instant::now(), writer_id)); - } - ping_id = ping_id.wrapping_add(1); - stats_ping.increment_me_keepalive_sent(); - if tx_ping.send(WriterCommand::DataAndFlush(p)).await.is_err() { - stats_ping.increment_me_keepalive_failed(); - debug!("ME ping failed, removing dead writer"); - cancel_ping.cancel(); - if let Some(pool) = pool_ping.upgrade() - && cleanup_for_ping - .compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed) - .is_ok() - { - pool.remove_writer_and_close_clients(writer_id).await; - } - break; - } - } - }); - - Ok(()) - } - - async fn connect_primary_for_dc( - self: Arc, - dc: i32, - mut addrs: Vec<(IpAddr, u16)>, - rng: Arc, - ) -> bool { - if addrs.is_empty() { - return false; - } - addrs.shuffle(&mut rand::rng()); - if addrs.len() > 1 { - let concurrency = 2usize; - let mut join = tokio::task::JoinSet::new(); - let mut next_idx = 0usize; - - while next_idx < addrs.len() || !join.is_empty() { - while next_idx < addrs.len() && join.len() < concurrency { - let (ip, port) = addrs[next_idx]; - next_idx += 1; - let addr = SocketAddr::new(ip, port); - let pool = Arc::clone(&self); - let rng_clone = Arc::clone(&rng); - join.spawn(async move { (addr, pool.connect_one(addr, rng_clone.as_ref()).await) }); - } - - let Some(res) = join.join_next().await else { - break; - }; - match res { - Ok((addr, Ok(()))) => { - info!(%addr, dc = %dc, "ME connected"); - 
join.abort_all(); - while join.join_next().await.is_some() {} - return true; - } - Ok((addr, Err(e))) => { - warn!(%addr, dc = %dc, error = %e, "ME connect failed, trying next"); - } - Err(e) => { - warn!(dc = %dc, error = %e, "ME connect task failed"); - } - } - } - warn!(dc = %dc, "All ME servers for DC failed at init"); - return false; - } - - for (ip, port) in addrs { - let addr = SocketAddr::new(ip, port); - match self.connect_one(addr, rng.as_ref()).await { - Ok(()) => { - info!(%addr, dc = %dc, "ME connected"); - return true; - } - Err(e) => warn!(%addr, dc = %dc, error = %e, "ME connect failed, trying next"), - } - } - warn!(dc = %dc, "All ME servers for DC failed at init"); - false - } - - pub(crate) async fn remove_writer_and_close_clients(self: &Arc, writer_id: u64) { - let conns = self.remove_writer_only(writer_id).await; - for bound in conns { - let _ = self.registry.route(bound.conn_id, super::MeResponse::Close).await; - let _ = self.registry.unregister(bound.conn_id).await; - } - } - - async fn remove_writer_only(self: &Arc, writer_id: u64) -> Vec { - let mut close_tx: Option> = None; - let mut removed_addr: Option = None; - let mut trigger_refill = false; - { - let mut ws = self.writers.write().await; - if let Some(pos) = ws.iter().position(|w| w.id == writer_id) { - let w = ws.remove(pos); - let was_draining = w.draining.load(Ordering::Relaxed); - if was_draining { - self.stats.decrement_pool_drain_active(); - } - self.stats.increment_me_writer_removed_total(); - w.cancel.cancel(); - removed_addr = Some(w.addr); - trigger_refill = !was_draining; - if trigger_refill { - self.stats.increment_me_writer_removed_unexpected_total(); - } - close_tx = Some(w.tx.clone()); - self.conn_count.fetch_sub(1, Ordering::Relaxed); - } - } - if let Some(tx) = close_tx { - let _ = tx.send(WriterCommand::Close).await; - } - if trigger_refill - && let Some(addr) = removed_addr - { - self.trigger_immediate_refill(addr); - } - 
self.rtt_stats.lock().await.remove(&writer_id); - self.registry.writer_lost(writer_id).await - } - - pub(crate) async fn mark_writer_draining_with_timeout( - self: &Arc, - writer_id: u64, - timeout: Option, - allow_drain_fallback: bool, - ) { - let timeout = timeout.filter(|d| !d.is_zero()); - let found = { - let mut ws = self.writers.write().await; - if let Some(w) = ws.iter_mut().find(|w| w.id == writer_id) { - let already_draining = w.draining.swap(true, Ordering::Relaxed); - w.allow_drain_fallback - .store(allow_drain_fallback, Ordering::Relaxed); - w.draining_started_at_epoch_secs - .store(Self::now_epoch_secs(), Ordering::Relaxed); - if !already_draining { - self.stats.increment_pool_drain_active(); - } - w.draining.store(true, Ordering::Relaxed); - true - } else { - false - } - }; - - if !found { - return; - } - - let timeout_secs = timeout.map(|d| d.as_secs()).unwrap_or(0); - debug!( - writer_id, - timeout_secs, - allow_drain_fallback, - "ME writer marked draining" - ); - - let pool = Arc::downgrade(self); - tokio::spawn(async move { - let deadline = timeout.map(|t| Instant::now() + t); - while let Some(p) = pool.upgrade() { - if let Some(deadline_at) = deadline - && Instant::now() >= deadline_at - { - warn!(writer_id, "Drain timeout, force-closing"); - p.stats.increment_pool_force_close_total(); - let _ = p.remove_writer_and_close_clients(writer_id).await; - break; - } - if p.registry.is_writer_empty(writer_id).await { - let _ = p.remove_writer_only(writer_id).await; - break; - } - tokio::time::sleep(Duration::from_secs(1)).await; - } - }); - } - - pub(crate) async fn mark_writer_draining(self: &Arc, writer_id: u64) { - self.mark_writer_draining_with_timeout(writer_id, Some(Duration::from_secs(300)), false) - .await; - } - - pub(super) fn writer_accepts_new_binding(&self, writer: &MeWriter) -> bool { - if !writer.draining.load(Ordering::Relaxed) { - return true; - } - if !writer.allow_drain_fallback.load(Ordering::Relaxed) { - return false; - } - - let 
ttl_secs = self.me_pool_drain_ttl_secs.load(Ordering::Relaxed); - if ttl_secs == 0 { - return true; - } - - let started = writer.draining_started_at_epoch_secs.load(Ordering::Relaxed); - if started == 0 { - return false; - } - - Self::now_epoch_secs().saturating_sub(started) <= ttl_secs - } - -} - -#[allow(dead_code)] -fn hex_dump(data: &[u8]) -> String { - const MAX: usize = 64; - let mut out = String::with_capacity(data.len() * 2 + 3); - for (i, b) in data.iter().take(MAX).enumerate() { - if i > 0 { - out.push(' '); - } - out.push_str(&format!("{b:02x}")); - } - if data.len() > MAX { - out.push_str(" …"); - } - out } diff --git a/src/transport/middle_proxy/pool_config.rs b/src/transport/middle_proxy/pool_config.rs new file mode 100644 index 0000000..fe2aad8 --- /dev/null +++ b/src/transport/middle_proxy/pool_config.rs @@ -0,0 +1,81 @@ +use std::collections::HashMap; +use std::net::IpAddr; +use std::sync::Arc; +use std::time::Duration; + +use tracing::warn; + +use super::pool::MePool; + +impl MePool { + pub async fn update_proxy_maps( + &self, + new_v4: HashMap>, + new_v6: Option>>, + ) -> bool { + let mut changed = false; + { + let mut guard = self.proxy_map_v4.write().await; + if !new_v4.is_empty() && *guard != new_v4 { + *guard = new_v4; + changed = true; + } + } + if let Some(v6) = new_v6 { + let mut guard = self.proxy_map_v6.write().await; + if !v6.is_empty() && *guard != v6 { + *guard = v6; + changed = true; + } + } + // Ensure negative DC entries mirror positives when absent (Telegram convention). 
+ { + let mut guard = self.proxy_map_v4.write().await; + let keys: Vec = guard.keys().cloned().collect(); + for k in keys.iter().cloned().filter(|k| *k > 0) { + if !guard.contains_key(&-k) + && let Some(addrs) = guard.get(&k).cloned() + { + guard.insert(-k, addrs); + } + } + } + { + let mut guard = self.proxy_map_v6.write().await; + let keys: Vec = guard.keys().cloned().collect(); + for k in keys.iter().cloned().filter(|k| *k > 0) { + if !guard.contains_key(&-k) + && let Some(addrs) = guard.get(&k).cloned() + { + guard.insert(-k, addrs); + } + } + } + changed + } + + pub async fn update_secret(self: &Arc, new_secret: Vec) -> bool { + if new_secret.len() < 32 { + warn!(len = new_secret.len(), "proxy-secret update ignored (too short)"); + return false; + } + let mut guard = self.proxy_secret.write().await; + if *guard != new_secret { + *guard = new_secret; + drop(guard); + self.reconnect_all().await; + return true; + } + false + } + + pub async fn reconnect_all(self: &Arc) { + let ws = self.writers.read().await.clone(); + for w in ws { + if let Ok(()) = self.connect_one(w.addr, self.rng.as_ref()).await { + self.mark_writer_draining(w.id).await; + tokio::time::sleep(Duration::from_secs(2)).await; + } + } + } +} diff --git a/src/transport/middle_proxy/pool_init.rs b/src/transport/middle_proxy/pool_init.rs new file mode 100644 index 0000000..623be7f --- /dev/null +++ b/src/transport/middle_proxy/pool_init.rs @@ -0,0 +1,201 @@ +use std::collections::{HashMap, HashSet}; +use std::net::{IpAddr, SocketAddr}; +use std::sync::Arc; + +use rand::Rng; +use rand::seq::SliceRandom; +use tracing::{debug, info, warn}; + +use crate::crypto::SecureRandom; +use crate::error::{ProxyError, Result}; + +use super::pool::MePool; + +impl MePool { + pub async fn init(self: &Arc, pool_size: usize, rng: &Arc) -> Result<()> { + let family_order = self.family_order(); + let ks = self.key_selector().await; + info!( + me_servers = self.proxy_map_v4.read().await.len(), + pool_size, + key_selector = 
format_args!("0x{ks:08x}"), + secret_len = self.proxy_secret.read().await.len(), + "Initializing ME pool" + ); + + for family in family_order { + let map = self.proxy_map_for_family(family).await; + let mut grouped_dc_addrs: HashMap> = HashMap::new(); + for (dc, addrs) in map { + if addrs.is_empty() { + continue; + } + grouped_dc_addrs.entry(dc.abs()).or_default().extend(addrs); + } + let mut dc_addrs: Vec<(i32, Vec<(IpAddr, u16)>)> = grouped_dc_addrs + .into_iter() + .map(|(dc, mut addrs)| { + addrs.sort_unstable(); + addrs.dedup(); + (dc, addrs) + }) + .collect(); + dc_addrs.sort_unstable_by_key(|(dc, _)| *dc); + + // Ensure at least one live writer per DC group; run missing DCs in parallel. + let mut join = tokio::task::JoinSet::new(); + for (dc, addrs) in dc_addrs.iter().cloned() { + if addrs.is_empty() { + continue; + } + let endpoints: HashSet = addrs + .iter() + .map(|(ip, port)| SocketAddr::new(*ip, *port)) + .collect(); + if self.active_writer_count_for_endpoints(&endpoints).await > 0 { + continue; + } + let pool = Arc::clone(self); + let rng_clone = Arc::clone(rng); + join.spawn(async move { pool.connect_primary_for_dc(dc, addrs, rng_clone).await }); + } + while join.join_next().await.is_some() {} + + let mut missing_dcs = Vec::new(); + for (dc, addrs) in &dc_addrs { + let endpoints: HashSet = addrs + .iter() + .map(|(ip, port)| SocketAddr::new(*ip, *port)) + .collect(); + if self.active_writer_count_for_endpoints(&endpoints).await == 0 { + missing_dcs.push(*dc); + } + } + if !missing_dcs.is_empty() { + return Err(ProxyError::Proxy(format!( + "ME init incomplete: no live writers for DC groups {missing_dcs:?}" + ))); + } + + // Warm reserve writers asynchronously so startup does not block after first working pool is ready. 
+ let pool = Arc::clone(self); + let rng_clone = Arc::clone(rng); + let dc_addrs_bg = dc_addrs.clone(); + tokio::spawn(async move { + if pool.me_warmup_stagger_enabled { + for (dc, addrs) in &dc_addrs_bg { + for (ip, port) in addrs { + if pool.connection_count() >= pool_size { + break; + } + let addr = SocketAddr::new(*ip, *port); + let jitter = rand::rng() + .random_range(0..=pool.me_warmup_step_jitter.as_millis() as u64); + let delay_ms = pool.me_warmup_step_delay.as_millis() as u64 + jitter; + tokio::time::sleep(std::time::Duration::from_millis(delay_ms)).await; + if let Err(e) = pool.connect_one(addr, rng_clone.as_ref()).await { + debug!(%addr, dc = %dc, error = %e, "Extra ME connect failed (staggered)"); + } + } + } + } else { + for (dc, addrs) in &dc_addrs_bg { + for (ip, port) in addrs { + if pool.connection_count() >= pool_size { + break; + } + let addr = SocketAddr::new(*ip, *port); + if let Err(e) = pool.connect_one(addr, rng_clone.as_ref()).await { + debug!(%addr, dc = %dc, error = %e, "Extra ME connect failed"); + } + } + if pool.connection_count() >= pool_size { + break; + } + } + } + debug!( + target_pool_size = pool_size, + current_pool_size = pool.connection_count(), + "Background ME reserve warmup finished" + ); + }); + + if !self.decision.effective_multipath && self.connection_count() > 0 { + break; + } + } + + if self.writers.read().await.is_empty() { + return Err(ProxyError::Proxy("No ME connections".into())); + } + info!( + active_writers = self.connection_count(), + "ME primary pool ready; reserve warmup continues in background" + ); + Ok(()) + } + + async fn connect_primary_for_dc( + self: Arc, + dc: i32, + mut addrs: Vec<(IpAddr, u16)>, + rng: Arc, + ) -> bool { + if addrs.is_empty() { + return false; + } + addrs.shuffle(&mut rand::rng()); + if addrs.len() > 1 { + let concurrency = 2usize; + let mut join = tokio::task::JoinSet::new(); + let mut next_idx = 0usize; + + while next_idx < addrs.len() || !join.is_empty() { + while next_idx < 
addrs.len() && join.len() < concurrency { + let (ip, port) = addrs[next_idx]; + next_idx += 1; + let addr = SocketAddr::new(ip, port); + let pool = Arc::clone(&self); + let rng_clone = Arc::clone(&rng); + join.spawn(async move { + (addr, pool.connect_one(addr, rng_clone.as_ref()).await) + }); + } + + let Some(res) = join.join_next().await else { + break; + }; + match res { + Ok((addr, Ok(()))) => { + info!(%addr, dc = %dc, "ME connected"); + join.abort_all(); + while join.join_next().await.is_some() {} + return true; + } + Ok((addr, Err(e))) => { + warn!(%addr, dc = %dc, error = %e, "ME connect failed, trying next"); + } + Err(e) => { + warn!(dc = %dc, error = %e, "ME connect task failed"); + } + } + } + warn!(dc = %dc, "All ME servers for DC failed at init"); + return false; + } + + for (ip, port) in addrs { + let addr = SocketAddr::new(ip, port); + match self.connect_one(addr, rng.as_ref()).await { + Ok(()) => { + info!(%addr, dc = %dc, "ME connected"); + return true; + } + Err(e) => warn!(%addr, dc = %dc, error = %e, "ME connect failed, trying next"), + } + } + warn!(dc = %dc, "All ME servers for DC failed at init"); + false + } +} diff --git a/src/transport/middle_proxy/pool_refill.rs b/src/transport/middle_proxy/pool_refill.rs new file mode 100644 index 0000000..6dea6c9 --- /dev/null +++ b/src/transport/middle_proxy/pool_refill.rs @@ -0,0 +1,159 @@ +use std::collections::HashSet; +use std::net::SocketAddr; +use std::sync::Arc; +use std::sync::atomic::Ordering; + +use tracing::{debug, info, warn}; + +use crate::crypto::SecureRandom; + +use super::pool::MePool; + +impl MePool { + pub(super) async fn connect_endpoints_round_robin( + self: &Arc, + endpoints: &[SocketAddr], + rng: &SecureRandom, + ) -> bool { + if endpoints.is_empty() { + return false; + } + let start = (self.rr.fetch_add(1, Ordering::Relaxed) as usize) % endpoints.len(); + for offset in 0..endpoints.len() { + let idx = (start + offset) % endpoints.len(); + let addr = endpoints[idx]; + match 
self.connect_one(addr, rng).await { + Ok(()) => return true, + Err(e) => debug!(%addr, error = %e, "ME connect failed during round-robin warmup"), + } + } + false + } + + async fn endpoints_for_same_dc(&self, addr: SocketAddr) -> Vec { + let mut target_dc = HashSet::::new(); + let mut endpoints = HashSet::::new(); + + if self.decision.ipv4_me { + let map = self.proxy_map_v4.read().await.clone(); + for (dc, addrs) in &map { + if addrs + .iter() + .any(|(ip, port)| SocketAddr::new(*ip, *port) == addr) + { + target_dc.insert(dc.abs()); + } + } + for dc in &target_dc { + for key in [*dc, -*dc] { + if let Some(addrs) = map.get(&key) { + for (ip, port) in addrs { + endpoints.insert(SocketAddr::new(*ip, *port)); + } + } + } + } + } + + if self.decision.ipv6_me { + let map = self.proxy_map_v6.read().await.clone(); + for (dc, addrs) in &map { + if addrs + .iter() + .any(|(ip, port)| SocketAddr::new(*ip, *port) == addr) + { + target_dc.insert(dc.abs()); + } + } + for dc in &target_dc { + for key in [*dc, -*dc] { + if let Some(addrs) = map.get(&key) { + for (ip, port) in addrs { + endpoints.insert(SocketAddr::new(*ip, *port)); + } + } + } + } + } + + let mut sorted: Vec = endpoints.into_iter().collect(); + sorted.sort_unstable(); + sorted + } + + async fn refill_writer_after_loss(self: &Arc, addr: SocketAddr) -> bool { + let fast_retries = self.me_reconnect_fast_retry_count.max(1); + + for attempt in 0..fast_retries { + self.stats.increment_me_reconnect_attempt(); + match self.connect_one(addr, self.rng.as_ref()).await { + Ok(()) => { + self.stats.increment_me_reconnect_success(); + self.stats.increment_me_writer_restored_same_endpoint_total(); + info!( + %addr, + attempt = attempt + 1, + "ME writer restored on the same endpoint" + ); + return true; + } + Err(e) => { + debug!( + %addr, + attempt = attempt + 1, + error = %e, + "ME immediate same-endpoint reconnect failed" + ); + } + } + } + + let dc_endpoints = self.endpoints_for_same_dc(addr).await; + if 
dc_endpoints.is_empty() { + self.stats.increment_me_refill_failed_total(); + return false; + } + + for attempt in 0..fast_retries { + self.stats.increment_me_reconnect_attempt(); + if self + .connect_endpoints_round_robin(&dc_endpoints, self.rng.as_ref()) + .await + { + self.stats.increment_me_reconnect_success(); + self.stats.increment_me_writer_restored_fallback_total(); + info!( + %addr, + attempt = attempt + 1, + "ME writer restored via DC fallback endpoint" + ); + return true; + } + } + + self.stats.increment_me_refill_failed_total(); + false + } + + pub(crate) fn trigger_immediate_refill(self: &Arc, addr: SocketAddr) { + let pool = Arc::clone(self); + tokio::spawn(async move { + { + let mut guard = pool.refill_inflight.lock().await; + if !guard.insert(addr) { + pool.stats.increment_me_refill_skipped_inflight_total(); + return; + } + } + pool.stats.increment_me_refill_triggered_total(); + + let restored = pool.refill_writer_after_loss(addr).await; + if !restored { + warn!(%addr, "ME immediate refill failed"); + } + + let mut guard = pool.refill_inflight.lock().await; + guard.remove(&addr); + }); + } +} diff --git a/src/transport/middle_proxy/pool_reinit.rs b/src/transport/middle_proxy/pool_reinit.rs new file mode 100644 index 0000000..261ac02 --- /dev/null +++ b/src/transport/middle_proxy/pool_reinit.rs @@ -0,0 +1,383 @@ +use std::collections::{HashMap, HashSet}; +use std::net::SocketAddr; +use std::sync::Arc; +use std::sync::atomic::Ordering; +use std::time::Duration; + +use rand::Rng; +use rand::seq::SliceRandom; +use tracing::{debug, info, warn}; + +use crate::crypto::SecureRandom; + +use super::pool::MePool; + +impl MePool { + fn coverage_ratio( + desired_by_dc: &HashMap>, + active_writer_addrs: &HashSet, + ) -> (f32, Vec) { + if desired_by_dc.is_empty() { + return (1.0, Vec::new()); + } + + let mut missing_dc = Vec::::new(); + let mut covered = 0usize; + for (dc, endpoints) in desired_by_dc { + if endpoints.is_empty() { + continue; + } + if endpoints + 
.iter() + .any(|addr| active_writer_addrs.contains(addr)) + { + covered += 1; + } else { + missing_dc.push(*dc); + } + } + + missing_dc.sort_unstable(); + let total = desired_by_dc.len().max(1); + let ratio = (covered as f32) / (total as f32); + (ratio, missing_dc) + } + + pub async fn reconcile_connections(self: &Arc, rng: &SecureRandom) { + let writers = self.writers.read().await; + let current: HashSet = writers + .iter() + .filter(|w| !w.draining.load(Ordering::Relaxed)) + .map(|w| w.addr) + .collect(); + drop(writers); + + for family in self.family_order() { + let map = self.proxy_map_for_family(family).await; + for (_dc, addrs) in &map { + let dc_addrs: Vec = addrs + .iter() + .map(|(ip, port)| SocketAddr::new(*ip, *port)) + .collect(); + if !dc_addrs.iter().any(|a| current.contains(a)) { + let mut shuffled = dc_addrs.clone(); + shuffled.shuffle(&mut rand::rng()); + for addr in shuffled { + if self.connect_one(addr, rng).await.is_ok() { + break; + } + } + } + } + if !self.decision.effective_multipath && !current.is_empty() { + break; + } + } + } + + async fn desired_dc_endpoints(&self) -> HashMap> { + let mut out: HashMap> = HashMap::new(); + + if self.decision.ipv4_me { + let map_v4 = self.proxy_map_v4.read().await.clone(); + for (dc, addrs) in map_v4 { + let entry = out.entry(dc.abs()).or_default(); + for (ip, port) in addrs { + entry.insert(SocketAddr::new(ip, port)); + } + } + } + + if self.decision.ipv6_me { + let map_v6 = self.proxy_map_v6.read().await.clone(); + for (dc, addrs) in map_v6 { + let entry = out.entry(dc.abs()).or_default(); + for (ip, port) in addrs { + entry.insert(SocketAddr::new(ip, port)); + } + } + } + + out + } + + pub(super) fn required_writers_for_dc(endpoint_count: usize) -> usize { + endpoint_count.max(3) + } + + fn hardswap_warmup_connect_delay_ms(&self) -> u64 { + let min_ms = self.me_hardswap_warmup_delay_min_ms.load(Ordering::Relaxed); + let max_ms = self.me_hardswap_warmup_delay_max_ms.load(Ordering::Relaxed); + let (min_ms, 
max_ms) = if min_ms <= max_ms { + (min_ms, max_ms) + } else { + (max_ms, min_ms) + }; + if min_ms == max_ms { + return min_ms; + } + rand::rng().random_range(min_ms..=max_ms) + } + + fn hardswap_warmup_backoff_ms(&self, pass_idx: usize) -> u64 { + let base_ms = self + .me_hardswap_warmup_pass_backoff_base_ms + .load(Ordering::Relaxed); + let cap_ms = (self.me_reconnect_backoff_cap.as_millis() as u64).max(base_ms); + let shift = (pass_idx as u32).min(20); + let scaled = base_ms.saturating_mul(1u64 << shift); + let core = scaled.min(cap_ms); + let jitter = (core / 2).max(1); + core.saturating_add(rand::rng().random_range(0..=jitter)) + } + + async fn fresh_writer_count_for_endpoints( + &self, + generation: u64, + endpoints: &HashSet, + ) -> usize { + let ws = self.writers.read().await; + ws.iter() + .filter(|w| !w.draining.load(Ordering::Relaxed)) + .filter(|w| w.generation == generation) + .filter(|w| endpoints.contains(&w.addr)) + .count() + } + + pub(super) async fn active_writer_count_for_endpoints( + &self, + endpoints: &HashSet, + ) -> usize { + let ws = self.writers.read().await; + ws.iter() + .filter(|w| !w.draining.load(Ordering::Relaxed)) + .filter(|w| endpoints.contains(&w.addr)) + .count() + } + + async fn warmup_generation_for_all_dcs( + self: &Arc, + rng: &SecureRandom, + generation: u64, + desired_by_dc: &HashMap>, + ) { + let extra_passes = self + .me_hardswap_warmup_extra_passes + .load(Ordering::Relaxed) + .min(10) as usize; + let total_passes = 1 + extra_passes; + + for (dc, endpoints) in desired_by_dc { + if endpoints.is_empty() { + continue; + } + + let mut endpoint_list: Vec = endpoints.iter().copied().collect(); + endpoint_list.sort_unstable(); + let required = Self::required_writers_for_dc(endpoint_list.len()); + let mut completed = false; + let mut last_fresh_count = self + .fresh_writer_count_for_endpoints(generation, endpoints) + .await; + + for pass_idx in 0..total_passes { + if last_fresh_count >= required { + completed = true; + break; + 
} + + let missing = required.saturating_sub(last_fresh_count); + debug!( + dc = *dc, + pass = pass_idx + 1, + total_passes, + fresh_count = last_fresh_count, + required, + missing, + endpoint_count = endpoint_list.len(), + "ME hardswap warmup pass started" + ); + + for attempt_idx in 0..missing { + let delay_ms = self.hardswap_warmup_connect_delay_ms(); + tokio::time::sleep(Duration::from_millis(delay_ms)).await; + + let connected = self.connect_endpoints_round_robin(&endpoint_list, rng).await; + debug!( + dc = *dc, + pass = pass_idx + 1, + total_passes, + attempt = attempt_idx + 1, + delay_ms, + connected, + "ME hardswap warmup connect attempt finished" + ); + } + + last_fresh_count = self + .fresh_writer_count_for_endpoints(generation, endpoints) + .await; + if last_fresh_count >= required { + completed = true; + info!( + dc = *dc, + pass = pass_idx + 1, + total_passes, + fresh_count = last_fresh_count, + required, + "ME hardswap warmup floor reached for DC" + ); + break; + } + + if pass_idx + 1 < total_passes { + let backoff_ms = self.hardswap_warmup_backoff_ms(pass_idx); + debug!( + dc = *dc, + pass = pass_idx + 1, + total_passes, + fresh_count = last_fresh_count, + required, + backoff_ms, + "ME hardswap warmup pass incomplete, delaying next pass" + ); + tokio::time::sleep(Duration::from_millis(backoff_ms)).await; + } + } + + if !completed { + warn!( + dc = *dc, + fresh_count = last_fresh_count, + required, + endpoint_count = endpoint_list.len(), + total_passes, + "ME warmup stopped: unable to reach required writer floor for DC" + ); + } + } + } + + pub async fn zero_downtime_reinit_after_map_change(self: &Arc, rng: &SecureRandom) { + let desired_by_dc = self.desired_dc_endpoints().await; + if desired_by_dc.is_empty() { + warn!("ME endpoint map is empty; skipping stale writer drain"); + return; + } + + let previous_generation = self.current_generation(); + let generation = self.generation.fetch_add(1, Ordering::Relaxed) + 1; + let hardswap = 
self.hardswap.load(Ordering::Relaxed); + + if hardswap { + self.warmup_generation_for_all_dcs(rng, generation, &desired_by_dc) + .await; + } else { + self.reconcile_connections(rng).await; + } + + let writers = self.writers.read().await; + let active_writer_addrs: HashSet = writers + .iter() + .filter(|w| !w.draining.load(Ordering::Relaxed)) + .map(|w| w.addr) + .collect(); + let min_ratio = Self::permille_to_ratio( + self.me_pool_min_fresh_ratio_permille + .load(Ordering::Relaxed), + ); + let (coverage_ratio, missing_dc) = Self::coverage_ratio(&desired_by_dc, &active_writer_addrs); + if !hardswap && coverage_ratio < min_ratio { + warn!( + previous_generation, + generation, + coverage_ratio = format_args!("{coverage_ratio:.3}"), + min_ratio = format_args!("{min_ratio:.3}"), + missing_dc = ?missing_dc, + "ME reinit coverage below threshold; keeping stale writers" + ); + return; + } + + if hardswap { + let mut fresh_missing_dc = Vec::<(i32, usize, usize)>::new(); + for (dc, endpoints) in &desired_by_dc { + if endpoints.is_empty() { + continue; + } + let required = Self::required_writers_for_dc(endpoints.len()); + let fresh_count = writers + .iter() + .filter(|w| !w.draining.load(Ordering::Relaxed)) + .filter(|w| w.generation == generation) + .filter(|w| endpoints.contains(&w.addr)) + .count(); + if fresh_count < required { + fresh_missing_dc.push((*dc, fresh_count, required)); + } + } + if !fresh_missing_dc.is_empty() { + warn!( + previous_generation, + generation, + missing_dc = ?fresh_missing_dc, + "ME hardswap pending: fresh generation coverage incomplete" + ); + return; + } + } else if !missing_dc.is_empty() { + warn!( + missing_dc = ?missing_dc, + // Keep stale writers alive when fresh coverage is incomplete. 
+ "ME reinit coverage incomplete; keeping stale writers" + ); + return; + } + + let desired_addrs: HashSet = desired_by_dc + .values() + .flat_map(|set| set.iter().copied()) + .collect(); + + let stale_writer_ids: Vec = writers + .iter() + .filter(|w| !w.draining.load(Ordering::Relaxed)) + .filter(|w| { + if hardswap { + w.generation < generation + } else { + !desired_addrs.contains(&w.addr) + } + }) + .map(|w| w.id) + .collect(); + drop(writers); + + if stale_writer_ids.is_empty() { + debug!("ME reinit cycle completed with no stale writers"); + return; + } + + let drain_timeout = self.force_close_timeout(); + let drain_timeout_secs = drain_timeout.map(|d| d.as_secs()).unwrap_or(0); + info!( + stale_writers = stale_writer_ids.len(), + previous_generation, + generation, + hardswap, + coverage_ratio = format_args!("{coverage_ratio:.3}"), + min_ratio = format_args!("{min_ratio:.3}"), + drain_timeout_secs, + "ME reinit cycle covered; draining stale writers" + ); + self.stats.increment_pool_swap_total(); + for writer_id in stale_writer_ids { + self.mark_writer_draining_with_timeout(writer_id, drain_timeout, !hardswap) + .await; + } + } + + pub async fn zero_downtime_reinit_periodic(self: &Arc, rng: &SecureRandom) { + self.zero_downtime_reinit_after_map_change(rng).await; + } +} diff --git a/src/transport/middle_proxy/pool_writer.rs b/src/transport/middle_proxy/pool_writer.rs new file mode 100644 index 0000000..a7d2960 --- /dev/null +++ b/src/transport/middle_proxy/pool_writer.rs @@ -0,0 +1,348 @@ +use std::net::SocketAddr; +use std::sync::Arc; +use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; +use std::time::{Duration, Instant}; + +use bytes::BytesMut; +use rand::Rng; +use tokio::sync::mpsc; +use tokio_util::sync::CancellationToken; +use tracing::{debug, info, warn}; + +use crate::crypto::SecureRandom; +use crate::error::{ProxyError, Result}; +use crate::protocol::constants::RPC_PING_U32; + +use super::codec::{RpcWriter, WriterCommand}; +use 
super::pool::{MePool, MeWriter}; +use super::reader::reader_loop; +use super::registry::BoundConn; + +const ME_ACTIVE_PING_SECS: u64 = 25; +const ME_ACTIVE_PING_JITTER_SECS: i64 = 5; + +impl MePool { + pub(crate) async fn prune_closed_writers(self: &Arc) { + let closed_writer_ids: Vec = { + let ws = self.writers.read().await; + ws.iter().filter(|w| w.tx.is_closed()).map(|w| w.id).collect() + }; + if closed_writer_ids.is_empty() { + return; + } + + for writer_id in closed_writer_ids { + if self.registry.is_writer_empty(writer_id).await { + let _ = self.remove_writer_only(writer_id).await; + } else { + let _ = self.remove_writer_and_close_clients(writer_id).await; + } + } + } + + pub(crate) async fn connect_one(self: &Arc, addr: SocketAddr, rng: &SecureRandom) -> Result<()> { + let secret_len = self.proxy_secret.read().await.len(); + if secret_len < 32 { + return Err(ProxyError::Proxy("proxy-secret too short for ME auth".into())); + } + + let (stream, _connect_ms) = self.connect_tcp(addr).await?; + let hs = self.handshake_only(stream, addr, rng).await?; + + let writer_id = self.next_writer_id.fetch_add(1, Ordering::Relaxed); + let generation = self.current_generation(); + let cancel = CancellationToken::new(); + let degraded = Arc::new(AtomicBool::new(false)); + let draining = Arc::new(AtomicBool::new(false)); + let draining_started_at_epoch_secs = Arc::new(AtomicU64::new(0)); + let allow_drain_fallback = Arc::new(AtomicBool::new(false)); + let (tx, mut rx) = mpsc::channel::(4096); + let mut rpc_writer = RpcWriter { + writer: hs.wr, + key: hs.write_key, + iv: hs.write_iv, + seq_no: 0, + crc_mode: hs.crc_mode, + }; + let cancel_wr = cancel.clone(); + tokio::spawn(async move { + loop { + tokio::select! 
{ + cmd = rx.recv() => { + match cmd { + Some(WriterCommand::Data(payload)) => { + if rpc_writer.send(&payload).await.is_err() { break; } + } + Some(WriterCommand::DataAndFlush(payload)) => { + if rpc_writer.send_and_flush(&payload).await.is_err() { break; } + } + Some(WriterCommand::Close) | None => break, + } + } + _ = cancel_wr.cancelled() => break, + } + } + }); + let writer = MeWriter { + id: writer_id, + addr, + generation, + tx: tx.clone(), + cancel: cancel.clone(), + degraded: degraded.clone(), + draining: draining.clone(), + draining_started_at_epoch_secs: draining_started_at_epoch_secs.clone(), + allow_drain_fallback: allow_drain_fallback.clone(), + }; + self.writers.write().await.push(writer.clone()); + self.conn_count.fetch_add(1, Ordering::Relaxed); + self.writer_available.notify_one(); + + let reg = self.registry.clone(); + let writers_arc = self.writers_arc(); + let ping_tracker = self.ping_tracker.clone(); + let ping_tracker_reader = ping_tracker.clone(); + let rtt_stats = self.rtt_stats.clone(); + let stats_reader = self.stats.clone(); + let stats_ping = self.stats.clone(); + let pool = Arc::downgrade(self); + let cancel_ping = cancel.clone(); + let tx_ping = tx.clone(); + let ping_tracker_ping = ping_tracker.clone(); + let cleanup_done = Arc::new(AtomicBool::new(false)); + let cleanup_for_reader = cleanup_done.clone(); + let cleanup_for_ping = cleanup_done.clone(); + let keepalive_enabled = self.me_keepalive_enabled; + let keepalive_interval = self.me_keepalive_interval; + let keepalive_jitter = self.me_keepalive_jitter; + let cancel_reader_token = cancel.clone(); + let cancel_ping_token = cancel_ping.clone(); + + tokio::spawn(async move { + let res = reader_loop( + hs.rd, + hs.read_key, + hs.read_iv, + hs.crc_mode, + reg.clone(), + BytesMut::new(), + BytesMut::new(), + tx.clone(), + ping_tracker_reader, + rtt_stats.clone(), + stats_reader, + writer_id, + degraded.clone(), + cancel_reader_token.clone(), + ) + .await; + if let Some(pool) = 
pool.upgrade() + && cleanup_for_reader + .compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed) + .is_ok() + { + pool.remove_writer_and_close_clients(writer_id).await; + } + if let Err(e) = res { + warn!(error = %e, "ME reader ended"); + } + let mut ws = writers_arc.write().await; + ws.retain(|w| w.id != writer_id); + info!(remaining = ws.len(), "Dead ME writer removed from pool"); + }); + + let pool_ping = Arc::downgrade(self); + tokio::spawn(async move { + let mut ping_id: i64 = rand::random::(); + // Per-writer jittered start to avoid phase sync. + let startup_jitter = if keepalive_enabled { + let jitter_cap_ms = keepalive_interval.as_millis() / 2; + let effective_jitter_ms = keepalive_jitter.as_millis().min(jitter_cap_ms).max(1); + Duration::from_millis(rand::rng().random_range(0..=effective_jitter_ms as u64)) + } else { + let jitter = rand::rng().random_range(-ME_ACTIVE_PING_JITTER_SECS..=ME_ACTIVE_PING_JITTER_SECS); + let wait = (ME_ACTIVE_PING_SECS as i64 + jitter).max(5) as u64; + Duration::from_secs(wait) + }; + tokio::select! { + _ = cancel_ping_token.cancelled() => return, + _ = tokio::time::sleep(startup_jitter) => {} + } + loop { + let wait = if keepalive_enabled { + let jitter_cap_ms = keepalive_interval.as_millis() / 2; + let effective_jitter_ms = keepalive_jitter.as_millis().min(jitter_cap_ms).max(1); + keepalive_interval + Duration::from_millis(rand::rng().random_range(0..=effective_jitter_ms as u64)) + } else { + let jitter = rand::rng().random_range(-ME_ACTIVE_PING_JITTER_SECS..=ME_ACTIVE_PING_JITTER_SECS); + let secs = (ME_ACTIVE_PING_SECS as i64 + jitter).max(5) as u64; + Duration::from_secs(secs) + }; + tokio::select! 
{ + _ = cancel_ping_token.cancelled() => { + break; + } + _ = tokio::time::sleep(wait) => {} + } + let sent_id = ping_id; + let mut p = Vec::with_capacity(12); + p.extend_from_slice(&RPC_PING_U32.to_le_bytes()); + p.extend_from_slice(&sent_id.to_le_bytes()); + { + let mut tracker = ping_tracker_ping.lock().await; + let before = tracker.len(); + tracker.retain(|_, (ts, _)| ts.elapsed() < Duration::from_secs(120)); + let expired = before.saturating_sub(tracker.len()); + if expired > 0 { + stats_ping.increment_me_keepalive_timeout_by(expired as u64); + } + tracker.insert(sent_id, (std::time::Instant::now(), writer_id)); + } + ping_id = ping_id.wrapping_add(1); + stats_ping.increment_me_keepalive_sent(); + if tx_ping.send(WriterCommand::DataAndFlush(p)).await.is_err() { + stats_ping.increment_me_keepalive_failed(); + debug!("ME ping failed, removing dead writer"); + cancel_ping.cancel(); + if let Some(pool) = pool_ping.upgrade() + && cleanup_for_ping + .compare_exchange(false, true, Ordering::AcqRel, Ordering::Relaxed) + .is_ok() + { + pool.remove_writer_and_close_clients(writer_id).await; + } + break; + } + } + }); + + Ok(()) + } + + pub(crate) async fn remove_writer_and_close_clients(self: &Arc, writer_id: u64) { + let conns = self.remove_writer_only(writer_id).await; + for bound in conns { + let _ = self.registry.route(bound.conn_id, super::MeResponse::Close).await; + let _ = self.registry.unregister(bound.conn_id).await; + } + } + + async fn remove_writer_only(self: &Arc, writer_id: u64) -> Vec { + let mut close_tx: Option> = None; + let mut removed_addr: Option = None; + let mut trigger_refill = false; + { + let mut ws = self.writers.write().await; + if let Some(pos) = ws.iter().position(|w| w.id == writer_id) { + let w = ws.remove(pos); + let was_draining = w.draining.load(Ordering::Relaxed); + if was_draining { + self.stats.decrement_pool_drain_active(); + } + self.stats.increment_me_writer_removed_total(); + w.cancel.cancel(); + removed_addr = Some(w.addr); + 
trigger_refill = !was_draining; + if trigger_refill { + self.stats.increment_me_writer_removed_unexpected_total(); + } + close_tx = Some(w.tx.clone()); + self.conn_count.fetch_sub(1, Ordering::Relaxed); + } + } + if let Some(tx) = close_tx { + let _ = tx.send(WriterCommand::Close).await; + } + if trigger_refill + && let Some(addr) = removed_addr + { + self.trigger_immediate_refill(addr); + } + self.rtt_stats.lock().await.remove(&writer_id); + self.registry.writer_lost(writer_id).await + } + + pub(crate) async fn mark_writer_draining_with_timeout( + self: &Arc, + writer_id: u64, + timeout: Option, + allow_drain_fallback: bool, + ) { + let timeout = timeout.filter(|d| !d.is_zero()); + let found = { + let mut ws = self.writers.write().await; + if let Some(w) = ws.iter_mut().find(|w| w.id == writer_id) { + let already_draining = w.draining.swap(true, Ordering::Relaxed); + w.allow_drain_fallback + .store(allow_drain_fallback, Ordering::Relaxed); + w.draining_started_at_epoch_secs + .store(Self::now_epoch_secs(), Ordering::Relaxed); + if !already_draining { + self.stats.increment_pool_drain_active(); + } + w.draining.store(true, Ordering::Relaxed); + true + } else { + false + } + }; + + if !found { + return; + } + + let timeout_secs = timeout.map(|d| d.as_secs()).unwrap_or(0); + debug!( + writer_id, + timeout_secs, + allow_drain_fallback, + "ME writer marked draining" + ); + + let pool = Arc::downgrade(self); + tokio::spawn(async move { + let deadline = timeout.map(|t| Instant::now() + t); + while let Some(p) = pool.upgrade() { + if let Some(deadline_at) = deadline + && Instant::now() >= deadline_at + { + warn!(writer_id, "Drain timeout, force-closing"); + p.stats.increment_pool_force_close_total(); + let _ = p.remove_writer_and_close_clients(writer_id).await; + break; + } + if p.registry.is_writer_empty(writer_id).await { + let _ = p.remove_writer_only(writer_id).await; + break; + } + tokio::time::sleep(Duration::from_secs(1)).await; + } + }); + } + + pub(crate) async 
fn mark_writer_draining(self: &Arc, writer_id: u64) { + self.mark_writer_draining_with_timeout(writer_id, Some(Duration::from_secs(300)), false) + .await; + } + + pub(super) fn writer_accepts_new_binding(&self, writer: &MeWriter) -> bool { + if !writer.draining.load(Ordering::Relaxed) { + return true; + } + if !writer.allow_drain_fallback.load(Ordering::Relaxed) { + return false; + } + + let ttl_secs = self.me_pool_drain_ttl_secs.load(Ordering::Relaxed); + if ttl_secs == 0 { + return true; + } + + let started = writer.draining_started_at_epoch_secs.load(Ordering::Relaxed); + if started == 0 { + return false; + } + + Self::now_epoch_secs().saturating_sub(started) <= ttl_secs + } +} From 04e6135935446b9ba081ef9deb38e2806e16d602 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Thu, 26 Feb 2026 19:35:34 +0300 Subject: [PATCH 73/98] TLS-F Fetching Optimization --- src/main.rs | 199 +++++++++++++++++++++++++++++++++------------------- 1 file changed, 127 insertions(+), 72 deletions(-) diff --git a/src/main.rs b/src/main.rs index da88fe3..95f7e5a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -254,6 +254,133 @@ async fn main() -> std::result::Result<(), Box> { warn!("Using default tls_domain. Consider setting a custom domain."); } + let upstream_manager = Arc::new(UpstreamManager::new(config.upstreams.clone())); + + let mut tls_domains = Vec::with_capacity(1 + config.censorship.tls_domains.len()); + tls_domains.push(config.censorship.tls_domain.clone()); + for d in &config.censorship.tls_domains { + if !tls_domains.contains(d) { + tls_domains.push(d.clone()); + } + } + + // Start TLS front fetching in background immediately, in parallel with STUN probing. 
+ let tls_cache: Option> = if config.censorship.tls_emulation { + let cache = Arc::new(TlsFrontCache::new( + &tls_domains, + config.censorship.fake_cert_len, + &config.censorship.tls_front_dir, + )); + cache.load_from_disk().await; + + let port = config.censorship.mask_port; + let proxy_protocol = config.censorship.mask_proxy_protocol; + let mask_host = config + .censorship + .mask_host + .clone() + .unwrap_or_else(|| config.censorship.tls_domain.clone()); + let fetch_timeout = Duration::from_secs(5); + + let cache_initial = cache.clone(); + let domains_initial = tls_domains.clone(); + let host_initial = mask_host.clone(); + let upstream_initial = upstream_manager.clone(); + tokio::spawn(async move { + let mut join = tokio::task::JoinSet::new(); + for domain in domains_initial { + let cache_domain = cache_initial.clone(); + let host_domain = host_initial.clone(); + let upstream_domain = upstream_initial.clone(); + join.spawn(async move { + match crate::tls_front::fetcher::fetch_real_tls( + &host_domain, + port, + &domain, + fetch_timeout, + Some(upstream_domain), + proxy_protocol, + ) + .await + { + Ok(res) => cache_domain.update_from_fetch(&domain, res).await, + Err(e) => { + warn!(domain = %domain, error = %e, "TLS emulation initial fetch failed") + } + } + }); + } + while let Some(res) = join.join_next().await { + if let Err(e) = res { + warn!(error = %e, "TLS emulation initial fetch task join failed"); + } + } + }); + + let cache_timeout = cache.clone(); + let domains_timeout = tls_domains.clone(); + let fake_cert_len = config.censorship.fake_cert_len; + tokio::spawn(async move { + tokio::time::sleep(fetch_timeout).await; + for domain in domains_timeout { + let cached = cache_timeout.get(&domain).await; + if cached.domain == "default" { + warn!( + domain = %domain, + timeout_secs = fetch_timeout.as_secs(), + fake_cert_len, + "TLS-front fetch not ready within timeout; using cache/default fake cert fallback" + ); + } + } + }); + + // Periodic refresh with jitter. 
+ let cache_refresh = cache.clone(); + let domains_refresh = tls_domains.clone(); + let host_refresh = mask_host.clone(); + let upstream_refresh = upstream_manager.clone(); + tokio::spawn(async move { + loop { + let base_secs = rand::rng().random_range(4 * 3600..=6 * 3600); + let jitter_secs = rand::rng().random_range(0..=7200); + tokio::time::sleep(Duration::from_secs(base_secs + jitter_secs)).await; + + let mut join = tokio::task::JoinSet::new(); + for domain in domains_refresh.clone() { + let cache_domain = cache_refresh.clone(); + let host_domain = host_refresh.clone(); + let upstream_domain = upstream_refresh.clone(); + join.spawn(async move { + match crate::tls_front::fetcher::fetch_real_tls( + &host_domain, + port, + &domain, + fetch_timeout, + Some(upstream_domain), + proxy_protocol, + ) + .await + { + Ok(res) => cache_domain.update_from_fetch(&domain, res).await, + Err(e) => warn!(domain = %domain, error = %e, "TLS emulation refresh failed"), + } + }); + } + + while let Some(res) = join.join_next().await { + if let Err(e) = res { + warn!(error = %e, "TLS emulation refresh task join failed"); + } + } + } + }); + + Some(cache) + } else { + None + }; + let probe = run_probe( &config.network, config.general.middle_proxy_nat_probe, @@ -450,80 +577,8 @@ async fn main() -> std::result::Result<(), Box> { Duration::from_secs(config.access.replay_window_secs), )); - let upstream_manager = Arc::new(UpstreamManager::new(config.upstreams.clone())); let buffer_pool = Arc::new(BufferPool::with_config(16 * 1024, 4096)); - // TLS front cache (optional emulation) - let mut tls_domains = Vec::with_capacity(1 + config.censorship.tls_domains.len()); - tls_domains.push(config.censorship.tls_domain.clone()); - for d in &config.censorship.tls_domains { - if !tls_domains.contains(d) { - tls_domains.push(d.clone()); - } - } - - let tls_cache: Option> = if config.censorship.tls_emulation { - let cache = Arc::new(TlsFrontCache::new( - &tls_domains, - config.censorship.fake_cert_len, 
- &config.censorship.tls_front_dir, - )); - - cache.load_from_disk().await; - - let port = config.censorship.mask_port; - let mask_host = config.censorship.mask_host.clone() - .unwrap_or_else(|| config.censorship.tls_domain.clone()); - // Initial synchronous fetch to warm cache before serving clients. - for domain in tls_domains.clone() { - match crate::tls_front::fetcher::fetch_real_tls( - &mask_host, - port, - &domain, - Duration::from_secs(5), - Some(upstream_manager.clone()), - config.censorship.mask_proxy_protocol, - ) - .await - { - Ok(res) => cache.update_from_fetch(&domain, res).await, - Err(e) => warn!(domain = %domain, error = %e, "TLS emulation fetch failed"), - } - } - - // Periodic refresh with jitter. - let cache_clone = cache.clone(); - let domains = tls_domains.clone(); - let upstream_for_task = upstream_manager.clone(); - let proxy_protocol = config.censorship.mask_proxy_protocol; - tokio::spawn(async move { - loop { - let base_secs = rand::rng().random_range(4 * 3600..=6 * 3600); - let jitter_secs = rand::rng().random_range(0..=7200); - tokio::time::sleep(Duration::from_secs(base_secs + jitter_secs)).await; - for domain in &domains { - match crate::tls_front::fetcher::fetch_real_tls( - &mask_host, - port, - domain, - Duration::from_secs(5), - Some(upstream_for_task.clone()), - proxy_protocol, - ) - .await - { - Ok(res) => cache_clone.update_from_fetch(domain, res).await, - Err(e) => warn!(domain = %domain, error = %e, "TLS emulation refresh failed"), - } - } - } - }); - - Some(cache) - } else { - None - }; - // Middle-End ping before DC connectivity if let Some(ref pool) = me_pool { let me_results = run_me_ping(pool, &rng).await; From 144f81c4730203ab550c958245e3fcb0ae2f9d03 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Thu, 26 Feb 2026 19:37:17 +0300 Subject: [PATCH 74/98] ME Dead Writer w/o dead-lock on timeout --- src/transport/middle_proxy/pool_writer.rs | 24 ++++++++++++++++++++--- 1 file changed, 
21 insertions(+), 3 deletions(-) diff --git a/src/transport/middle_proxy/pool_writer.rs b/src/transport/middle_proxy/pool_writer.rs index a7d2960..942ddaf 100644 --- a/src/transport/middle_proxy/pool_writer.rs +++ b/src/transport/middle_proxy/pool_writer.rs @@ -20,6 +20,7 @@ use super::registry::BoundConn; const ME_ACTIVE_PING_SECS: u64 = 25; const ME_ACTIVE_PING_JITTER_SECS: i64 = 5; +const ME_IDLE_KEEPALIVE_MAX_SECS: u64 = 5; impl MePool { pub(crate) async fn prune_closed_writers(self: &Arc) { @@ -154,9 +155,18 @@ impl MePool { let pool_ping = Arc::downgrade(self); tokio::spawn(async move { let mut ping_id: i64 = rand::random::(); + let idle_interval_cap = Duration::from_secs(ME_IDLE_KEEPALIVE_MAX_SECS); // Per-writer jittered start to avoid phase sync. let startup_jitter = if keepalive_enabled { - let jitter_cap_ms = keepalive_interval.as_millis() / 2; + let mut interval = keepalive_interval; + if let Some(pool) = pool_ping.upgrade() { + if pool.registry.is_writer_empty(writer_id).await { + interval = interval.min(idle_interval_cap); + } + } else { + return; + } + let jitter_cap_ms = interval.as_millis() / 2; let effective_jitter_ms = keepalive_jitter.as_millis().min(jitter_cap_ms).max(1); Duration::from_millis(rand::rng().random_range(0..=effective_jitter_ms as u64)) } else { @@ -170,9 +180,17 @@ impl MePool { } loop { let wait = if keepalive_enabled { - let jitter_cap_ms = keepalive_interval.as_millis() / 2; + let mut interval = keepalive_interval; + if let Some(pool) = pool_ping.upgrade() { + if pool.registry.is_writer_empty(writer_id).await { + interval = interval.min(idle_interval_cap); + } + } else { + break; + } + let jitter_cap_ms = interval.as_millis() / 2; let effective_jitter_ms = keepalive_jitter.as_millis().min(jitter_cap_ms).max(1); - keepalive_interval + Duration::from_millis(rand::rng().random_range(0..=effective_jitter_ms as u64)) + interval + Duration::from_millis(rand::rng().random_range(0..=effective_jitter_ms as u64)) } else { let jitter = 
rand::rng().random_range(-ME_ACTIVE_PING_JITTER_SECS..=ME_ACTIVE_PING_JITTER_SECS); let secs = (ME_ACTIVE_PING_SECS as i64 + jitter).max(5) as u64; From 60231224ac50eb7390090b805e9a179d99b2bd81 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Thu, 26 Feb 2026 19:41:37 +0300 Subject: [PATCH 75/98] Update Cargo.toml --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 994e11f..1d135f3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "telemt" -version = "3.1.0" +version = "3.1.2" edition = "2024" [dependencies] From 54ee6ff8105bdf96e75c2d880aec3e9981705ea9 Mon Sep 17 00:00:00 2001 From: nimbo78 <57285184+nimbo78@users.noreply.github.com> Date: Fri, 27 Feb 2026 01:53:22 +0300 Subject: [PATCH 76/98] Update docker-compose.yml docker pull image first, if fail - build --- docker-compose.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/docker-compose.yml b/docker-compose.yml index 8386caf..01e06bb 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,5 +1,6 @@ services: telemt: + image: ghcr.io/telemt/telemt:latest build: . 
container_name: telemt restart: unless-stopped From ac064fe7738a7f1a1c87f2bcf7f5648a3f3da60c Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Fri, 27 Feb 2026 15:59:27 +0300 Subject: [PATCH 77/98] STUN switch + Ad-tag fixes + DNS-overrides --- src/config/hot_reload.rs | 20 ++++ src/config/load.rs | 115 +++++++++++++++++++- src/config/types.rs | 12 +++ src/main.rs | 39 +++++-- src/metrics.rs | 99 +++++++++++++++--- src/network/dns_overrides.rs | 197 +++++++++++++++++++++++++++++++++++ src/network/mod.rs | 1 + src/network/probe.rs | 5 +- src/network/stun.rs | 11 ++ src/proxy/masking.rs | 7 +- src/tls_front/fetcher.rs | 34 ++++-- src/transport/upstream.rs | 46 ++++---- 12 files changed, 530 insertions(+), 56 deletions(-) create mode 100644 src/network/dns_overrides.rs diff --git a/src/config/hot_reload.rs b/src/config/hot_reload.rs index c949104..acc64cd 100644 --- a/src/config/hot_reload.rs +++ b/src/config/hot_reload.rs @@ -16,6 +16,7 @@ //! | `general` | `me_pool_drain_ttl_secs` | Applied on next ME map update | //! | `general` | `me_pool_min_fresh_ratio` | Applied on next ME map update | //! | `general` | `me_reinit_drain_timeout_secs`| Applied on next ME map update | +//! | `network` | `dns_overrides` | Applied immediately | //! | `access` | All user/quota fields | Effective immediately | //! //! 
Fields that require re-binding sockets (`server.port`, `censorship.*`, @@ -39,6 +40,7 @@ use super::load::ProxyConfig; pub struct HotFields { pub log_level: LogLevel, pub ad_tag: Option, + pub dns_overrides: Vec, pub middle_proxy_pool_size: usize, pub desync_all_full: bool, pub update_every_secs: u64, @@ -58,6 +60,7 @@ impl HotFields { Self { log_level: cfg.general.log_level.clone(), ad_tag: cfg.general.ad_tag.clone(), + dns_overrides: cfg.network.dns_overrides.clone(), middle_proxy_pool_size: cfg.general.middle_proxy_pool_size, desync_all_full: cfg.general.desync_all_full, update_every_secs: cfg.general.effective_update_every_secs(), @@ -189,6 +192,13 @@ fn log_changes( ); } + if old_hot.dns_overrides != new_hot.dns_overrides { + info!( + "config reload: network.dns_overrides updated ({} entries)", + new_hot.dns_overrides.len() + ); + } + if old_hot.middle_proxy_pool_size != new_hot.middle_proxy_pool_size { info!( "config reload: middle_proxy_pool_size: {} → {}", @@ -354,6 +364,16 @@ fn reload_config( return; } + if old_hot.dns_overrides != new_hot.dns_overrides + && let Err(e) = crate::network::dns_overrides::install_entries(&new_hot.dns_overrides) + { + error!( + "config reload: invalid network.dns_overrides: {}; keeping old config", + e + ); + return; + } + warn_non_hot_changes(&old_cfg, &new_cfg); log_changes(&old_hot, &new_hot, &new_cfg, log_tx, detected_ip_v4, detected_ip_v6); config_tx.send(Arc::new(new_cfg)).ok(); diff --git a/src/config/load.rs b/src/config/load.rs index 4e0e104..c1bbdef 100644 --- a/src/config/load.rs +++ b/src/config/load.rs @@ -75,6 +75,23 @@ fn push_unique_nonempty(target: &mut Vec, value: String) { } } +fn is_valid_ad_tag(tag: &str) -> bool { + tag.len() == 32 && tag.chars().all(|ch| ch.is_ascii_hexdigit()) +} + +fn sanitize_ad_tag(ad_tag: &mut Option) { + let Some(tag) = ad_tag.as_ref() else { + return; + }; + + if !is_valid_ad_tag(tag) { + warn!( + "Invalid general.ad_tag value, expected exactly 32 hex chars; ad_tag is disabled" + 
); + *ad_tag = None; + } +} + // ============= Main Config ============= #[derive(Debug, Clone, Serialize, Deserialize, Default)] @@ -184,6 +201,8 @@ impl ProxyConfig { } } + sanitize_ad_tag(&mut config.general.ad_tag); + if let Some(update_every) = config.general.update_every { if update_every == 0 { return Err(ProxyError::Config( @@ -380,6 +399,7 @@ impl ProxyConfig { } validate_network_cfg(&mut config.network)?; + crate::network::dns_overrides::validate_entries(&config.network.dns_overrides)?; if config.general.use_middle_proxy && config.network.ipv6 == Some(true) { warn!("IPv6 with Middle Proxy is experimental and may cause KDF address mismatch; consider disabling IPv6 or ME"); @@ -482,14 +502,18 @@ impl ProxyConfig { if let Some(tag) = &self.general.ad_tag { let zeros = "00000000000000000000000000000000"; + if !is_valid_ad_tag(tag) { + return Err(ProxyError::Config( + "general.ad_tag must be exactly 32 hex characters".to_string(), + )); + } if tag == zeros { warn!("ad_tag is all zeros; register a valid proxy tag via @MTProxybot to enable sponsored channel"); } - if tag.len() != 32 || tag.chars().any(|c| !c.is_ascii_hexdigit()) { - warn!("ad_tag is not a 32-char hex string; ensure you use value issued by @MTProxybot"); - } } + crate::network::dns_overrides::validate_entries(&self.network.dns_overrides)?; + Ok(()) } } @@ -509,6 +533,7 @@ mod tests { let cfg: ProxyConfig = toml::from_str(toml).unwrap(); assert_eq!(cfg.network.ipv6, default_network_ipv6()); + assert_eq!(cfg.network.stun_use, default_true()); assert_eq!(cfg.network.stun_tcp_fallback, default_stun_tcp_fallback()); assert_eq!( cfg.general.middle_proxy_warm_standby, @@ -532,6 +557,7 @@ mod tests { fn impl_defaults_are_sourced_from_default_helpers() { let network = NetworkConfig::default(); assert_eq!(network.ipv6, default_network_ipv6()); + assert_eq!(network.stun_use, default_true()); assert_eq!(network.stun_tcp_fallback, default_stun_tcp_fallback()); let general = GeneralConfig::default(); @@ -934,4 
+960,87 @@ mod tests { assert_eq!(cfg.general.me_reinit_drain_timeout_secs, 90); let _ = std::fs::remove_file(path); } + + #[test] + fn invalid_ad_tag_is_disabled_during_load() { + let toml = r#" + [general] + ad_tag = "not_hex" + + [censorship] + tls_domain = "example.com" + + [access.users] + user = "00000000000000000000000000000000" + "#; + let dir = std::env::temp_dir(); + let path = dir.join("telemt_invalid_ad_tag_test.toml"); + std::fs::write(&path, toml).unwrap(); + let cfg = ProxyConfig::load(&path).unwrap(); + assert!(cfg.general.ad_tag.is_none()); + let _ = std::fs::remove_file(path); + } + + #[test] + fn valid_ad_tag_is_preserved_during_load() { + let toml = r#" + [general] + ad_tag = "00112233445566778899aabbccddeeff" + + [censorship] + tls_domain = "example.com" + + [access.users] + user = "00000000000000000000000000000000" + "#; + let dir = std::env::temp_dir(); + let path = dir.join("telemt_valid_ad_tag_test.toml"); + std::fs::write(&path, toml).unwrap(); + let cfg = ProxyConfig::load(&path).unwrap(); + assert_eq!( + cfg.general.ad_tag.as_deref(), + Some("00112233445566778899aabbccddeeff") + ); + let _ = std::fs::remove_file(path); + } + + #[test] + fn invalid_dns_override_is_rejected() { + let toml = r#" + [network] + dns_overrides = ["example.com:443:2001:db8::10"] + + [censorship] + tls_domain = "example.com" + + [access.users] + user = "00000000000000000000000000000000" + "#; + let dir = std::env::temp_dir(); + let path = dir.join("telemt_invalid_dns_override_test.toml"); + std::fs::write(&path, toml).unwrap(); + let err = ProxyConfig::load(&path).unwrap_err().to_string(); + assert!(err.contains("must be bracketed")); + let _ = std::fs::remove_file(path); + } + + #[test] + fn valid_dns_override_is_accepted() { + let toml = r#" + [network] + dns_overrides = ["example.com:443:127.0.0.1", "example.net:443:[2001:db8::10]"] + + [censorship] + tls_domain = "example.com" + + [access.users] + user = "00000000000000000000000000000000" + "#; + let dir = 
std::env::temp_dir(); + let path = dir.join("telemt_valid_dns_override_test.toml"); + std::fs::write(&path, toml).unwrap(); + let cfg = ProxyConfig::load(&path).unwrap(); + assert_eq!(cfg.network.dns_overrides.len(), 2); + let _ = std::fs::remove_file(path); + } } diff --git a/src/config/types.rs b/src/config/types.rs index 68086be..7d9f13a 100644 --- a/src/config/types.rs +++ b/src/config/types.rs @@ -97,6 +97,11 @@ pub struct NetworkConfig { #[serde(default)] pub multipath: bool, + /// Global switch for STUN probing. + /// When false, STUN is fully disabled and only non-STUN detection remains. + #[serde(default = "default_true")] + pub stun_use: bool, + /// STUN servers list for public IP discovery. #[serde(default = "default_stun_servers")] pub stun_servers: Vec, @@ -112,6 +117,11 @@ pub struct NetworkConfig { /// Cache file path for detected public IP. #[serde(default = "default_cache_public_ip_path")] pub cache_public_ip_path: String, + + /// Runtime DNS overrides in `host:port:ip` format. + /// IPv6 IP values must be bracketed: `[2001:db8::1]`. 
+ #[serde(default)] + pub dns_overrides: Vec, } impl Default for NetworkConfig { @@ -121,10 +131,12 @@ impl Default for NetworkConfig { ipv6: default_network_ipv6(), prefer: default_prefer_4(), multipath: false, + stun_use: default_true(), stun_servers: default_stun_servers(), stun_tcp_fallback: default_stun_tcp_fallback(), http_ip_detect_urls: default_http_ip_detect_urls(), cache_public_ip_path: default_cache_public_ip_path(), + dns_overrides: Vec::new(), } } } diff --git a/src/main.rs b/src/main.rs index 95f7e5a..7389117 100644 --- a/src/main.rs +++ b/src/main.rs @@ -193,6 +193,11 @@ async fn main() -> std::result::Result<(), Box> { std::process::exit(1); } + if let Err(e) = crate::network::dns_overrides::install_entries(&config.network.dns_overrides) { + eprintln!("[telemt] Invalid network.dns_overrides: {}", e); + std::process::exit(1); + } + let has_rust_log = std::env::var("RUST_LOG").is_ok(); let effective_log_level = if cli_silent { LogLevel::Silent @@ -403,6 +408,12 @@ async fn main() -> std::result::Result<(), Box> { if !config.access.user_max_unique_ips.is_empty() { info!("IP limits configured for {} users", config.access.user_max_unique_ips.len()); } + if !config.network.dns_overrides.is_empty() { + info!( + "Runtime DNS overrides configured: {} entries", + config.network.dns_overrides.len() + ); + } // Connection concurrency limit let max_connections = Arc::new(Semaphore::new(10_000)); @@ -417,14 +428,17 @@ async fn main() -> std::result::Result<(), Box> { // ===================================================================== let me_pool: Option> = if use_middle_proxy { info!("=== Middle Proxy Mode ==="); + let me_nat_probe = config.general.middle_proxy_nat_probe && config.network.stun_use; + if config.general.middle_proxy_nat_probe && !config.network.stun_use { + info!("Middle-proxy STUN probing disabled by network.stun_use=false"); + } // ad_tag (proxy_tag) for advertising - let proxy_tag = config.general.ad_tag.as_ref().map(|tag| { - 
hex::decode(tag).unwrap_or_else(|_| { - warn!("Invalid ad_tag hex, middle proxy ad_tag will be empty"); - Vec::new() - }) - }); + let proxy_tag = config + .general + .ad_tag + .as_ref() + .map(|tag| hex::decode(tag).expect("general.ad_tag must be validated before startup")); // ============================================================= // CRITICAL: Download Telegram proxy-secret (NOT user secret!) @@ -484,7 +498,7 @@ async fn main() -> std::result::Result<(), Box> { proxy_tag, proxy_secret, config.general.middle_proxy_nat_ip, - config.general.middle_proxy_nat_probe, + me_nat_probe, None, config.network.stun_servers.clone(), config.general.stun_nat_probe_concurrency, @@ -1037,9 +1051,18 @@ async fn main() -> std::result::Result<(), Box> { let stats = stats.clone(); let beobachten = beobachten.clone(); let config_rx_metrics = config_rx.clone(); + let ip_tracker_metrics = ip_tracker.clone(); let whitelist = config.server.metrics_whitelist.clone(); tokio::spawn(async move { - metrics::serve(port, stats, beobachten, config_rx_metrics, whitelist).await; + metrics::serve( + port, + stats, + beobachten, + ip_tracker_metrics, + config_rx_metrics, + whitelist, + ) + .await; }); } diff --git a/src/metrics.rs b/src/metrics.rs index 08abb2d..63b337b 100644 --- a/src/metrics.rs +++ b/src/metrics.rs @@ -1,4 +1,5 @@ use std::convert::Infallible; +use std::collections::{BTreeSet, HashMap}; use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; @@ -13,6 +14,7 @@ use tokio::net::TcpListener; use tracing::{info, warn, debug}; use crate::config::ProxyConfig; +use crate::ip_tracker::UserIpTracker; use crate::stats::beobachten::BeobachtenStore; use crate::stats::Stats; @@ -20,6 +22,7 @@ pub async fn serve( port: u16, stats: Arc, beobachten: Arc, + ip_tracker: Arc, config_rx: tokio::sync::watch::Receiver>, whitelist: Vec, ) { @@ -49,13 +52,15 @@ pub async fn serve( let stats = stats.clone(); let beobachten = beobachten.clone(); + let ip_tracker = ip_tracker.clone(); let 
config_rx_conn = config_rx.clone(); tokio::spawn(async move { let svc = service_fn(move |req| { let stats = stats.clone(); let beobachten = beobachten.clone(); + let ip_tracker = ip_tracker.clone(); let config = config_rx_conn.borrow().clone(); - async move { handle(req, &stats, &beobachten, &config) } + async move { handle(req, &stats, &beobachten, &ip_tracker, &config).await } }); if let Err(e) = http1::Builder::new() .serve_connection(hyper_util::rt::TokioIo::new(stream), svc) @@ -67,14 +72,15 @@ pub async fn serve( } } -fn handle( +async fn handle( req: Request, stats: &Stats, beobachten: &BeobachtenStore, + ip_tracker: &UserIpTracker, config: &ProxyConfig, ) -> Result>, Infallible> { if req.uri().path() == "/metrics" { - let body = render_metrics(stats); + let body = render_metrics(stats, config, ip_tracker).await; let resp = Response::builder() .status(StatusCode::OK) .header("content-type", "text/plain; version=0.0.4; charset=utf-8") @@ -109,7 +115,7 @@ fn render_beobachten(beobachten: &BeobachtenStore, config: &ProxyConfig) -> Stri beobachten.snapshot_text(ttl) } -fn render_metrics(stats: &Stats) -> String { +async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIpTracker) -> String { use std::fmt::Write; let mut out = String::with_capacity(4096); @@ -349,6 +355,41 @@ fn render_metrics(stats: &Stats) -> String { let _ = writeln!(out, "telemt_user_msgs_to_client{{user=\"{}\"}} {}", user, s.msgs_to_client.load(std::sync::atomic::Ordering::Relaxed)); } + let ip_stats = ip_tracker.get_stats().await; + let ip_counts: HashMap = ip_stats + .into_iter() + .map(|(user, count, _)| (user, count)) + .collect(); + + let mut unique_users = BTreeSet::new(); + unique_users.extend(config.access.user_max_unique_ips.keys().cloned()); + unique_users.extend(ip_counts.keys().cloned()); + + let _ = writeln!(out, "# HELP telemt_user_unique_ips_current Per-user current number of unique active IPs"); + let _ = writeln!(out, "# TYPE 
telemt_user_unique_ips_current gauge"); + let _ = writeln!(out, "# HELP telemt_user_unique_ips_limit Per-user configured unique IP limit (0 means unlimited)"); + let _ = writeln!(out, "# TYPE telemt_user_unique_ips_limit gauge"); + let _ = writeln!(out, "# HELP telemt_user_unique_ips_utilization Per-user unique IP usage ratio (0 for unlimited)"); + let _ = writeln!(out, "# TYPE telemt_user_unique_ips_utilization gauge"); + + for user in unique_users { + let current = ip_counts.get(&user).copied().unwrap_or(0); + let limit = config.access.user_max_unique_ips.get(&user).copied().unwrap_or(0); + let utilization = if limit > 0 { + current as f64 / limit as f64 + } else { + 0.0 + }; + let _ = writeln!(out, "telemt_user_unique_ips_current{{user=\"{}\"}} {}", user, current); + let _ = writeln!(out, "telemt_user_unique_ips_limit{{user=\"{}\"}} {}", user, limit); + let _ = writeln!( + out, + "telemt_user_unique_ips_utilization{{user=\"{}\"}} {:.6}", + user, + utilization + ); + } + out } @@ -358,9 +399,16 @@ mod tests { use std::net::IpAddr; use http_body_util::BodyExt; - #[test] - fn test_render_metrics_format() { + #[tokio::test] + async fn test_render_metrics_format() { let stats = Arc::new(Stats::new()); + let tracker = UserIpTracker::new(); + let mut config = ProxyConfig::default(); + config + .access + .user_max_unique_ips + .insert("alice".to_string(), 4); + stats.increment_connects_all(); stats.increment_connects_all(); stats.increment_connects_bad(); @@ -372,8 +420,12 @@ mod tests { stats.increment_user_msgs_from("alice"); stats.increment_user_msgs_to("alice"); stats.increment_user_msgs_to("alice"); + tracker + .check_and_add("alice", "203.0.113.10".parse().unwrap()) + .await + .unwrap(); - let output = render_metrics(&stats); + let output = render_metrics(&stats, &config, &tracker).await; assert!(output.contains("telemt_connections_total 2")); assert!(output.contains("telemt_connections_bad_total 1")); @@ -384,22 +436,29 @@ mod tests { 
assert!(output.contains("telemt_user_octets_to_client{user=\"alice\"} 2048")); assert!(output.contains("telemt_user_msgs_from_client{user=\"alice\"} 1")); assert!(output.contains("telemt_user_msgs_to_client{user=\"alice\"} 2")); + assert!(output.contains("telemt_user_unique_ips_current{user=\"alice\"} 1")); + assert!(output.contains("telemt_user_unique_ips_limit{user=\"alice\"} 4")); + assert!(output.contains("telemt_user_unique_ips_utilization{user=\"alice\"} 0.250000")); } - #[test] - fn test_render_empty_stats() { + #[tokio::test] + async fn test_render_empty_stats() { let stats = Stats::new(); - let output = render_metrics(&stats); + let tracker = UserIpTracker::new(); + let config = ProxyConfig::default(); + let output = render_metrics(&stats, &config, &tracker).await; assert!(output.contains("telemt_connections_total 0")); assert!(output.contains("telemt_connections_bad_total 0")); assert!(output.contains("telemt_handshake_timeouts_total 0")); assert!(!output.contains("user=")); } - #[test] - fn test_render_has_type_annotations() { + #[tokio::test] + async fn test_render_has_type_annotations() { let stats = Stats::new(); - let output = render_metrics(&stats); + let tracker = UserIpTracker::new(); + let config = ProxyConfig::default(); + let output = render_metrics(&stats, &config, &tracker).await; assert!(output.contains("# TYPE telemt_uptime_seconds gauge")); assert!(output.contains("# TYPE telemt_connections_total counter")); assert!(output.contains("# TYPE telemt_connections_bad_total counter")); @@ -408,12 +467,16 @@ mod tests { assert!(output.contains( "# TYPE telemt_me_writer_removed_unexpected_minus_restored_total gauge" )); + assert!(output.contains("# TYPE telemt_user_unique_ips_current gauge")); + assert!(output.contains("# TYPE telemt_user_unique_ips_limit gauge")); + assert!(output.contains("# TYPE telemt_user_unique_ips_utilization gauge")); } #[tokio::test] async fn test_endpoint_integration() { let stats = Arc::new(Stats::new()); let beobachten 
= Arc::new(BeobachtenStore::new()); + let tracker = UserIpTracker::new(); let mut config = ProxyConfig::default(); stats.increment_connects_all(); stats.increment_connects_all(); @@ -423,7 +486,7 @@ mod tests { .uri("/metrics") .body(()) .unwrap(); - let resp = handle(req, &stats, &beobachten, &config).unwrap(); + let resp = handle(req, &stats, &beobachten, &tracker, &config).await.unwrap(); assert_eq!(resp.status(), StatusCode::OK); let body = resp.into_body().collect().await.unwrap().to_bytes(); assert!(std::str::from_utf8(body.as_ref()).unwrap().contains("telemt_connections_total 3")); @@ -439,7 +502,9 @@ mod tests { .uri("/beobachten") .body(()) .unwrap(); - let resp_beob = handle(req_beob, &stats, &beobachten, &config).unwrap(); + let resp_beob = handle(req_beob, &stats, &beobachten, &tracker, &config) + .await + .unwrap(); assert_eq!(resp_beob.status(), StatusCode::OK); let body_beob = resp_beob.into_body().collect().await.unwrap().to_bytes(); let beob_text = std::str::from_utf8(body_beob.as_ref()).unwrap(); @@ -450,7 +515,9 @@ mod tests { .uri("/other") .body(()) .unwrap(); - let resp404 = handle(req404, &stats, &beobachten, &config).unwrap(); + let resp404 = handle(req404, &stats, &beobachten, &tracker, &config) + .await + .unwrap(); assert_eq!(resp404.status(), StatusCode::NOT_FOUND); } } diff --git a/src/network/dns_overrides.rs b/src/network/dns_overrides.rs new file mode 100644 index 0000000..447863a --- /dev/null +++ b/src/network/dns_overrides.rs @@ -0,0 +1,197 @@ +//! Runtime DNS overrides for `host:port` targets. 
+ +use std::collections::HashMap; +use std::net::{IpAddr, Ipv6Addr, SocketAddr}; +use std::sync::{OnceLock, RwLock}; + +use crate::error::{ProxyError, Result}; + +type OverrideMap = HashMap<(String, u16), IpAddr>; + +static DNS_OVERRIDES: OnceLock> = OnceLock::new(); + +fn overrides_store() -> &'static RwLock { + DNS_OVERRIDES.get_or_init(|| RwLock::new(HashMap::new())) +} + +fn parse_ip_spec(ip_spec: &str) -> Result { + if ip_spec.starts_with('[') && ip_spec.ends_with(']') { + let inner = &ip_spec[1..ip_spec.len() - 1]; + let ipv6 = inner.parse::().map_err(|_| { + ProxyError::Config(format!( + "network.dns_overrides IPv6 override is invalid: '{ip_spec}'" + )) + })?; + return Ok(IpAddr::V6(ipv6)); + } + + let ip = ip_spec.parse::().map_err(|_| { + ProxyError::Config(format!( + "network.dns_overrides IP is invalid: '{ip_spec}'" + )) + })?; + if matches!(ip, IpAddr::V6(_)) { + return Err(ProxyError::Config(format!( + "network.dns_overrides IPv6 must be bracketed: '{ip_spec}'" + ))); + } + Ok(ip) +} + +fn parse_entry(entry: &str) -> Result<((String, u16), IpAddr)> { + let trimmed = entry.trim(); + if trimmed.is_empty() { + return Err(ProxyError::Config( + "network.dns_overrides entry cannot be empty".to_string(), + )); + } + + let first_sep = trimmed.find(':').ok_or_else(|| { + ProxyError::Config(format!( + "network.dns_overrides entry must use host:port:ip format: '{trimmed}'" + )) + })?; + let second_sep = trimmed[first_sep + 1..] 
+ .find(':') + .map(|idx| first_sep + 1 + idx) + .ok_or_else(|| { + ProxyError::Config(format!( + "network.dns_overrides entry must use host:port:ip format: '{trimmed}'" + )) + })?; + + let host = trimmed[..first_sep].trim(); + let port_str = trimmed[first_sep + 1..second_sep].trim(); + let ip_str = trimmed[second_sep + 1..].trim(); + + if host.is_empty() { + return Err(ProxyError::Config(format!( + "network.dns_overrides host cannot be empty: '{trimmed}'" + ))); + } + if host.contains(':') { + return Err(ProxyError::Config(format!( + "network.dns_overrides host must be a domain name without ':' in this format: '{trimmed}'" + ))); + } + + let port = port_str.parse::().map_err(|_| { + ProxyError::Config(format!( + "network.dns_overrides port is invalid: '{trimmed}'" + )) + })?; + let ip = parse_ip_spec(ip_str)?; + + Ok(((host.to_ascii_lowercase(), port), ip)) +} + +fn parse_entries(entries: &[String]) -> Result { + let mut parsed = HashMap::new(); + for entry in entries { + let (key, ip) = parse_entry(entry)?; + parsed.insert(key, ip); + } + Ok(parsed) +} + +/// Validate `network.dns_overrides` entries without updating runtime state. +pub fn validate_entries(entries: &[String]) -> Result<()> { + let _ = parse_entries(entries)?; + Ok(()) +} + +/// Replace runtime DNS overrides with a new validated snapshot. +pub fn install_entries(entries: &[String]) -> Result<()> { + let parsed = parse_entries(entries)?; + let mut guard = overrides_store() + .write() + .map_err(|_| ProxyError::Config("network.dns_overrides runtime lock is poisoned".to_string()))?; + *guard = parsed; + Ok(()) +} + +/// Resolve a hostname override for `(host, port)` if present. +pub fn resolve(host: &str, port: u16) -> Option { + let key = (host.to_ascii_lowercase(), port); + overrides_store() + .read() + .ok() + .and_then(|guard| guard.get(&key).copied()) +} + +/// Resolve a hostname override and construct a socket address when present. 
+pub fn resolve_socket_addr(host: &str, port: u16) -> Option { + resolve(host, port).map(|ip| SocketAddr::new(ip, port)) +} + +/// Parse a runtime endpoint in `host:port` format. +/// +/// Supports: +/// - `example.com:443` +/// - `[2001:db8::1]:443` +pub fn split_host_port(endpoint: &str) -> Option<(String, u16)> { + if endpoint.starts_with('[') { + let bracket_end = endpoint.find(']')?; + if endpoint.as_bytes().get(bracket_end + 1) != Some(&b':') { + return None; + } + let host = endpoint[1..bracket_end].trim(); + let port = endpoint[bracket_end + 2..].trim().parse::().ok()?; + if host.is_empty() { + return None; + } + return Some((host.to_ascii_lowercase(), port)); + } + + let split_idx = endpoint.rfind(':')?; + let host = endpoint[..split_idx].trim(); + let port = endpoint[split_idx + 1..].trim().parse::().ok()?; + if host.is_empty() || host.contains(':') { + return None; + } + + Some((host.to_ascii_lowercase(), port)) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn validate_accepts_ipv4_and_bracketed_ipv6() { + let entries = vec![ + "example.com:443:127.0.0.1".to_string(), + "example.net:8443:[2001:db8::10]".to_string(), + ]; + assert!(validate_entries(&entries).is_ok()); + } + + #[test] + fn validate_rejects_unbracketed_ipv6() { + let entries = vec!["example.net:443:2001:db8::10".to_string()]; + let err = validate_entries(&entries).unwrap_err().to_string(); + assert!(err.contains("must be bracketed")); + } + + #[test] + fn install_and_resolve_are_case_insensitive_for_host() { + let entries = vec!["MyPetrovich.ru:8443:127.0.0.1".to_string()]; + install_entries(&entries).unwrap(); + + let resolved = resolve("mypetrovich.ru", 8443); + assert_eq!(resolved, Some("127.0.0.1".parse().unwrap())); + } + + #[test] + fn split_host_port_parses_supported_shapes() { + assert_eq!( + split_host_port("example.com:443"), + Some(("example.com".to_string(), 443)) + ); + assert_eq!( + split_host_port("[2001:db8::1]:443"), + Some(("2001:db8::1".to_string(), 443)) 
+ ); + assert_eq!(split_host_port("2001:db8::1:443"), None); + } +} diff --git a/src/network/mod.rs b/src/network/mod.rs index 78a1040..e57622d 100644 --- a/src/network/mod.rs +++ b/src/network/mod.rs @@ -1,3 +1,4 @@ +pub mod dns_overrides; pub mod probe; pub mod stun; diff --git a/src/network/probe.rs b/src/network/probe.rs index 6e84682..2ceeb2c 100644 --- a/src/network/probe.rs +++ b/src/network/probe.rs @@ -68,7 +68,7 @@ pub async fn run_probe( probe.ipv4_is_bogon = probe.detected_ipv4.map(is_bogon_v4).unwrap_or(false); probe.ipv6_is_bogon = probe.detected_ipv6.map(is_bogon_v6).unwrap_or(false); - let stun_res = if nat_probe { + let stun_res = if nat_probe && config.stun_use { let servers = collect_stun_servers(config); if servers.is_empty() { warn!("STUN probe is enabled but network.stun_servers is empty"); @@ -80,6 +80,9 @@ pub async fn run_probe( ) .await } + } else if nat_probe { + info!("STUN probe is disabled by network.stun_use=false"); + DualStunResult::default() } else { DualStunResult::default() }; diff --git a/src/network/stun.rs b/src/network/stun.rs index 5bda495..bb5a873 100644 --- a/src/network/stun.rs +++ b/src/network/stun.rs @@ -7,6 +7,7 @@ use tokio::net::{lookup_host, UdpSocket}; use tokio::time::{timeout, Duration, sleep}; use crate::error::{ProxyError, Result}; +use crate::network::dns_overrides::{resolve, split_host_port}; #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum IpFamily { @@ -198,6 +199,16 @@ async fn resolve_stun_addr(stun_addr: &str, family: IpFamily) -> Result Some(addr), + _ => None, + }); + } + let mut addrs = lookup_host(stun_addr) .await .map_err(|e| ProxyError::Proxy(format!("STUN resolve failed: {e}")))?; diff --git a/src/proxy/masking.rs b/src/proxy/masking.rs index d12cf41..8f19b40 100644 --- a/src/proxy/masking.rs +++ b/src/proxy/masking.rs @@ -10,6 +10,7 @@ use tokio::io::{AsyncRead, AsyncWrite, AsyncReadExt, AsyncWriteExt}; use tokio::time::timeout; use tracing::debug; use crate::config::ProxyConfig; 
+use crate::network::dns_overrides::resolve_socket_addr; use crate::stats::beobachten::BeobachtenStore; use crate::transport::proxy_protocol::{ProxyProtocolV1Builder, ProxyProtocolV2Builder}; @@ -115,8 +116,10 @@ where "Forwarding bad client to mask host" ); - // Connect to mask host - let mask_addr = format!("{}:{}", mask_host, mask_port); + // Apply runtime DNS override for mask target when configured. + let mask_addr = resolve_socket_addr(mask_host, mask_port) + .map(|addr| addr.to_string()) + .unwrap_or_else(|| format!("{}:{}", mask_host, mask_port)); let connect_result = timeout(MASK_TIMEOUT, TcpStream::connect(&mask_addr)).await; match connect_result { Ok(Ok(stream)) => { diff --git a/src/tls_front/fetcher.rs b/src/tls_front/fetcher.rs index 561d4cc..ba80332 100644 --- a/src/tls_front/fetcher.rs +++ b/src/tls_front/fetcher.rs @@ -18,6 +18,7 @@ use x509_parser::prelude::FromDer; use x509_parser::certificate::X509Certificate; use crate::crypto::SecureRandom; +use crate::network::dns_overrides::resolve_socket_addr; use crate::protocol::constants::{TLS_RECORD_APPLICATION, TLS_RECORD_HANDSHAKE}; use crate::transport::proxy_protocol::{ProxyProtocolV1Builder, ProxyProtocolV2Builder}; use crate::tls_front::types::{ @@ -333,6 +334,17 @@ fn u24_bytes(value: usize) -> Option<[u8; 3]> { ]) } +async fn connect_with_dns_override( + host: &str, + port: u16, + connect_timeout: Duration, +) -> Result { + if let Some(addr) = resolve_socket_addr(host, port) { + return Ok(timeout(connect_timeout, TcpStream::connect(addr)).await??); + } + Ok(timeout(connect_timeout, TcpStream::connect((host, port))).await??) 
+} + fn encode_tls13_certificate_message(cert_chain_der: &[Vec]) -> Option> { if cert_chain_der.is_empty() { return None; @@ -369,8 +381,7 @@ async fn fetch_via_raw_tls( connect_timeout: Duration, proxy_protocol: u8, ) -> Result { - let addr = format!("{host}:{port}"); - let mut stream = timeout(connect_timeout, TcpStream::connect(addr)).await??; + let mut stream = connect_with_dns_override(host, port, connect_timeout).await?; let rng = SecureRandom::new(); let client_hello = build_client_hello(sni, &rng); @@ -437,24 +448,31 @@ async fn fetch_via_rustls( ) -> Result { // rustls handshake path for certificate and basic negotiated metadata. let mut stream = if let Some(manager) = upstream { - // Resolve host to SocketAddr - if let Ok(mut addrs) = tokio::net::lookup_host((host, port)).await { + if let Some(addr) = resolve_socket_addr(host, port) { + match manager.connect(addr, None, None).await { + Ok(s) => s, + Err(e) => { + warn!(sni = %sni, error = %e, "Upstream connect failed, using direct connect"); + connect_with_dns_override(host, port, connect_timeout).await? + } + } + } else if let Ok(mut addrs) = tokio::net::lookup_host((host, port)).await { if let Some(addr) = addrs.find(|a| a.is_ipv4()) { match manager.connect(addr, None, None).await { Ok(s) => s, Err(e) => { warn!(sni = %sni, error = %e, "Upstream connect failed, using direct connect"); - timeout(connect_timeout, TcpStream::connect((host, port))).await?? + connect_with_dns_override(host, port, connect_timeout).await? } } } else { - timeout(connect_timeout, TcpStream::connect((host, port))).await?? + connect_with_dns_override(host, port, connect_timeout).await? } } else { - timeout(connect_timeout, TcpStream::connect((host, port))).await?? + connect_with_dns_override(host, port, connect_timeout).await? } } else { - timeout(connect_timeout, TcpStream::connect((host, port))).await?? + connect_with_dns_override(host, port, connect_timeout).await? 
}; if proxy_protocol > 0 { diff --git a/src/transport/upstream.rs b/src/transport/upstream.rs index e2198a8..a442597 100644 --- a/src/transport/upstream.rs +++ b/src/transport/upstream.rs @@ -17,6 +17,7 @@ use tracing::{debug, warn, info, trace}; use crate::config::{UpstreamConfig, UpstreamType}; use crate::error::{Result, ProxyError}; +use crate::network::dns_overrides::{resolve_socket_addr, split_host_port}; use crate::protocol::constants::{TG_DATACENTERS_V4, TG_DATACENTERS_V6, TG_DATACENTER_PORT}; use crate::transport::socket::{create_outgoing_socket_bound, resolve_interface_ip}; use crate::transport::socks::{connect_socks4, connect_socks5}; @@ -209,6 +210,31 @@ impl UpstreamManager { None } + async fn connect_hostname_with_dns_override( + address: &str, + connect_timeout: Duration, + ) -> Result { + if let Some((host, port)) = split_host_port(address) + && let Some(addr) = resolve_socket_addr(&host, port) + { + return match tokio::time::timeout(connect_timeout, TcpStream::connect(addr)).await { + Ok(Ok(stream)) => Ok(stream), + Ok(Err(e)) => Err(ProxyError::Io(e)), + Err(_) => Err(ProxyError::ConnectionTimeout { + addr: addr.to_string(), + }), + }; + } + + match tokio::time::timeout(connect_timeout, TcpStream::connect(address)).await { + Ok(Ok(stream)) => Ok(stream), + Ok(Err(e)) => Err(ProxyError::Io(e)), + Err(_) => Err(ProxyError::ConnectionTimeout { + addr: address.to_string(), + }), + } + } + /// Select upstream using latency-weighted random selection. 
async fn select_upstream(&self, dc_idx: Option, scope: Option<&str>) -> Option { let upstreams = self.upstreams.read().await; @@ -433,15 +459,7 @@ impl UpstreamManager { if interface.is_some() { warn!("SOCKS4 interface binding is not supported for hostname addresses, ignoring"); } - match tokio::time::timeout(connect_timeout, TcpStream::connect(address)).await { - Ok(Ok(stream)) => stream, - Ok(Err(e)) => return Err(ProxyError::Io(e)), - Err(_) => { - return Err(ProxyError::ConnectionTimeout { - addr: address.clone(), - }); - } - } + Self::connect_hostname_with_dns_override(address, connect_timeout).await? }; // replace socks user_id with config.selected_scope, if set @@ -503,15 +521,7 @@ impl UpstreamManager { if interface.is_some() { warn!("SOCKS5 interface binding is not supported for hostname addresses, ignoring"); } - match tokio::time::timeout(connect_timeout, TcpStream::connect(address)).await { - Ok(Ok(stream)) => stream, - Ok(Err(e)) => return Err(ProxyError::Io(e)), - Err(_) => { - return Err(ProxyError::ConnectionTimeout { - addr: address.clone(), - }); - } - } + Self::connect_hostname_with_dns_override(address, connect_timeout).await? 
}; debug!(config = ?config, "Socks5 connection"); From 3d9660f83eaaf003d60e4e00ee2833d9189af0a8 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Sat, 28 Feb 2026 01:20:17 +0300 Subject: [PATCH 78/98] Upstreams for ME + Egress-data from UM + ME-over-SOCKS + Bind-aware STUN --- src/main.rs | 1 + src/network/stun.rs | 27 +++- src/transport/middle_proxy/handshake.rs | 160 ++++++++++++++++++---- src/transport/middle_proxy/ping.rs | 4 +- src/transport/middle_proxy/pool.rs | 4 + src/transport/middle_proxy/pool_nat.rs | 57 +++++--- src/transport/middle_proxy/pool_writer.rs | 4 +- src/transport/mod.rs | 2 +- src/transport/socks.rs | 42 ++++-- src/transport/upstream.rs | 84 ++++++++++-- 10 files changed, 307 insertions(+), 78 deletions(-) diff --git a/src/main.rs b/src/main.rs index 7389117..b065d4e 100644 --- a/src/main.rs +++ b/src/main.rs @@ -509,6 +509,7 @@ async fn main() -> std::result::Result<(), Box> { cfg_v6.map.clone(), cfg_v4.default_dc.or(cfg_v6.default_dc), decision.clone(), + Some(upstream_manager.clone()), rng.clone(), stats.clone(), config.general.me_keepalive_enabled, diff --git a/src/network/stun.rs b/src/network/stun.rs index bb5a873..c3a235f 100644 --- a/src/network/stun.rs +++ b/src/network/stun.rs @@ -41,16 +41,31 @@ pub async fn stun_probe_dual(stun_addr: &str) -> Result { } pub async fn stun_probe_family(stun_addr: &str, family: IpFamily) -> Result> { + stun_probe_family_with_bind(stun_addr, family, None).await +} + +pub async fn stun_probe_family_with_bind( + stun_addr: &str, + family: IpFamily, + bind_ip: Option, +) -> Result> { use rand::RngCore; - let bind_addr = match family { - IpFamily::V4 => "0.0.0.0:0", - IpFamily::V6 => "[::]:0", + let bind_addr = match (family, bind_ip) { + (IpFamily::V4, Some(IpAddr::V4(ip))) => SocketAddr::new(IpAddr::V4(ip), 0), + (IpFamily::V6, Some(IpAddr::V6(ip))) => SocketAddr::new(IpAddr::V6(ip), 0), + (IpFamily::V4, Some(IpAddr::V6(_))) | (IpFamily::V6, Some(IpAddr::V4(_))) 
=> { + return Ok(None); + } + (IpFamily::V4, None) => SocketAddr::new(IpAddr::V4(Ipv4Addr::UNSPECIFIED), 0), + (IpFamily::V6, None) => SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), 0), }; - let socket = UdpSocket::bind(bind_addr) - .await - .map_err(|e| ProxyError::Proxy(format!("STUN bind failed: {e}")))?; + let socket = match UdpSocket::bind(bind_addr).await { + Ok(socket) => socket, + Err(_) if bind_ip.is_some() => return Ok(None), + Err(e) => return Err(ProxyError::Proxy(format!("STUN bind failed: {e}"))), + }; let target_addr = resolve_stun_addr(stun_addr, family).await?; if let Some(addr) = target_addr { diff --git a/src/transport/middle_proxy/handshake.rs b/src/transport/middle_proxy/handshake.rs index d9bcdde..988834a 100644 --- a/src/transport/middle_proxy/handshake.rs +++ b/src/transport/middle_proxy/handshake.rs @@ -17,10 +17,12 @@ use tracing::{debug, info, warn}; use crate::crypto::{SecureRandom, build_middleproxy_prekey, derive_middleproxy_keys, sha256}; use crate::error::{ProxyError, Result}; use crate::network::IpFamily; +use crate::network::probe::is_bogon; use crate::protocol::constants::{ ME_CONNECT_TIMEOUT_SECS, ME_HANDSHAKE_TIMEOUT_SECS, RPC_CRYPTO_AES_U32, RPC_HANDSHAKE_ERROR_U32, rpc_crypto_flags, }; +use crate::transport::{UpstreamEgressInfo, UpstreamRouteKind}; use super::codec::{ RpcChecksumMode, build_handshake_payload, build_nonce_payload, build_rpc_frame, @@ -43,33 +45,125 @@ pub(crate) struct HandshakeOutput { } impl MePool { - /// TCP connect with timeout + return RTT in milliseconds. 
- pub(crate) async fn connect_tcp(&self, addr: SocketAddr) -> Result<(TcpStream, f64)> { - let start = Instant::now(); - let connect_fut = async { - if addr.is_ipv6() - && let Some(v6) = self.detected_ipv6 - { - match TcpSocket::new_v6() { - Ok(sock) => { - if let Err(e) = sock.bind(SocketAddr::new(IpAddr::V6(v6), 0)) { - debug!(error = %e, bind_ip = %v6, "ME IPv6 bind failed, falling back to default bind"); - } else { - match sock.connect(addr).await { - Ok(stream) => return Ok(stream), - Err(e) => debug!(error = %e, target = %addr, "ME IPv6 bound connect failed, retrying default connect"), - } - } + async fn resolve_dc_idx_for_endpoint(&self, addr: SocketAddr) -> Option { + if addr.is_ipv4() { + let map = self.proxy_map_v4.read().await; + for (dc, addrs) in map.iter() { + if addrs + .iter() + .any(|(ip, port)| SocketAddr::new(*ip, *port) == addr) + { + let abs_dc = dc.abs(); + if abs_dc > 0 + && let Ok(dc_idx) = i16::try_from(abs_dc) + { + return Some(dc_idx); } - Err(e) => debug!(error = %e, "ME IPv6 socket creation failed, falling back to default connect"), } } - TcpStream::connect(addr).await + } else { + let map = self.proxy_map_v6.read().await; + for (dc, addrs) in map.iter() { + if addrs + .iter() + .any(|(ip, port)| SocketAddr::new(*ip, *port) == addr) + { + let abs_dc = dc.abs(); + if abs_dc > 0 + && let Ok(dc_idx) = i16::try_from(abs_dc) + { + return Some(dc_idx); + } + } + } + } + None + } + + fn direct_bind_ip_for_stun( + family: IpFamily, + upstream_egress: Option, + ) -> Option { + let info = upstream_egress?; + if info.route_kind != UpstreamRouteKind::Direct { + return None; + } + match (family, info.direct_bind_ip) { + (IpFamily::V4, Some(IpAddr::V4(ip))) => Some(IpAddr::V4(ip)), + (IpFamily::V6, Some(IpAddr::V6(ip))) => Some(IpAddr::V6(ip)), + _ => None, + } + } + + fn select_socks_bound_addr( + family: IpFamily, + upstream_egress: Option, + ) -> Option { + let info = upstream_egress?; + if !matches!( + info.route_kind, + UpstreamRouteKind::Socks4 
| UpstreamRouteKind::Socks5 + ) { + return None; + } + let bound = info.socks_bound_addr?; + let family_matches = matches!( + (family, bound.ip()), + (IpFamily::V4, IpAddr::V4(_)) | (IpFamily::V6, IpAddr::V6(_)) + ); + if !family_matches || is_bogon(bound.ip()) || bound.ip().is_unspecified() { + return None; + } + Some(bound) + } + + /// TCP connect with timeout + return RTT in milliseconds. + pub(crate) async fn connect_tcp( + &self, + addr: SocketAddr, + ) -> Result<(TcpStream, f64, Option)> { + let start = Instant::now(); + let (stream, upstream_egress) = if let Some(upstream) = &self.upstream { + let dc_idx = self.resolve_dc_idx_for_endpoint(addr).await; + let (stream, egress) = timeout( + Duration::from_secs(ME_CONNECT_TIMEOUT_SECS), + upstream.connect_with_details(addr, dc_idx, None), + ) + .await + .map_err(|_| ProxyError::ConnectionTimeout { + addr: addr.to_string(), + })??; + (stream, Some(egress)) + } else { + let connect_fut = async { + if addr.is_ipv6() + && let Some(v6) = self.detected_ipv6 + { + match TcpSocket::new_v6() { + Ok(sock) => { + if let Err(e) = sock.bind(SocketAddr::new(IpAddr::V6(v6), 0)) { + debug!(error = %e, bind_ip = %v6, "ME IPv6 bind failed, falling back to default bind"); + } else { + match sock.connect(addr).await { + Ok(stream) => return Ok(stream), + Err(e) => debug!(error = %e, target = %addr, "ME IPv6 bound connect failed, retrying default connect"), + } + } + } + Err(e) => debug!(error = %e, "ME IPv6 socket creation failed, falling back to default connect"), + } + } + TcpStream::connect(addr).await + }; + + let stream = timeout(Duration::from_secs(ME_CONNECT_TIMEOUT_SECS), connect_fut) + .await + .map_err(|_| ProxyError::ConnectionTimeout { + addr: addr.to_string(), + })??; + (stream, None) }; - let stream = timeout(Duration::from_secs(ME_CONNECT_TIMEOUT_SECS), connect_fut) - .await - .map_err(|_| ProxyError::ConnectionTimeout { addr: addr.to_string() })??; let connect_ms = start.elapsed().as_secs_f64() * 1000.0; 
stream.set_nodelay(true).ok(); if let Err(e) = Self::configure_keepalive(&stream) { @@ -79,7 +173,7 @@ impl MePool { if let Err(e) = Self::configure_user_timeout(stream.as_raw_fd()) { warn!(error = %e, "ME TCP_USER_TIMEOUT setup failed"); } - Ok((stream, connect_ms)) + Ok((stream, connect_ms, upstream_egress)) } fn configure_keepalive(stream: &TcpStream) -> std::io::Result<()> { @@ -117,12 +211,14 @@ impl MePool { &self, stream: TcpStream, addr: SocketAddr, + upstream_egress: Option, rng: &SecureRandom, ) -> Result { let hs_start = Instant::now(); let local_addr = stream.local_addr().map_err(ProxyError::Io)?; - let peer_addr = stream.peer_addr().map_err(ProxyError::Io)?; + let transport_peer_addr = stream.peer_addr().map_err(ProxyError::Io)?; + let peer_addr = addr; let _ = self.maybe_detect_nat_ip(local_addr.ip()).await; let family = if local_addr.ip().is_ipv4() { @@ -130,8 +226,12 @@ impl MePool { } else { IpFamily::V6 }; - let reflected = if self.nat_probe { - self.maybe_reflect_public_addr(family).await + let socks_bound_addr = Self::select_socks_bound_addr(family, upstream_egress); + let reflected = if let Some(bound) = socks_bound_addr { + Some(bound) + } else if self.nat_probe { + let bind_ip = Self::direct_bind_ip_for_stun(family, upstream_egress); + self.maybe_reflect_public_addr(family, bind_ip).await } else { None }; @@ -197,7 +297,9 @@ impl MePool { %local_addr_nat, reflected_ip = reflected.map(|r| r.ip()).as_ref().map(ToString::to_string), %peer_addr, + %transport_peer_addr, %peer_addr_nat, + socks_bound_addr = socks_bound_addr.map(|v| v.to_string()), key_selector = format_args!("0x{ks:08x}"), crypto_schema = format_args!("0x{schema:08x}"), skew_secs = skew, @@ -206,7 +308,11 @@ impl MePool { let ts_bytes = crypto_ts.to_le_bytes(); let server_port_bytes = peer_addr_nat.port().to_le_bytes(); - let client_port_bytes = local_addr_nat.port().to_le_bytes(); + let client_port_for_kdf = socks_bound_addr + .map(|bound| bound.port()) + .filter(|port| *port != 
0) + .unwrap_or(local_addr_nat.port()); + let client_port_bytes = client_port_for_kdf.to_le_bytes(); let server_ip = extract_ip_material(peer_addr_nat); let client_ip = extract_ip_material(local_addr_nat); diff --git a/src/transport/middle_proxy/ping.rs b/src/transport/middle_proxy/ping.rs index a1dd1e6..aae11e6 100644 --- a/src/transport/middle_proxy/ping.rs +++ b/src/transport/middle_proxy/ping.rs @@ -122,9 +122,9 @@ pub async fn run_me_ping(pool: &Arc, rng: &SecureRandom) -> Vec { + Ok((stream, conn_rtt, upstream_egress)) => { connect_ms = Some(conn_rtt); - match pool.handshake_only(stream, addr, rng).await { + match pool.handshake_only(stream, addr, upstream_egress, rng).await { Ok(hs) => { handshake_ms = Some(hs.handshake_ms); // drop halves to close diff --git a/src/transport/middle_proxy/pool.rs b/src/transport/middle_proxy/pool.rs index 1e43628..d2e8fa4 100644 --- a/src/transport/middle_proxy/pool.rs +++ b/src/transport/middle_proxy/pool.rs @@ -10,6 +10,7 @@ use tokio_util::sync::CancellationToken; use crate::crypto::SecureRandom; use crate::network::IpFamily; use crate::network::probe::NetworkDecision; +use crate::transport::UpstreamManager; use super::ConnRegistry; use super::codec::WriterCommand; @@ -33,6 +34,7 @@ pub struct MePool { pub(super) writers: Arc>>, pub(super) rr: AtomicU64, pub(super) decision: NetworkDecision, + pub(super) upstream: Option>, pub(super) rng: Arc, pub(super) proxy_tag: Option>, pub(super) proxy_secret: Arc>>, @@ -121,6 +123,7 @@ impl MePool { proxy_map_v6: HashMap>, default_dc: Option, decision: NetworkDecision, + upstream: Option>, rng: Arc, stats: Arc, me_keepalive_enabled: bool, @@ -148,6 +151,7 @@ impl MePool { writers: Arc::new(RwLock::new(Vec::new())), rr: AtomicU64::new(0), decision, + upstream, rng, proxy_tag, proxy_secret: Arc::new(RwLock::new(proxy_secret)), diff --git a/src/transport/middle_proxy/pool_nat.rs b/src/transport/middle_proxy/pool_nat.rs index 7141236..bfcb0e2 100644 --- 
a/src/transport/middle_proxy/pool_nat.rs +++ b/src/transport/middle_proxy/pool_nat.rs @@ -8,7 +8,7 @@ use tracing::{debug, info, warn}; use crate::error::{ProxyError, Result}; use crate::network::probe::is_bogon; -use crate::network::stun::{stun_probe_dual, IpFamily, StunProbeResult}; +use crate::network::stun::{stun_probe_dual, stun_probe_family_with_bind, IpFamily}; use super::MePool; use std::time::Instant; @@ -52,6 +52,7 @@ impl MePool { servers: &[String], family: IpFamily, attempt: u8, + bind_ip: Option, ) -> (Vec, Option) { let mut join_set = JoinSet::new(); let mut next_idx = 0usize; @@ -64,7 +65,11 @@ impl MePool { let stun_addr = servers[next_idx].clone(); next_idx += 1; join_set.spawn(async move { - let res = timeout(STUN_BATCH_TIMEOUT, stun_probe_dual(&stun_addr)).await; + let res = timeout( + STUN_BATCH_TIMEOUT, + stun_probe_family_with_bind(&stun_addr, family, bind_ip), + ) + .await; (stun_addr, res) }); } @@ -74,12 +79,7 @@ impl MePool { }; match task { - Ok((stun_addr, Ok(Ok(res)))) => { - let picked: Option = match family { - IpFamily::V4 => res.v4, - IpFamily::V6 => res.v6, - }; - + Ok((stun_addr, Ok(Ok(picked)))) => { if let Some(result) = picked { live_servers.push(stun_addr.clone()); let entry = best_by_ip @@ -207,10 +207,21 @@ impl MePool { pub(super) async fn maybe_reflect_public_addr( &self, family: IpFamily, + bind_ip: Option, ) -> Option { const STUN_CACHE_TTL: Duration = Duration::from_secs(600); + let use_shared_cache = bind_ip.is_none(); + if !use_shared_cache { + match (family, bind_ip) { + (IpFamily::V4, Some(IpAddr::V4(_))) + | (IpFamily::V6, Some(IpAddr::V6(_))) + | (_, None) => {} + _ => return None, + } + } // Backoff window - if let Some(until) = *self.stun_backoff_until.read().await + if use_shared_cache + && let Some(until) = *self.stun_backoff_until.read().await && Instant::now() < until { if let Ok(cache) = self.nat_reflection_cache.try_lock() { @@ -223,7 +234,9 @@ impl MePool { return None; } - if let Ok(mut cache) = 
self.nat_reflection_cache.try_lock() { + if use_shared_cache + && let Ok(mut cache) = self.nat_reflection_cache.try_lock() + { let slot = match family { IpFamily::V4 => &mut cache.v4, IpFamily::V6 => &mut cache.v6, @@ -235,7 +248,11 @@ impl MePool { } } - let attempt = self.nat_probe_attempts.fetch_add(1, std::sync::atomic::Ordering::Relaxed); + let attempt = if use_shared_cache { + self.nat_probe_attempts.fetch_add(1, std::sync::atomic::Ordering::Relaxed) + } else { + 0 + }; let configured_servers = self.configured_stun_servers(); let live_snapshot = self.nat_stun_live_servers.read().await.clone(); let primary_servers = if live_snapshot.is_empty() { @@ -245,12 +262,12 @@ impl MePool { }; let (mut live_servers, mut selected_reflected) = self - .probe_stun_batch_for_family(&primary_servers, family, attempt) + .probe_stun_batch_for_family(&primary_servers, family, attempt, bind_ip) .await; if selected_reflected.is_none() && !configured_servers.is_empty() && primary_servers != configured_servers { let (rediscovered_live, rediscovered_reflected) = self - .probe_stun_batch_for_family(&configured_servers, family, attempt) + .probe_stun_batch_for_family(&configured_servers, family, attempt, bind_ip) .await; live_servers = rediscovered_live; selected_reflected = rediscovered_reflected; @@ -264,14 +281,18 @@ impl MePool { } if let Some(reflected_addr) = selected_reflected { - self.nat_probe_attempts.store(0, std::sync::atomic::Ordering::Relaxed); + if use_shared_cache { + self.nat_probe_attempts.store(0, std::sync::atomic::Ordering::Relaxed); + } info!( family = ?family, live_servers = live_server_count, "STUN-Quorum reached, IP: {}", reflected_addr.ip() ); - if let Ok(mut cache) = self.nat_reflection_cache.try_lock() { + if use_shared_cache + && let Ok(mut cache) = self.nat_reflection_cache.try_lock() + { let slot = match family { IpFamily::V4 => &mut cache.v4, IpFamily::V6 => &mut cache.v6, @@ -281,8 +302,10 @@ impl MePool { return Some(reflected_addr); } - let backoff = 
Duration::from_secs(60 * 2u64.pow((attempt as u32).min(6))); - *self.stun_backoff_until.write().await = Some(Instant::now() + backoff); + if use_shared_cache { + let backoff = Duration::from_secs(60 * 2u64.pow((attempt as u32).min(6))); + *self.stun_backoff_until.write().await = Some(Instant::now() + backoff); + } None } } diff --git a/src/transport/middle_proxy/pool_writer.rs b/src/transport/middle_proxy/pool_writer.rs index 942ddaf..28f5538 100644 --- a/src/transport/middle_proxy/pool_writer.rs +++ b/src/transport/middle_proxy/pool_writer.rs @@ -47,8 +47,8 @@ impl MePool { return Err(ProxyError::Proxy("proxy-secret too short for ME auth".into())); } - let (stream, _connect_ms) = self.connect_tcp(addr).await?; - let hs = self.handshake_only(stream, addr, rng).await?; + let (stream, _connect_ms, upstream_egress) = self.connect_tcp(addr).await?; + let hs = self.handshake_only(stream, addr, upstream_egress, rng).await?; let writer_id = self.next_writer_id.fetch_add(1, Ordering::Relaxed); let generation = self.current_generation(); diff --git a/src/transport/mod.rs b/src/transport/mod.rs index ead0565..cba5465 100644 --- a/src/transport/mod.rs +++ b/src/transport/mod.rs @@ -14,5 +14,5 @@ pub use socket::*; #[allow(unused_imports)] pub use socks::*; #[allow(unused_imports)] -pub use upstream::{DcPingResult, StartupPingResult, UpstreamManager}; +pub use upstream::{DcPingResult, StartupPingResult, UpstreamEgressInfo, UpstreamManager, UpstreamRouteKind}; pub mod middle_proxy; diff --git a/src/transport/socks.rs b/src/transport/socks.rs index 8196b52..5369787 100644 --- a/src/transport/socks.rs +++ b/src/transport/socks.rs @@ -5,11 +5,16 @@ use tokio::io::{AsyncReadExt, AsyncWriteExt}; use tokio::net::TcpStream; use crate::error::{ProxyError, Result}; +#[derive(Debug, Clone, Copy)] +pub struct SocksBoundAddr { + pub addr: SocketAddr, +} + pub async fn connect_socks4( stream: &mut TcpStream, target: SocketAddr, user_id: Option<&str>, -) -> Result<()> { +) -> Result { let ip 
= match target.ip() { IpAddr::V4(ip) => ip, IpAddr::V6(_) => return Err(ProxyError::Proxy("SOCKS4 does not support IPv6".to_string())), @@ -36,8 +41,13 @@ pub async fn connect_socks4( if resp[1] != 90 { return Err(ProxyError::Proxy(format!("SOCKS4 request rejected: code {}", resp[1]))); } - - Ok(()) + + let bound_port = u16::from_be_bytes([resp[2], resp[3]]); + let bound_ip = IpAddr::from([resp[4], resp[5], resp[6], resp[7]]); + + Ok(SocksBoundAddr { + addr: SocketAddr::new(bound_ip, bound_port), + }) } pub async fn connect_socks5( @@ -45,7 +55,7 @@ pub async fn connect_socks5( target: SocketAddr, username: Option<&str>, password: Option<&str>, -) -> Result<()> { +) -> Result { // 1. Auth negotiation // VER (1) | NMETHODS (1) | METHODS (variable) let mut methods = vec![0u8]; // No auth @@ -122,24 +132,36 @@ pub async fn connect_socks5( return Err(ProxyError::Proxy(format!("SOCKS5 request failed: code {}", head[1]))); } - // Skip address part of response - match head[3] { + // Parse bound address from response. + let bound_addr = match head[3] { 1 => { // IPv4 let mut addr = [0u8; 4 + 2]; stream.read_exact(&mut addr).await.map_err(ProxyError::Io)?; + let ip = IpAddr::from([addr[0], addr[1], addr[2], addr[3]]); + let port = u16::from_be_bytes([addr[4], addr[5]]); + SocketAddr::new(ip, port) }, 3 => { // Domain let mut len = [0u8; 1]; stream.read_exact(&mut len).await.map_err(ProxyError::Io)?; let mut addr = vec![0u8; len[0] as usize + 2]; stream.read_exact(&mut addr).await.map_err(ProxyError::Io)?; + // Domain-bound response is not useful for KDF IP material. 
+ let port_pos = addr.len().saturating_sub(2); + let port = u16::from_be_bytes([addr[port_pos], addr[port_pos + 1]]); + SocketAddr::new(IpAddr::from([0, 0, 0, 0]), port) }, 4 => { // IPv6 let mut addr = [0u8; 16 + 2]; stream.read_exact(&mut addr).await.map_err(ProxyError::Io)?; + let ip = IpAddr::from(<[u8; 16]>::try_from(&addr[..16]).map_err(|_| { + ProxyError::Proxy("Invalid SOCKS5 IPv6 bound address".to_string()) + })?); + let port = u16::from_be_bytes([addr[16], addr[17]]); + SocketAddr::new(ip, port) }, _ => return Err(ProxyError::Proxy("Invalid address type in SOCKS5 response".to_string())), - } - - Ok(()) -} \ No newline at end of file + }; + + Ok(SocksBoundAddr { addr: bound_addr }) +} diff --git a/src/transport/upstream.rs b/src/transport/upstream.rs index a442597..eff05b8 100644 --- a/src/transport/upstream.rs +++ b/src/transport/upstream.rs @@ -151,6 +151,21 @@ pub struct StartupPingResult { pub both_available: bool, } +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum UpstreamRouteKind { + Direct, + Socks4, + Socks5, +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct UpstreamEgressInfo { + pub route_kind: UpstreamRouteKind, + pub local_addr: Option, + pub direct_bind_ip: Option, + pub socks_bound_addr: Option, +} + // ============= Upstream Manager ============= #[derive(Clone)] @@ -316,6 +331,17 @@ impl UpstreamManager { /// Connect to target through a selected upstream. pub async fn connect(&self, target: SocketAddr, dc_idx: Option, scope: Option<&str>) -> Result { + let (stream, _) = self.connect_with_details(target, dc_idx, scope).await?; + Ok(stream) + } + + /// Connect to target through a selected upstream and return egress details. 
+ pub async fn connect_with_details( + &self, + target: SocketAddr, + dc_idx: Option, + scope: Option<&str>, + ) -> Result<(TcpStream, UpstreamEgressInfo)> { let idx = self.select_upstream(dc_idx, scope).await .ok_or_else(|| ProxyError::Config("No upstreams available".to_string()))?; @@ -337,7 +363,7 @@ impl UpstreamManager { }; match self.connect_via_upstream(&upstream, target, bind_rr).await { - Ok(stream) => { + Ok((stream, egress)) => { let rtt_ms = start.elapsed().as_secs_f64() * 1000.0; let mut guard = self.upstreams.write().await; if let Some(u) = guard.get_mut(idx) { @@ -351,7 +377,7 @@ impl UpstreamManager { u.dc_latency[di].update(rtt_ms); } } - Ok(stream) + Ok((stream, egress)) }, Err(e) => { let mut guard = self.upstreams.write().await; @@ -373,7 +399,7 @@ impl UpstreamManager { config: &UpstreamConfig, target: SocketAddr, bind_rr: Option>, - ) -> Result { + ) -> Result<(TcpStream, UpstreamEgressInfo)> { match &config.upstream_type { UpstreamType::Direct { interface, bind_addresses } => { let bind_ip = Self::resolve_bind_address( @@ -414,7 +440,16 @@ impl UpstreamManager { return Err(ProxyError::Io(e)); } - Ok(stream) + let local_addr = stream.local_addr().ok(); + Ok(( + stream, + UpstreamEgressInfo { + route_kind: UpstreamRouteKind::Direct, + local_addr, + direct_bind_ip: bind_ip, + socks_bound_addr: None, + }, + )) }, UpstreamType::Socks4 { address, interface, user_id } => { let connect_timeout = Duration::from_secs(DIRECT_CONNECT_TIMEOUT_SECS); @@ -467,16 +502,30 @@ impl UpstreamManager { .filter(|s| !s.is_empty()); let _user_id: Option<&str> = scope.or(user_id.as_deref()); - match tokio::time::timeout(connect_timeout, connect_socks4(&mut stream, target, _user_id)).await { - Ok(Ok(())) => {} + let bound = match tokio::time::timeout( + connect_timeout, + connect_socks4(&mut stream, target, _user_id), + ) + .await + { + Ok(Ok(bound)) => bound, Ok(Err(e)) => return Err(e), Err(_) => { return Err(ProxyError::ConnectionTimeout { addr: target.to_string(), 
}); } - } - Ok(stream) + }; + let local_addr = stream.local_addr().ok(); + Ok(( + stream, + UpstreamEgressInfo { + route_kind: UpstreamRouteKind::Socks4, + local_addr, + direct_bind_ip: None, + socks_bound_addr: Some(bound.addr), + }, + )) }, UpstreamType::Socks5 { address, interface, username, password } => { let connect_timeout = Duration::from_secs(DIRECT_CONNECT_TIMEOUT_SECS); @@ -531,21 +580,30 @@ impl UpstreamManager { let _username: Option<&str> = scope.or(username.as_deref()); let _password: Option<&str> = scope.or(password.as_deref()); - match tokio::time::timeout( + let bound = match tokio::time::timeout( connect_timeout, connect_socks5(&mut stream, target, _username, _password), ) .await { - Ok(Ok(())) => {} + Ok(Ok(bound)) => bound, Ok(Err(e)) => return Err(e), Err(_) => { return Err(ProxyError::ConnectionTimeout { addr: target.to_string(), }); } - } - Ok(stream) + }; + let local_addr = stream.local_addr().ok(); + Ok(( + stream, + UpstreamEgressInfo { + route_kind: UpstreamRouteKind::Socks5, + local_addr, + direct_bind_ip: None, + socks_bound_addr: Some(bound.addr), + }, + )) }, } } @@ -777,7 +835,7 @@ impl UpstreamManager { target: SocketAddr, ) -> Result { let start = Instant::now(); - let _stream = self.connect_via_upstream(config, target, bind_rr).await?; + let _ = self.connect_via_upstream(config, target, bind_rr).await?; Ok(start.elapsed().as_secs_f64() * 1000.0) } From 05edbab06ca7fefddecd8083db0efdfecdec167c Mon Sep 17 00:00:00 2001 From: Dimasssss Date: Sat, 28 Feb 2026 01:20:49 +0300 Subject: [PATCH 79/98] Update README.md MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Нашелся тот, кто не смог найти ссылку. 
--- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index e2a898f..093f2cd 100644 --- a/README.md +++ b/README.md @@ -191,6 +191,8 @@ then Ctrl+X -> Y -> Enter to save **5.** In Shell type `systemctl enable telemt` - then telemt will start with system startup, after the network is up +**6.** In Shell type `journalctl -u telemt -n -g "links" --no-pager -o cat | tac` - get the connection links + ## Configuration ### Minimal Configuration for First Start ```toml From 6b8aa7270ecb18c54836f53bc6612d88cedee9be Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Sat, 28 Feb 2026 01:54:29 +0300 Subject: [PATCH 80/98] Bind_addresses prio over interfaces --- src/transport/upstream.rs | 79 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 78 insertions(+), 1 deletion(-) diff --git a/src/transport/upstream.rs b/src/transport/upstream.rs index eff05b8..edcf476 100644 --- a/src/transport/upstream.rs +++ b/src/transport/upstream.rs @@ -185,21 +185,82 @@ impl UpstreamManager { } } + #[cfg(unix)] + fn resolve_interface_addrs(name: &str, want_ipv6: bool) -> Vec { + use nix::ifaddrs::getifaddrs; + + let mut out = Vec::new(); + if let Ok(addrs) = getifaddrs() { + for iface in addrs { + if iface.interface_name != name { + continue; + } + if let Some(address) = iface.address { + if let Some(v4) = address.as_sockaddr_in() { + if !want_ipv6 { + out.push(IpAddr::V4(v4.ip())); + } + } else if let Some(v6) = address.as_sockaddr_in6() + && want_ipv6 + { + out.push(IpAddr::V6(v6.ip())); + } + } + } + } + out.sort_unstable(); + out.dedup(); + out + } + fn resolve_bind_address( interface: &Option, bind_addresses: &Option>, target: SocketAddr, rr: Option<&AtomicUsize>, + validate_ip_on_interface: bool, ) -> Option { let want_ipv6 = target.is_ipv6(); if let Some(addrs) = bind_addresses { - let candidates: Vec = addrs + let mut candidates: Vec = addrs .iter() .filter_map(|s| s.parse::().ok()) .filter(|ip| ip.is_ipv6() 
== want_ipv6) .collect(); + // Explicit bind IP has strict priority over interface auto-selection. + if validate_ip_on_interface + && let Some(iface) = interface + && iface.parse::().is_err() + { + #[cfg(unix)] + { + let iface_addrs = Self::resolve_interface_addrs(iface, want_ipv6); + if !iface_addrs.is_empty() { + candidates.retain(|ip| { + let ok = iface_addrs.contains(ip); + if !ok { + warn!( + interface = %iface, + bind_ip = %ip, + target = %target, + "Configured bind address is not assigned to interface" + ); + } + ok + }); + } else if !candidates.is_empty() { + warn!( + interface = %iface, + target = %target, + "Configured interface has no addresses for target family; falling back to direct connect without bind" + ); + candidates.clear(); + } + } + } + if !candidates.is_empty() { if let Some(counter) = rr { let idx = counter.fetch_add(1, Ordering::Relaxed) % candidates.len(); @@ -207,6 +268,19 @@ impl UpstreamManager { } return candidates.first().copied(); } + + if validate_ip_on_interface + && interface + .as_ref() + .is_some_and(|iface| iface.parse::().is_err()) + { + warn!( + interface = interface.as_deref().unwrap_or(""), + target = %target, + "No valid bind_addresses left for interface; falling back to direct connect without bind" + ); + return None; + } } if let Some(iface) = interface { @@ -407,6 +481,7 @@ impl UpstreamManager { bind_addresses, target, bind_rr.as_deref(), + true, ); let socket = create_outgoing_socket_bound(target, bind_ip)?; @@ -461,6 +536,7 @@ impl UpstreamManager { &None, proxy_addr, bind_rr.as_deref(), + false, ); let socket = create_outgoing_socket_bound(proxy_addr, bind_ip)?; @@ -537,6 +613,7 @@ impl UpstreamManager { &None, proxy_addr, bind_rr.as_deref(), + false, ); let socket = create_outgoing_socket_bound(proxy_addr, bind_ip)?; From e0d5561095d7a3d990ec2c26e9c1933cba59643b Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Sat, 28 Feb 2026 02:19:19 +0300 Subject: [PATCH 81/98] TUNING.md 
--- docs/TUNING.de.md | 219 ++++++++++++++++++++++++++++++++++++++++++++++ docs/TUNING.en.md | 219 ++++++++++++++++++++++++++++++++++++++++++++++ docs/TUNING.ru.md | 219 ++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 657 insertions(+) create mode 100644 docs/TUNING.de.md create mode 100644 docs/TUNING.en.md create mode 100644 docs/TUNING.ru.md diff --git a/docs/TUNING.de.md b/docs/TUNING.de.md new file mode 100644 index 0000000..8c3c950 --- /dev/null +++ b/docs/TUNING.de.md @@ -0,0 +1,219 @@ +# Telemt Tuning-Leitfaden: Middle-End und Upstreams + +Dieses Dokument beschreibt das aktuelle Laufzeitverhalten für Middle-End (ME) und Upstream-Routing basierend auf: +- `src/config/types.rs` +- `src/config/defaults.rs` +- `src/config/load.rs` +- `src/transport/upstream.rs` + +Die unten angegebenen `Default`-Werte sind Code-Defaults (bei fehlendem Schlüssel), nicht zwingend die Werte aus `config.full.toml`. + +## Middle-End-Parameter + +### 1) ME-Grundmodus, NAT und STUN + +| Parameter | Typ | Default | Einschränkungen / Validierung | Laufzeiteffekt | Beispiel | +|---|---|---:|---|---|---| +| `general.use_middle_proxy` | `bool` | `true` | keine | Aktiviert den ME-Transportmodus. Bei `false` wird Direct-Modus verwendet. | `use_middle_proxy = true` | +| `general.proxy_secret_path` | `Option` | `"proxy-secret"` | Pfad kann `null` sein | Pfad zur Telegram-Infrastrukturdatei `proxy-secret`. | `proxy_secret_path = "proxy-secret"` | +| `general.middle_proxy_nat_ip` | `Option` | `null` | gültige IP bei gesetztem Wert | Manueller Override der öffentlichen NAT-IP für ME-Adressmaterial. | `middle_proxy_nat_ip = "203.0.113.10"` | +| `general.middle_proxy_nat_probe` | `bool` | `true` | wird auf `true` erzwungen, wenn `use_middle_proxy=true` | Aktiviert NAT-Probing für ME. | `middle_proxy_nat_probe = true` | +| `general.stun_nat_probe_concurrency` | `usize` | `8` | muss `> 0` sein | Maximale parallele STUN-Probes während NAT-Erkennung. 
| `stun_nat_probe_concurrency = 16` | +| `network.stun_use` | `bool` | `true` | keine | Globaler STUN-Schalter. Bei `false` wird STUN deaktiviert. | `stun_use = true` | +| `network.stun_servers` | `Vec` | integrierter öffentlicher Pool | Duplikate/leer werden entfernt | Primäre STUN-Serverliste für NAT/Public-Endpoint-Erkennung. | `stun_servers = ["stun1.l.google.com:19302"]` | +| `network.stun_tcp_fallback` | `bool` | `true` | keine | Aktiviert TCP-Fallback, wenn UDP-STUN blockiert ist. | `stun_tcp_fallback = true` | +| `network.http_ip_detect_urls` | `Vec` | `ifconfig.me` + `api.ipify.org` | keine | HTTP-Fallback zur öffentlichen IPv4-Erkennung, falls STUN ausfällt. | `http_ip_detect_urls = ["https://api.ipify.org"]` | +| `general.stun_iface_mismatch_ignore` | `bool` | `false` | keine | Reserviertes Feld in der aktuellen Revision (derzeit kein aktiver Runtime-Verbrauch). | `stun_iface_mismatch_ignore = false` | +| `timeouts.me_one_retry` | `u8` | `12` | keine | Anzahl schneller Reconnect-Versuche bei Single-Endpoint-DC-Fällen. | `me_one_retry = 6` | +| `timeouts.me_one_timeout_ms` | `u64` | `1200` | keine | Timeout pro schnellem Einzelversuch (ms). | `me_one_timeout_ms = 1500` | + +### 2) Poolgröße, Keepalive und Reconnect-Policy + +| Parameter | Typ | Default | Einschränkungen / Validierung | Laufzeiteffekt | Beispiel | +|---|---|---:|---|---|---| +| `general.middle_proxy_pool_size` | `usize` | `8` | keine | Zielgröße des aktiven ME-Writer-Pools. | `middle_proxy_pool_size = 12` | +| `general.middle_proxy_warm_standby` | `usize` | `16` | keine | Reserviertes Kompatibilitätsfeld in der aktuellen Revision (kein aktiver Runtime-Consumer). | `middle_proxy_warm_standby = 16` | +| `general.me_keepalive_enabled` | `bool` | `true` | keine | Aktiviert periodischen ME-Keepalive/Ping-Traffic. | `me_keepalive_enabled = true` | +| `general.me_keepalive_interval_secs` | `u64` | `25` | keine | Basisintervall für Keepalive (Sekunden). 
| `me_keepalive_interval_secs = 20` | +| `general.me_keepalive_jitter_secs` | `u64` | `5` | keine | Keepalive-Jitter zur Vermeidung synchroner Peaks. | `me_keepalive_jitter_secs = 3` | +| `general.me_keepalive_payload_random` | `bool` | `true` | keine | Randomisiert Keepalive-Payload-Bytes. | `me_keepalive_payload_random = true` | +| `general.me_warmup_stagger_enabled` | `bool` | `true` | keine | Aktiviert gestaffeltes Warmup zusätzlicher ME-Verbindungen. | `me_warmup_stagger_enabled = true` | +| `general.me_warmup_step_delay_ms` | `u64` | `500` | keine | Basisverzögerung zwischen Warmup-Schritten (ms). | `me_warmup_step_delay_ms = 300` | +| `general.me_warmup_step_jitter_ms` | `u64` | `300` | keine | Zusätzlicher zufälliger Warmup-Jitter (ms). | `me_warmup_step_jitter_ms = 200` | +| `general.me_reconnect_max_concurrent_per_dc` | `u32` | `8` | keine | Begrenzung paralleler Reconnect-Worker pro DC. | `me_reconnect_max_concurrent_per_dc = 12` | +| `general.me_reconnect_backoff_base_ms` | `u64` | `500` | keine | Initiales Reconnect-Backoff (ms). | `me_reconnect_backoff_base_ms = 250` | +| `general.me_reconnect_backoff_cap_ms` | `u64` | `30000` | keine | Maximales Reconnect-Backoff (ms). | `me_reconnect_backoff_cap_ms = 10000` | +| `general.me_reconnect_fast_retry_count` | `u32` | `16` | keine | Budget für Sofort-Retries vor längerem Backoff. | `me_reconnect_fast_retry_count = 8` | + +### 3) Reinit/Hardswap, Secret-Rotation und Degradation + +| Parameter | Typ | Default | Einschränkungen / Validierung | Laufzeiteffekt | Beispiel | +|---|---|---:|---|---|---| +| `general.hardswap` | `bool` | `true` | keine | Aktiviert generation-basierte Hardswap-Strategie für den ME-Pool. | `hardswap = true` | +| `general.me_reinit_every_secs` | `u64` | `900` | muss `> 0` sein | Intervall für periodische ME-Reinitialisierung. 
| `me_reinit_every_secs = 600` | +| `general.me_hardswap_warmup_delay_min_ms` | `u64` | `1000` | muss `<= me_hardswap_warmup_delay_max_ms` sein | Untere Grenze für Warmup-Dial-Abstände. | `me_hardswap_warmup_delay_min_ms = 500` | +| `general.me_hardswap_warmup_delay_max_ms` | `u64` | `2000` | muss `> 0` sein | Obere Grenze für Warmup-Dial-Abstände. | `me_hardswap_warmup_delay_max_ms = 1200` | +| `general.me_hardswap_warmup_extra_passes` | `u8` | `3` | Bereich `[0,10]` | Zusätzliche Warmup-Pässe nach dem Basispass. | `me_hardswap_warmup_extra_passes = 2` | +| `general.me_hardswap_warmup_pass_backoff_base_ms` | `u64` | `500` | muss `> 0` sein | Basis-Backoff zwischen zusätzlichen Warmup-Pässen. | `me_hardswap_warmup_pass_backoff_base_ms = 400` | +| `general.me_config_stable_snapshots` | `u8` | `2` | muss `> 0` sein | Anzahl identischer ME-Config-Snapshots vor Apply. | `me_config_stable_snapshots = 3` | +| `general.me_config_apply_cooldown_secs` | `u64` | `300` | keine | Cooldown zwischen angewendeten ME-Map-Updates. | `me_config_apply_cooldown_secs = 120` | +| `general.proxy_secret_stable_snapshots` | `u8` | `2` | muss `> 0` sein | Anzahl identischer Secret-Snapshots vor Rotation. | `proxy_secret_stable_snapshots = 3` | +| `general.proxy_secret_rotate_runtime` | `bool` | `true` | keine | Aktiviert Runtime-Rotation des Proxy-Secrets. | `proxy_secret_rotate_runtime = true` | +| `general.proxy_secret_len_max` | `usize` | `256` | Bereich `[32,4096]` | Obergrenze für akzeptierte Secret-Länge. | `proxy_secret_len_max = 512` | +| `general.update_every` | `Option` | `300` | wenn gesetzt: `> 0`; bei `null`: Legacy-Min-Fallback | Einheitliches Refresh-Intervall für ME-Config + Secret-Updater. | `update_every = 300` | +| `general.me_pool_drain_ttl_secs` | `u64` | `90` | keine | Zeitraum, in dem stale Writer noch als Fallback zulässig sind. 
| `me_pool_drain_ttl_secs = 120` | +| `general.me_pool_min_fresh_ratio` | `f32` | `0.8` | Bereich `[0.0,1.0]` | Coverage-Schwelle vor Drain der alten Generation. | `me_pool_min_fresh_ratio = 0.9` | +| `general.me_reinit_drain_timeout_secs` | `u64` | `120` | `0` = kein Force-Close; wenn `>0 && < TTL`, dann auf TTL angehoben | Force-Close-Timeout für draining stale Writer. | `me_reinit_drain_timeout_secs = 0` | +| `general.auto_degradation_enabled` | `bool` | `true` | keine | Reserviertes Kompatibilitätsfeld in aktueller Revision (kein aktiver Runtime-Consumer). | `auto_degradation_enabled = true` | +| `general.degradation_min_unavailable_dc_groups` | `u8` | `2` | keine | Reservierter Kompatibilitäts-Schwellenwert in aktueller Revision (kein aktiver Runtime-Consumer). | `degradation_min_unavailable_dc_groups = 2` | + +## Deprecated / Legacy Parameter + +| Parameter | Status | Ersatz | Aktuelles Verhalten | Migrationshinweis | +|---|---|---|---|---| +| `general.middle_proxy_nat_stun` | Deprecated | `network.stun_servers` | Wird nur dann in `network.stun_servers` gemerged, wenn `network.stun_servers` nicht explizit gesetzt ist. | Wert nach `network.stun_servers` verschieben, Legacy-Key entfernen. | +| `general.middle_proxy_nat_stun_servers` | Deprecated | `network.stun_servers` | Wird nur dann in `network.stun_servers` gemerged, wenn `network.stun_servers` nicht explizit gesetzt ist. | Werte nach `network.stun_servers` verschieben, Legacy-Key entfernen. | +| `general.proxy_secret_auto_reload_secs` | Deprecated | `general.update_every` | Nur aktiv, wenn `update_every = null` (Legacy-Fallback). | `general.update_every` explizit setzen, Legacy-Key entfernen. | +| `general.proxy_config_auto_reload_secs` | Deprecated | `general.update_every` | Nur aktiv, wenn `update_every = null` (Legacy-Fallback). | `general.update_every` explizit setzen, Legacy-Key entfernen. 
| + +## Wie Upstreams konfiguriert werden + +### Upstream-Schema + +| Feld | Gilt für | Typ | Pflicht | Default | Bedeutung | +|---|---|---|---|---|---| +| `[[upstreams]].type` | alle Upstreams | `"direct" \| "socks4" \| "socks5"` | ja | n/a | Upstream-Transporttyp. | +| `[[upstreams]].weight` | alle Upstreams | `u16` | nein | `1` | Basisgewicht für weighted-random Auswahl. | +| `[[upstreams]].enabled` | alle Upstreams | `bool` | nein | `true` | Deaktivierte Einträge werden beim Start ignoriert. | +| `[[upstreams]].scopes` | alle Upstreams | `String` | nein | `""` | Komma-separierte Scope-Tags für Request-Routing. | +| `interface` | `direct` | `Option<String>` | nein | `null` | Interface-Name (z. B. `eth0`) oder lokale Literal-IP. | +| `bind_addresses` | `direct` | `Option<Vec<String>>` | nein | `null` | Explizite Source-IP-Kandidaten (strikter Vorrang vor `interface`). | +| `address` | `socks4` | `String` | ja | n/a | SOCKS4-Server (`ip:port` oder `host:port`). | +| `interface` | `socks4` | `Option<String>` | nein | `null` | Wird nur genutzt, wenn `address` als `ip:port` angegeben ist. | +| `user_id` | `socks4` | `Option<String>` | nein | `null` | SOCKS4 User-ID für CONNECT. | +| `address` | `socks5` | `String` | ja | n/a | SOCKS5-Server (`ip:port` oder `host:port`). | +| `interface` | `socks5` | `Option<String>` | nein | `null` | Wird nur genutzt, wenn `address` als `ip:port` angegeben ist. | +| `username` | `socks5` | `Option<String>` | nein | `null` | SOCKS5 Benutzername. | +| `password` | `socks5` | `Option<String>` | nein | `null` | SOCKS5 Passwort. | + +### Runtime-Regeln (wichtig) + +1. Wenn `[[upstreams]]` fehlt, injiziert der Loader einen Default-`direct`-Upstream. +2. Scope-Filterung basiert auf exaktem Token-Match: +- mit Request-Scope -> nur Einträge, deren `scopes` genau dieses Token enthält; +- ohne Request-Scope -> nur Einträge mit leerem `scopes`. +3. Unter healthy Upstreams erfolgt die Auswahl per weighted random: `weight * latency_factor`. +4. 
Gibt es im gefilterten Set keinen healthy Upstream, wird zufällig aus dem gefilterten Set gewählt. +5. `direct`-Bind-Auflösung: +- zuerst `bind_addresses` (nur gleiche IP-Familie wie Target); +- bei `interface` (Name) + `bind_addresses` wird jede Candidate-IP gegen Interface-Adressen validiert; +- ungültige Kandidaten werden mit `WARN` verworfen; +- bleiben keine gültigen Kandidaten übrig, erfolgt unbound direct connect (`bind_ip=None`); +- wenn `bind_addresses` nicht passt, wird `interface` verwendet (Literal-IP oder Interface-Primäradresse). +6. Für `socks4/socks5` mit Hostname-`address` ist Interface-Binding nicht unterstützt und wird mit Warnung ignoriert. +7. Runtime DNS Overrides werden für Hostname-Auflösung bei Upstream-Verbindungen genutzt. +8. Im ME-Modus wird der gewählte Upstream auch für den ME-TCP-Dial-Pfad verwendet. +9. Im ME-Modus ist bei `direct` mit bind/interface die STUN-Reflection bind-aware für KDF-Adressmaterial. +10. Im ME-Modus werden bei SOCKS-Upstream `BND.ADDR/BND.PORT` für KDF verwendet, wenn gültig/öffentlich und gleiche IP-Familie. 
+ +## Upstream-Konfigurationsbeispiele + +### Beispiel 1: Minimaler direct Upstream + +```toml +[[upstreams]] +type = "direct" +weight = 1 +enabled = true +``` + +### Beispiel 2: direct mit Interface + expliziten bind IPs + +```toml +[[upstreams]] +type = "direct" +interface = "eth0" +bind_addresses = ["192.168.1.100", "192.168.1.101"] +weight = 3 +enabled = true +``` + +### Beispiel 3: SOCKS5 Upstream mit Authentifizierung + +```toml +[[upstreams]] +type = "socks5" +address = "198.51.100.30:1080" +username = "proxy-user" +password = "proxy-pass" +weight = 2 +enabled = true +``` + +### Beispiel 4: Gemischte Upstreams mit Scopes + +```toml +[[upstreams]] +type = "direct" +weight = 5 +enabled = true +scopes = "" + +[[upstreams]] +type = "socks5" +address = "203.0.113.40:1080" +username = "edge" +password = "edgepass" +weight = 3 +enabled = true +scopes = "premium,me" +``` + +### Beispiel 5: ME-orientiertes Tuning-Profil + +```toml +[general] +use_middle_proxy = true +proxy_secret_path = "proxy-secret" +middle_proxy_nat_probe = true +stun_nat_probe_concurrency = 16 +middle_proxy_pool_size = 12 +me_keepalive_enabled = true +me_keepalive_interval_secs = 20 +me_keepalive_jitter_secs = 4 +me_reconnect_max_concurrent_per_dc = 12 +me_reconnect_backoff_base_ms = 300 +me_reconnect_backoff_cap_ms = 10000 +me_reconnect_fast_retry_count = 10 +hardswap = true +me_reinit_every_secs = 600 +me_hardswap_warmup_delay_min_ms = 500 +me_hardswap_warmup_delay_max_ms = 1200 +me_hardswap_warmup_extra_passes = 2 +me_hardswap_warmup_pass_backoff_base_ms = 400 +me_config_stable_snapshots = 3 +me_config_apply_cooldown_secs = 120 +proxy_secret_stable_snapshots = 3 +proxy_secret_rotate_runtime = true +proxy_secret_len_max = 512 +update_every = 300 +me_pool_drain_ttl_secs = 120 +me_pool_min_fresh_ratio = 0.9 +me_reinit_drain_timeout_secs = 180 + +[timeouts] +me_one_retry = 8 +me_one_timeout_ms = 1200 + +[network] +stun_use = true +stun_tcp_fallback = true +stun_servers = [ + 
"stun1.l.google.com:19302", + "stun2.l.google.com:19302" +] +http_ip_detect_urls = [ + "https://api.ipify.org", + "https://ifconfig.me/ip" +] +``` diff --git a/docs/TUNING.en.md b/docs/TUNING.en.md new file mode 100644 index 0000000..1bbc439 --- /dev/null +++ b/docs/TUNING.en.md @@ -0,0 +1,219 @@ +# Telemt Tuning Guide: Middle-End and Upstreams + +This document describes the current runtime behavior for Middle-End (ME) and upstream routing based on: +- `src/config/types.rs` +- `src/config/defaults.rs` +- `src/config/load.rs` +- `src/transport/upstream.rs` + +Defaults below are code defaults (used when a key is omitted), not necessarily values from `config.full.toml` examples. + +## Middle-End Parameters + +### 1) Core ME mode, NAT, and STUN + +| Parameter | Type | Default | Constraints / validation | Runtime effect | Example | +|---|---|---:|---|---|---| +| `general.use_middle_proxy` | `bool` | `true` | none | Enables ME transport mode. If `false`, Direct mode is used. | `use_middle_proxy = true` | +| `general.proxy_secret_path` | `Option` | `"proxy-secret"` | path may be `null` | Path to Telegram infrastructure proxy-secret file. | `proxy_secret_path = "proxy-secret"` | +| `general.middle_proxy_nat_ip` | `Option` | `null` | valid IP when set | Manual public NAT IP override for ME address material. | `middle_proxy_nat_ip = "203.0.113.10"` | +| `general.middle_proxy_nat_probe` | `bool` | `true` | auto-forced to `true` when `use_middle_proxy=true` | Enables ME NAT probing. | `middle_proxy_nat_probe = true` | +| `general.stun_nat_probe_concurrency` | `usize` | `8` | must be `> 0` | Max parallel STUN probes during NAT discovery. | `stun_nat_probe_concurrency = 16` | +| `network.stun_use` | `bool` | `true` | none | Global STUN switch. If `false`, STUN probing is disabled. | `stun_use = true` | +| `network.stun_servers` | `Vec` | built-in public pool | deduplicated + empty values removed | Primary STUN server list for NAT/public endpoint discovery. 
| `stun_servers = ["stun1.l.google.com:19302"]` | +| `network.stun_tcp_fallback` | `bool` | `true` | none | Enables TCP fallback path when UDP STUN is blocked. | `stun_tcp_fallback = true` | +| `network.http_ip_detect_urls` | `Vec` | `ifconfig.me` + `api.ipify.org` | none | HTTP fallback for public IPv4 detection if STUN is unavailable. | `http_ip_detect_urls = ["https://api.ipify.org"]` | +| `general.stun_iface_mismatch_ignore` | `bool` | `false` | none | Reserved flag in current revision (not consumed by runtime path). | `stun_iface_mismatch_ignore = false` | +| `timeouts.me_one_retry` | `u8` | `12` | none | Fast reconnect attempts for single-endpoint DC cases. | `me_one_retry = 6` | +| `timeouts.me_one_timeout_ms` | `u64` | `1200` | none | Timeout per quick single-endpoint attempt (ms). | `me_one_timeout_ms = 1500` | + +### 2) Pool size, keepalive, and reconnect policy + +| Parameter | Type | Default | Constraints / validation | Runtime effect | Example | +|---|---|---:|---|---|---| +| `general.middle_proxy_pool_size` | `usize` | `8` | none | Target active ME writer pool size. | `middle_proxy_pool_size = 12` | +| `general.middle_proxy_warm_standby` | `usize` | `16` | none | Reserved compatibility field in current revision (no active runtime consumer). | `middle_proxy_warm_standby = 16` | +| `general.me_keepalive_enabled` | `bool` | `true` | none | Enables periodic ME keepalive/ping traffic. | `me_keepalive_enabled = true` | +| `general.me_keepalive_interval_secs` | `u64` | `25` | none | Base keepalive interval (seconds). | `me_keepalive_interval_secs = 20` | +| `general.me_keepalive_jitter_secs` | `u64` | `5` | none | Keepalive jitter to avoid synchronization bursts. | `me_keepalive_jitter_secs = 3` | +| `general.me_keepalive_payload_random` | `bool` | `true` | none | Randomizes keepalive payload bytes. | `me_keepalive_payload_random = true` | +| `general.me_warmup_stagger_enabled` | `bool` | `true` | none | Staggers extra ME warmup dials to avoid spikes. 
| `me_warmup_stagger_enabled = true` | +| `general.me_warmup_step_delay_ms` | `u64` | `500` | none | Base delay between warmup dial steps (ms). | `me_warmup_step_delay_ms = 300` | +| `general.me_warmup_step_jitter_ms` | `u64` | `300` | none | Additional random delay for warmup steps (ms). | `me_warmup_step_jitter_ms = 200` | +| `general.me_reconnect_max_concurrent_per_dc` | `u32` | `8` | none | Limits concurrent reconnect workers per DC in health recovery. | `me_reconnect_max_concurrent_per_dc = 12` | +| `general.me_reconnect_backoff_base_ms` | `u64` | `500` | none | Initial reconnect backoff (ms). | `me_reconnect_backoff_base_ms = 250` | +| `general.me_reconnect_backoff_cap_ms` | `u64` | `30000` | none | Maximum reconnect backoff (ms). | `me_reconnect_backoff_cap_ms = 10000` | +| `general.me_reconnect_fast_retry_count` | `u32` | `16` | none | Immediate retry budget before long backoff behavior. | `me_reconnect_fast_retry_count = 8` | + +### 3) Reinit/hardswap, secret rotation, and degradation + +| Parameter | Type | Default | Constraints / validation | Runtime effect | Example | +|---|---|---:|---|---|---| +| `general.hardswap` | `bool` | `true` | none | Enables generation-based ME hardswap strategy. | `hardswap = true` | +| `general.me_reinit_every_secs` | `u64` | `900` | must be `> 0` | Periodic ME reinit interval. | `me_reinit_every_secs = 600` | +| `general.me_hardswap_warmup_delay_min_ms` | `u64` | `1000` | must be `<= me_hardswap_warmup_delay_max_ms` | Lower bound for hardswap warmup dial spacing. | `me_hardswap_warmup_delay_min_ms = 500` | +| `general.me_hardswap_warmup_delay_max_ms` | `u64` | `2000` | must be `> 0` | Upper bound for hardswap warmup dial spacing. | `me_hardswap_warmup_delay_max_ms = 1200` | +| `general.me_hardswap_warmup_extra_passes` | `u8` | `3` | must be within `[0,10]` | Additional warmup passes after base pass. 
| `me_hardswap_warmup_extra_passes = 2` | +| `general.me_hardswap_warmup_pass_backoff_base_ms` | `u64` | `500` | must be `> 0` | Base backoff between extra warmup passes. | `me_hardswap_warmup_pass_backoff_base_ms = 400` | +| `general.me_config_stable_snapshots` | `u8` | `2` | must be `> 0` | Number of identical ME config snapshots required before apply. | `me_config_stable_snapshots = 3` | +| `general.me_config_apply_cooldown_secs` | `u64` | `300` | none | Cooldown between applied ME map updates. | `me_config_apply_cooldown_secs = 120` | +| `general.proxy_secret_stable_snapshots` | `u8` | `2` | must be `> 0` | Number of identical proxy-secret snapshots required before rotation. | `proxy_secret_stable_snapshots = 3` | +| `general.proxy_secret_rotate_runtime` | `bool` | `true` | none | Enables runtime proxy-secret rotation. | `proxy_secret_rotate_runtime = true` | +| `general.proxy_secret_len_max` | `usize` | `256` | must be within `[32,4096]` | Upper limit for accepted proxy-secret length. | `proxy_secret_len_max = 512` | +| `general.update_every` | `Option` | `300` | if set: must be `> 0`; if `null`: legacy min fallback | Unified refresh interval for ME config + secret updater. | `update_every = 300` | +| `general.me_pool_drain_ttl_secs` | `u64` | `90` | none | Time window where stale writers remain fallback-eligible. | `me_pool_drain_ttl_secs = 120` | +| `general.me_pool_min_fresh_ratio` | `f32` | `0.8` | must be within `[0.0,1.0]` | Coverage threshold before stale generation can be drained. | `me_pool_min_fresh_ratio = 0.9` | +| `general.me_reinit_drain_timeout_secs` | `u64` | `120` | `0` means no force-close; if `>0 && < TTL` it is bumped to TTL | Force-close timeout for draining stale writers. | `me_reinit_drain_timeout_secs = 0` | +| `general.auto_degradation_enabled` | `bool` | `true` | none | Reserved compatibility flag in current revision (no active runtime consumer). 
| `auto_degradation_enabled = true` | +| `general.degradation_min_unavailable_dc_groups` | `u8` | `2` | none | Reserved compatibility threshold in current revision (no active runtime consumer). | `degradation_min_unavailable_dc_groups = 2` | + +## Deprecated / Legacy Parameters + +| Parameter | Status | Replacement | Current behavior | Migration recommendation | +|---|---|---|---|---| +| `general.middle_proxy_nat_stun` | Deprecated | `network.stun_servers` | Merged into `network.stun_servers` only when `network.stun_servers` is not explicitly set. | Move value into `network.stun_servers` and remove legacy key. | +| `general.middle_proxy_nat_stun_servers` | Deprecated | `network.stun_servers` | Merged into `network.stun_servers` only when `network.stun_servers` is not explicitly set. | Move values into `network.stun_servers` and remove legacy key. | +| `general.proxy_secret_auto_reload_secs` | Deprecated | `general.update_every` | Used only when `update_every = null` (legacy fallback path). | Set `general.update_every` explicitly and remove legacy key. | +| `general.proxy_config_auto_reload_secs` | Deprecated | `general.update_every` | Used only when `update_every = null` (legacy fallback path). | Set `general.update_every` explicitly and remove legacy key. | + +## How Upstreams Are Configured + +### Upstream schema + +| Field | Applies to | Type | Required | Default | Meaning | +|---|---|---|---|---|---| +| `[[upstreams]].type` | all upstreams | `"direct" \| "socks4" \| "socks5"` | yes | n/a | Upstream transport type. | +| `[[upstreams]].weight` | all upstreams | `u16` | no | `1` | Base weight for weighted-random selection. | +| `[[upstreams]].enabled` | all upstreams | `bool` | no | `true` | Disabled entries are ignored at startup. | +| `[[upstreams]].scopes` | all upstreams | `String` | no | `""` | Comma-separated scope tags for request-level routing. | +| `interface` | `direct` | `Option` | no | `null` | Interface name (e.g. 
`eth0`) or literal local IP for bind selection. | +| `bind_addresses` | `direct` | `Option<Vec<String>>` | no | `null` | Explicit local source IP candidates (strict priority over `interface`). | +| `address` | `socks4` | `String` | yes | n/a | SOCKS4 server endpoint (`ip:port` or `host:port`). | +| `interface` | `socks4` | `Option<String>` | no | `null` | Used only for SOCKS server `ip:port` dial path. | +| `user_id` | `socks4` | `Option<String>` | no | `null` | SOCKS4 user ID for CONNECT request. | +| `address` | `socks5` | `String` | yes | n/a | SOCKS5 server endpoint (`ip:port` or `host:port`). | +| `interface` | `socks5` | `Option<String>` | no | `null` | Used only for SOCKS server `ip:port` dial path. | +| `username` | `socks5` | `Option<String>` | no | `null` | SOCKS5 username auth. | +| `password` | `socks5` | `Option<String>` | no | `null` | SOCKS5 password auth. | + +### Runtime rules (important) + +1. If `[[upstreams]]` is omitted, loader injects one default `direct` upstream. +2. Scope filtering is exact-token based: +- when request scope is set -> only entries whose `scopes` contains that exact token; +- when request scope is not set -> only entries with empty `scopes`. +3. Healthy upstreams are selected by weighted random using: `weight * latency_factor`. +4. If no healthy upstream exists in filtered set, random selection is used among filtered entries. +5. `direct` bind resolution order: +- `bind_addresses` candidates (same IP family as target) first; +- if `interface` is an interface name and `bind_addresses` is set, each candidate IP is validated against addresses currently assigned to that interface; +- invalid candidates are dropped with `WARN`; +- if no valid candidate remains, connection falls back to unbound direct connect (`bind_ip=None`); +- if no `bind_addresses` candidate, `interface` is used (literal IP or resolved interface primary IP). +6. For `socks4/socks5` with `address` as hostname, interface binding is not supported and is ignored with warning. +7. 
Runtime DNS overrides are used for upstream hostname resolution. +8. In ME mode, the selected upstream is also used for ME TCP dial path. +9. In ME mode for `direct` upstream with bind/interface, STUN reflection logic is bind-aware for KDF source material. +10. In ME mode for SOCKS upstream, SOCKS `BND.ADDR/BND.PORT` is used for KDF when it is valid/public for the same family. + +## Upstream Configuration Examples + +### Example 1: Minimal direct upstream + +```toml +[[upstreams]] +type = "direct" +weight = 1 +enabled = true +``` + +### Example 2: Direct with interface + explicit bind addresses + +```toml +[[upstreams]] +type = "direct" +interface = "eth0" +bind_addresses = ["192.168.1.100", "192.168.1.101"] +weight = 3 +enabled = true +``` + +### Example 3: SOCKS5 upstream with authentication + +```toml +[[upstreams]] +type = "socks5" +address = "198.51.100.30:1080" +username = "proxy-user" +password = "proxy-pass" +weight = 2 +enabled = true +``` + +### Example 4: Mixed upstreams with scopes + +```toml +[[upstreams]] +type = "direct" +weight = 5 +enabled = true +scopes = "" + +[[upstreams]] +type = "socks5" +address = "203.0.113.40:1080" +username = "edge" +password = "edgepass" +weight = 3 +enabled = true +scopes = "premium,me" +``` + +### Example 5: ME-focused tuning profile + +```toml +[general] +use_middle_proxy = true +proxy_secret_path = "proxy-secret" +middle_proxy_nat_probe = true +stun_nat_probe_concurrency = 16 +middle_proxy_pool_size = 12 +me_keepalive_enabled = true +me_keepalive_interval_secs = 20 +me_keepalive_jitter_secs = 4 +me_reconnect_max_concurrent_per_dc = 12 +me_reconnect_backoff_base_ms = 300 +me_reconnect_backoff_cap_ms = 10000 +me_reconnect_fast_retry_count = 10 +hardswap = true +me_reinit_every_secs = 600 +me_hardswap_warmup_delay_min_ms = 500 +me_hardswap_warmup_delay_max_ms = 1200 +me_hardswap_warmup_extra_passes = 2 +me_hardswap_warmup_pass_backoff_base_ms = 400 +me_config_stable_snapshots = 3 +me_config_apply_cooldown_secs = 120 
+proxy_secret_stable_snapshots = 3 +proxy_secret_rotate_runtime = true +proxy_secret_len_max = 512 +update_every = 300 +me_pool_drain_ttl_secs = 120 +me_pool_min_fresh_ratio = 0.9 +me_reinit_drain_timeout_secs = 180 + +[timeouts] +me_one_retry = 8 +me_one_timeout_ms = 1200 + +[network] +stun_use = true +stun_tcp_fallback = true +stun_servers = [ + "stun1.l.google.com:19302", + "stun2.l.google.com:19302" +] +http_ip_detect_urls = [ + "https://api.ipify.org", + "https://ifconfig.me/ip" +] +``` diff --git a/docs/TUNING.ru.md b/docs/TUNING.ru.md new file mode 100644 index 0000000..48a2b6c --- /dev/null +++ b/docs/TUNING.ru.md @@ -0,0 +1,219 @@ +# Руководство по тюнингу Telemt: Middle-End и Upstreams + +Документ описывает актуальное поведение Middle-End (ME) и маршрутизации через upstream на основе: +- `src/config/types.rs` +- `src/config/defaults.rs` +- `src/config/load.rs` +- `src/transport/upstream.rs` + +Значения `Default` ниже — это значения из кода при отсутствии ключа в конфиге, а не обязательно значения из примеров `config.full.toml`. + +## Параметры Middle-End + +### 1) Базовый режим ME, NAT и STUN + +| Параметр | Тип | Default | Ограничения / валидация | Влияние на runtime | Пример | +|---|---|---:|---|---|---| +| `general.use_middle_proxy` | `bool` | `true` | нет | Включает транспорт ME. При `false` используется Direct-режим. | `use_middle_proxy = true` | +| `general.proxy_secret_path` | `Option` | `"proxy-secret"` | путь может быть `null` | Путь к инфраструктурному proxy-secret Telegram. | `proxy_secret_path = "proxy-secret"` | +| `general.middle_proxy_nat_ip` | `Option` | `null` | валидный IP при задании | Ручной override публичного NAT IP для адресного материала ME. | `middle_proxy_nat_ip = "203.0.113.10"` | +| `general.middle_proxy_nat_probe` | `bool` | `true` | авто-принудительно `true`, если `use_middle_proxy=true` | Включает NAT probing для ME. 
| `middle_proxy_nat_probe = true` | +| `general.stun_nat_probe_concurrency` | `usize` | `8` | должно быть `> 0` | Максимум параллельных STUN-проб при NAT-детекте. | `stun_nat_probe_concurrency = 16` | +| `network.stun_use` | `bool` | `true` | нет | Глобальный переключатель STUN. При `false` STUN отключен. | `stun_use = true` | +| `network.stun_servers` | `Vec` | встроенный публичный пул | удаляются дубликаты и пустые значения | Основной список STUN-серверов для NAT/public endpoint discovery. | `stun_servers = ["stun1.l.google.com:19302"]` | +| `network.stun_tcp_fallback` | `bool` | `true` | нет | Включает TCP fallback, если UDP STUN недоступен. | `stun_tcp_fallback = true` | +| `network.http_ip_detect_urls` | `Vec` | `ifconfig.me` + `api.ipify.org` | нет | HTTP fallback для определения публичного IPv4 при недоступности STUN. | `http_ip_detect_urls = ["https://api.ipify.org"]` | +| `general.stun_iface_mismatch_ignore` | `bool` | `false` | нет | Зарезервированный флаг в текущей ревизии (runtime его не использует). | `stun_iface_mismatch_ignore = false` | +| `timeouts.me_one_retry` | `u8` | `12` | нет | Количество быстрых reconnect-попыток для DC с одним endpoint. | `me_one_retry = 6` | +| `timeouts.me_one_timeout_ms` | `u64` | `1200` | нет | Таймаут одной быстрой попытки (мс). | `me_one_timeout_ms = 1500` | + +### 2) Размер пула, keepalive и reconnect-политика + +| Параметр | Тип | Default | Ограничения / валидация | Влияние на runtime | Пример | +|---|---|---:|---|---|---| +| `general.middle_proxy_pool_size` | `usize` | `8` | нет | Целевой размер активного пула ME-writer соединений. | `middle_proxy_pool_size = 12` | +| `general.middle_proxy_warm_standby` | `usize` | `16` | нет | Зарезервированное поле совместимости в текущей ревизии (активного runtime-consumer нет). | `middle_proxy_warm_standby = 16` | +| `general.me_keepalive_enabled` | `bool` | `true` | нет | Включает периодические keepalive/ping кадры ME. 
| `me_keepalive_enabled = true` | +| `general.me_keepalive_interval_secs` | `u64` | `25` | нет | Базовый интервал keepalive (сек). | `me_keepalive_interval_secs = 20` | +| `general.me_keepalive_jitter_secs` | `u64` | `5` | нет | Джиттер keepalive для предотвращения синхронных всплесков. | `me_keepalive_jitter_secs = 3` | +| `general.me_keepalive_payload_random` | `bool` | `true` | нет | Рандомизирует payload keepalive-кадров. | `me_keepalive_payload_random = true` | +| `general.me_warmup_stagger_enabled` | `bool` | `true` | нет | Включает staggered warmup дополнительных ME-коннектов. | `me_warmup_stagger_enabled = true` | +| `general.me_warmup_step_delay_ms` | `u64` | `500` | нет | Базовая задержка между шагами warmup (мс). | `me_warmup_step_delay_ms = 300` | +| `general.me_warmup_step_jitter_ms` | `u64` | `300` | нет | Дополнительный случайный warmup-джиттер (мс). | `me_warmup_step_jitter_ms = 200` | +| `general.me_reconnect_max_concurrent_per_dc` | `u32` | `8` | нет | Ограничивает параллельные reconnect worker'ы на один DC. | `me_reconnect_max_concurrent_per_dc = 12` | +| `general.me_reconnect_backoff_base_ms` | `u64` | `500` | нет | Начальный backoff reconnect (мс). | `me_reconnect_backoff_base_ms = 250` | +| `general.me_reconnect_backoff_cap_ms` | `u64` | `30000` | нет | Верхняя граница backoff reconnect (мс). | `me_reconnect_backoff_cap_ms = 10000` | +| `general.me_reconnect_fast_retry_count` | `u32` | `16` | нет | Бюджет быстрых retry до длинного backoff. | `me_reconnect_fast_retry_count = 8` | + +### 3) Reinit/hardswap, ротация секрета и деградация + +| Параметр | Тип | Default | Ограничения / валидация | Влияние на runtime | Пример | +|---|---|---:|---|---|---| +| `general.hardswap` | `bool` | `true` | нет | Включает generation-based стратегию hardswap для ME-пула. | `hardswap = true` | +| `general.me_reinit_every_secs` | `u64` | `900` | должно быть `> 0` | Интервал периодического reinit ME-пула. 
| `me_reinit_every_secs = 600` | +| `general.me_hardswap_warmup_delay_min_ms` | `u64` | `1000` | должно быть `<= me_hardswap_warmup_delay_max_ms` | Нижняя граница пауз между warmup dial попытками. | `me_hardswap_warmup_delay_min_ms = 500` | +| `general.me_hardswap_warmup_delay_max_ms` | `u64` | `2000` | должно быть `> 0` | Верхняя граница пауз между warmup dial попытками. | `me_hardswap_warmup_delay_max_ms = 1200` | +| `general.me_hardswap_warmup_extra_passes` | `u8` | `3` | диапазон `[0,10]` | Дополнительные warmup-проходы после базового. | `me_hardswap_warmup_extra_passes = 2` | +| `general.me_hardswap_warmup_pass_backoff_base_ms` | `u64` | `500` | должно быть `> 0` | Базовый backoff между extra-pass в warmup. | `me_hardswap_warmup_pass_backoff_base_ms = 400` | +| `general.me_config_stable_snapshots` | `u8` | `2` | должно быть `> 0` | Количество одинаковых snapshot перед применением ME map update. | `me_config_stable_snapshots = 3` | +| `general.me_config_apply_cooldown_secs` | `u64` | `300` | нет | Cooldown между применёнными обновлениями ME map. | `me_config_apply_cooldown_secs = 120` | +| `general.proxy_secret_stable_snapshots` | `u8` | `2` | должно быть `> 0` | Количество одинаковых snapshot перед runtime-rotation proxy-secret. | `proxy_secret_stable_snapshots = 3` | +| `general.proxy_secret_rotate_runtime` | `bool` | `true` | нет | Включает runtime-ротацию proxy-secret. | `proxy_secret_rotate_runtime = true` | +| `general.proxy_secret_len_max` | `usize` | `256` | диапазон `[32,4096]` | Верхний лимит длины принимаемого proxy-secret. | `proxy_secret_len_max = 512` | +| `general.update_every` | `Option` | `300` | если задано: `> 0`; если `null`: fallback на legacy минимум | Единый интервал refresh для ME config + secret updater. | `update_every = 300` | +| `general.me_pool_drain_ttl_secs` | `u64` | `90` | нет | Время, когда stale writer ещё может использоваться как fallback. 
| `me_pool_drain_ttl_secs = 120` | +| `general.me_pool_min_fresh_ratio` | `f32` | `0.8` | диапазон `[0.0,1.0]` | Порог покрытия fresh-поколения перед drain старого поколения. | `me_pool_min_fresh_ratio = 0.9` | +| `general.me_reinit_drain_timeout_secs` | `u64` | `120` | `0` = без force-close; если `>0 && < TTL`, поднимается до TTL | Таймаут force-close для draining stale writer. | `me_reinit_drain_timeout_secs = 0` | +| `general.auto_degradation_enabled` | `bool` | `true` | нет | Зарезервированный флаг совместимости в текущей ревизии (активного runtime-consumer нет). | `auto_degradation_enabled = true` | +| `general.degradation_min_unavailable_dc_groups` | `u8` | `2` | нет | Зарезервированный порог совместимости в текущей ревизии (активного runtime-consumer нет). | `degradation_min_unavailable_dc_groups = 2` | + +## Устаревшие / legacy параметры + +| Параметр | Статус | Замена | Текущее поведение | Рекомендация миграции | +|---|---|---|---|---| +| `general.middle_proxy_nat_stun` | Deprecated | `network.stun_servers` | Добавляется в `network.stun_servers`, только если `network.stun_servers` не задан явно. | Перенести значение в `network.stun_servers`, legacy-ключ удалить. | +| `general.middle_proxy_nat_stun_servers` | Deprecated | `network.stun_servers` | Добавляется в `network.stun_servers`, только если `network.stun_servers` не задан явно. | Перенести значения в `network.stun_servers`, legacy-ключ удалить. | +| `general.proxy_secret_auto_reload_secs` | Deprecated | `general.update_every` | Используется только если `update_every = null` (legacy fallback). | Явно задать `general.update_every`, legacy-ключ удалить. | +| `general.proxy_config_auto_reload_secs` | Deprecated | `general.update_every` | Используется только если `update_every = null` (legacy fallback). | Явно задать `general.update_every`, legacy-ключ удалить. 
| + +## Как конфигурируются Upstreams + +### Схема upstream + +| Поле | Применимость | Тип | Обязательно | Default | Назначение | +|---|---|---|---|---|---| +| `[[upstreams]].type` | все upstream | `"direct" \| "socks4" \| "socks5"` | да | n/a | Тип upstream транспорта. | +| `[[upstreams]].weight` | все upstream | `u16` | нет | `1` | Базовый вес в weighted-random выборе. | +| `[[upstreams]].enabled` | все upstream | `bool` | нет | `true` | Выключенные записи игнорируются на старте. | +| `[[upstreams]].scopes` | все upstream | `String` | нет | `""` | Список scope-токенов через запятую для маршрутизации. | +| `interface` | `direct` | `Option` | нет | `null` | Имя интерфейса (например `eth0`) или literal локальный IP. | +| `bind_addresses` | `direct` | `Option>` | нет | `null` | Явные кандидаты source IP (имеют приоритет над `interface`). | +| `address` | `socks4` | `String` | да | n/a | Адрес SOCKS4 сервера (`ip:port` или `host:port`). | +| `interface` | `socks4` | `Option` | нет | `null` | Используется только если `address` задан как `ip:port`. | +| `user_id` | `socks4` | `Option` | нет | `null` | SOCKS4 user ID в CONNECT-запросе. | +| `address` | `socks5` | `String` | да | n/a | Адрес SOCKS5 сервера (`ip:port` или `host:port`). | +| `interface` | `socks5` | `Option` | нет | `null` | Используется только если `address` задан как `ip:port`. | +| `username` | `socks5` | `Option` | нет | `null` | Логин SOCKS5 auth. | +| `password` | `socks5` | `Option` | нет | `null` | Пароль SOCKS5 auth. | + +### Runtime-правила (важно) + +1. Если `[[upstreams]]` отсутствует, loader добавляет один upstream `direct` по умолчанию. +2. Scope-фильтрация — по точному совпадению токена: +- если scope запроса задан -> используются только записи, где `scopes` содержит такой же токен; +- если scope запроса не задан -> используются только записи с пустым `scopes`. +3. Среди healthy upstream используется weighted-random выбор: `weight * latency_factor`. +4. 
Если в отфильтрованном наборе нет healthy upstream, выбирается случайный из отфильтрованных. +5. Порядок выбора bind для `direct`: +- сначала `bind_addresses` (только IP нужного семейства); +- если одновременно заданы `interface` (имя) и `bind_addresses`, каждый IP проверяется на принадлежность интерфейсу; +- несовпадающие IP отбрасываются с `WARN`; +- если валидных IP не осталось, используется unbound direct connect (`bind_ip=None`); +- если `bind_addresses` не подходит, применяется `interface` (literal IP или адрес интерфейса). +6. Для `socks4/socks5` с `address` в виде hostname интерфейсный bind не поддерживается и игнорируется с предупреждением. +7. Runtime DNS overrides применяются к резолвингу hostname в upstream-подключениях. +8. В ME-режиме выбранный upstream также используется для ME TCP dial path. +9. В ME-режиме для `direct` upstream с bind/interface STUN-рефлексия выполняется bind-aware для KDF материала. +10. В ME-режиме для SOCKS upstream используются `BND.ADDR/BND.PORT` для KDF, если адрес валиден/публичен и соответствует IP family. 
+ +## Примеры конфигурации Upstreams + +### Пример 1: минимальный direct upstream + +```toml +[[upstreams]] +type = "direct" +weight = 1 +enabled = true +``` + +### Пример 2: direct с interface + явными bind IP + +```toml +[[upstreams]] +type = "direct" +interface = "eth0" +bind_addresses = ["192.168.1.100", "192.168.1.101"] +weight = 3 +enabled = true +``` + +### Пример 3: SOCKS5 upstream с аутентификацией + +```toml +[[upstreams]] +type = "socks5" +address = "198.51.100.30:1080" +username = "proxy-user" +password = "proxy-pass" +weight = 2 +enabled = true +``` + +### Пример 4: смешанные upstream с scopes + +```toml +[[upstreams]] +type = "direct" +weight = 5 +enabled = true +scopes = "" + +[[upstreams]] +type = "socks5" +address = "203.0.113.40:1080" +username = "edge" +password = "edgepass" +weight = 3 +enabled = true +scopes = "premium,me" +``` + +### Пример 5: профиль тюнинга под ME + +```toml +[general] +use_middle_proxy = true +proxy_secret_path = "proxy-secret" +middle_proxy_nat_probe = true +stun_nat_probe_concurrency = 16 +middle_proxy_pool_size = 12 +me_keepalive_enabled = true +me_keepalive_interval_secs = 20 +me_keepalive_jitter_secs = 4 +me_reconnect_max_concurrent_per_dc = 12 +me_reconnect_backoff_base_ms = 300 +me_reconnect_backoff_cap_ms = 10000 +me_reconnect_fast_retry_count = 10 +hardswap = true +me_reinit_every_secs = 600 +me_hardswap_warmup_delay_min_ms = 500 +me_hardswap_warmup_delay_max_ms = 1200 +me_hardswap_warmup_extra_passes = 2 +me_hardswap_warmup_pass_backoff_base_ms = 400 +me_config_stable_snapshots = 3 +me_config_apply_cooldown_secs = 120 +proxy_secret_stable_snapshots = 3 +proxy_secret_rotate_runtime = true +proxy_secret_len_max = 512 +update_every = 300 +me_pool_drain_ttl_secs = 120 +me_pool_min_fresh_ratio = 0.9 +me_reinit_drain_timeout_secs = 180 + +[timeouts] +me_one_retry = 8 +me_one_timeout_ms = 1200 + +[network] +stun_use = true +stun_tcp_fallback = true +stun_servers = [ + "stun1.l.google.com:19302", + 
"stun2.l.google.com:19302" +] +http_ip_detect_urls = [ + "https://api.ipify.org", + "https://ifconfig.me/ip" +] +``` From bf11ebbaa36f601a2cc187b2b4844e773757ade7 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Sat, 28 Feb 2026 02:23:34 +0300 Subject: [PATCH 82/98] Update TUNING.ru.md --- docs/TUNING.ru.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/TUNING.ru.md b/docs/TUNING.ru.md index 48a2b6c..6ea4d69 100644 --- a/docs/TUNING.ru.md +++ b/docs/TUNING.ru.md @@ -96,7 +96,7 @@ | `username` | `socks5` | `Option` | нет | `null` | Логин SOCKS5 auth. | | `password` | `socks5` | `Option` | нет | `null` | Пароль SOCKS5 auth. | -### Runtime-правила (важно) +### Runtime-правила 1. Если `[[upstreams]]` отсутствует, loader добавляет один upstream `direct` по умолчанию. 2. Scope-фильтрация — по точному совпадению токена: From a61882af6e4f7b6f5849043a589436d294f472d5 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Sat, 28 Feb 2026 02:55:21 +0300 Subject: [PATCH 83/98] TLS Fetch on unix-socket --- src/main.rs | 7 ++ src/tls_front/fetcher.rs | 201 ++++++++++++++++++++++++++++++++------- 2 files changed, 171 insertions(+), 37 deletions(-) diff --git a/src/main.rs b/src/main.rs index b065d4e..e759095 100644 --- a/src/main.rs +++ b/src/main.rs @@ -285,17 +285,20 @@ async fn main() -> std::result::Result<(), Box> { .mask_host .clone() .unwrap_or_else(|| config.censorship.tls_domain.clone()); + let mask_unix_sock = config.censorship.mask_unix_sock.clone(); let fetch_timeout = Duration::from_secs(5); let cache_initial = cache.clone(); let domains_initial = tls_domains.clone(); let host_initial = mask_host.clone(); + let unix_sock_initial = mask_unix_sock.clone(); let upstream_initial = upstream_manager.clone(); tokio::spawn(async move { let mut join = tokio::task::JoinSet::new(); for domain in domains_initial { let cache_domain = cache_initial.clone(); let host_domain = 
host_initial.clone(); + let unix_sock_domain = unix_sock_initial.clone(); let upstream_domain = upstream_initial.clone(); join.spawn(async move { match crate::tls_front::fetcher::fetch_real_tls( @@ -305,6 +308,7 @@ async fn main() -> std::result::Result<(), Box> { fetch_timeout, Some(upstream_domain), proxy_protocol, + unix_sock_domain.as_deref(), ) .await { @@ -344,6 +348,7 @@ async fn main() -> std::result::Result<(), Box> { let cache_refresh = cache.clone(); let domains_refresh = tls_domains.clone(); let host_refresh = mask_host.clone(); + let unix_sock_refresh = mask_unix_sock.clone(); let upstream_refresh = upstream_manager.clone(); tokio::spawn(async move { loop { @@ -355,6 +360,7 @@ async fn main() -> std::result::Result<(), Box> { for domain in domains_refresh.clone() { let cache_domain = cache_refresh.clone(); let host_domain = host_refresh.clone(); + let unix_sock_domain = unix_sock_refresh.clone(); let upstream_domain = upstream_refresh.clone(); join.spawn(async move { match crate::tls_front::fetcher::fetch_real_tls( @@ -364,6 +370,7 @@ async fn main() -> std::result::Result<(), Box> { fetch_timeout, Some(upstream_domain), proxy_protocol, + unix_sock_domain.as_deref(), ) .await { diff --git a/src/tls_front/fetcher.rs b/src/tls_front/fetcher.rs index ba80332..1731cdc 100644 --- a/src/tls_front/fetcher.rs +++ b/src/tls_front/fetcher.rs @@ -2,8 +2,10 @@ use std::sync::Arc; use std::time::Duration; use anyhow::{Result, anyhow}; -use tokio::io::{AsyncReadExt, AsyncWriteExt}; +use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt}; use tokio::net::TcpStream; +#[cfg(unix)] +use tokio::net::UnixStream; use tokio::time::timeout; use tokio_rustls::client::TlsStream; use tokio_rustls::TlsConnector; @@ -212,7 +214,10 @@ fn gen_key_share(rng: &SecureRandom) -> [u8; 32] { key } -async fn read_tls_record(stream: &mut TcpStream) -> Result<(u8, Vec)> { +async fn read_tls_record(stream: &mut S) -> Result<(u8, Vec)> +where + S: AsyncRead + Unpin, +{ let mut 
header = [0u8; 5]; stream.read_exact(&mut header).await?; let len = u16::from_be_bytes([header[3], header[4]]) as usize; @@ -345,6 +350,44 @@ async fn connect_with_dns_override( Ok(timeout(connect_timeout, TcpStream::connect((host, port))).await??) } +async fn connect_tcp_with_upstream( + host: &str, + port: u16, + connect_timeout: Duration, + upstream: Option>, +) -> Result { + if let Some(manager) = upstream { + if let Some(addr) = resolve_socket_addr(host, port) { + match manager.connect(addr, None, None).await { + Ok(stream) => return Ok(stream), + Err(e) => { + warn!( + host = %host, + port = port, + error = %e, + "Upstream connect failed, using direct connect" + ); + } + } + } else if let Ok(mut addrs) = tokio::net::lookup_host((host, port)).await { + if let Some(addr) = addrs.find(|a| a.is_ipv4()) { + match manager.connect(addr, None, None).await { + Ok(stream) => return Ok(stream), + Err(e) => { + warn!( + host = %host, + port = port, + error = %e, + "Upstream connect failed, using direct connect" + ); + } + } + } + } + } + connect_with_dns_override(host, port, connect_timeout).await +} + fn encode_tls13_certificate_message(cert_chain_der: &[Vec]) -> Option> { if cert_chain_der.is_empty() { return None; @@ -374,15 +417,15 @@ fn encode_tls13_certificate_message(cert_chain_der: &[Vec]) -> Option( + mut stream: S, sni: &str, connect_timeout: Duration, proxy_protocol: u8, -) -> Result { - let mut stream = connect_with_dns_override(host, port, connect_timeout).await?; - +) -> Result +where + S: AsyncRead + AsyncWrite + Unpin, +{ let rng = SecureRandom::new(); let client_hello = build_client_hello(sni, &rng); timeout(connect_timeout, async { @@ -438,43 +481,61 @@ async fn fetch_via_raw_tls( }) } -async fn fetch_via_rustls( +async fn fetch_via_raw_tls( host: &str, port: u16, sni: &str, connect_timeout: Duration, upstream: Option>, proxy_protocol: u8, + unix_sock: Option<&str>, ) -> Result { - // rustls handshake path for certificate and basic negotiated metadata. 
- let mut stream = if let Some(manager) = upstream { - if let Some(addr) = resolve_socket_addr(host, port) { - match manager.connect(addr, None, None).await { - Ok(s) => s, - Err(e) => { - warn!(sni = %sni, error = %e, "Upstream connect failed, using direct connect"); - connect_with_dns_override(host, port, connect_timeout).await? - } + #[cfg(unix)] + if let Some(sock_path) = unix_sock { + match timeout(connect_timeout, UnixStream::connect(sock_path)).await { + Ok(Ok(stream)) => { + debug!( + sni = %sni, + sock = %sock_path, + "Raw TLS fetch using mask unix socket" + ); + return fetch_via_raw_tls_stream(stream, sni, connect_timeout, 0).await; } - } else if let Ok(mut addrs) = tokio::net::lookup_host((host, port)).await { - if let Some(addr) = addrs.find(|a| a.is_ipv4()) { - match manager.connect(addr, None, None).await { - Ok(s) => s, - Err(e) => { - warn!(sni = %sni, error = %e, "Upstream connect failed, using direct connect"); - connect_with_dns_override(host, port, connect_timeout).await? - } - } - } else { - connect_with_dns_override(host, port, connect_timeout).await? + Ok(Err(e)) => { + warn!( + sni = %sni, + sock = %sock_path, + error = %e, + "Raw TLS unix socket connect failed, falling back to TCP" + ); + } + Err(_) => { + warn!( + sni = %sni, + sock = %sock_path, + "Raw TLS unix socket connect timed out, falling back to TCP" + ); } - } else { - connect_with_dns_override(host, port, connect_timeout).await? } - } else { - connect_with_dns_override(host, port, connect_timeout).await? - }; + } + #[cfg(not(unix))] + let _ = unix_sock; + + let stream = connect_tcp_with_upstream(host, port, connect_timeout, upstream).await?; + fetch_via_raw_tls_stream(stream, sni, connect_timeout, proxy_protocol).await +} + +async fn fetch_via_rustls_stream( + mut stream: S, + host: &str, + sni: &str, + proxy_protocol: u8, +) -> Result +where + S: AsyncRead + AsyncWrite + Unpin, +{ + // rustls handshake path for certificate and basic negotiated metadata. 
if proxy_protocol > 0 { let header = match proxy_protocol { 2 => ProxyProtocolV2Builder::new().build(), @@ -491,7 +552,7 @@ async fn fetch_via_rustls( .or_else(|_| ServerName::try_from(host.to_owned())) .map_err(|_| RustlsError::General("invalid SNI".into()))?; - let tls_stream: TlsStream = connector.connect(server_name, stream).await?; + let tls_stream: TlsStream = connector.connect(server_name, stream).await?; // Extract negotiated parameters and certificates let (_io, session) = tls_stream.get_ref(); @@ -552,6 +613,51 @@ async fn fetch_via_rustls( }) } +async fn fetch_via_rustls( + host: &str, + port: u16, + sni: &str, + connect_timeout: Duration, + upstream: Option>, + proxy_protocol: u8, + unix_sock: Option<&str>, +) -> Result { + #[cfg(unix)] + if let Some(sock_path) = unix_sock { + match timeout(connect_timeout, UnixStream::connect(sock_path)).await { + Ok(Ok(stream)) => { + debug!( + sni = %sni, + sock = %sock_path, + "Rustls fetch using mask unix socket" + ); + return fetch_via_rustls_stream(stream, host, sni, 0).await; + } + Ok(Err(e)) => { + warn!( + sni = %sni, + sock = %sock_path, + error = %e, + "Rustls unix socket connect failed, falling back to TCP" + ); + } + Err(_) => { + warn!( + sni = %sni, + sock = %sock_path, + "Rustls unix socket connect timed out, falling back to TCP" + ); + } + } + } + + #[cfg(not(unix))] + let _ = unix_sock; + + let stream = connect_tcp_with_upstream(host, port, connect_timeout, upstream).await?; + fetch_via_rustls_stream(stream, host, sni, proxy_protocol).await +} + /// Fetch real TLS metadata for the given SNI. 
/// /// Strategy: @@ -565,8 +671,19 @@ pub async fn fetch_real_tls( connect_timeout: Duration, upstream: Option>, proxy_protocol: u8, + unix_sock: Option<&str>, ) -> Result { - let raw_result = match fetch_via_raw_tls(host, port, sni, connect_timeout, proxy_protocol).await { + let raw_result = match fetch_via_raw_tls( + host, + port, + sni, + connect_timeout, + upstream.clone(), + proxy_protocol, + unix_sock, + ) + .await + { Ok(res) => Some(res), Err(e) => { warn!(sni = %sni, error = %e, "Raw TLS fetch failed"); @@ -574,7 +691,17 @@ pub async fn fetch_real_tls( } }; - match fetch_via_rustls(host, port, sni, connect_timeout, upstream, proxy_protocol).await { + match fetch_via_rustls( + host, + port, + sni, + connect_timeout, + upstream, + proxy_protocol, + unix_sock, + ) + .await + { Ok(rustls_result) => { if let Some(mut raw) = raw_result { raw.cert_info = rustls_result.cert_info; From fa2423dadfecda9ea61191835aad5b0f9caa60cb Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Sat, 28 Feb 2026 03:21:22 +0300 Subject: [PATCH 84/98] ME/DC Method Detection fixes Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com> --- src/main.rs | 11 +- src/transport/middle_proxy/mod.rs | 2 +- src/transport/middle_proxy/ping.rs | 158 +++++++++++++++++++++++++++++ src/transport/upstream.rs | 18 +++- 4 files changed, 185 insertions(+), 4 deletions(-) diff --git a/src/main.rs b/src/main.rs index e759095..e985051 100644 --- a/src/main.rs +++ b/src/main.rs @@ -40,6 +40,7 @@ use crate::stats::{ReplayChecker, Stats}; use crate::stream::BufferPool; use crate::transport::middle_proxy::{ MePool, fetch_proxy_config, run_me_ping, MePingFamily, MePingSample, format_sample_line, + format_me_route, }; use crate::transport::{ListenOptions, UpstreamManager, create_listener, find_listener_processes}; use crate::tls_front::TlsFrontCache; @@ -624,7 +625,15 @@ async fn main() -> std::result::Result<(), Box> { } else { info!(" No ME connectivity"); } 
- info!(" via direct"); + let me_route = format_me_route( + &config.upstreams, + &me_results, + prefer_ipv6, + v4_ok, + v6_ok, + ) + .await; + info!(" via {}", me_route); info!("============================================================"); use std::collections::BTreeMap; diff --git a/src/transport/middle_proxy/mod.rs b/src/transport/middle_proxy/mod.rs index 3a4ff16..1072ec8 100644 --- a/src/transport/middle_proxy/mod.rs +++ b/src/transport/middle_proxy/mod.rs @@ -23,7 +23,7 @@ use bytes::Bytes; pub use health::me_health_monitor; #[allow(unused_imports)] -pub use ping::{run_me_ping, format_sample_line, MePingReport, MePingSample, MePingFamily}; +pub use ping::{run_me_ping, format_sample_line, format_me_route, MePingReport, MePingSample, MePingFamily}; pub use pool::MePool; #[allow(unused_imports)] pub use pool_nat::{stun_probe, detect_public_ip}; diff --git a/src/transport/middle_proxy/ping.rs b/src/transport/middle_proxy/ping.rs index aae11e6..e90d98f 100644 --- a/src/transport/middle_proxy/ping.rs +++ b/src/transport/middle_proxy/ping.rs @@ -2,6 +2,9 @@ use std::collections::HashMap; use std::net::{IpAddr, SocketAddr}; use std::sync::Arc; +use tokio::net::UdpSocket; + +use crate::config::{UpstreamConfig, UpstreamType}; use crate::crypto::SecureRandom; use crate::error::ProxyError; @@ -50,6 +53,161 @@ pub fn format_sample_line(sample: &MePingSample) -> String { } } +fn format_direct_with_config( + interface: &Option, + bind_addresses: &Option>, +) -> Option { + let mut direct_parts: Vec = Vec::new(); + if let Some(dev) = interface.as_deref().filter(|v| !v.is_empty()) { + direct_parts.push(format!("dev={dev}")); + } + if let Some(src) = bind_addresses.as_ref().filter(|v| !v.is_empty()) { + direct_parts.push(format!("src={}", src.join(","))); + } + if direct_parts.is_empty() { + None + } else { + Some(format!("direct {}", direct_parts.join(" "))) + } +} + +fn pick_target_for_family(reports: &[MePingReport], family: MePingFamily) -> Option { + 
reports.iter().find_map(|report| { + if report.family != family { + return None; + } + report + .samples + .iter() + .find(|s| s.error.is_none() && s.handshake_ms.is_some()) + .map(|s| s.addr) + }) +} + +#[cfg(unix)] +fn detect_interface_for_ip(ip: IpAddr) -> Option { + use nix::ifaddrs::getifaddrs; + + if let Ok(addrs) = getifaddrs() { + for iface in addrs { + if let Some(address) = iface.address { + if let Some(v4) = address.as_sockaddr_in() { + if IpAddr::V4(v4.ip()) == ip { + return Some(iface.interface_name); + } + } else if let Some(v6) = address.as_sockaddr_in6() { + if IpAddr::V6(v6.ip()) == ip { + return Some(iface.interface_name); + } + } + } + } + } + None +} + +#[cfg(not(unix))] +fn detect_interface_for_ip(_ip: IpAddr) -> Option { + None +} + +async fn detect_direct_route_details( + reports: &[MePingReport], + prefer_ipv6: bool, + v4_ok: bool, + v6_ok: bool, +) -> Option { + let target_addr = if prefer_ipv6 && v6_ok { + pick_target_for_family(reports, MePingFamily::V6) + .or_else(|| pick_target_for_family(reports, MePingFamily::V4)) + } else if v4_ok { + pick_target_for_family(reports, MePingFamily::V4) + .or_else(|| pick_target_for_family(reports, MePingFamily::V6)) + } else { + pick_target_for_family(reports, MePingFamily::V6) + .or_else(|| pick_target_for_family(reports, MePingFamily::V4)) + }?; + + let local_ip = if target_addr.is_ipv4() { + let sock = UdpSocket::bind("0.0.0.0:0").await.ok()?; + sock.connect(target_addr).await.ok()?; + sock.local_addr().ok().map(|a| a.ip()) + } else { + let sock = UdpSocket::bind("[::]:0").await.ok()?; + sock.connect(target_addr).await.ok()?; + sock.local_addr().ok().map(|a| a.ip()) + }; + + let mut parts = Vec::new(); + if let Some(ip) = local_ip { + if let Some(dev) = detect_interface_for_ip(ip) { + parts.push(format!("dev={dev}")); + } + parts.push(format!("src={ip}")); + } + + if parts.is_empty() { + None + } else { + Some(format!("direct {}", parts.join(" "))) + } +} + +pub async fn format_me_route( + 
upstreams: &[UpstreamConfig], + reports: &[MePingReport], + prefer_ipv6: bool, + v4_ok: bool, + v6_ok: bool, +) -> String { + let enabled_upstreams: Vec<_> = upstreams.iter().filter(|u| u.enabled).collect(); + if enabled_upstreams.is_empty() { + return detect_direct_route_details(reports, prefer_ipv6, v4_ok, v6_ok) + .await + .unwrap_or_else(|| "direct".to_string()); + } + + if enabled_upstreams.len() == 1 { + return match &enabled_upstreams[0].upstream_type { + UpstreamType::Direct { + interface, + bind_addresses, + } => { + if let Some(route) = format_direct_with_config(interface, bind_addresses) { + route + } else { + detect_direct_route_details(reports, prefer_ipv6, v4_ok, v6_ok) + .await + .unwrap_or_else(|| "direct".to_string()) + } + } + UpstreamType::Socks4 { address, .. } => format!("socks4://{address}"), + UpstreamType::Socks5 { address, .. } => format!("socks5://{address}"), + }; + } + + let has_direct = enabled_upstreams + .iter() + .any(|u| matches!(u.upstream_type, UpstreamType::Direct { .. })); + let has_socks4 = enabled_upstreams + .iter() + .any(|u| matches!(u.upstream_type, UpstreamType::Socks4 { .. })); + let has_socks5 = enabled_upstreams + .iter() + .any(|u| matches!(u.upstream_type, UpstreamType::Socks5 { .. })); + let mut kinds = Vec::new(); + if has_direct { + kinds.push("direct"); + } + if has_socks4 { + kinds.push("socks4"); + } + if has_socks5 { + kinds.push("socks5"); + } + format!("mixed upstreams ({})", kinds.join(", ")) +} + #[cfg(test)] mod tests { use super::*; diff --git a/src/transport/upstream.rs b/src/transport/upstream.rs index edcf476..5ab198c 100644 --- a/src/transport/upstream.rs +++ b/src/transport/upstream.rs @@ -707,8 +707,22 @@ impl UpstreamManager { for (upstream_idx, upstream_config, bind_rr) in &upstreams { let upstream_name = match &upstream_config.upstream_type { - UpstreamType::Direct { interface, .. 
} => { - format!("direct{}", interface.as_ref().map(|i| format!(" ({})", i)).unwrap_or_default()) + UpstreamType::Direct { + interface, + bind_addresses, + } => { + let mut direct_parts = Vec::new(); + if let Some(dev) = interface.as_deref().filter(|v| !v.is_empty()) { + direct_parts.push(format!("dev={dev}")); + } + if let Some(src) = bind_addresses.as_ref().filter(|v| !v.is_empty()) { + direct_parts.push(format!("src={}", src.join(","))); + } + if direct_parts.is_empty() { + "direct".to_string() + } else { + format!("direct {}", direct_parts.join(" ")) + } } UpstreamType::Socks4 { address, .. } => format!("socks4://{}", address), UpstreamType::Socks5 { address, .. } => format!("socks5://{}", address), From 8b39a4ef6d924acd35935f9b4c3d1849f6d9d546 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Sat, 28 Feb 2026 13:18:31 +0300 Subject: [PATCH 85/98] Statistics on ME + Dynamic backpressure + KDF with SOCKS Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com> --- src/config/defaults.rs | 12 + src/config/hot_reload.rs | 52 ++- src/config/load.rs | 20 + src/config/types.rs | 117 ++++++ src/main.rs | 27 ++ src/metrics.rs | 482 ++++++++++++++++++++---- src/stats/mod.rs | 292 ++++++++++++-- src/stats/telemetry.rs | 29 ++ src/transport/middle_proxy/handshake.rs | 37 +- src/transport/middle_proxy/ping.rs | 44 +++ src/transport/middle_proxy/pool.rs | 37 +- src/transport/middle_proxy/reader.rs | 18 +- src/transport/middle_proxy/registry.rs | 72 +++- 13 files changed, 1108 insertions(+), 131 deletions(-) create mode 100644 src/stats/telemetry.rs diff --git a/src/config/defaults.rs b/src/config/defaults.rs index d82f8ed..dbc251c 100644 --- a/src/config/defaults.rs +++ b/src/config/defaults.rs @@ -170,6 +170,18 @@ pub(crate) fn default_desync_all_full() -> bool { false } +pub(crate) fn default_me_route_backpressure_base_timeout_ms() -> u64 { + 25 +} + +pub(crate) fn default_me_route_backpressure_high_timeout_ms() -> u64 
{ + 120 +} + +pub(crate) fn default_me_route_backpressure_high_watermark_pct() -> u8 { + 80 +} + pub(crate) fn default_beobachten_minutes() -> u64 { 10 } diff --git a/src/config/hot_reload.rs b/src/config/hot_reload.rs index acc64cd..579a9cb 100644 --- a/src/config/hot_reload.rs +++ b/src/config/hot_reload.rs @@ -16,6 +16,7 @@ //! | `general` | `me_pool_drain_ttl_secs` | Applied on next ME map update | //! | `general` | `me_pool_min_fresh_ratio` | Applied on next ME map update | //! | `general` | `me_reinit_drain_timeout_secs`| Applied on next ME map update | +//! | `general` | `telemetry` / `me_*_policy` | Applied immediately | //! | `network` | `dns_overrides` | Applied immediately | //! | `access` | All user/quota fields | Effective immediately | //! @@ -30,7 +31,7 @@ use notify::{EventKind, RecursiveMode, Watcher, recommended_watcher}; use tokio::sync::{mpsc, watch}; use tracing::{error, info, warn}; -use crate::config::LogLevel; +use crate::config::{LogLevel, MeSocksKdfPolicy, MeTelemetryLevel}; use super::load::ProxyConfig; // ── Hot fields ──────────────────────────────────────────────────────────────── @@ -52,6 +53,13 @@ pub struct HotFields { pub me_keepalive_interval_secs: u64, pub me_keepalive_jitter_secs: u64, pub me_keepalive_payload_random: bool, + pub telemetry_core_enabled: bool, + pub telemetry_user_enabled: bool, + pub telemetry_me_level: MeTelemetryLevel, + pub me_socks_kdf_policy: MeSocksKdfPolicy, + pub me_route_backpressure_base_timeout_ms: u64, + pub me_route_backpressure_high_timeout_ms: u64, + pub me_route_backpressure_high_watermark_pct: u8, pub access: crate::config::AccessConfig, } @@ -72,6 +80,13 @@ impl HotFields { me_keepalive_interval_secs: cfg.general.me_keepalive_interval_secs, me_keepalive_jitter_secs: cfg.general.me_keepalive_jitter_secs, me_keepalive_payload_random: cfg.general.me_keepalive_payload_random, + telemetry_core_enabled: cfg.general.telemetry.core_enabled, + telemetry_user_enabled: cfg.general.telemetry.user_enabled, 
+ telemetry_me_level: cfg.general.telemetry.me_level, + me_socks_kdf_policy: cfg.general.me_socks_kdf_policy, + me_route_backpressure_base_timeout_ms: cfg.general.me_route_backpressure_base_timeout_ms, + me_route_backpressure_high_timeout_ms: cfg.general.me_route_backpressure_high_timeout_ms, + me_route_backpressure_high_watermark_pct: cfg.general.me_route_backpressure_high_watermark_pct, access: cfg.access.clone(), } } @@ -262,6 +277,41 @@ fn log_changes( ); } + if old_hot.telemetry_core_enabled != new_hot.telemetry_core_enabled + || old_hot.telemetry_user_enabled != new_hot.telemetry_user_enabled + || old_hot.telemetry_me_level != new_hot.telemetry_me_level + { + info!( + "config reload: telemetry: core_enabled={} user_enabled={} me_level={}", + new_hot.telemetry_core_enabled, + new_hot.telemetry_user_enabled, + new_hot.telemetry_me_level, + ); + } + + if old_hot.me_socks_kdf_policy != new_hot.me_socks_kdf_policy { + info!( + "config reload: me_socks_kdf_policy: {:?} → {:?}", + old_hot.me_socks_kdf_policy, + new_hot.me_socks_kdf_policy, + ); + } + + if old_hot.me_route_backpressure_base_timeout_ms + != new_hot.me_route_backpressure_base_timeout_ms + || old_hot.me_route_backpressure_high_timeout_ms + != new_hot.me_route_backpressure_high_timeout_ms + || old_hot.me_route_backpressure_high_watermark_pct + != new_hot.me_route_backpressure_high_watermark_pct + { + info!( + "config reload: me_route_backpressure: base={}ms high={}ms watermark={}%", + new_hot.me_route_backpressure_base_timeout_ms, + new_hot.me_route_backpressure_high_timeout_ms, + new_hot.me_route_backpressure_high_watermark_pct, + ); + } + if old_hot.access.users != new_hot.access.users { let mut added: Vec<&String> = new_hot.access.users.keys() .filter(|u| !old_hot.access.users.contains_key(*u)) diff --git a/src/config/load.rs b/src/config/load.rs index c1bbdef..7c578a3 100644 --- a/src/config/load.rs +++ b/src/config/load.rs @@ -311,6 +311,26 @@ impl ProxyConfig { )); } + if 
config.general.me_route_backpressure_base_timeout_ms == 0 { + return Err(ProxyError::Config( + "general.me_route_backpressure_base_timeout_ms must be > 0".to_string(), + )); + } + + if config.general.me_route_backpressure_high_timeout_ms + < config.general.me_route_backpressure_base_timeout_ms + { + return Err(ProxyError::Config( + "general.me_route_backpressure_high_timeout_ms must be >= general.me_route_backpressure_base_timeout_ms".to_string(), + )); + } + + if !(1..=100).contains(&config.general.me_route_backpressure_high_watermark_pct) { + return Err(ProxyError::Config( + "general.me_route_backpressure_high_watermark_pct must be within [1, 100]".to_string(), + )); + } + if config.general.effective_me_pool_force_close_secs() > 0 && config.general.effective_me_pool_force_close_secs() < config.general.me_pool_drain_ttl_secs diff --git a/src/config/types.rs b/src/config/types.rs index 7d9f13a..902d816 100644 --- a/src/config/types.rs +++ b/src/config/types.rs @@ -59,6 +59,98 @@ impl std::fmt::Display for LogLevel { } } +/// Middle-End telemetry verbosity level. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)] +#[serde(rename_all = "lowercase")] +pub enum MeTelemetryLevel { + #[default] + Normal, + Silent, + Debug, +} + +impl MeTelemetryLevel { + pub fn as_u8(self) -> u8 { + match self { + MeTelemetryLevel::Silent => 0, + MeTelemetryLevel::Normal => 1, + MeTelemetryLevel::Debug => 2, + } + } + + pub fn from_u8(raw: u8) -> Self { + match raw { + 0 => MeTelemetryLevel::Silent, + 2 => MeTelemetryLevel::Debug, + _ => MeTelemetryLevel::Normal, + } + } + + pub fn allows_normal(self) -> bool { + !matches!(self, MeTelemetryLevel::Silent) + } + + pub fn allows_debug(self) -> bool { + matches!(self, MeTelemetryLevel::Debug) + } +} + +impl std::fmt::Display for MeTelemetryLevel { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + MeTelemetryLevel::Silent => write!(f, "silent"), + MeTelemetryLevel::Normal => write!(f, "normal"), + MeTelemetryLevel::Debug => write!(f, "debug"), + } + } +} + +/// Middle-End SOCKS KDF fallback policy. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, Default)] +#[serde(rename_all = "lowercase")] +pub enum MeSocksKdfPolicy { + #[default] + Strict, + Compat, +} + +impl MeSocksKdfPolicy { + pub fn as_u8(self) -> u8 { + match self { + MeSocksKdfPolicy::Strict => 0, + MeSocksKdfPolicy::Compat => 1, + } + } + + pub fn from_u8(raw: u8) -> Self { + match raw { + 1 => MeSocksKdfPolicy::Compat, + _ => MeSocksKdfPolicy::Strict, + } + } +} + +/// Telemetry controls for hot-path counters and ME diagnostics. 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct TelemetryConfig { + #[serde(default = "default_true")] + pub core_enabled: bool, + #[serde(default = "default_true")] + pub user_enabled: bool, + #[serde(default)] + pub me_level: MeTelemetryLevel, +} + +impl Default for TelemetryConfig { + fn default() -> Self { + Self { + core_enabled: default_true(), + user_enabled: default_true(), + me_level: MeTelemetryLevel::Normal, + } + } +} + // ============= Sub-Configs ============= #[derive(Debug, Clone, Serialize, Deserialize)] @@ -288,6 +380,26 @@ pub struct GeneralConfig { #[serde(default)] pub disable_colors: bool, + /// Runtime telemetry controls for counters/metrics in hot paths. + #[serde(default)] + pub telemetry: TelemetryConfig, + + /// SOCKS-bound KDF policy for Middle-End handshake. + #[serde(default)] + pub me_socks_kdf_policy: MeSocksKdfPolicy, + + /// Base backpressure timeout in milliseconds for ME route channel send. + #[serde(default = "default_me_route_backpressure_base_timeout_ms")] + pub me_route_backpressure_base_timeout_ms: u64, + + /// High backpressure timeout in milliseconds when queue occupancy is above watermark. + #[serde(default = "default_me_route_backpressure_high_timeout_ms")] + pub me_route_backpressure_high_timeout_ms: u64, + + /// Queue occupancy percent threshold for high backpressure timeout. + #[serde(default = "default_me_route_backpressure_high_watermark_pct")] + pub me_route_backpressure_high_watermark_pct: u8, + /// [general.links] — proxy link generation overrides. 
#[serde(default)] pub links: LinksConfig, @@ -414,6 +526,11 @@ impl Default for GeneralConfig { unknown_dc_log_path: default_unknown_dc_log_path(), log_level: LogLevel::Normal, disable_colors: false, + telemetry: TelemetryConfig::default(), + me_socks_kdf_policy: MeSocksKdfPolicy::Strict, + me_route_backpressure_base_timeout_ms: default_me_route_backpressure_base_timeout_ms(), + me_route_backpressure_high_timeout_ms: default_me_route_backpressure_high_timeout_ms(), + me_route_backpressure_high_watermark_pct: default_me_route_backpressure_high_watermark_pct(), links: LinksConfig::default(), crypto_pending_buffer: default_crypto_pending_buffer(), max_client_frame: default_max_client_frame(), diff --git a/src/main.rs b/src/main.rs index e985051..4d4d3f5 100644 --- a/src/main.rs +++ b/src/main.rs @@ -36,6 +36,7 @@ use crate::ip_tracker::UserIpTracker; use crate::network::probe::{decide_network_capabilities, log_probe_result, run_probe}; use crate::proxy::ClientHandler; use crate::stats::beobachten::BeobachtenStore; +use crate::stats::telemetry::TelemetryPolicy; use crate::stats::{ReplayChecker, Stats}; use crate::stream::BufferPool; use crate::transport::middle_proxy::{ @@ -406,6 +407,7 @@ async fn main() -> std::result::Result<(), Box> { let prefer_ipv6 = decision.prefer_ipv6(); let mut use_middle_proxy = config.general.use_middle_proxy && (decision.ipv4_me || decision.ipv6_me); let stats = Arc::new(Stats::new()); + stats.apply_telemetry_policy(TelemetryPolicy::from_config(&config.general.telemetry)); let beobachten = Arc::new(BeobachtenStore::new()); let rng = Arc::new(SecureRandom::new()); @@ -539,6 +541,10 @@ async fn main() -> std::result::Result<(), Box> { config.general.me_hardswap_warmup_delay_max_ms, config.general.me_hardswap_warmup_extra_passes, config.general.me_hardswap_warmup_pass_backoff_base_ms, + config.general.me_socks_kdf_policy, + config.general.me_route_backpressure_base_timeout_ms, + config.general.me_route_backpressure_high_timeout_ms, + 
config.general.me_route_backpressure_high_watermark_pct, ); let pool_size = config.general.middle_proxy_pool_size.max(1); @@ -794,6 +800,27 @@ async fn main() -> std::result::Result<(), Box> { detected_ip_v6, ); + let stats_policy = stats.clone(); + let mut config_rx_policy = config_rx.clone(); + let me_pool_policy = me_pool.clone(); + tokio::spawn(async move { + loop { + if config_rx_policy.changed().await.is_err() { + break; + } + let cfg = config_rx_policy.borrow_and_update().clone(); + stats_policy.apply_telemetry_policy(TelemetryPolicy::from_config(&cfg.general.telemetry)); + if let Some(pool) = &me_pool_policy { + pool.update_runtime_transport_policy( + cfg.general.me_socks_kdf_policy, + cfg.general.me_route_backpressure_base_timeout_ms, + cfg.general.me_route_backpressure_high_timeout_ms, + cfg.general.me_route_backpressure_high_watermark_pct, + ); + } + } + }); + let beobachten_writer = beobachten.clone(); let config_rx_beobachten = config_rx.clone(); tokio::spawn(async move { diff --git a/src/metrics.rs b/src/metrics.rs index 63b337b..35f29ca 100644 --- a/src/metrics.rs +++ b/src/metrics.rs @@ -118,120 +118,394 @@ fn render_beobachten(beobachten: &BeobachtenStore, config: &ProxyConfig) -> Stri async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIpTracker) -> String { use std::fmt::Write; let mut out = String::with_capacity(4096); + let telemetry = stats.telemetry_policy(); + let core_enabled = telemetry.core_enabled; + let user_enabled = telemetry.user_enabled; + let me_allows_normal = telemetry.me_level.allows_normal(); + let me_allows_debug = telemetry.me_level.allows_debug(); let _ = writeln!(out, "# HELP telemt_uptime_seconds Proxy uptime"); let _ = writeln!(out, "# TYPE telemt_uptime_seconds gauge"); let _ = writeln!(out, "telemt_uptime_seconds {:.1}", stats.uptime_secs()); + let _ = writeln!(out, "# HELP telemt_telemetry_core_enabled Runtime core telemetry switch"); + let _ = writeln!(out, "# TYPE 
telemt_telemetry_core_enabled gauge"); + let _ = writeln!( + out, + "telemt_telemetry_core_enabled {}", + if core_enabled { 1 } else { 0 } + ); + + let _ = writeln!(out, "# HELP telemt_telemetry_user_enabled Runtime per-user telemetry switch"); + let _ = writeln!(out, "# TYPE telemt_telemetry_user_enabled gauge"); + let _ = writeln!( + out, + "telemt_telemetry_user_enabled {}", + if user_enabled { 1 } else { 0 } + ); + + let _ = writeln!(out, "# HELP telemt_telemetry_me_level Runtime ME telemetry level flag"); + let _ = writeln!(out, "# TYPE telemt_telemetry_me_level gauge"); + let _ = writeln!( + out, + "telemt_telemetry_me_level{{level=\"silent\"}} {}", + if matches!(telemetry.me_level, crate::config::MeTelemetryLevel::Silent) { + 1 + } else { + 0 + } + ); + let _ = writeln!( + out, + "telemt_telemetry_me_level{{level=\"normal\"}} {}", + if matches!(telemetry.me_level, crate::config::MeTelemetryLevel::Normal) { + 1 + } else { + 0 + } + ); + let _ = writeln!( + out, + "telemt_telemetry_me_level{{level=\"debug\"}} {}", + if matches!(telemetry.me_level, crate::config::MeTelemetryLevel::Debug) { + 1 + } else { + 0 + } + ); + let _ = writeln!(out, "# HELP telemt_connections_total Total accepted connections"); let _ = writeln!(out, "# TYPE telemt_connections_total counter"); - let _ = writeln!(out, "telemt_connections_total {}", stats.get_connects_all()); + let _ = writeln!( + out, + "telemt_connections_total {}", + if core_enabled { stats.get_connects_all() } else { 0 } + ); let _ = writeln!(out, "# HELP telemt_connections_bad_total Bad/rejected connections"); let _ = writeln!(out, "# TYPE telemt_connections_bad_total counter"); - let _ = writeln!(out, "telemt_connections_bad_total {}", stats.get_connects_bad()); + let _ = writeln!( + out, + "telemt_connections_bad_total {}", + if core_enabled { stats.get_connects_bad() } else { 0 } + ); let _ = writeln!(out, "# HELP telemt_handshake_timeouts_total Handshake timeouts"); let _ = writeln!(out, "# TYPE 
telemt_handshake_timeouts_total counter"); - let _ = writeln!(out, "telemt_handshake_timeouts_total {}", stats.get_handshake_timeouts()); + let _ = writeln!( + out, + "telemt_handshake_timeouts_total {}", + if core_enabled { + stats.get_handshake_timeouts() + } else { + 0 + } + ); let _ = writeln!(out, "# HELP telemt_me_keepalive_sent_total ME keepalive frames sent"); let _ = writeln!(out, "# TYPE telemt_me_keepalive_sent_total counter"); - let _ = writeln!(out, "telemt_me_keepalive_sent_total {}", stats.get_me_keepalive_sent()); + let _ = writeln!( + out, + "telemt_me_keepalive_sent_total {}", + if me_allows_debug { + stats.get_me_keepalive_sent() + } else { + 0 + } + ); let _ = writeln!(out, "# HELP telemt_me_keepalive_failed_total ME keepalive send failures"); let _ = writeln!(out, "# TYPE telemt_me_keepalive_failed_total counter"); - let _ = writeln!(out, "telemt_me_keepalive_failed_total {}", stats.get_me_keepalive_failed()); + let _ = writeln!( + out, + "telemt_me_keepalive_failed_total {}", + if me_allows_normal { + stats.get_me_keepalive_failed() + } else { + 0 + } + ); let _ = writeln!(out, "# HELP telemt_me_keepalive_pong_total ME keepalive pong replies"); let _ = writeln!(out, "# TYPE telemt_me_keepalive_pong_total counter"); - let _ = writeln!(out, "telemt_me_keepalive_pong_total {}", stats.get_me_keepalive_pong()); + let _ = writeln!( + out, + "telemt_me_keepalive_pong_total {}", + if me_allows_debug { + stats.get_me_keepalive_pong() + } else { + 0 + } + ); let _ = writeln!(out, "# HELP telemt_me_keepalive_timeout_total ME keepalive ping timeouts"); let _ = writeln!(out, "# TYPE telemt_me_keepalive_timeout_total counter"); - let _ = writeln!(out, "telemt_me_keepalive_timeout_total {}", stats.get_me_keepalive_timeout()); + let _ = writeln!( + out, + "telemt_me_keepalive_timeout_total {}", + if me_allows_normal { + stats.get_me_keepalive_timeout() + } else { + 0 + } + ); let _ = writeln!(out, "# HELP telemt_me_reconnect_attempts_total ME reconnect 
attempts"); let _ = writeln!(out, "# TYPE telemt_me_reconnect_attempts_total counter"); - let _ = writeln!(out, "telemt_me_reconnect_attempts_total {}", stats.get_me_reconnect_attempts()); + let _ = writeln!( + out, + "telemt_me_reconnect_attempts_total {}", + if me_allows_normal { + stats.get_me_reconnect_attempts() + } else { + 0 + } + ); let _ = writeln!(out, "# HELP telemt_me_reconnect_success_total ME reconnect successes"); let _ = writeln!(out, "# TYPE telemt_me_reconnect_success_total counter"); - let _ = writeln!(out, "telemt_me_reconnect_success_total {}", stats.get_me_reconnect_success()); + let _ = writeln!( + out, + "telemt_me_reconnect_success_total {}", + if me_allows_normal { + stats.get_me_reconnect_success() + } else { + 0 + } + ); let _ = writeln!(out, "# HELP telemt_me_crc_mismatch_total ME CRC mismatches"); let _ = writeln!(out, "# TYPE telemt_me_crc_mismatch_total counter"); - let _ = writeln!(out, "telemt_me_crc_mismatch_total {}", stats.get_me_crc_mismatch()); + let _ = writeln!( + out, + "telemt_me_crc_mismatch_total {}", + if me_allows_normal { + stats.get_me_crc_mismatch() + } else { + 0 + } + ); let _ = writeln!(out, "# HELP telemt_me_seq_mismatch_total ME sequence mismatches"); let _ = writeln!(out, "# TYPE telemt_me_seq_mismatch_total counter"); - let _ = writeln!(out, "telemt_me_seq_mismatch_total {}", stats.get_me_seq_mismatch()); + let _ = writeln!( + out, + "telemt_me_seq_mismatch_total {}", + if me_allows_normal { + stats.get_me_seq_mismatch() + } else { + 0 + } + ); let _ = writeln!(out, "# HELP telemt_me_route_drop_no_conn_total ME route drops: no conn"); let _ = writeln!(out, "# TYPE telemt_me_route_drop_no_conn_total counter"); - let _ = writeln!(out, "telemt_me_route_drop_no_conn_total {}", stats.get_me_route_drop_no_conn()); + let _ = writeln!( + out, + "telemt_me_route_drop_no_conn_total {}", + if me_allows_normal { + stats.get_me_route_drop_no_conn() + } else { + 0 + } + ); let _ = writeln!(out, "# HELP 
telemt_me_route_drop_channel_closed_total ME route drops: channel closed"); let _ = writeln!(out, "# TYPE telemt_me_route_drop_channel_closed_total counter"); - let _ = writeln!(out, "telemt_me_route_drop_channel_closed_total {}", stats.get_me_route_drop_channel_closed()); + let _ = writeln!( + out, + "telemt_me_route_drop_channel_closed_total {}", + if me_allows_normal { + stats.get_me_route_drop_channel_closed() + } else { + 0 + } + ); let _ = writeln!(out, "# HELP telemt_me_route_drop_queue_full_total ME route drops: queue full"); let _ = writeln!(out, "# TYPE telemt_me_route_drop_queue_full_total counter"); - let _ = writeln!(out, "telemt_me_route_drop_queue_full_total {}", stats.get_me_route_drop_queue_full()); + let _ = writeln!( + out, + "telemt_me_route_drop_queue_full_total {}", + if me_allows_normal { + stats.get_me_route_drop_queue_full() + } else { + 0 + } + ); + + let _ = writeln!( + out, + "# HELP telemt_me_route_drop_queue_full_profile_total ME route drops: queue full by adaptive profile" + ); + let _ = writeln!( + out, + "# TYPE telemt_me_route_drop_queue_full_profile_total counter" + ); + let _ = writeln!( + out, + "telemt_me_route_drop_queue_full_profile_total{{profile=\"base\"}} {}", + if me_allows_normal { + stats.get_me_route_drop_queue_full_base() + } else { + 0 + } + ); + let _ = writeln!( + out, + "telemt_me_route_drop_queue_full_profile_total{{profile=\"high\"}} {}", + if me_allows_normal { + stats.get_me_route_drop_queue_full_high() + } else { + 0 + } + ); + + let _ = writeln!( + out, + "# HELP telemt_me_socks_kdf_policy_total SOCKS KDF policy outcomes" + ); + let _ = writeln!(out, "# TYPE telemt_me_socks_kdf_policy_total counter"); + let _ = writeln!( + out, + "telemt_me_socks_kdf_policy_total{{policy=\"strict\",outcome=\"reject\"}} {}", + if me_allows_normal { + stats.get_me_socks_kdf_strict_reject() + } else { + 0 + } + ); + let _ = writeln!( + out, + "telemt_me_socks_kdf_policy_total{{policy=\"compat\",outcome=\"fallback\"}} {}", + if 
me_allows_debug { + stats.get_me_socks_kdf_compat_fallback() + } else { + 0 + } + ); let _ = writeln!(out, "# HELP telemt_secure_padding_invalid_total Invalid secure frame lengths"); let _ = writeln!(out, "# TYPE telemt_secure_padding_invalid_total counter"); - let _ = writeln!(out, "telemt_secure_padding_invalid_total {}", stats.get_secure_padding_invalid()); + let _ = writeln!( + out, + "telemt_secure_padding_invalid_total {}", + if me_allows_normal { + stats.get_secure_padding_invalid() + } else { + 0 + } + ); let _ = writeln!(out, "# HELP telemt_desync_total Total crypto-desync detections"); let _ = writeln!(out, "# TYPE telemt_desync_total counter"); - let _ = writeln!(out, "telemt_desync_total {}", stats.get_desync_total()); + let _ = writeln!( + out, + "telemt_desync_total {}", + if me_allows_normal { + stats.get_desync_total() + } else { + 0 + } + ); let _ = writeln!(out, "# HELP telemt_desync_full_logged_total Full forensic desync logs emitted"); let _ = writeln!(out, "# TYPE telemt_desync_full_logged_total counter"); - let _ = writeln!(out, "telemt_desync_full_logged_total {}", stats.get_desync_full_logged()); + let _ = writeln!( + out, + "telemt_desync_full_logged_total {}", + if me_allows_normal { + stats.get_desync_full_logged() + } else { + 0 + } + ); let _ = writeln!(out, "# HELP telemt_desync_suppressed_total Suppressed desync forensic events"); let _ = writeln!(out, "# TYPE telemt_desync_suppressed_total counter"); - let _ = writeln!(out, "telemt_desync_suppressed_total {}", stats.get_desync_suppressed()); + let _ = writeln!( + out, + "telemt_desync_suppressed_total {}", + if me_allows_normal { + stats.get_desync_suppressed() + } else { + 0 + } + ); let _ = writeln!(out, "# HELP telemt_desync_frames_bucket_total Desync count by frames_ok bucket"); let _ = writeln!(out, "# TYPE telemt_desync_frames_bucket_total counter"); let _ = writeln!( out, "telemt_desync_frames_bucket_total{{bucket=\"0\"}} {}", - stats.get_desync_frames_bucket_0() + if 
me_allows_normal { + stats.get_desync_frames_bucket_0() + } else { + 0 + } ); let _ = writeln!( out, "telemt_desync_frames_bucket_total{{bucket=\"1_2\"}} {}", - stats.get_desync_frames_bucket_1_2() + if me_allows_normal { + stats.get_desync_frames_bucket_1_2() + } else { + 0 + } ); let _ = writeln!( out, "telemt_desync_frames_bucket_total{{bucket=\"3_10\"}} {}", - stats.get_desync_frames_bucket_3_10() + if me_allows_normal { + stats.get_desync_frames_bucket_3_10() + } else { + 0 + } ); let _ = writeln!( out, "telemt_desync_frames_bucket_total{{bucket=\"gt_10\"}} {}", - stats.get_desync_frames_bucket_gt_10() + if me_allows_normal { + stats.get_desync_frames_bucket_gt_10() + } else { + 0 + } ); let _ = writeln!(out, "# HELP telemt_pool_swap_total Successful ME pool swaps"); let _ = writeln!(out, "# TYPE telemt_pool_swap_total counter"); - let _ = writeln!(out, "telemt_pool_swap_total {}", stats.get_pool_swap_total()); + let _ = writeln!( + out, + "telemt_pool_swap_total {}", + if me_allows_debug { + stats.get_pool_swap_total() + } else { + 0 + } + ); let _ = writeln!(out, "# HELP telemt_pool_drain_active Active draining ME writers"); let _ = writeln!(out, "# TYPE telemt_pool_drain_active gauge"); - let _ = writeln!(out, "telemt_pool_drain_active {}", stats.get_pool_drain_active()); + let _ = writeln!( + out, + "telemt_pool_drain_active {}", + if me_allows_debug { + stats.get_pool_drain_active() + } else { + 0 + } + ); let _ = writeln!(out, "# HELP telemt_pool_force_close_total Forced close events for draining writers"); let _ = writeln!(out, "# TYPE telemt_pool_force_close_total counter"); let _ = writeln!( out, "telemt_pool_force_close_total {}", - stats.get_pool_force_close_total() + if me_allows_normal { + stats.get_pool_force_close_total() + } else { + 0 + } ); let _ = writeln!(out, "# HELP telemt_pool_stale_pick_total Stale writer fallback picks for new binds"); @@ -239,7 +513,11 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp 
let _ = writeln!( out, "telemt_pool_stale_pick_total {}", - stats.get_pool_stale_pick_total() + if me_allows_normal { + stats.get_pool_stale_pick_total() + } else { + 0 + } ); let _ = writeln!(out, "# HELP telemt_me_writer_removed_total Total ME writer removals"); @@ -247,7 +525,11 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp let _ = writeln!( out, "telemt_me_writer_removed_total {}", - stats.get_me_writer_removed_total() + if me_allows_debug { + stats.get_me_writer_removed_total() + } else { + 0 + } ); let _ = writeln!( @@ -258,7 +540,11 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp let _ = writeln!( out, "telemt_me_writer_removed_unexpected_total {}", - stats.get_me_writer_removed_unexpected_total() + if me_allows_normal { + stats.get_me_writer_removed_unexpected_total() + } else { + 0 + } ); let _ = writeln!(out, "# HELP telemt_me_refill_triggered_total Immediate ME refill runs started"); @@ -266,7 +552,11 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp let _ = writeln!( out, "telemt_me_refill_triggered_total {}", - stats.get_me_refill_triggered_total() + if me_allows_debug { + stats.get_me_refill_triggered_total() + } else { + 0 + } ); let _ = writeln!( @@ -277,7 +567,11 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp let _ = writeln!( out, "telemt_me_refill_skipped_inflight_total {}", - stats.get_me_refill_skipped_inflight_total() + if me_allows_debug { + stats.get_me_refill_skipped_inflight_total() + } else { + 0 + } ); let _ = writeln!(out, "# HELP telemt_me_refill_failed_total Immediate ME refill failures"); @@ -285,7 +579,11 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp let _ = writeln!( out, "telemt_me_refill_failed_total {}", - stats.get_me_refill_failed_total() + if me_allows_normal { + stats.get_me_refill_failed_total() + } else { + 0 + } ); let _ = writeln!( @@ -296,7 
+594,11 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp let _ = writeln!( out, "telemt_me_writer_restored_same_endpoint_total {}", - stats.get_me_writer_restored_same_endpoint_total() + if me_allows_normal { + stats.get_me_writer_restored_same_endpoint_total() + } else { + 0 + } ); let _ = writeln!( @@ -307,16 +609,24 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp let _ = writeln!( out, "telemt_me_writer_restored_fallback_total {}", - stats.get_me_writer_restored_fallback_total() + if me_allows_normal { + stats.get_me_writer_restored_fallback_total() + } else { + 0 + } ); - let unresolved_writer_losses = stats - .get_me_writer_removed_unexpected_total() - .saturating_sub( - stats - .get_me_writer_restored_same_endpoint_total() - .saturating_add(stats.get_me_writer_restored_fallback_total()), - ); + let unresolved_writer_losses = if me_allows_normal { + stats + .get_me_writer_removed_unexpected_total() + .saturating_sub( + stats + .get_me_writer_restored_same_endpoint_total() + .saturating_add(stats.get_me_writer_restored_fallback_total()), + ) + } else { + 0 + }; let _ = writeln!( out, "# HELP telemt_me_writer_removed_unexpected_minus_restored_total Unexpected writer removals not yet compensated by restore" @@ -343,51 +653,63 @@ async fn render_metrics(stats: &Stats, config: &ProxyConfig, ip_tracker: &UserIp let _ = writeln!(out, "# TYPE telemt_user_msgs_from_client counter"); let _ = writeln!(out, "# HELP telemt_user_msgs_to_client Per-user messages sent"); let _ = writeln!(out, "# TYPE telemt_user_msgs_to_client counter"); + let _ = writeln!( + out, + "# HELP telemt_telemetry_user_series_suppressed User-labeled metric series suppression flag" + ); + let _ = writeln!(out, "# TYPE telemt_telemetry_user_series_suppressed gauge"); + let _ = writeln!( + out, + "telemt_telemetry_user_series_suppressed {}", + if user_enabled { 0 } else { 1 } + ); - for entry in stats.iter_user_stats() { - let user 
= entry.key(); - let s = entry.value(); - let _ = writeln!(out, "telemt_user_connections_total{{user=\"{}\"}} {}", user, s.connects.load(std::sync::atomic::Ordering::Relaxed)); - let _ = writeln!(out, "telemt_user_connections_current{{user=\"{}\"}} {}", user, s.curr_connects.load(std::sync::atomic::Ordering::Relaxed)); - let _ = writeln!(out, "telemt_user_octets_from_client{{user=\"{}\"}} {}", user, s.octets_from_client.load(std::sync::atomic::Ordering::Relaxed)); - let _ = writeln!(out, "telemt_user_octets_to_client{{user=\"{}\"}} {}", user, s.octets_to_client.load(std::sync::atomic::Ordering::Relaxed)); - let _ = writeln!(out, "telemt_user_msgs_from_client{{user=\"{}\"}} {}", user, s.msgs_from_client.load(std::sync::atomic::Ordering::Relaxed)); - let _ = writeln!(out, "telemt_user_msgs_to_client{{user=\"{}\"}} {}", user, s.msgs_to_client.load(std::sync::atomic::Ordering::Relaxed)); - } + if user_enabled { + for entry in stats.iter_user_stats() { + let user = entry.key(); + let s = entry.value(); + let _ = writeln!(out, "telemt_user_connections_total{{user=\"{}\"}} {}", user, s.connects.load(std::sync::atomic::Ordering::Relaxed)); + let _ = writeln!(out, "telemt_user_connections_current{{user=\"{}\"}} {}", user, s.curr_connects.load(std::sync::atomic::Ordering::Relaxed)); + let _ = writeln!(out, "telemt_user_octets_from_client{{user=\"{}\"}} {}", user, s.octets_from_client.load(std::sync::atomic::Ordering::Relaxed)); + let _ = writeln!(out, "telemt_user_octets_to_client{{user=\"{}\"}} {}", user, s.octets_to_client.load(std::sync::atomic::Ordering::Relaxed)); + let _ = writeln!(out, "telemt_user_msgs_from_client{{user=\"{}\"}} {}", user, s.msgs_from_client.load(std::sync::atomic::Ordering::Relaxed)); + let _ = writeln!(out, "telemt_user_msgs_to_client{{user=\"{}\"}} {}", user, s.msgs_to_client.load(std::sync::atomic::Ordering::Relaxed)); + } - let ip_stats = ip_tracker.get_stats().await; - let ip_counts: HashMap = ip_stats - .into_iter() - .map(|(user, count, _)| 
(user, count)) - .collect(); + let ip_stats = ip_tracker.get_stats().await; + let ip_counts: HashMap = ip_stats + .into_iter() + .map(|(user, count, _)| (user, count)) + .collect(); - let mut unique_users = BTreeSet::new(); - unique_users.extend(config.access.user_max_unique_ips.keys().cloned()); - unique_users.extend(ip_counts.keys().cloned()); + let mut unique_users = BTreeSet::new(); + unique_users.extend(config.access.user_max_unique_ips.keys().cloned()); + unique_users.extend(ip_counts.keys().cloned()); - let _ = writeln!(out, "# HELP telemt_user_unique_ips_current Per-user current number of unique active IPs"); - let _ = writeln!(out, "# TYPE telemt_user_unique_ips_current gauge"); - let _ = writeln!(out, "# HELP telemt_user_unique_ips_limit Per-user configured unique IP limit (0 means unlimited)"); - let _ = writeln!(out, "# TYPE telemt_user_unique_ips_limit gauge"); - let _ = writeln!(out, "# HELP telemt_user_unique_ips_utilization Per-user unique IP usage ratio (0 for unlimited)"); - let _ = writeln!(out, "# TYPE telemt_user_unique_ips_utilization gauge"); + let _ = writeln!(out, "# HELP telemt_user_unique_ips_current Per-user current number of unique active IPs"); + let _ = writeln!(out, "# TYPE telemt_user_unique_ips_current gauge"); + let _ = writeln!(out, "# HELP telemt_user_unique_ips_limit Per-user configured unique IP limit (0 means unlimited)"); + let _ = writeln!(out, "# TYPE telemt_user_unique_ips_limit gauge"); + let _ = writeln!(out, "# HELP telemt_user_unique_ips_utilization Per-user unique IP usage ratio (0 for unlimited)"); + let _ = writeln!(out, "# TYPE telemt_user_unique_ips_utilization gauge"); - for user in unique_users { - let current = ip_counts.get(&user).copied().unwrap_or(0); - let limit = config.access.user_max_unique_ips.get(&user).copied().unwrap_or(0); - let utilization = if limit > 0 { - current as f64 / limit as f64 - } else { - 0.0 - }; - let _ = writeln!(out, "telemt_user_unique_ips_current{{user=\"{}\"}} {}", user, 
current); - let _ = writeln!(out, "telemt_user_unique_ips_limit{{user=\"{}\"}} {}", user, limit); - let _ = writeln!( - out, - "telemt_user_unique_ips_utilization{{user=\"{}\"}} {:.6}", - user, - utilization - ); + for user in unique_users { + let current = ip_counts.get(&user).copied().unwrap_or(0); + let limit = config.access.user_max_unique_ips.get(&user).copied().unwrap_or(0); + let utilization = if limit > 0 { + current as f64 / limit as f64 + } else { + 0.0 + }; + let _ = writeln!(out, "telemt_user_unique_ips_current{{user=\"{}\"}} {}", user, current); + let _ = writeln!(out, "telemt_user_unique_ips_limit{{user=\"{}\"}} {}", user, limit); + let _ = writeln!( + out, + "telemt_user_unique_ips_utilization{{user=\"{}\"}} {:.6}", + user, + utilization + ); + } } out diff --git a/src/stats/mod.rs b/src/stats/mod.rs index 1e32bb7..f5aa2b7 100644 --- a/src/stats/mod.rs +++ b/src/stats/mod.rs @@ -3,8 +3,9 @@ #![allow(dead_code)] pub mod beobachten; +pub mod telemetry; -use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::atomic::{AtomicBool, AtomicU8, AtomicU64, Ordering}; use std::time::{Instant, Duration}; use dashmap::DashMap; use parking_lot::Mutex; @@ -15,6 +16,9 @@ use std::collections::hash_map::DefaultHasher; use std::collections::VecDeque; use tracing::debug; +use crate::config::MeTelemetryLevel; +use self::telemetry::TelemetryPolicy; + // ============= Stats ============= #[derive(Default)] @@ -33,6 +37,10 @@ pub struct Stats { me_route_drop_no_conn: AtomicU64, me_route_drop_channel_closed: AtomicU64, me_route_drop_queue_full: AtomicU64, + me_route_drop_queue_full_base: AtomicU64, + me_route_drop_queue_full_high: AtomicU64, + me_socks_kdf_strict_reject: AtomicU64, + me_socks_kdf_compat_fallback: AtomicU64, secure_padding_invalid: AtomicU64, desync_total: AtomicU64, desync_full_logged: AtomicU64, @@ -52,6 +60,9 @@ pub struct Stats { me_refill_failed_total: AtomicU64, me_writer_restored_same_endpoint_total: AtomicU64, 
me_writer_restored_fallback_total: AtomicU64, + telemetry_core_enabled: AtomicBool, + telemetry_user_enabled: AtomicBool, + telemetry_me_level: AtomicU8, user_stats: DashMap, start_time: parking_lot::RwLock>, } @@ -69,44 +80,167 @@ pub struct UserStats { impl Stats { pub fn new() -> Self { let stats = Self::default(); + stats.apply_telemetry_policy(TelemetryPolicy::default()); *stats.start_time.write() = Some(Instant::now()); stats } - - pub fn increment_connects_all(&self) { self.connects_all.fetch_add(1, Ordering::Relaxed); } - pub fn increment_connects_bad(&self) { self.connects_bad.fetch_add(1, Ordering::Relaxed); } - pub fn increment_handshake_timeouts(&self) { self.handshake_timeouts.fetch_add(1, Ordering::Relaxed); } - pub fn increment_me_keepalive_sent(&self) { self.me_keepalive_sent.fetch_add(1, Ordering::Relaxed); } - pub fn increment_me_keepalive_failed(&self) { self.me_keepalive_failed.fetch_add(1, Ordering::Relaxed); } - pub fn increment_me_keepalive_pong(&self) { self.me_keepalive_pong.fetch_add(1, Ordering::Relaxed); } - pub fn increment_me_keepalive_timeout(&self) { self.me_keepalive_timeout.fetch_add(1, Ordering::Relaxed); } - pub fn increment_me_keepalive_timeout_by(&self, value: u64) { - self.me_keepalive_timeout.fetch_add(value, Ordering::Relaxed); + + fn telemetry_me_level(&self) -> MeTelemetryLevel { + MeTelemetryLevel::from_u8(self.telemetry_me_level.load(Ordering::Relaxed)) + } + + fn telemetry_core_enabled(&self) -> bool { + self.telemetry_core_enabled.load(Ordering::Relaxed) + } + + fn telemetry_user_enabled(&self) -> bool { + self.telemetry_user_enabled.load(Ordering::Relaxed) + } + + fn telemetry_me_allows_normal(&self) -> bool { + self.telemetry_me_level().allows_normal() + } + + fn telemetry_me_allows_debug(&self) -> bool { + self.telemetry_me_level().allows_debug() + } + + pub fn apply_telemetry_policy(&self, policy: TelemetryPolicy) { + self.telemetry_core_enabled + .store(policy.core_enabled, Ordering::Relaxed); + 
self.telemetry_user_enabled + .store(policy.user_enabled, Ordering::Relaxed); + self.telemetry_me_level + .store(policy.me_level.as_u8(), Ordering::Relaxed); + } + + pub fn telemetry_policy(&self) -> TelemetryPolicy { + TelemetryPolicy { + core_enabled: self.telemetry_core_enabled(), + user_enabled: self.telemetry_user_enabled(), + me_level: self.telemetry_me_level(), + } + } + + pub fn increment_connects_all(&self) { + if self.telemetry_core_enabled() { + self.connects_all.fetch_add(1, Ordering::Relaxed); + } + } + pub fn increment_connects_bad(&self) { + if self.telemetry_core_enabled() { + self.connects_bad.fetch_add(1, Ordering::Relaxed); + } + } + pub fn increment_handshake_timeouts(&self) { + if self.telemetry_core_enabled() { + self.handshake_timeouts.fetch_add(1, Ordering::Relaxed); + } + } + pub fn increment_me_keepalive_sent(&self) { + if self.telemetry_me_allows_debug() { + self.me_keepalive_sent.fetch_add(1, Ordering::Relaxed); + } + } + pub fn increment_me_keepalive_failed(&self) { + if self.telemetry_me_allows_normal() { + self.me_keepalive_failed.fetch_add(1, Ordering::Relaxed); + } + } + pub fn increment_me_keepalive_pong(&self) { + if self.telemetry_me_allows_debug() { + self.me_keepalive_pong.fetch_add(1, Ordering::Relaxed); + } + } + pub fn increment_me_keepalive_timeout(&self) { + if self.telemetry_me_allows_normal() { + self.me_keepalive_timeout.fetch_add(1, Ordering::Relaxed); + } + } + pub fn increment_me_keepalive_timeout_by(&self, value: u64) { + if self.telemetry_me_allows_normal() { + self.me_keepalive_timeout.fetch_add(value, Ordering::Relaxed); + } + } + pub fn increment_me_reconnect_attempt(&self) { + if self.telemetry_me_allows_normal() { + self.me_reconnect_attempts.fetch_add(1, Ordering::Relaxed); + } + } + pub fn increment_me_reconnect_success(&self) { + if self.telemetry_me_allows_normal() { + self.me_reconnect_success.fetch_add(1, Ordering::Relaxed); + } + } + pub fn increment_me_crc_mismatch(&self) { + if 
self.telemetry_me_allows_normal() { + self.me_crc_mismatch.fetch_add(1, Ordering::Relaxed); + } + } + pub fn increment_me_seq_mismatch(&self) { + if self.telemetry_me_allows_normal() { + self.me_seq_mismatch.fetch_add(1, Ordering::Relaxed); + } + } + pub fn increment_me_route_drop_no_conn(&self) { + if self.telemetry_me_allows_normal() { + self.me_route_drop_no_conn.fetch_add(1, Ordering::Relaxed); + } } - pub fn increment_me_reconnect_attempt(&self) { self.me_reconnect_attempts.fetch_add(1, Ordering::Relaxed); } - pub fn increment_me_reconnect_success(&self) { self.me_reconnect_success.fetch_add(1, Ordering::Relaxed); } - pub fn increment_me_crc_mismatch(&self) { self.me_crc_mismatch.fetch_add(1, Ordering::Relaxed); } - pub fn increment_me_seq_mismatch(&self) { self.me_seq_mismatch.fetch_add(1, Ordering::Relaxed); } - pub fn increment_me_route_drop_no_conn(&self) { self.me_route_drop_no_conn.fetch_add(1, Ordering::Relaxed); } pub fn increment_me_route_drop_channel_closed(&self) { - self.me_route_drop_channel_closed.fetch_add(1, Ordering::Relaxed); + if self.telemetry_me_allows_normal() { + self.me_route_drop_channel_closed.fetch_add(1, Ordering::Relaxed); + } } pub fn increment_me_route_drop_queue_full(&self) { - self.me_route_drop_queue_full.fetch_add(1, Ordering::Relaxed); + if self.telemetry_me_allows_normal() { + self.me_route_drop_queue_full.fetch_add(1, Ordering::Relaxed); + } + } + pub fn increment_me_route_drop_queue_full_base(&self) { + if self.telemetry_me_allows_normal() { + self.me_route_drop_queue_full_base.fetch_add(1, Ordering::Relaxed); + } + } + pub fn increment_me_route_drop_queue_full_high(&self) { + if self.telemetry_me_allows_normal() { + self.me_route_drop_queue_full_high.fetch_add(1, Ordering::Relaxed); + } + } + pub fn increment_me_socks_kdf_strict_reject(&self) { + if self.telemetry_me_allows_normal() { + self.me_socks_kdf_strict_reject.fetch_add(1, Ordering::Relaxed); + } + } + pub fn increment_me_socks_kdf_compat_fallback(&self) { + if 
self.telemetry_me_allows_debug() { + self.me_socks_kdf_compat_fallback.fetch_add(1, Ordering::Relaxed); + } } pub fn increment_secure_padding_invalid(&self) { - self.secure_padding_invalid.fetch_add(1, Ordering::Relaxed); + if self.telemetry_me_allows_normal() { + self.secure_padding_invalid.fetch_add(1, Ordering::Relaxed); + } } pub fn increment_desync_total(&self) { - self.desync_total.fetch_add(1, Ordering::Relaxed); + if self.telemetry_me_allows_normal() { + self.desync_total.fetch_add(1, Ordering::Relaxed); + } } pub fn increment_desync_full_logged(&self) { - self.desync_full_logged.fetch_add(1, Ordering::Relaxed); + if self.telemetry_me_allows_normal() { + self.desync_full_logged.fetch_add(1, Ordering::Relaxed); + } } pub fn increment_desync_suppressed(&self) { - self.desync_suppressed.fetch_add(1, Ordering::Relaxed); + if self.telemetry_me_allows_normal() { + self.desync_suppressed.fetch_add(1, Ordering::Relaxed); + } } pub fn observe_desync_frames_ok(&self, frames_ok: u64) { + if !self.telemetry_me_allows_normal() { + return; + } match frames_ok { 0 => { self.desync_frames_bucket_0.fetch_add(1, Ordering::Relaxed); @@ -123,12 +257,19 @@ impl Stats { } } pub fn increment_pool_swap_total(&self) { - self.pool_swap_total.fetch_add(1, Ordering::Relaxed); + if self.telemetry_me_allows_debug() { + self.pool_swap_total.fetch_add(1, Ordering::Relaxed); + } } pub fn increment_pool_drain_active(&self) { - self.pool_drain_active.fetch_add(1, Ordering::Relaxed); + if self.telemetry_me_allows_debug() { + self.pool_drain_active.fetch_add(1, Ordering::Relaxed); + } } pub fn decrement_pool_drain_active(&self) { + if !self.telemetry_me_allows_debug() { + return; + } let mut current = self.pool_drain_active.load(Ordering::Relaxed); loop { if current == 0 { @@ -146,31 +287,51 @@ impl Stats { } } pub fn increment_pool_force_close_total(&self) { - self.pool_force_close_total.fetch_add(1, Ordering::Relaxed); + if self.telemetry_me_allows_normal() { + 
self.pool_force_close_total.fetch_add(1, Ordering::Relaxed); + } } pub fn increment_pool_stale_pick_total(&self) { - self.pool_stale_pick_total.fetch_add(1, Ordering::Relaxed); + if self.telemetry_me_allows_normal() { + self.pool_stale_pick_total.fetch_add(1, Ordering::Relaxed); + } } pub fn increment_me_writer_removed_total(&self) { - self.me_writer_removed_total.fetch_add(1, Ordering::Relaxed); + if self.telemetry_me_allows_debug() { + self.me_writer_removed_total.fetch_add(1, Ordering::Relaxed); + } } pub fn increment_me_writer_removed_unexpected_total(&self) { - self.me_writer_removed_unexpected_total.fetch_add(1, Ordering::Relaxed); + if self.telemetry_me_allows_normal() { + self.me_writer_removed_unexpected_total.fetch_add(1, Ordering::Relaxed); + } } pub fn increment_me_refill_triggered_total(&self) { - self.me_refill_triggered_total.fetch_add(1, Ordering::Relaxed); + if self.telemetry_me_allows_debug() { + self.me_refill_triggered_total.fetch_add(1, Ordering::Relaxed); + } } pub fn increment_me_refill_skipped_inflight_total(&self) { - self.me_refill_skipped_inflight_total.fetch_add(1, Ordering::Relaxed); + if self.telemetry_me_allows_debug() { + self.me_refill_skipped_inflight_total.fetch_add(1, Ordering::Relaxed); + } } pub fn increment_me_refill_failed_total(&self) { - self.me_refill_failed_total.fetch_add(1, Ordering::Relaxed); + if self.telemetry_me_allows_normal() { + self.me_refill_failed_total.fetch_add(1, Ordering::Relaxed); + } } pub fn increment_me_writer_restored_same_endpoint_total(&self) { - self.me_writer_restored_same_endpoint_total.fetch_add(1, Ordering::Relaxed); + if self.telemetry_me_allows_normal() { + self.me_writer_restored_same_endpoint_total + .fetch_add(1, Ordering::Relaxed); + } } pub fn increment_me_writer_restored_fallback_total(&self) { - self.me_writer_restored_fallback_total.fetch_add(1, Ordering::Relaxed); + if self.telemetry_me_allows_normal() { + self.me_writer_restored_fallback_total + .fetch_add(1, Ordering::Relaxed); + } 
} pub fn get_connects_all(&self) -> u64 { self.connects_all.load(Ordering::Relaxed) } pub fn get_connects_bad(&self) -> u64 { self.connects_bad.load(Ordering::Relaxed) } @@ -189,6 +350,18 @@ impl Stats { pub fn get_me_route_drop_queue_full(&self) -> u64 { self.me_route_drop_queue_full.load(Ordering::Relaxed) } + pub fn get_me_route_drop_queue_full_base(&self) -> u64 { + self.me_route_drop_queue_full_base.load(Ordering::Relaxed) + } + pub fn get_me_route_drop_queue_full_high(&self) -> u64 { + self.me_route_drop_queue_full_high.load(Ordering::Relaxed) + } + pub fn get_me_socks_kdf_strict_reject(&self) -> u64 { + self.me_socks_kdf_strict_reject.load(Ordering::Relaxed) + } + pub fn get_me_socks_kdf_compat_fallback(&self) -> u64 { + self.me_socks_kdf_compat_fallback.load(Ordering::Relaxed) + } pub fn get_secure_padding_invalid(&self) -> u64 { self.secure_padding_invalid.load(Ordering::Relaxed) } @@ -248,11 +421,17 @@ impl Stats { } pub fn increment_user_connects(&self, user: &str) { + if !self.telemetry_user_enabled() { + return; + } self.user_stats.entry(user.to_string()).or_default() .connects.fetch_add(1, Ordering::Relaxed); } pub fn increment_user_curr_connects(&self, user: &str) { + if !self.telemetry_user_enabled() { + return; + } self.user_stats.entry(user.to_string()).or_default() .curr_connects.fetch_add(1, Ordering::Relaxed); } @@ -285,21 +464,33 @@ impl Stats { } pub fn add_user_octets_from(&self, user: &str, bytes: u64) { + if !self.telemetry_user_enabled() { + return; + } self.user_stats.entry(user.to_string()).or_default() .octets_from_client.fetch_add(bytes, Ordering::Relaxed); } pub fn add_user_octets_to(&self, user: &str, bytes: u64) { + if !self.telemetry_user_enabled() { + return; + } self.user_stats.entry(user.to_string()).or_default() .octets_to_client.fetch_add(bytes, Ordering::Relaxed); } pub fn increment_user_msgs_from(&self, user: &str) { + if !self.telemetry_user_enabled() { + return; + } self.user_stats.entry(user.to_string()).or_default() 
.msgs_from_client.fetch_add(1, Ordering::Relaxed); } pub fn increment_user_msgs_to(&self, user: &str) { + if !self.telemetry_user_enabled() { + return; + } self.user_stats.entry(user.to_string()).or_default() .msgs_to_client.fetch_add(1, Ordering::Relaxed); } @@ -548,6 +739,7 @@ impl ReplayStats { #[cfg(test)] mod tests { use super::*; + use crate::config::MeTelemetryLevel; use std::sync::Arc; #[test] @@ -558,6 +750,40 @@ mod tests { stats.increment_connects_all(); assert_eq!(stats.get_connects_all(), 3); } + + #[test] + fn test_telemetry_policy_disables_core_and_user_counters() { + let stats = Stats::new(); + stats.apply_telemetry_policy(TelemetryPolicy { + core_enabled: false, + user_enabled: false, + me_level: MeTelemetryLevel::Normal, + }); + + stats.increment_connects_all(); + stats.increment_user_connects("alice"); + stats.add_user_octets_from("alice", 1024); + assert_eq!(stats.get_connects_all(), 0); + assert_eq!(stats.get_user_curr_connects("alice"), 0); + assert_eq!(stats.get_user_total_octets("alice"), 0); + } + + #[test] + fn test_telemetry_policy_me_silent_blocks_me_counters() { + let stats = Stats::new(); + stats.apply_telemetry_policy(TelemetryPolicy { + core_enabled: true, + user_enabled: true, + me_level: MeTelemetryLevel::Silent, + }); + + stats.increment_me_crc_mismatch(); + stats.increment_me_keepalive_sent(); + stats.increment_me_route_drop_queue_full(); + assert_eq!(stats.get_me_crc_mismatch(), 0); + assert_eq!(stats.get_me_keepalive_sent(), 0); + assert_eq!(stats.get_me_route_drop_queue_full(), 0); + } #[test] fn test_replay_checker_basic() { diff --git a/src/stats/telemetry.rs b/src/stats/telemetry.rs new file mode 100644 index 0000000..e29fa44 --- /dev/null +++ b/src/stats/telemetry.rs @@ -0,0 +1,29 @@ +use crate::config::{MeTelemetryLevel, TelemetryConfig}; + +/// Runtime telemetry policy used by hot-path counters. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct TelemetryPolicy { + pub core_enabled: bool, + pub user_enabled: bool, + pub me_level: MeTelemetryLevel, +} + +impl Default for TelemetryPolicy { + fn default() -> Self { + Self { + core_enabled: true, + user_enabled: true, + me_level: MeTelemetryLevel::Normal, + } + } +} + +impl TelemetryPolicy { + pub fn from_config(cfg: &TelemetryConfig) -> Self { + Self { + core_enabled: cfg.core_enabled, + user_enabled: cfg.user_enabled, + me_level: cfg.me_level, + } + } +} diff --git a/src/transport/middle_proxy/handshake.rs b/src/transport/middle_proxy/handshake.rs index 988834a..384ecc9 100644 --- a/src/transport/middle_proxy/handshake.rs +++ b/src/transport/middle_proxy/handshake.rs @@ -14,6 +14,7 @@ use tokio::net::{TcpStream, TcpSocket}; use tokio::time::timeout; use tracing::{debug, info, warn}; +use crate::config::MeSocksKdfPolicy; use crate::crypto::{SecureRandom, build_middleproxy_prekey, derive_middleproxy_keys, sha256}; use crate::error::{ProxyError, Result}; use crate::network::IpFamily; @@ -117,6 +118,13 @@ impl MePool { Some(bound) } + fn is_socks_route(upstream_egress: Option) -> bool { + matches!( + upstream_egress.map(|info| info.route_kind), + Some(UpstreamRouteKind::Socks4 | UpstreamRouteKind::Socks5) + ) + } + /// TCP connect with timeout + return RTT in milliseconds. 
pub(crate) async fn connect_tcp( &self, @@ -125,14 +133,7 @@ impl MePool { let start = Instant::now(); let (stream, upstream_egress) = if let Some(upstream) = &self.upstream { let dc_idx = self.resolve_dc_idx_for_endpoint(addr).await; - let (stream, egress) = timeout( - Duration::from_secs(ME_CONNECT_TIMEOUT_SECS), - upstream.connect_with_details(addr, dc_idx, None), - ) - .await - .map_err(|_| ProxyError::ConnectionTimeout { - addr: addr.to_string(), - })??; + let (stream, egress) = upstream.connect_with_details(addr, dc_idx, None).await?; (stream, Some(egress)) } else { let connect_fut = async { @@ -226,9 +227,29 @@ impl MePool { } else { IpFamily::V6 }; + let is_socks_route = Self::is_socks_route(upstream_egress); let socks_bound_addr = Self::select_socks_bound_addr(family, upstream_egress); let reflected = if let Some(bound) = socks_bound_addr { Some(bound) + } else if is_socks_route { + match self.socks_kdf_policy() { + MeSocksKdfPolicy::Strict => { + self.stats.increment_me_socks_kdf_strict_reject(); + return Err(ProxyError::InvalidHandshake( + "SOCKS route returned no valid BND.ADDR for ME KDF (strict policy)" + .to_string(), + )); + } + MeSocksKdfPolicy::Compat => { + self.stats.increment_me_socks_kdf_compat_fallback(); + if self.nat_probe { + let bind_ip = Self::direct_bind_ip_for_stun(family, upstream_egress); + self.maybe_reflect_public_addr(family, bind_ip).await + } else { + None + } + } + } } else if self.nat_probe { let bind_ip = Self::direct_bind_ip_for_stun(family, upstream_egress); self.maybe_reflect_public_addr(family, bind_ip).await diff --git a/src/transport/middle_proxy/ping.rs b/src/transport/middle_proxy/ping.rs index e90d98f..a05e44d 100644 --- a/src/transport/middle_proxy/ping.rs +++ b/src/transport/middle_proxy/ping.rs @@ -7,6 +7,7 @@ use tokio::net::UdpSocket; use crate::config::{UpstreamConfig, UpstreamType}; use crate::crypto::SecureRandom; use crate::error::ProxyError; +use crate::transport::{UpstreamEgressInfo, UpstreamRouteKind}; 
use super::MePool; @@ -20,6 +21,7 @@ pub enum MePingFamily { pub struct MePingSample { pub dc: i32, pub addr: SocketAddr, + pub route: Option, pub connect_ms: Option, pub handshake_ms: Option, pub error: Option, @@ -84,6 +86,34 @@ fn pick_target_for_family(reports: &[MePingReport], family: MePingFamily) -> Opt }) } +fn route_from_egress(egress: Option) -> Option { + let info = egress?; + match info.route_kind { + UpstreamRouteKind::Direct => { + let src_ip = info + .direct_bind_ip + .or_else(|| info.local_addr.map(|addr| addr.ip())); + let ip = src_ip?; + let mut parts = Vec::new(); + if let Some(dev) = detect_interface_for_ip(ip) { + parts.push(format!("dev={dev}")); + } + parts.push(format!("src={ip}")); + Some(format!("direct {}", parts.join(" "))) + } + UpstreamRouteKind::Socks4 => Some( + info.socks_bound_addr + .map(|addr| format!("socks4 bnd={addr}")) + .unwrap_or_else(|| "socks4".to_string()), + ), + UpstreamRouteKind::Socks5 => Some( + info.socks_bound_addr + .map(|addr| format!("socks5 bnd={addr}")) + .unwrap_or_else(|| "socks5".to_string()), + ), + } +} + #[cfg(unix)] fn detect_interface_for_ip(ip: IpAddr) -> Option { use nix::ifaddrs::getifaddrs; @@ -160,6 +190,15 @@ pub async fn format_me_route( v4_ok: bool, v6_ok: bool, ) -> String { + if let Some(route) = reports + .iter() + .flat_map(|report| report.samples.iter()) + .find(|sample| sample.error.is_none() && sample.handshake_ms.is_some()) + .and_then(|sample| sample.route.clone()) + { + return route; + } + let enabled_upstreams: Vec<_> = upstreams.iter().filter(|u| u.enabled).collect(); if enabled_upstreams.is_empty() { return detect_direct_route_details(reports, prefer_ipv6, v4_ok, v6_ok) @@ -222,6 +261,7 @@ mod tests { let s = sample(MePingSample { dc: 4, addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(1, 2, 3, 4)), 8888), + route: Some("direct src=1.2.3.4".to_string()), connect_ms: Some(12.3), handshake_ms: Some(34.7), error: None, @@ -238,6 +278,7 @@ mod tests { let s = sample(MePingSample { dc: 
-5, addr: SocketAddr::new(IpAddr::V4(Ipv4Addr::new(5, 6, 7, 8)), 80), + route: Some("socks5".to_string()), connect_ms: Some(10.0), handshake_ms: None, error: Some("handshake timeout".to_string()), @@ -278,10 +319,12 @@ pub async fn run_me_ping(pool: &Arc, rng: &SecureRandom) -> Vec { connect_ms = Some(conn_rtt); + route = route_from_egress(upstream_egress); match pool.handshake_only(stream, addr, upstream_egress, rng).await { Ok(hs) => { handshake_ms = Some(hs.handshake_ms); @@ -302,6 +345,7 @@ pub async fn run_me_ping(pool: &Arc, rng: &SecureRandom) -> Vec Arc { + let registry = Arc::new(ConnRegistry::new()); + registry.update_route_backpressure_policy( + me_route_backpressure_base_timeout_ms, + me_route_backpressure_high_timeout_ms, + me_route_backpressure_high_watermark_pct, + ); Arc::new(Self { - registry: Arc::new(ConnRegistry::new()), + registry, writers: Arc::new(RwLock::new(Vec::new())), rr: AtomicU64::new(0), decision, @@ -204,6 +216,7 @@ impl MePool { me_hardswap_warmup_pass_backoff_base_ms: AtomicU64::new( me_hardswap_warmup_pass_backoff_base_ms, ), + me_socks_kdf_policy: AtomicU8::new(me_socks_kdf_policy.as_u8()), }) } @@ -260,6 +273,26 @@ impl MePool { &self.registry } + pub fn update_runtime_transport_policy( + &self, + socks_kdf_policy: MeSocksKdfPolicy, + route_backpressure_base_timeout_ms: u64, + route_backpressure_high_timeout_ms: u64, + route_backpressure_high_watermark_pct: u8, + ) { + self.me_socks_kdf_policy + .store(socks_kdf_policy.as_u8(), Ordering::Relaxed); + self.registry.update_route_backpressure_policy( + route_backpressure_base_timeout_ms, + route_backpressure_high_timeout_ms, + route_backpressure_high_watermark_pct, + ); + } + + pub(super) fn socks_kdf_policy(&self) -> MeSocksKdfPolicy { + MeSocksKdfPolicy::from_u8(self.me_socks_kdf_policy.load(Ordering::Relaxed)) + } + pub(super) fn writers_arc(&self) -> Arc>> { self.writers.clone() } diff --git a/src/transport/middle_proxy/reader.rs b/src/transport/middle_proxy/reader.rs index 
95bd0d8..ea0dd75 100644 --- a/src/transport/middle_proxy/reader.rs +++ b/src/transport/middle_proxy/reader.rs @@ -124,7 +124,14 @@ pub(crate) async fn reader_loop( match routed { RouteResult::NoConn => stats.increment_me_route_drop_no_conn(), RouteResult::ChannelClosed => stats.increment_me_route_drop_channel_closed(), - RouteResult::QueueFull => stats.increment_me_route_drop_queue_full(), + RouteResult::QueueFullBase => { + stats.increment_me_route_drop_queue_full(); + stats.increment_me_route_drop_queue_full_base(); + } + RouteResult::QueueFullHigh => { + stats.increment_me_route_drop_queue_full(); + stats.increment_me_route_drop_queue_full_high(); + } RouteResult::Routed => {} } reg.unregister(cid).await; @@ -140,7 +147,14 @@ pub(crate) async fn reader_loop( match routed { RouteResult::NoConn => stats.increment_me_route_drop_no_conn(), RouteResult::ChannelClosed => stats.increment_me_route_drop_channel_closed(), - RouteResult::QueueFull => stats.increment_me_route_drop_queue_full(), + RouteResult::QueueFullBase => { + stats.increment_me_route_drop_queue_full(); + stats.increment_me_route_drop_queue_full_base(); + } + RouteResult::QueueFullHigh => { + stats.increment_me_route_drop_queue_full(); + stats.increment_me_route_drop_queue_full_high(); + } RouteResult::Routed => {} } reg.unregister(cid).await; diff --git a/src/transport/middle_proxy/registry.rs b/src/transport/middle_proxy/registry.rs index 2122ed8..223fa71 100644 --- a/src/transport/middle_proxy/registry.rs +++ b/src/transport/middle_proxy/registry.rs @@ -1,6 +1,6 @@ use std::collections::{HashMap, HashSet}; use std::net::SocketAddr; -use std::sync::atomic::{AtomicU64, Ordering}; +use std::sync::atomic::{AtomicU8, AtomicU64, Ordering}; use std::time::Duration; use tokio::sync::{mpsc, RwLock}; @@ -10,14 +10,17 @@ use super::codec::WriterCommand; use super::MeResponse; const ROUTE_CHANNEL_CAPACITY: usize = 4096; -const ROUTE_BACKPRESSURE_TIMEOUT: Duration = Duration::from_millis(25); +const 
ROUTE_BACKPRESSURE_BASE_TIMEOUT_MS: u64 = 25; +const ROUTE_BACKPRESSURE_HIGH_TIMEOUT_MS: u64 = 120; +const ROUTE_BACKPRESSURE_HIGH_WATERMARK_PCT: u8 = 80; #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum RouteResult { Routed, NoConn, ChannelClosed, - QueueFull, + QueueFullBase, + QueueFullHigh, } #[derive(Clone)] @@ -65,6 +68,9 @@ impl RegistryInner { pub struct ConnRegistry { inner: RwLock, next_id: AtomicU64, + route_backpressure_base_timeout_ms: AtomicU64, + route_backpressure_high_timeout_ms: AtomicU64, + route_backpressure_high_watermark_pct: AtomicU8, } impl ConnRegistry { @@ -73,9 +79,35 @@ impl ConnRegistry { Self { inner: RwLock::new(RegistryInner::new()), next_id: AtomicU64::new(start), + route_backpressure_base_timeout_ms: AtomicU64::new( + ROUTE_BACKPRESSURE_BASE_TIMEOUT_MS, + ), + route_backpressure_high_timeout_ms: AtomicU64::new( + ROUTE_BACKPRESSURE_HIGH_TIMEOUT_MS, + ), + route_backpressure_high_watermark_pct: AtomicU8::new( + ROUTE_BACKPRESSURE_HIGH_WATERMARK_PCT, + ), } } + pub fn update_route_backpressure_policy( + &self, + base_timeout_ms: u64, + high_timeout_ms: u64, + high_watermark_pct: u8, + ) { + let base = base_timeout_ms.max(1); + let high = high_timeout_ms.max(base); + let watermark = high_watermark_pct.clamp(1, 100); + self.route_backpressure_base_timeout_ms + .store(base, Ordering::Relaxed); + self.route_backpressure_high_timeout_ms + .store(high, Ordering::Relaxed); + self.route_backpressure_high_watermark_pct + .store(watermark, Ordering::Relaxed); + } + pub async fn register(&self) -> (u64, mpsc::Receiver) { let id = self.next_id.fetch_add(1, Ordering::Relaxed); let (tx, rx) = mpsc::channel(ROUTE_CHANNEL_CAPACITY); @@ -112,10 +144,40 @@ impl ConnRegistry { Err(TrySendError::Closed(_)) => RouteResult::ChannelClosed, Err(TrySendError::Full(resp)) => { // Absorb short bursts without dropping/closing the session immediately. 
- match tokio::time::timeout(ROUTE_BACKPRESSURE_TIMEOUT, tx.send(resp)).await { + let base_timeout_ms = + self.route_backpressure_base_timeout_ms.load(Ordering::Relaxed).max(1); + let high_timeout_ms = self + .route_backpressure_high_timeout_ms + .load(Ordering::Relaxed) + .max(base_timeout_ms); + let high_watermark_pct = self + .route_backpressure_high_watermark_pct + .load(Ordering::Relaxed) + .clamp(1, 100); + let used = ROUTE_CHANNEL_CAPACITY.saturating_sub(tx.capacity()); + let used_pct = if ROUTE_CHANNEL_CAPACITY == 0 { + 100 + } else { + (used.saturating_mul(100) / ROUTE_CHANNEL_CAPACITY) as u8 + }; + let high_profile = used_pct >= high_watermark_pct; + let timeout_ms = if high_profile { + high_timeout_ms + } else { + base_timeout_ms + }; + let timeout_dur = Duration::from_millis(timeout_ms); + + match tokio::time::timeout(timeout_dur, tx.send(resp)).await { Ok(Ok(())) => RouteResult::Routed, Ok(Err(_)) => RouteResult::ChannelClosed, - Err(_) => RouteResult::QueueFull, + Err(_) => { + if high_profile { + RouteResult::QueueFullHigh + } else { + RouteResult::QueueFullBase + } + } } } } From 6c12af2b941f7d66deb0c2879f5c932ca6b29cf0 Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Sat, 28 Feb 2026 13:38:30 +0300 Subject: [PATCH 86/98] ME Connectivity: socks-url Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com> --- src/transport/middle_proxy/ping.rs | 30 ++++++++++++++++++++---------- src/transport/upstream.rs | 6 ++++++ 2 files changed, 26 insertions(+), 10 deletions(-) diff --git a/src/transport/middle_proxy/ping.rs b/src/transport/middle_proxy/ping.rs index a05e44d..b9f0836 100644 --- a/src/transport/middle_proxy/ping.rs +++ b/src/transport/middle_proxy/ping.rs @@ -101,16 +101,26 @@ fn route_from_egress(egress: Option) -> Option { parts.push(format!("src={ip}")); Some(format!("direct {}", parts.join(" "))) } - UpstreamRouteKind::Socks4 => Some( - info.socks_bound_addr - .map(|addr| format!("socks4 
bnd={addr}")) - .unwrap_or_else(|| "socks4".to_string()), - ), - UpstreamRouteKind::Socks5 => Some( - info.socks_bound_addr - .map(|addr| format!("socks5 bnd={addr}")) - .unwrap_or_else(|| "socks5".to_string()), - ), + UpstreamRouteKind::Socks4 => { + let route = info + .socks_proxy_addr + .map(|addr| format!("socks4://{addr}")) + .unwrap_or_else(|| "socks4://unknown".to_string()); + Some(match info.socks_bound_addr { + Some(bound) => format!("{route} bnd={bound}"), + None => route, + }) + } + UpstreamRouteKind::Socks5 => { + let route = info + .socks_proxy_addr + .map(|addr| format!("socks5://{addr}")) + .unwrap_or_else(|| "socks5://unknown".to_string()); + Some(match info.socks_bound_addr { + Some(bound) => format!("{route} bnd={bound}"), + None => route, + }) + } } } diff --git a/src/transport/upstream.rs b/src/transport/upstream.rs index 5ab198c..1918fdc 100644 --- a/src/transport/upstream.rs +++ b/src/transport/upstream.rs @@ -164,6 +164,7 @@ pub struct UpstreamEgressInfo { pub local_addr: Option, pub direct_bind_ip: Option, pub socks_bound_addr: Option, + pub socks_proxy_addr: Option, } // ============= Upstream Manager ============= @@ -523,6 +524,7 @@ impl UpstreamManager { local_addr, direct_bind_ip: bind_ip, socks_bound_addr: None, + socks_proxy_addr: None, }, )) }, @@ -593,6 +595,7 @@ impl UpstreamManager { } }; let local_addr = stream.local_addr().ok(); + let socks_proxy_addr = stream.peer_addr().ok(); Ok(( stream, UpstreamEgressInfo { @@ -600,6 +603,7 @@ impl UpstreamManager { local_addr, direct_bind_ip: None, socks_bound_addr: Some(bound.addr), + socks_proxy_addr, }, )) }, @@ -672,6 +676,7 @@ impl UpstreamManager { } }; let local_addr = stream.local_addr().ok(); + let socks_proxy_addr = stream.peer_addr().ok(); Ok(( stream, UpstreamEgressInfo { @@ -679,6 +684,7 @@ impl UpstreamManager { local_addr, direct_bind_ip: None, socks_bound_addr: Some(bound.addr), + socks_proxy_addr, }, )) }, From 9afaa28add0df19e40e196bb6e08583ad93e4ed3 Mon Sep 17 00:00:00 
2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Sat, 28 Feb 2026 14:21:09 +0300 Subject: [PATCH 87/98] UpstreamManager: Backoff Retries --- src/config/defaults.rs | 14 +++++ src/config/hot_reload.rs | 8 +++ src/config/load.rs | 76 +++++++++++++++++++++++ src/config/types.rs | 15 +++++ src/main.rs | 7 ++- src/transport/upstream.rs | 127 ++++++++++++++++++++++++++++---------- 6 files changed, 214 insertions(+), 33 deletions(-) diff --git a/src/config/defaults.rs b/src/config/defaults.rs index dbc251c..ab087fd 100644 --- a/src/config/defaults.rs +++ b/src/config/defaults.rs @@ -8,6 +8,8 @@ const DEFAULT_STUN_TCP_FALLBACK: bool = true; const DEFAULT_MIDDLE_PROXY_WARM_STANDBY: usize = 16; const DEFAULT_ME_RECONNECT_MAX_CONCURRENT_PER_DC: u32 = 8; const DEFAULT_ME_RECONNECT_FAST_RETRY_COUNT: u32 = 16; +const DEFAULT_UPSTREAM_CONNECT_RETRY_ATTEMPTS: u32 = 3; +const DEFAULT_UPSTREAM_UNHEALTHY_FAIL_THRESHOLD: u32 = 4; const DEFAULT_LISTEN_ADDR_IPV6: &str = "::"; const DEFAULT_ACCESS_USER: &str = "default"; const DEFAULT_ACCESS_SECRET: &str = "00000000000000000000000000000000"; @@ -158,6 +160,18 @@ pub(crate) fn default_me_reconnect_fast_retry_count() -> u32 { DEFAULT_ME_RECONNECT_FAST_RETRY_COUNT } +pub(crate) fn default_upstream_connect_retry_attempts() -> u32 { + DEFAULT_UPSTREAM_CONNECT_RETRY_ATTEMPTS +} + +pub(crate) fn default_upstream_connect_retry_backoff_ms() -> u64 { + 250 +} + +pub(crate) fn default_upstream_unhealthy_fail_threshold() -> u32 { + DEFAULT_UPSTREAM_UNHEALTHY_FAIL_THRESHOLD +} + pub(crate) fn default_crypto_pending_buffer() -> usize { 256 * 1024 } diff --git a/src/config/hot_reload.rs b/src/config/hot_reload.rs index 579a9cb..eec6b8c 100644 --- a/src/config/hot_reload.rs +++ b/src/config/hot_reload.rs @@ -117,6 +117,14 @@ fn warn_non_hot_changes(old: &ProxyConfig, new: &ProxyConfig) { if old.general.stun_nat_probe_concurrency != new.general.stun_nat_probe_concurrency { warn!("config reload: general.stun_nat_probe_concurrency 
changed; restart required"); } + if old.general.upstream_connect_retry_attempts != new.general.upstream_connect_retry_attempts + || old.general.upstream_connect_retry_backoff_ms + != new.general.upstream_connect_retry_backoff_ms + || old.general.upstream_unhealthy_fail_threshold + != new.general.upstream_unhealthy_fail_threshold + { + warn!("config reload: general.upstream_* changed; restart required"); + } } /// Resolve the public host for link generation — mirrors the logic in main.rs. diff --git a/src/config/load.rs b/src/config/load.rs index 7c578a3..3aafda2 100644 --- a/src/config/load.rs +++ b/src/config/load.rs @@ -237,6 +237,18 @@ impl ProxyConfig { )); } + if config.general.upstream_connect_retry_attempts == 0 { + return Err(ProxyError::Config( + "general.upstream_connect_retry_attempts must be > 0".to_string(), + )); + } + + if config.general.upstream_unhealthy_fail_threshold == 0 { + return Err(ProxyError::Config( + "general.upstream_unhealthy_fail_threshold must be > 0".to_string(), + )); + } + if config.general.me_reinit_every_secs == 0 { return Err(ProxyError::Config( "general.me_reinit_every_secs must be > 0".to_string(), @@ -567,6 +579,18 @@ mod tests { cfg.general.me_reconnect_fast_retry_count, default_me_reconnect_fast_retry_count() ); + assert_eq!( + cfg.general.upstream_connect_retry_attempts, + default_upstream_connect_retry_attempts() + ); + assert_eq!( + cfg.general.upstream_connect_retry_backoff_ms, + default_upstream_connect_retry_backoff_ms() + ); + assert_eq!( + cfg.general.upstream_unhealthy_fail_threshold, + default_upstream_unhealthy_fail_threshold() + ); assert_eq!(cfg.general.update_every, default_update_every()); assert_eq!(cfg.server.listen_addr_ipv4, default_listen_addr_ipv4()); assert_eq!(cfg.server.listen_addr_ipv6, default_listen_addr_ipv6_opt()); @@ -593,6 +617,18 @@ mod tests { general.me_reconnect_fast_retry_count, default_me_reconnect_fast_retry_count() ); + assert_eq!( + general.upstream_connect_retry_attempts, + 
default_upstream_connect_retry_attempts() + ); + assert_eq!( + general.upstream_connect_retry_backoff_ms, + default_upstream_connect_retry_backoff_ms() + ); + assert_eq!( + general.upstream_unhealthy_fail_threshold, + default_upstream_unhealthy_fail_threshold() + ); assert_eq!(general.update_every, default_update_every()); let server = ServerConfig::default(); @@ -765,6 +801,46 @@ mod tests { let _ = std::fs::remove_file(path); } + #[test] + fn upstream_connect_retry_attempts_zero_is_rejected() { + let toml = r#" + [general] + upstream_connect_retry_attempts = 0 + + [censorship] + tls_domain = "example.com" + + [access.users] + user = "00000000000000000000000000000000" + "#; + let dir = std::env::temp_dir(); + let path = dir.join("telemt_upstream_connect_retry_attempts_zero_test.toml"); + std::fs::write(&path, toml).unwrap(); + let err = ProxyConfig::load(&path).unwrap_err().to_string(); + assert!(err.contains("general.upstream_connect_retry_attempts must be > 0")); + let _ = std::fs::remove_file(path); + } + + #[test] + fn upstream_unhealthy_fail_threshold_zero_is_rejected() { + let toml = r#" + [general] + upstream_unhealthy_fail_threshold = 0 + + [censorship] + tls_domain = "example.com" + + [access.users] + user = "00000000000000000000000000000000" + "#; + let dir = std::env::temp_dir(); + let path = dir.join("telemt_upstream_unhealthy_fail_threshold_zero_test.toml"); + std::fs::write(&path, toml).unwrap(); + let err = ProxyConfig::load(&path).unwrap_err().to_string(); + assert!(err.contains("general.upstream_unhealthy_fail_threshold must be > 0")); + let _ = std::fs::remove_file(path); + } + #[test] fn me_hardswap_warmup_defaults_are_set() { let toml = r#" diff --git a/src/config/types.rs b/src/config/types.rs index 902d816..7a3f6e9 100644 --- a/src/config/types.rs +++ b/src/config/types.rs @@ -365,6 +365,18 @@ pub struct GeneralConfig { #[serde(default = "default_me_reconnect_fast_retry_count")] pub me_reconnect_fast_retry_count: u32, + /// Connect attempts 
for the selected upstream before returning error/fallback. + #[serde(default = "default_upstream_connect_retry_attempts")] + pub upstream_connect_retry_attempts: u32, + + /// Delay in milliseconds between upstream connect attempts. + #[serde(default = "default_upstream_connect_retry_backoff_ms")] + pub upstream_connect_retry_backoff_ms: u64, + + /// Consecutive failed requests before upstream is marked unhealthy. + #[serde(default = "default_upstream_unhealthy_fail_threshold")] + pub upstream_unhealthy_fail_threshold: u32, + /// Ignore STUN/interface IP mismatch (keep using Middle Proxy even if NAT detected). #[serde(default)] pub stun_iface_mismatch_ignore: bool, @@ -522,6 +534,9 @@ impl Default for GeneralConfig { me_reconnect_backoff_base_ms: default_reconnect_backoff_base_ms(), me_reconnect_backoff_cap_ms: default_reconnect_backoff_cap_ms(), me_reconnect_fast_retry_count: default_me_reconnect_fast_retry_count(), + upstream_connect_retry_attempts: default_upstream_connect_retry_attempts(), + upstream_connect_retry_backoff_ms: default_upstream_connect_retry_backoff_ms(), + upstream_unhealthy_fail_threshold: default_upstream_unhealthy_fail_threshold(), stun_iface_mismatch_ignore: false, unknown_dc_log_path: default_unknown_dc_log_path(), log_level: LogLevel::Normal, diff --git a/src/main.rs b/src/main.rs index 4d4d3f5..a87dd99 100644 --- a/src/main.rs +++ b/src/main.rs @@ -261,7 +261,12 @@ async fn main() -> std::result::Result<(), Box> { warn!("Using default tls_domain. 
Consider setting a custom domain."); } - let upstream_manager = Arc::new(UpstreamManager::new(config.upstreams.clone())); + let upstream_manager = Arc::new(UpstreamManager::new( + config.upstreams.clone(), + config.general.upstream_connect_retry_attempts, + config.general.upstream_connect_retry_backoff_ms, + config.general.upstream_unhealthy_fail_threshold, + )); let mut tls_domains = Vec::with_capacity(1 + config.censorship.tls_domains.len()); tls_domains.push(config.censorship.tls_domain.clone()); diff --git a/src/transport/upstream.rs b/src/transport/upstream.rs index 1918fdc..8411f5a 100644 --- a/src/transport/upstream.rs +++ b/src/transport/upstream.rs @@ -172,10 +172,18 @@ pub struct UpstreamEgressInfo { #[derive(Clone)] pub struct UpstreamManager { upstreams: Arc>>, + connect_retry_attempts: u32, + connect_retry_backoff: Duration, + unhealthy_fail_threshold: u32, } impl UpstreamManager { - pub fn new(configs: Vec) -> Self { + pub fn new( + configs: Vec, + connect_retry_attempts: u32, + connect_retry_backoff_ms: u64, + unhealthy_fail_threshold: u32, + ) -> Self { let states = configs.into_iter() .filter(|c| c.enabled) .map(UpstreamState::new) @@ -183,6 +191,9 @@ impl UpstreamManager { Self { upstreams: Arc::new(RwLock::new(states)), + connect_retry_attempts: connect_retry_attempts.max(1), + connect_retry_backoff: Duration::from_millis(connect_retry_backoff_ms), + unhealthy_fail_threshold: unhealthy_fail_threshold.max(1), } } @@ -430,43 +441,83 @@ impl UpstreamManager { upstream.selected_scope = s.to_string(); } - let start = Instant::now(); - let bind_rr = { let guard = self.upstreams.read().await; guard.get(idx).map(|u| u.bind_rr.clone()) }; - match self.connect_via_upstream(&upstream, target, bind_rr).await { - Ok((stream, egress)) => { - let rtt_ms = start.elapsed().as_secs_f64() * 1000.0; - let mut guard = self.upstreams.write().await; - if let Some(u) = guard.get_mut(idx) { - if !u.healthy { - debug!(rtt_ms = format!("{:.1}", rtt_ms), "Upstream 
recovered"); - } - u.healthy = true; - u.fails = 0; + let mut last_error: Option = None; + for attempt in 1..=self.connect_retry_attempts { + let start = Instant::now(); + match self + .connect_via_upstream(&upstream, target, bind_rr.clone()) + .await + { + Ok((stream, egress)) => { + let rtt_ms = start.elapsed().as_secs_f64() * 1000.0; + let mut guard = self.upstreams.write().await; + if let Some(u) = guard.get_mut(idx) { + if !u.healthy { + debug!(rtt_ms = format!("{:.1}", rtt_ms), "Upstream recovered"); + } + if attempt > 1 { + debug!( + attempt, + attempts = self.connect_retry_attempts, + rtt_ms = format!("{:.1}", rtt_ms), + "Upstream connect recovered after retry" + ); + } + u.healthy = true; + u.fails = 0; - if let Some(di) = dc_idx.and_then(UpstreamState::dc_array_idx) { - u.dc_latency[di].update(rtt_ms); + if let Some(di) = dc_idx.and_then(UpstreamState::dc_array_idx) { + u.dc_latency[di].update(rtt_ms); + } } + return Ok((stream, egress)); } - Ok((stream, egress)) - }, - Err(e) => { - let mut guard = self.upstreams.write().await; - if let Some(u) = guard.get_mut(idx) { - u.fails += 1; - warn!(fails = u.fails, "Upstream failed: {}", e); - if u.fails > 3 { - u.healthy = false; - warn!("Upstream marked unhealthy"); + Err(e) => { + if attempt < self.connect_retry_attempts { + debug!( + attempt, + attempts = self.connect_retry_attempts, + target = %target, + error = %e, + "Upstream connect attempt failed, retrying" + ); + if !self.connect_retry_backoff.is_zero() { + tokio::time::sleep(self.connect_retry_backoff).await; + } } + last_error = Some(e); } - Err(e) } } + + let error = last_error.unwrap_or_else(|| { + ProxyError::Config("Upstream connect attempts exhausted".to_string()) + }); + + let mut guard = self.upstreams.write().await; + if let Some(u) = guard.get_mut(idx) { + u.fails += 1; + warn!( + fails = u.fails, + attempts = self.connect_retry_attempts, + "Upstream failed after retries: {}", + error + ); + if u.fails >= self.unhealthy_fail_threshold { + 
u.healthy = false; + warn!( + fails = u.fails, + threshold = self.unhealthy_fail_threshold, + "Upstream marked unhealthy" + ); + } + } + Err(error) } async fn connect_via_upstream( @@ -1035,18 +1086,26 @@ impl UpstreamManager { u.fails += 1; debug!(dc = dc_zero_idx + 1, fails = u.fails, "Health check failed (both): {}", e); - if u.fails > 3 { + if u.fails >= self.unhealthy_fail_threshold { u.healthy = false; - warn!("Upstream unhealthy (fails)"); + warn!( + fails = u.fails, + threshold = self.unhealthy_fail_threshold, + "Upstream unhealthy (fails)" + ); } } Err(_) => { u.fails += 1; debug!(dc = dc_zero_idx + 1, fails = u.fails, "Health check timeout (both)"); - if u.fails > 3 { + if u.fails >= self.unhealthy_fail_threshold { u.healthy = false; - warn!("Upstream unhealthy (timeout)"); + warn!( + fails = u.fails, + threshold = self.unhealthy_fail_threshold, + "Upstream unhealthy (timeout)" + ); } } } @@ -1057,9 +1116,13 @@ impl UpstreamManager { let mut guard = self.upstreams.write().await; let u = &mut guard[i]; u.fails += 1; - if u.fails > 3 { + if u.fails >= self.unhealthy_fail_threshold { u.healthy = false; - warn!("Upstream unhealthy (no fallback family)"); + warn!( + fails = u.fails, + threshold = self.unhealthy_fail_threshold, + "Upstream unhealthy (no fallback family)" + ); } u.last_check = std::time::Instant::now(); } From 12e68f805f364e4170ff2d8a41ef0b3e12200c8b Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Sat, 28 Feb 2026 15:51:15 +0300 Subject: [PATCH 88/98] Update Cargo.toml --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 1d135f3..5fce3d6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "telemt" -version = "3.1.2" +version = "3.1.3" edition = "2024" [dependencies] From 9ce26d16cb14e6309120e3c16641326a4995449d Mon Sep 17 00:00:00 2001 From: Dimasssss Date: Sat, 28 Feb 2026 17:04:06 +0300 Subject: [PATCH 89/98] Add files via 
upload --- install.sh | 72 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 72 insertions(+) create mode 100644 install.sh diff --git a/install.sh b/install.sh new file mode 100644 index 0000000..6649ef1 --- /dev/null +++ b/install.sh @@ -0,0 +1,72 @@ +sudo bash -c ' +set -e + +# --- Проверка на существующую установку --- +if systemctl list-unit-files | grep -q telemt.service; then + # --- РЕЖИМ ОБНОВЛЕНИЯ --- + echo "--- Обнаружена существующая установка Telemt. Запускаю обновление... ---" + + echo "[*] Остановка службы telemt..." + systemctl stop telemt || true # Игнорируем ошибку, если служба уже остановлена + + echo "[1/2] Скачивание последней версии Telemt..." + wget -qO- "https://github.com/telemt/telemt/releases/latest/download/telemt-$(uname -m)-linux-$(ldd --version 2>&1 | grep -iq musl && echo musl || echo gnu).tar.gz" | tar -xz + + echo "[1/2] Замена исполняемого файла в /usr/local/bin..." + mv telemt /usr/local/bin/telemt + chmod +x /usr/local/bin/telemt + + echo "[2/2] Запуск службы..." + systemctl start telemt + + echo "--- Обновление Telemt успешно завершено! ---" + echo + echo "Для проверки статуса службы выполните:" + echo " systemctl status telemt" + +else + # --- РЕЖИМ НОВОЙ УСТАНОВКИ --- + echo "--- Начало автоматической установки Telemt ---" + + # Шаг 1: Скачивание и установка бинарного файла + echo "[1/5] Скачивание последней версии Telemt..." + wget -qO- "https://github.com/telemt/telemt/releases/latest/download/telemt-$(uname -m)-linux-$(ldd --version 2>&1 | grep -iq musl && echo musl || echo gnu).tar.gz" | tar -xz + + echo "[1/5] Перемещение исполняемого файла в /usr/local/bin и установка прав..." + mv telemt /usr/local/bin/telemt + chmod +x /usr/local/bin/telemt + + # Шаг 2: Генерация секрета + echo "[2/5] Генерация секретного ключа..." + SECRET=$(openssl rand -hex 16) + + # Шаг 3: Создание файла конфигурации + echo "[3/5] Создание файла конфигурации /etc/telemt.toml..." 
+ printf "# === General Settings ===\n[general]\n[general.modes]\nclassic = false\nsecure = false\ntls = true\n\n# === Anti-Censorship & Masking ===\n[censorship]\n# !!! ВАЖНО: Замените на ваш домен или домен, который вы хотите использовать для маскировки !!!\ntls_domain = \"petrovich.ru\"\n\n[access.users]\nhello = \"%s\"\n" "$SECRET" > /etc/telemt.toml + + # Шаг 4: Создание службы Systemd + echo "[4/5] Создание службы systemd..." + printf "[Unit]\nDescription=Telemt Proxy\nAfter=network.target\n\n[Service]\nType=simple\nExecStart=/usr/local/bin/telemt /etc/telemt.toml\nRestart=on-failure\nRestartSec=5\nLimitNOFILE=65536\n\n[Install]\nWantedBy=multi-user.target\n" > /etc/systemd/system/telemt.service + + # Шаг 5: Запуск службы + echo "[5/5] Перезагрузка systemd, запуск и включение службы telemt..." + systemctl daemon-reload + systemctl start telemt + systemctl enable telemt + + echo "--- Установка и запуск Telemt успешно завершены! ---" + echo + echo "ВАЖНАЯ ИНФОРМАЦИЯ:" + echo "===================" + echo "1. Вам НЕОБХОДИМО отредактировать файл /etc/telemt.toml и заменить '\''petrovich.ru'\'' на другой домен" + echo " с команды: nano /etc/telemt.toml" + echo " После редактирования файла, перезапустите службу командой:" + echo " sudo systemctl restart telemt" + echo + echo "2. Для проверки статуса службы выполните:" + echo " systemctl status telemt" + echo + echo "3. 
Для получения ссылок на подключение выполните команду:" + echo " journalctl -u telemt -n -g '\''links'\'' --no-pager -o cat | tac" +fi +' From ccacf78890c9333a938d375d5e0cadad6ff83a33 Mon Sep 17 00:00:00 2001 From: Dimasssss Date: Sat, 28 Feb 2026 17:17:50 +0300 Subject: [PATCH 90/98] Create QUICK_START_GUIDE.ru.md --- docs/QUICK_START_GUIDE.ru.md | 150 +++++++++++++++++++++++++++++++++++ 1 file changed, 150 insertions(+) create mode 100644 docs/QUICK_START_GUIDE.ru.md diff --git a/docs/QUICK_START_GUIDE.ru.md b/docs/QUICK_START_GUIDE.ru.md new file mode 100644 index 0000000..d057548 --- /dev/null +++ b/docs/QUICK_START_GUIDE.ru.md @@ -0,0 +1,150 @@ +# Telemt через Systemd + +## Установка + +Это программное обеспечение разработано для ОС на базе Debian: помимо Debian, это Ubuntu, Mint, Kali, MX и многие другие Linux + +**1. Скачать** +```bash +wget -qO- "https://github.com/telemt/telemt/releases/latest/download/telemt-$(uname -m)-linux-$(ldd --version 2>&1 | grep -iq musl && echo musl || echo gnu).tar.gz" | tar -xz +``` +**2. Переместить в папку Bin** +```bash +mv telemt /bin +``` +**3. Сделать файл исполняемым** +```bash +chmod +x /bin/telemt +``` + +## Как правильно использовать? + +**Эта инструкция "предполагает", что вы:** +- Авторизовались как пользователь root или выполнил `su -` / `sudo su` +- У вас уже есть исполняемый файл "telemt" в папке /bin. Читайте раздел **[Установка](#установка)** + +--- + +**0. Проверьте порт и сгенерируйте секреты** + +Порт, который вы выбрали для использования, должен отсутствовать в списке: +```bash +netstat -lnp +``` + +Сгенерируйте 16 bytes/32 символа в шестнадцатеричном формате с помощью OpenSSL или другим способом: +```bash +openssl rand -hex 16 +``` +ИЛИ +```bash +xxd -l 16 -p /dev/urandom +``` +ИЛИ +```bash +python3 -c 'import os; print(os.urandom(16).hex())' +``` +Полученный результат сохраняем где-нибудь. Он понадобиться вам дальше! + +--- + +**1. 
Поместите свою конфигурацию в файл /etc/telemt.toml** + +Открываем nano +```bash +nano /etc/telemt.toml +``` +Вставьте свою конфигурацию + +```toml +# === General Settings === +[general] +# ad_tag = "00000000000000000000000000000000" + +[general.modes] +classic = false +secure = false +tls = true + +# === Anti-Censorship & Masking === +[censorship] +tls_domain = "petrovich.ru" + +[access.users] +# format: "username" = "32_hex_chars_secret" +hello = "00000000000000000000000000000000" +``` +> [!WARNING] +> Замените значение параметра hello на значение, которое вы получили в пункте 0. +> Так же замените значение параметра tls_domain на другой сайт. + +--- + +**2. Создайте службу в /etc/systemd/system/telemt.service** + +Открываем nano +```bash +nano /etc/systemd/system/telemt.service +``` + +Вставьте этот модуль Systemd +```bash +[Unit] +Description=Telemt +After=network.target + +[Service] +Type=simple +WorkingDirectory=/bin +ExecStart=/bin/telemt /etc/telemt.toml +Restart=on-failure +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target +``` +Затем нажмите Ctrl+X -> Y -> Enter, чтобы сохранить + + +**3.** Для запуска введите команду `systemctl start telemt` + +**4.** Для получения информации о статусе введите `systemctl status telemt` + +**5.** Для автоматического запуска при запуске системы в введите `systemctl enable telemt` + +**6.** Для получите ссылки введите `journalctl -u telemt -n -g "links" --no-pager -o cat | tac` + +--- + +# Telemt через Docker Compose + +**1. Отредактируйте `config.toml` в корневом каталоге репозитория (как минимум: порт, пользовательские секреты, tls_domain)** +**2. Запустите контейнер:** +```bash +docker compose up -d --build +``` +**3. Проверьте логи:** +```bash +docker compose logs -f telemt +``` +**4. 
Остановите контейнер:** +```bash +docker compose down +``` +> [!NOTE] +> - В `docker-compose.yml` файл `./config.toml` монтируется в `/app/config.toml` (доступно только для чтения) +> - По умолчанию публикуются порты 443:443, а контейнер запускается со сброшенными привилегиями (добавлена только `NET_BIND_SERVICE`) +> - Если вам действительно нужна сеть хоста (обычно это требуется только для некоторых конфигураций IPv6), раскомментируйте `network_mode: host` + +**Запуск в Docker Compose** +```bash +docker build -t telemt:local . +docker run --name telemt --restart unless-stopped \ + -p 443:443 \ + -e RUST_LOG=info \ + -v "$PWD/config.toml:/app/config.toml:ro" \ + --read-only \ + --cap-drop ALL --cap-add NET_BIND_SERVICE \ + --ulimit nofile=65536:65536 \ + telemt:local +``` From 291c22583f9b5b495d02febf232db9785dd8ac95 Mon Sep 17 00:00:00 2001 From: Dimasssss Date: Sat, 28 Feb 2026 17:39:12 +0300 Subject: [PATCH 91/98] Update QUICK_START_GUIDE.ru.md --- docs/QUICK_START_GUIDE.ru.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/QUICK_START_GUIDE.ru.md b/docs/QUICK_START_GUIDE.ru.md index d057548..fe4bd91 100644 --- a/docs/QUICK_START_GUIDE.ru.md +++ b/docs/QUICK_START_GUIDE.ru.md @@ -74,6 +74,8 @@ tls_domain = "petrovich.ru" # format: "username" = "32_hex_chars_secret" hello = "00000000000000000000000000000000" ``` +Затем нажмите Ctrl+X -> Y -> Enter, чтобы сохранить + > [!WARNING] > Замените значение параметра hello на значение, которое вы получили в пункте 0. > Так же замените значение параметра tls_domain на другой сайт. 
From 036f0e156931b1fdb146f084fee3ce36dcba7841 Mon Sep 17 00:00:00 2001 From: Dimasssss Date: Sat, 28 Feb 2026 17:46:11 +0300 Subject: [PATCH 92/98] Add files via upload --- docs/QUICK_START_GUIDE.en.md | 152 +++++++++++++++++++++++++++++++++++ 1 file changed, 152 insertions(+) create mode 100644 docs/QUICK_START_GUIDE.en.md diff --git a/docs/QUICK_START_GUIDE.en.md b/docs/QUICK_START_GUIDE.en.md new file mode 100644 index 0000000..fd4e735 --- /dev/null +++ b/docs/QUICK_START_GUIDE.en.md @@ -0,0 +1,152 @@ +# Telemt via Systemd + +## Installation + +This software is designed for Debian-based OS: in addition to Debian, these are Ubuntu, Mint, Kali, MX and many other Linux + +**1. Download** +```bash +wget -qO- "https://github.com/telemt/telemt/releases/latest/download/telemt-$(uname -m)-linux-$(ldd --version 2>&1 | grep -iq musl && echo musl || echo gnu).tar.gz" | tar -xz +``` +**2. Move to the Bin folder** +```bash +mv telemt /bin +``` +**3. Make the file executable** +```bash +chmod +x /bin/telemt +``` + +## How to use? + +**This guide "assumes" that you:** +- logged in as root or executed `su -` / `sudo su` +- Already have the "telemt" executable file in the /bin folder. Read the **[Installation](#Installation)** section. + +--- + +**0. Check port and generate secrets** + +The port you have selected for use should be MISSING from the list, when: +```bash +netstat -lnp +``` + +Generate 16 bytes/32 characters HEX with OpenSSL or another way: +```bash +openssl rand -hex 16 +``` +OR +```bash +xxd -l 16 -p /dev/urandom +``` +OR +```bash +python3 -c 'import os; print(os.urandom(16).hex())' +``` +Save the obtained result somewhere. You will need it later! + +--- + +**1. 
Place your config to /etc/telemt.toml** + +Open nano +```bash +nano /etc/telemt.toml +``` +paste your config + +```toml +# === General Settings === +[general] +# ad_tag = "00000000000000000000000000000000" + +[general.modes] +classic = false +secure = false +tls = true + +# === Anti-Censorship & Masking === +[censorship] +tls_domain = "petrovich.ru" + +[access.users] +# format: "username" = "32_hex_chars_secret" +hello = "00000000000000000000000000000000" +``` +then Ctrl+X -> Y -> Enter to save + +> [!WARNING] +> Replace the value of the hello parameter with the value you obtained in step 0. +> Replace the value of the tls_domain parameter with another website. + +--- + +**2. Create service on /etc/systemd/system/telemt.service** + +Open nano +```bash +nano /etc/systemd/system/telemt.service +``` + +paste this Systemd Module +```bash +[Unit] +Description=Telemt +After=network.target + +[Service] +Type=simple +WorkingDirectory=/bin +ExecStart=/bin/telemt /etc/telemt.toml +Restart=on-failure +LimitNOFILE=65536 + +[Install] +WantedBy=multi-user.target +``` +then Ctrl+X -> Y -> Enter to save + + +**3.** To start it, enter the command `systemctl start telemt` + +**4.** To get status information, enter `systemctl status telemt` + +**5.** For automatic startup at system boot, enter `systemctl enable telemt` + +**6.** To get the links, enter `journalctl -u telemt -n -g "links" --no-pager -o cat | tac` + +--- + +# Telemt via Docker Compose + +**1. Edit `config.toml` in repo root (at least: port, users secrets, tls_domain)** +**2. Start container:** +```bash +docker compose up -d --build +``` +**3. Check logs:** +```bash +docker compose logs -f telemt +``` +**4. 
Stop:** +```bash +docker compose down +``` +> [!NOTE] +> - `docker-compose.yml` maps `./config.toml` to `/app/config.toml` (read-only) +> - By default it publishes `443:443` and runs with dropped capabilities (only `NET_BIND_SERVICE` is added) +> - If you really need host networking (usually only for some IPv6 setups) uncomment `network_mode: host` + +**Run without Compose** +```bash +docker build -t telemt:local . +docker run --name telemt --restart unless-stopped \ + -p 443:443 \ + -e RUST_LOG=info \ + -v "$PWD/config.toml:/app/config.toml:ro" \ + --read-only \ + --cap-drop ALL --cap-add NET_BIND_SERVICE \ + --ulimit nofile=65536:65536 \ + telemt:local +``` From 685bfafe74f92cdd95a5fbb5770cf7e702f4be67 Mon Sep 17 00:00:00 2001 From: Pavel Frolov Date: Sat, 28 Feb 2026 19:02:00 +0300 Subject: [PATCH 93/98] Update install.sh MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Попытался привести к единообразию текст. --- install.sh | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/install.sh b/install.sh index 6649ef1..09b160b 100644 --- a/install.sh +++ b/install.sh @@ -59,11 +59,12 @@ else echo "ВАЖНАЯ ИНФОРМАЦИЯ:" echo "===================" echo "1. Вам НЕОБХОДИМО отредактировать файл /etc/telemt.toml и заменить '\''petrovich.ru'\'' на другой домен" - echo " с команды: nano /etc/telemt.toml" - echo " После редактирования файла, перезапустите службу командой:" + echo " с помощью команды:" + echo " nano /etc/telemt.toml" + echo " После редактирования файла перезапустите службу командой:" echo " sudo systemctl restart telemt" echo - echo "2. Для проверки статуса службы выполните:" + echo "2. Для проверки статуса службы выполните команду:" echo " systemctl status telemt" echo echo "3. 
Для получения ссылок на подключение выполните команду:" From cf7e2ebf4b59f1e44084bb7cc23960109a8ed655 Mon Sep 17 00:00:00 2001 From: An0nX <80145620+An0nX@users.noreply.github.com> Date: Sat, 28 Feb 2026 21:36:56 +0300 Subject: [PATCH 94/98] refactor: rewrite telemt config as self-documenting deployment reference - Reorganize all sections with clear visual block separators - Move inline comments to dedicated lines above each parameter - Add Quick Start guide in the file header explaining 7-step deployment - Add Modes of Operation explanation (Direct vs Middle-Proxy) - Group related parameters under labeled subsections with separators - Expand every comment to full plain-English explanation - Remove all inline comments to prevent TOML parser edge cases - Tune anti-censorship defaults for maximum DPI resistance: fast_mode_min_tls_record=1400, server_hello_delay=50-150ms, tls_new_session_tickets=2, tls_full_cert_ttl_secs=0, tls_emulation=true, desync_all_full=true, beobachten_minutes=30, me_reinit_every_secs=600 --- config.full.toml | 676 ++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 584 insertions(+), 92 deletions(-) diff --git a/config.full.toml b/config.full.toml index ac55167..8f8adff 100644 --- a/config.full.toml +++ b/config.full.toml @@ -1,176 +1,669 @@ -# Telemt full config with default values. -# Examples are kept in comments after '#'. +# ============================================================================== +# +# TELEMT — Advanced Rust-based Telegram MTProto Proxy +# Full Configuration Reference +# +# This file is both a working config and a complete documentation. +# Every parameter is explained. Read it top to bottom before deploying. +# +# Quick Start: +# 1. Set [server].port to your desired port (443 recommended) +# 2. Generate a secret: openssl rand -hex 16 +# 3. Put it in [access.users] under a name you choose +# 4. Set [censorship].tls_domain to a popular unblocked HTTPS site +# 5. 
Set your public IP in [general].middle_proxy_nat_ip +# and [general.links].public_host +# 6. Set announce IP in [[server.listeners]] +# 7. Run Telemt. It prints a tg:// link. Send it to your users. +# +# Modes of Operation: +# Direct Mode (use_middle_proxy = false) +# Connects straight to Telegram DCs via TCP. Simple, fast, low overhead. +# No ad_tag support. No CDN DC support (203, etc). +# +# Middle-Proxy Mode (use_middle_proxy = true) +# Connects to Telegram Middle-End servers via RPC protocol. +# Required for ad_tag monetization and CDN support. +# Requires proxy_secret_path and a valid public IP. +# +# ============================================================================== -# Top-level legacy field. -show_link = [] # example: "*" or ["alice", "bob"] -# default_dc = 2 # example: default DC for unmapped non-standard DCs + +# ============================================================================== +# LEGACY TOP-LEVEL FIELDS +# ============================================================================== + +# Deprecated. Use [general.links].show instead. +# Accepts "*" for all users, or an array like ["alice", "bob"]. +show_link = [] + +# Fallback Datacenter index (1-5) when a client requests an unknown DC ID. +# DC 2 is Amsterdam (Europe), closest for most CIS users. +default_dc = 2 + + +# ============================================================================== +# GENERAL SETTINGS +# ============================================================================== [general] + +# ------------------------------------------------------------------------------ +# Core Protocol +# ------------------------------------------------------------------------------ + +# Coalesce the MTProto handshake and first data payload into a single TCP packet. +# Significantly reduces connection latency. No reason to disable. 
fast_mode = true -use_middle_proxy = false -# ad_tag = "00000000000000000000000000000000" # example -# proxy_secret_path = "proxy-secret" # example custom path -# middle_proxy_nat_ip = "203.0.113.10" # example public NAT IP override + +# How the proxy connects to Telegram servers. +# false = Direct TCP to Telegram DCs (simple, low overhead) +# true = Middle-End RPC protocol (required for ad_tag and CDN DCs) +use_middle_proxy = true + +# 32-char hex Ad-Tag from @MTProxybot for sponsored channel injection. +# Only works when use_middle_proxy = true. +# Obtain yours: message @MTProxybot on Telegram, register your proxy. +# ad_tag = "00000000000000000000000000000000" + +# ------------------------------------------------------------------------------ +# Middle-End Authentication +# ------------------------------------------------------------------------------ + +# Path to the Telegram infrastructure AES key file. +# Auto-downloaded from https://core.telegram.org/getProxySecret on first run. +# This key authenticates your proxy with Middle-End servers. +proxy_secret_path = "proxy-secret" + +# ------------------------------------------------------------------------------ +# Public IP Configuration (Critical for Middle-Proxy Mode) +# ------------------------------------------------------------------------------ + +# Your server's PUBLIC IPv4 address. +# Middle-End servers need this for the cryptographic Key Derivation Function. +# If your server has a direct public IP, set it here. +# If behind NAT (AWS, Docker, etc.), this MUST be your external IP. +# If omitted, Telemt uses STUN to auto-detect (see middle_proxy_nat_probe). +# middle_proxy_nat_ip = "203.0.113.10" + +# Auto-detect public IP via STUN servers defined in [network]. +# Set to false if you hardcoded middle_proxy_nat_ip above. +# Set to true if you want automatic detection. 
middle_proxy_nat_probe = true -# middle_proxy_nat_stun = "stun.l.google.com:19302" # example -# middle_proxy_nat_stun_servers = [] # example: ["stun1.l.google.com:19302", "stun2.l.google.com:19302"] + +# ------------------------------------------------------------------------------ +# Middle-End Connection Pool +# ------------------------------------------------------------------------------ + +# Number of persistent multiplexed RPC connections to ME servers. +# All client traffic is routed through these "fat pipes". +# 8 handles thousands of concurrent users comfortably. middle_proxy_pool_size = 8 + +# Legacy field. Connections kept initialized but idle as warm standby. middle_proxy_warm_standby = 16 + +# ------------------------------------------------------------------------------ +# Middle-End Keepalive +# Telegram ME servers aggressively kill idle TCP connections. +# These settings send periodic RPC_PING frames to keep pipes alive. +# ------------------------------------------------------------------------------ + me_keepalive_enabled = true + +# Base interval between pings in seconds. me_keepalive_interval_secs = 25 + +# Random jitter added to interval to prevent all connections pinging simultaneously. me_keepalive_jitter_secs = 5 + +# Randomize ping payload bytes to prevent DPI from fingerprinting ping patterns. me_keepalive_payload_random = true + +# ------------------------------------------------------------------------------ +# Client-Side Limits +# ------------------------------------------------------------------------------ + +# Max buffered ciphertext per client (bytes) when upstream is slow. +# Acts as backpressure to prevent memory exhaustion. 256KB is safe. crypto_pending_buffer = 262144 + +# Maximum single MTProto frame size from client. 16MB is protocol standard. 
max_client_frame = 16777216 -desync_all_full = false + +# ------------------------------------------------------------------------------ +# Crypto Desynchronization Logging +# Desync errors usually mean DPI/GFW is tampering with connections. +# ------------------------------------------------------------------------------ + +# true = full forensics (trace ID, IP hash, hex dumps) for EVERY desync event +# false = deduplicated logging, one entry per time window (prevents log spam) +# Set true if you are actively debugging DPI interference. +desync_all_full = true + +# ------------------------------------------------------------------------------ +# Beobachten — Built-in Honeypot / Active Probe Tracker +# Tracks IPs that fail handshakes or behave like TLS scanners. +# Output file can be fed into fail2ban or iptables for auto-blocking. +# ------------------------------------------------------------------------------ + beobachten = true -beobachten_minutes = 10 + +# How long (minutes) to remember a suspicious IP before expiring it. +beobachten_minutes = 30 + +# How often (seconds) to flush tracker state to disk. beobachten_flush_secs = 15 + +# File path for the tracker output. beobachten_file = "cache/beobachten.txt" + +# ------------------------------------------------------------------------------ +# Hardswap — Zero-Downtime ME Pool Rotation +# When Telegram updates ME server IPs, Hardswap creates a completely new pool, +# waits until it is fully ready, migrates traffic, then kills the old pool. +# Users experience zero interruption. +# ------------------------------------------------------------------------------ + hardswap = true + +# ------------------------------------------------------------------------------ +# ME Pool Warmup Staggering +# When creating a new pool, connections are opened one by one with delays +# to avoid a burst of SYN packets that could trigger ISP flood protection. 
+# ------------------------------------------------------------------------------ + me_warmup_stagger_enabled = true + +# Delay between each connection creation (milliseconds). me_warmup_step_delay_ms = 500 + +# Random jitter added to the delay (milliseconds). me_warmup_step_jitter_ms = 300 + +# ------------------------------------------------------------------------------ +# ME Reconnect Backoff +# If an ME server drops the connection, Telemt retries with this strategy. +# ------------------------------------------------------------------------------ + +# Max simultaneous reconnect attempts per DC. me_reconnect_max_concurrent_per_dc = 8 + +# Exponential backoff base (milliseconds). me_reconnect_backoff_base_ms = 500 + +# Backoff ceiling (milliseconds). Will never wait longer than this. me_reconnect_backoff_cap_ms = 30000 + +# Number of instant retries before switching to exponential backoff. me_reconnect_fast_retry_count = 12 + +# ------------------------------------------------------------------------------ +# NAT Mismatch Behavior +# If STUN-detected IP differs from local interface IP (you are behind NAT). +# false = abort ME mode (safe default) +# true = force ME mode anyway (use if you know your NAT setup is correct) +# ------------------------------------------------------------------------------ + stun_iface_mismatch_ignore = false -unknown_dc_log_path = "unknown-dc.txt" # to disable: set to null -log_level = "normal" # debug | verbose | normal | silent + +# ------------------------------------------------------------------------------ +# Logging +# ------------------------------------------------------------------------------ + +# File to log unknown DC requests (DC IDs outside standard 1-5). +unknown_dc_log_path = "unknown-dc.txt" + +# Verbosity: "debug" | "verbose" | "normal" | "silent" +log_level = "normal" + +# Disable ANSI color codes in log output (useful for file logging). 
disable_colors = false -fast_mode_min_tls_record = 0 + +# ------------------------------------------------------------------------------ +# FakeTLS Record Sizing +# Buffer small MTProto packets into larger TLS records to mimic real HTTPS. +# Real HTTPS servers send records close to MTU size (~1400 bytes). +# A stream of tiny TLS records is a strong DPI signal. +# Set to 0 to disable. Set to 1400 for realistic HTTPS emulation. +# ------------------------------------------------------------------------------ + +fast_mode_min_tls_record = 1400 + +# ------------------------------------------------------------------------------ +# Periodic Updates +# ------------------------------------------------------------------------------ + +# How often (seconds) to re-fetch ME server lists and proxy secrets +# from core.telegram.org. Keeps your proxy in sync with Telegram infrastructure. update_every = 300 -me_reinit_every_secs = 900 + +# How often (seconds) to force a Hardswap even if the ME map is unchanged. +# Shorter intervals mean shorter-lived TCP flows, harder for DPI to profile. +me_reinit_every_secs = 600 + +# ------------------------------------------------------------------------------ +# Hardswap Warmup Tuning +# Fine-grained control over how the new pool is warmed up before traffic switch. +# ------------------------------------------------------------------------------ + me_hardswap_warmup_delay_min_ms = 1000 me_hardswap_warmup_delay_max_ms = 2000 me_hardswap_warmup_extra_passes = 3 me_hardswap_warmup_pass_backoff_base_ms = 500 + +# ------------------------------------------------------------------------------ +# Config Update Debouncing +# Telegram sometimes pushes transient/broken configs. Debouncing requires +# N consecutive identical fetches before applying a change. +# ------------------------------------------------------------------------------ + +# ME server list must be identical for this many fetches before applying. 
me_config_stable_snapshots = 2 + +# Minimum seconds between config applications. me_config_apply_cooldown_secs = 300 + +# Proxy secret must be identical for this many fetches before applying. proxy_secret_stable_snapshots = 2 + +# ------------------------------------------------------------------------------ +# Proxy Secret Rotation +# ------------------------------------------------------------------------------ + +# Apply newly downloaded secrets at runtime without restart. proxy_secret_rotate_runtime = true + +# Maximum acceptable secret length (bytes). Rejects abnormally large secrets. proxy_secret_len_max = 256 + +# ------------------------------------------------------------------------------ +# Hardswap Drain Settings +# Controls graceful shutdown of old ME connections during pool rotation. +# ------------------------------------------------------------------------------ + +# Seconds to keep old connections alive for in-flight data before force-closing. me_pool_drain_ttl_secs = 90 + +# Minimum ratio of healthy connections in new pool before draining old pool. +# 0.8 = at least 80% of new pool must be ready. me_pool_min_fresh_ratio = 0.8 + +# Maximum seconds to wait for drain to complete before force-killing. me_reinit_drain_timeout_secs = 120 -# Legacy compatibility fields used when update_every is omitted. -proxy_secret_auto_reload_secs = 3600 -proxy_config_auto_reload_secs = 3600 + +# ------------------------------------------------------------------------------ +# NTP Clock Check +# MTProto uses timestamps. Clock drift > 30 seconds breaks handshakes. +# Telemt checks on startup and warns if out of sync. 
+# ------------------------------------------------------------------------------ + ntp_check = true -ntp_servers = ["pool.ntp.org"] # example: ["pool.ntp.org", "time.cloudflare.com"] +ntp_servers = ["pool.ntp.org"] + +# ------------------------------------------------------------------------------ +# Auto-Degradation +# If ME servers become completely unreachable (ISP blocking), +# automatically fall back to Direct Mode so users stay connected. +# ------------------------------------------------------------------------------ + auto_degradation_enabled = true + +# Number of DC groups that must be unreachable before triggering fallback. degradation_min_unavailable_dc_groups = 2 + +# ============================================================================== +# ALLOWED CLIENT PROTOCOLS +# Only enable what you need. In censored regions, TLS-only is safest. +# ============================================================================== + [general.modes] + +# Classic MTProto. Unobfuscated length prefixes. Trivially detected by DPI. +# No reason to enable unless you have ancient clients. classic = false + +# Obfuscated MTProto with randomized padding. Better than classic, but +# still detectable by statistical analysis of packet sizes. secure = false + +# FakeTLS (ee-secrets). Wraps MTProto in TLS 1.3 framing. +# To DPI, it looks like a normal HTTPS connection. +# This should be the ONLY enabled mode in censored environments. tls = true + +# ============================================================================== +# STARTUP LINK GENERATION +# Controls what tg:// invite links are printed to console on startup. +# ============================================================================== + [general.links] -show ="*" # example: "*" or ["alice", "bob"] -# public_host = "proxy.example.com" # example explicit host/IP for tg:// links -# public_port = 443 # example explicit port for tg:// links + +# Which users to generate links for. 
+# "*" = all users, or an array like ["alice", "bob"]. +show = "*" + +# IP or domain to embed in the tg:// link. +# If omitted, Telemt uses STUN to auto-detect. +# Set this to your server's public IP or domain for reliable links. +# public_host = "proxy.example.com" + +# Port to embed in the tg:// link. +# If omitted, uses [server].port. +# public_port = 443 + + +# ============================================================================== +# NETWORK & IP RESOLUTION +# ============================================================================== [network] + +# Enable IPv4 for outbound connections to Telegram. ipv4 = true -ipv6 = false # set true to enable IPv6 -prefer = 4 # 4 or 6 + +# Enable IPv6 for outbound connections to Telegram. +ipv6 = false + +# Prefer IPv4 (4) or IPv6 (6) when both are available. +prefer = 4 + +# Experimental: use both IPv4 and IPv6 ME servers simultaneously. +# May improve reliability but doubles connection count. multipath = false + +# STUN servers for external IP discovery. +# Used for Middle-Proxy KDF (if nat_probe=true) and link generation. stun_servers = [ - "stun.l.google.com:5349", - "stun1.l.google.com:3478", - "stun.gmx.net:3478", - "stun.l.google.com:19302", - "stun.1und1.de:3478", - "stun1.l.google.com:19302", - "stun2.l.google.com:19302", - "stun3.l.google.com:19302", - "stun4.l.google.com:19302", - "stun.services.mozilla.com:3478", - "stun.stunprotocol.org:3478", - "stun.nextcloud.com:3478", - "stun.voip.eutelia.it:3478", + "stun.l.google.com:5349", + "stun1.l.google.com:3478", + "stun.gmx.net:3478", + "stun.l.google.com:19302" ] + +# If UDP STUN is blocked, attempt TCP-based STUN as fallback. stun_tcp_fallback = true -http_ip_detect_urls = ["https://ifconfig.me/ip", "https://api.ipify.org"] + +# If all STUN fails, use HTTP APIs to discover public IP. +http_ip_detect_urls = [ + "https://ifconfig.me/ip", + "https://api.ipify.org" +] + +# Cache discovered public IP to this file to survive restarts. 
cache_public_ip_path = "cache/public_ip.txt" + +# ============================================================================== +# SERVER BINDING & METRICS +# ============================================================================== + [server] + +# TCP port to listen on. +# 443 is recommended (looks like normal HTTPS traffic). port = 443 + +# IPv4 bind address. "0.0.0.0" = all interfaces. listen_addr_ipv4 = "0.0.0.0" + +# IPv6 bind address. "::" = all interfaces. listen_addr_ipv6 = "::" -# listen_unix_sock = "/var/run/telemt.sock" # example -# listen_unix_sock_perm = "0660" # example unix socket mode -# listen_tcp = true # example explicit override (auto-detected when omitted) + +# Unix socket listener (for reverse proxy setups with Nginx/HAProxy). +# listen_unix_sock = "/var/run/telemt.sock" +# listen_unix_sock_perm = "0660" + +# Enable PROXY protocol header parsing. +# Set true ONLY if Telemt is behind HAProxy/Nginx that injects PROXY headers. +# If enabled without a proxy in front, clients will fail to connect. proxy_protocol = false -# metrics_port = 9090 # example -metrics_whitelist = ["127.0.0.1/32", "::1/128"] -# Example explicit listeners (default: omitted, auto-generated from listen_addr_*): + +# Prometheus metrics HTTP endpoint port. +# Uncomment to enable. Access at http://your-server:9090/metrics +# metrics_port = 9090 + +# IP ranges allowed to access the metrics endpoint. +metrics_whitelist = [ + "127.0.0.1/32", + "::1/128" +] + +# ------------------------------------------------------------------------------ +# Listener Overrides +# Define explicit listeners with specific bind IPs and announce IPs. +# The announce IP is what gets embedded in tg:// links and sent to ME servers. +# You MUST set announce to your server's public IP for ME mode to work. 
+# ------------------------------------------------------------------------------ + # [[server.listeners]] # ip = "0.0.0.0" -# announce = "proxy-v4.example.com" -# # announce_ip = "203.0.113.10" # deprecated alias -# proxy_protocol = false -# reuse_allow = false -# -# [[server.listeners]] -# ip = "::" -# announce = "proxy-v6.example.com" -# proxy_protocol = false +# announce = "203.0.113.10" # reuse_allow = false + +# ============================================================================== +# TIMEOUTS (seconds unless noted) +# ============================================================================== + [timeouts] + +# Maximum time for client to complete FakeTLS + MTProto handshake. client_handshake = 15 + +# Maximum time to establish TCP connection to upstream Telegram DC. tg_connect = 10 + +# TCP keepalive interval for client connections. client_keepalive = 60 + +# Maximum client inactivity before dropping the connection. client_ack = 300 + +# Instant retry count for a single ME endpoint before giving up on it. me_one_retry = 3 + +# Timeout (milliseconds) for a single ME endpoint connection attempt. me_one_timeout_ms = 1500 + +# ============================================================================== +# ANTI-CENSORSHIP / FAKETLS / MASKING +# This is where Telemt becomes invisible to Deep Packet Inspection. +# ============================================================================== + [censorship] -tls_domain = "petrovich.ru" -# tls_domains = ["example.com", "cdn.example.net"] # Additional domains for EE links + +# ------------------------------------------------------------------------------ +# TLS Domain Fronting +# The SNI (Server Name Indication) your proxy presents to connecting clients. +# Must be a popular, unblocked HTTPS website in your target country. +# DPI sees traffic to this domain. Choose carefully. +# Good choices: major CDNs, banks, government sites, search engines. +# Bad choices: obscure sites, already-blocked domains. 
+# ------------------------------------------------------------------------------ + +tls_domain = "www.google.com" + +# ------------------------------------------------------------------------------ +# Active Probe Masking +# When someone connects but fails the MTProto handshake (wrong secret), +# they might be an ISP active prober testing if this is a proxy. +# +# mask = false: drop the connection (prober knows something is here) +# mask = true: transparently proxy them to mask_host (prober sees a real website) +# +# With mask enabled, your server is indistinguishable from a real web server +# to anyone who doesn't have the correct secret. +# ------------------------------------------------------------------------------ + mask = true -# mask_host = "www.google.com" # example, defaults to tls_domain when both mask_host/mask_unix_sock are unset -# mask_unix_sock = "/var/run/nginx.sock" # example, mutually exclusive with mask_host + +# The real web server to forward failed handshakes to. +# If omitted, defaults to tls_domain. +# mask_host = "www.google.com" + +# Port on the mask host to connect to. mask_port = 443 -# mask_proxy_protocol = 0 # Send PROXY protocol header to mask_host: 0 = off, 1 = v1 (text), 2 = v2 (binary) -fake_cert_len = 2048 # if tls_emulation=false and default value is used, loader may randomize this value at runtime + +# Inject PROXY protocol header when forwarding to mask host. +# 0 = disabled, 1 = v1, 2 = v2. Leave disabled unless mask_host expects it. +# mask_proxy_protocol = 0 + +# ------------------------------------------------------------------------------ +# TLS Certificate Emulation +# ------------------------------------------------------------------------------ + +# Size (bytes) of the locally generated fake TLS certificate. +# Only used when tls_emulation is disabled. +fake_cert_len = 2048 + +# KILLER FEATURE: Real-Time TLS Emulation. 
+# Telemt connects to tls_domain, fetches its actual TLS 1.3 certificate chain, +# and exactly replicates the byte sizes of ServerHello and Certificate records. +# Defeats DPI that uses TLS record length heuristics to detect proxies. +# Strongly recommended in censored environments. tls_emulation = true + +# Directory to cache fetched TLS certificates. tls_front_dir = "tlsfront" -server_hello_delay_min_ms = 0 -server_hello_delay_max_ms = 0 -tls_new_session_tickets = 0 -tls_full_cert_ttl_secs = 90 + +# ------------------------------------------------------------------------------ +# ServerHello Timing +# Real web servers take 30-150ms to respond to ClientHello due to network +# latency and crypto processing. A proxy responding in <1ms is suspicious. +# These settings add realistic delay to mimic genuine server behavior. +# ------------------------------------------------------------------------------ + +# Minimum delay before sending ServerHello (milliseconds). +server_hello_delay_min_ms = 50 + +# Maximum delay before sending ServerHello (milliseconds). +server_hello_delay_max_ms = 150 + +# ------------------------------------------------------------------------------ +# TLS Session Tickets +# Real TLS 1.3 servers send 1-2 NewSessionTicket messages after handshake. +# A server that sends zero tickets is anomalous and may trigger DPI flags. +# Set this to match your tls_domain's behavior (usually 2). +# ------------------------------------------------------------------------------ + +tls_new_session_tickets = 2 + +# ------------------------------------------------------------------------------ +# Full Certificate Frequency +# When tls_emulation is enabled, this controls how often (per client IP) +# to send the complete emulated certificate chain. +# +# > 0: Subsequent connections within TTL seconds get a smaller cached version. +# Saves bandwidth but creates a detectable size difference between +# first and repeat connections. 
+# +# = 0: Every connection gets the full certificate. More bandwidth but +# perfectly consistent behavior, no anomalies for DPI to detect. +# ------------------------------------------------------------------------------ + +tls_full_cert_ttl_secs = 0 + +# ------------------------------------------------------------------------------ +# ALPN Enforcement +# Ensure ServerHello responds with the exact ALPN protocol the client requested. +# Mismatched ALPN (e.g., client asks h2, server says http/1.1) is a DPI red flag. +# ------------------------------------------------------------------------------ + alpn_enforce = true + +# ============================================================================== +# ACCESS CONTROL & USERS +# ============================================================================== + [access] + +# ------------------------------------------------------------------------------ +# Replay Attack Protection +# DPI can record a legitimate user's handshake and replay it later to probe +# whether the server is a proxy. Telemt remembers recent handshake nonces +# and rejects duplicates. +# ------------------------------------------------------------------------------ + +# Number of nonce slots in the replay detection buffer. replay_check_len = 65536 + +# How long (seconds) to remember nonces before expiring them. replay_window_secs = 1800 + +# Allow clients with incorrect system clocks to connect. +# false = reject clients with significant time skew (more secure) +# true = accept anyone regardless of clock (more permissive) ignore_time_skew = false +# ------------------------------------------------------------------------------ +# User Secrets +# Each user needs a unique 32-character hex string as their secret. +# Generate with: openssl rand -hex 16 +# +# This secret is embedded in the tg:// link. Anyone with it can connect. 
+# Format: username = "hex_secret" +# ------------------------------------------------------------------------------ + [access.users] -# format: "username" = "32_hex_chars_secret" -hello = "00000000000000000000000000000000" -# alice = "11111111111111111111111111111111" # example +# alice = "0123456789abcdef0123456789abcdef" +# bob = "fedcba9876543210fedcba9876543210" + +# ------------------------------------------------------------------------------ +# Per-User Connection Limits +# Limits concurrent TCP connections per user to prevent secret sharing. +# Uncomment and set for each user as needed. +# ------------------------------------------------------------------------------ [access.user_max_tcp_conns] -# alice = 100 # example +# alice = 100 +# bob = 50 + +# ------------------------------------------------------------------------------ +# Per-User Expiration Dates +# Automatically revoke access after the specified date (ISO 8601 format). +# ------------------------------------------------------------------------------ [access.user_expirations] -# alice = "2078-01-01T00:00:00Z" # example +# alice = "2025-12-31T23:59:59Z" +# bob = "2026-06-15T00:00:00Z" + +# ------------------------------------------------------------------------------ +# Per-User Data Quotas +# Maximum total bytes transferred per user. Connection refused after limit. +# ------------------------------------------------------------------------------ [access.user_data_quota] -# hello = 10737418240 # example bytes -# alice = 10737418240 # example bytes +# alice = 107374182400 +# bob = 53687091200 + +# ------------------------------------------------------------------------------ +# Per-User Unique IP Limits +# Maximum number of different IP addresses that can use this secret +# at the same time. Highly effective against secret leaking/sharing. +# Set to 1 for single-device, 2-3 for phone+desktop, etc. 
+# ------------------------------------------------------------------------------ [access.user_max_unique_ips] -# hello = 10 # example -# alice = 100 # example +# alice = 3 +# bob = 2 + + +# ============================================================================== +# UPSTREAM ROUTING +# Controls how Telemt connects to Telegram servers (or ME servers). +# If omitted entirely, uses the OS default route. +# ============================================================================== + +# ------------------------------------------------------------------------------ +# Direct upstream: use the server's own network interface. +# You can optionally bind to a specific interface or local IP. +# ------------------------------------------------------------------------------ -# Default behavior if [[upstreams]] is omitted: loader injects one direct upstream. -# Example explicit upstreams: # [[upstreams]] # type = "direct" # interface = "eth0" @@ -178,28 +671,27 @@ hello = "00000000000000000000000000000000" # weight = 1 # enabled = true # scopes = "*" -# -# [[upstreams]] -# type = "socks4" -# address = "198.51.100.20:1080" -# interface = "eth0" -# user_id = "telemt" -# weight = 1 -# enabled = true -# scopes = "*" -# + +# ------------------------------------------------------------------------------ +# SOCKS5 upstream: route Telegram traffic through a SOCKS5 proxy. +# Useful if your server's IP is blocked from reaching Telegram DCs. +# ------------------------------------------------------------------------------ + # [[upstreams]] # type = "socks5" # address = "198.51.100.30:1080" -# interface = "eth0" # username = "proxy-user" # password = "proxy-pass" # weight = 1 # enabled = true -# scopes = "*" -# === DC Address Overrides === + +# ============================================================================== +# DATACENTER OVERRIDES +# Force specific DC IDs to route to specific IP:Port combinations. +# DC 203 (CDN) is auto-injected by Telemt if not specified here. 
+# ============================================================================== + # [dc_overrides] -# "201" = "149.154.175.50:443" # example -# "202" = ["149.154.167.51:443", "149.154.175.100:443"] # example -# "203" = "91.105.192.100:443" # loader auto-adds this one when omitted +# "201" = "149.154.175.50:443" +# "202" = ["149.154.167.51:443", "149.154.175.100:443"] From e27ef04c3d0b9ce0f308cd10c5a8ee4583621fc6 Mon Sep 17 00:00:00 2001 From: ivulit Date: Sat, 28 Feb 2026 19:09:32 +0300 Subject: [PATCH 95/98] fix: pass correct dst address to outgoing PROXY protocol header Previously handle_bad_client used stream.local_addr() (the ephemeral socket to the mask backend) as the dst in the outgoing PROXY protocol header. This is wrong: the dst should be the address telemt is listening on, or the dst from the incoming PROXY protocol header if one was present. - handle_bad_client now receives local_addr from the caller - handle_client_stream resolves local_addr from PROXY protocol info.dst_addr or falls back to a synthetic address based on config.server.port - RunningClientHandler.do_handshake resolves local_addr from stream.local_addr() overridden by PROXY protocol info.dst_addr when present, and passes it down to handle_tls_client / handle_direct_client - masking.rs uses the caller-supplied local_addr directly, eliminating the stream.local_addr() call --- src/proxy/client.rs | 36 +++++++++++++++++++++++++----------- src/proxy/masking.rs | 28 +++++++++++----------------- 2 files changed, 36 insertions(+), 28 deletions(-) diff --git a/src/proxy/client.rs b/src/proxy/client.rs index d8bbc48..4bc4b65 100644 --- a/src/proxy/client.rs +++ b/src/proxy/client.rs @@ -91,6 +91,11 @@ where stats.increment_connects_all(); let mut real_peer = normalize_ip(peer); + // For non-TCP streams, use a synthetic local address; may be overridden by PROXY protocol dst + let mut local_addr: SocketAddr = format!("0.0.0.0:{}", config.server.port) + .parse() + .unwrap_or_else(|_| 
"0.0.0.0:443".parse().unwrap()); + if proxy_protocol_enabled { match parse_proxy_protocol(&mut stream, peer).await { Ok(info) => { @@ -101,6 +106,9 @@ where "PROXY protocol header parsed" ); real_peer = normalize_ip(info.src_addr); + if let Some(dst) = info.dst_addr { + local_addr = dst; + } } Err(e) => { stats.increment_connects_bad(); @@ -119,11 +127,6 @@ where let beobachten_for_timeout = beobachten.clone(); let peer_for_timeout = real_peer.ip(); - // For non-TCP streams, use a synthetic local address - let local_addr: SocketAddr = format!("0.0.0.0:{}", config.server.port) - .parse() - .unwrap_or_else(|_| "0.0.0.0:443".parse().unwrap()); - // Phase 1: handshake (with timeout) let outcome = match timeout(handshake_timeout, async { let mut first_bytes = [0u8; 5]; @@ -144,6 +147,7 @@ where writer, &first_bytes, real_peer, + local_addr, &config, &beobachten, ) @@ -169,6 +173,7 @@ where writer, &handshake, real_peer, + local_addr, &config, &beobachten, ) @@ -213,6 +218,7 @@ where writer, &first_bytes, real_peer, + local_addr, &config, &beobachten, ) @@ -238,6 +244,7 @@ where writer, &handshake, real_peer, + local_addr, &config, &beobachten, ) @@ -405,6 +412,8 @@ impl RunningClientHandler { } async fn do_handshake(mut self) -> Result { + let mut local_addr = self.stream.local_addr().map_err(ProxyError::Io)?; + if self.proxy_protocol_enabled { match parse_proxy_protocol(&mut self.stream, self.peer).await { Ok(info) => { @@ -415,6 +424,9 @@ impl RunningClientHandler { "PROXY protocol header parsed" ); self.peer = normalize_ip(info.src_addr); + if let Some(dst) = info.dst_addr { + local_addr = dst; + } } Err(e) => { self.stats.increment_connects_bad(); @@ -440,13 +452,13 @@ impl RunningClientHandler { debug!(peer = %peer, is_tls = is_tls, "Handshake type detected"); if is_tls { - self.handle_tls_client(first_bytes).await + self.handle_tls_client(first_bytes, local_addr).await } else { - self.handle_direct_client(first_bytes).await + self.handle_direct_client(first_bytes, 
local_addr).await } } - async fn handle_tls_client(mut self, first_bytes: [u8; 5]) -> Result { + async fn handle_tls_client(mut self, first_bytes: [u8; 5], local_addr: SocketAddr) -> Result { let peer = self.peer; let _ip_tracker = self.ip_tracker.clone(); @@ -463,6 +475,7 @@ impl RunningClientHandler { writer, &first_bytes, peer, + local_addr, &self.config, &self.beobachten, ) @@ -479,7 +492,6 @@ impl RunningClientHandler { let stats = self.stats.clone(); let buffer_pool = self.buffer_pool.clone(); - let local_addr = self.stream.local_addr().map_err(ProxyError::Io)?; let (read_half, write_half) = self.stream.into_split(); let (mut tls_reader, tls_writer, _tls_user) = match handle_tls_handshake( @@ -502,6 +514,7 @@ impl RunningClientHandler { writer, &handshake, peer, + local_addr, &config, &self.beobachten, ) @@ -558,7 +571,7 @@ impl RunningClientHandler { ))) } - async fn handle_direct_client(mut self, first_bytes: [u8; 5]) -> Result { + async fn handle_direct_client(mut self, first_bytes: [u8; 5], local_addr: SocketAddr) -> Result { let peer = self.peer; let _ip_tracker = self.ip_tracker.clone(); @@ -571,6 +584,7 @@ impl RunningClientHandler { writer, &first_bytes, peer, + local_addr, &self.config, &self.beobachten, ) @@ -587,7 +601,6 @@ impl RunningClientHandler { let stats = self.stats.clone(); let buffer_pool = self.buffer_pool.clone(); - let local_addr = self.stream.local_addr().map_err(ProxyError::Io)?; let (read_half, write_half) = self.stream.into_split(); let (crypto_reader, crypto_writer, success) = match handle_mtproto_handshake( @@ -609,6 +622,7 @@ impl RunningClientHandler { writer, &handshake, peer, + local_addr, &config, &self.beobachten, ) diff --git a/src/proxy/masking.rs b/src/proxy/masking.rs index 8f19b40..b1e69d4 100644 --- a/src/proxy/masking.rs +++ b/src/proxy/masking.rs @@ -55,6 +55,7 @@ pub async fn handle_bad_client( writer: W, initial_data: &[u8], peer: SocketAddr, + local_addr: SocketAddr, config: &ProxyConfig, beobachten: 
&BeobachtenStore, ) @@ -126,23 +127,16 @@ where let proxy_header: Option> = match config.censorship.mask_proxy_protocol { 0 => None, version => { - let header = if let Ok(local_addr) = stream.local_addr() { - match version { - 2 => ProxyProtocolV2Builder::new().with_addrs(peer, local_addr).build(), - _ => match (peer, local_addr) { - (SocketAddr::V4(src), SocketAddr::V4(dst)) => - ProxyProtocolV1Builder::new().tcp4(src.into(), dst.into()).build(), - (SocketAddr::V6(src), SocketAddr::V6(dst)) => - ProxyProtocolV1Builder::new().tcp6(src.into(), dst.into()).build(), - _ => - ProxyProtocolV1Builder::new().build(), - }, - } - } else { - match version { - 2 => ProxyProtocolV2Builder::new().build(), - _ => ProxyProtocolV1Builder::new().build(), - } + let header = match version { + 2 => ProxyProtocolV2Builder::new().with_addrs(peer, local_addr).build(), + _ => match (peer, local_addr) { + (SocketAddr::V4(src), SocketAddr::V4(dst)) => + ProxyProtocolV1Builder::new().tcp4(src.into(), dst.into()).build(), + (SocketAddr::V6(src), SocketAddr::V6(dst)) => + ProxyProtocolV1Builder::new().tcp6(src.into(), dst.into()).build(), + _ => + ProxyProtocolV1Builder::new().build(), + }, }; Some(header) } From ed93b0a030623b2fda179fc1a02c69c9800dae78 Mon Sep 17 00:00:00 2001 From: ivulit Date: Sun, 1 Mar 2026 00:14:55 +0300 Subject: [PATCH 96/98] fix: send PROXY protocol header to mask unix socket When mask_unix_sock is configured, mask_proxy_protocol was silently ignored and no PROXY protocol header was sent to the backend. Apply the same header-building logic as the TCP path in both masking relay and TLS fetcher (raw and rustls). 
--- src/proxy/masking.rs | 24 +++++++++++++++++++++++- src/tls_front/fetcher.rs | 4 ++-- 2 files changed, 25 insertions(+), 3 deletions(-) diff --git a/src/proxy/masking.rs b/src/proxy/masking.rs index b1e69d4..318071b 100644 --- a/src/proxy/masking.rs +++ b/src/proxy/masking.rs @@ -88,7 +88,29 @@ where let connect_result = timeout(MASK_TIMEOUT, UnixStream::connect(sock_path)).await; match connect_result { Ok(Ok(stream)) => { - let (mask_read, mask_write) = stream.into_split(); + let (mask_read, mut mask_write) = stream.into_split(); + let proxy_header: Option> = match config.censorship.mask_proxy_protocol { + 0 => None, + version => { + let header = match version { + 2 => ProxyProtocolV2Builder::new().with_addrs(peer, local_addr).build(), + _ => match (peer, local_addr) { + (SocketAddr::V4(src), SocketAddr::V4(dst)) => + ProxyProtocolV1Builder::new().tcp4(src.into(), dst.into()).build(), + (SocketAddr::V6(src), SocketAddr::V6(dst)) => + ProxyProtocolV1Builder::new().tcp6(src.into(), dst.into()).build(), + _ => + ProxyProtocolV1Builder::new().build(), + }, + }; + Some(header) + } + }; + if let Some(header) = proxy_header { + if mask_write.write_all(&header).await.is_err() { + return; + } + } if timeout(MASK_RELAY_TIMEOUT, relay_to_mask(reader, writer, mask_read, mask_write, initial_data)).await.is_err() { debug!("Mask relay timed out (unix socket)"); } diff --git a/src/tls_front/fetcher.rs b/src/tls_front/fetcher.rs index 1731cdc..4d9067c 100644 --- a/src/tls_front/fetcher.rs +++ b/src/tls_front/fetcher.rs @@ -499,7 +499,7 @@ async fn fetch_via_raw_tls( sock = %sock_path, "Raw TLS fetch using mask unix socket" ); - return fetch_via_raw_tls_stream(stream, sni, connect_timeout, 0).await; + return fetch_via_raw_tls_stream(stream, sni, connect_timeout, proxy_protocol).await; } Ok(Err(e)) => { warn!( @@ -631,7 +631,7 @@ async fn fetch_via_rustls( sock = %sock_path, "Rustls fetch using mask unix socket" ); - return fetch_via_rustls_stream(stream, host, sni, 0).await; + 
return fetch_via_rustls_stream(stream, host, sni, proxy_protocol).await; } Ok(Err(e)) => { warn!( From 44cdfd4b23e25fa978b473000b74bd580c12458b Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Sun, 1 Mar 2026 03:36:00 +0300 Subject: [PATCH 97/98] ME Pool improvements Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com> --- src/proxy/middle_relay.rs | 118 ++++++++++++++++++++++++++++- src/stream/crypto_stream.rs | 95 +++++++++++++++++++++-- src/transport/middle_proxy/send.rs | 73 +++++++++++------- 3 files changed, 249 insertions(+), 37 deletions(-) diff --git a/src/proxy/middle_relay.rs b/src/proxy/middle_relay.rs index f089442..a4942ba 100644 --- a/src/proxy/middle_relay.rs +++ b/src/proxy/middle_relay.rs @@ -26,6 +26,9 @@ enum C2MeCommand { const DESYNC_DEDUP_WINDOW: Duration = Duration::from_secs(60); const DESYNC_ERROR_CLASS: &str = "frame_too_large_crypto_desync"; +const C2ME_CHANNEL_CAPACITY: usize = 1024; +const C2ME_SOFT_PRESSURE_MIN_FREE_SLOTS: usize = 64; +const C2ME_SENDER_FAIRNESS_BUDGET: usize = 32; static DESYNC_DEDUP: OnceLock>> = OnceLock::new(); struct RelayForensicsState { @@ -166,6 +169,27 @@ fn report_desync_frame_too_large( )) } +fn should_yield_c2me_sender(sent_since_yield: usize, has_backlog: bool) -> bool { + has_backlog && sent_since_yield >= C2ME_SENDER_FAIRNESS_BUDGET +} + +async fn enqueue_c2me_command( + tx: &mpsc::Sender, + cmd: C2MeCommand, +) -> std::result::Result<(), mpsc::error::SendError> { + match tx.try_send(cmd) { + Ok(()) => Ok(()), + Err(mpsc::error::TrySendError::Closed(cmd)) => Err(mpsc::error::SendError(cmd)), + Err(mpsc::error::TrySendError::Full(cmd)) => { + // Cooperative yield reduces burst catch-up when the per-conn queue is near saturation. 
+ if tx.capacity() <= C2ME_SOFT_PRESSURE_MIN_FREE_SLOTS { + tokio::task::yield_now().await; + } + tx.send(cmd).await + } + } +} + pub(crate) async fn handle_via_middle_proxy( mut crypto_reader: CryptoReader, crypto_writer: CryptoWriter, @@ -230,9 +254,10 @@ where let frame_limit = config.general.max_client_frame; - let (c2me_tx, mut c2me_rx) = mpsc::channel::(1024); + let (c2me_tx, mut c2me_rx) = mpsc::channel::(C2ME_CHANNEL_CAPACITY); let me_pool_c2me = me_pool.clone(); let c2me_sender = tokio::spawn(async move { + let mut sent_since_yield = 0usize; while let Some(cmd) = c2me_rx.recv().await { match cmd { C2MeCommand::Data { payload, flags } => { @@ -244,6 +269,11 @@ where &payload, flags, ).await?; + sent_since_yield = sent_since_yield.saturating_add(1); + if should_yield_c2me_sender(sent_since_yield, !c2me_rx.is_empty()) { + sent_since_yield = 0; + tokio::task::yield_now().await; + } } C2MeCommand::Close => { let _ = me_pool_c2me.send_close(conn_id).await; @@ -360,8 +390,7 @@ where flags |= RPC_FLAG_NOT_ENCRYPTED; } // Keep client read loop lightweight: route heavy ME send path via a dedicated task. - if c2me_tx - .send(C2MeCommand::Data { payload, flags }) + if enqueue_c2me_command(&c2me_tx, C2MeCommand::Data { payload, flags }) .await .is_err() { @@ -372,7 +401,7 @@ where Ok(None) => { debug!(conn_id, "Client EOF"); client_closed = true; - let _ = c2me_tx.send(C2MeCommand::Close).await; + let _ = enqueue_c2me_command(&c2me_tx, C2MeCommand::Close).await; break; } Err(e) => { @@ -647,3 +676,84 @@ where // ACK should remain low-latency. 
client_writer.flush().await.map_err(ProxyError::Io) } + +#[cfg(test)] +mod tests { + use super::*; + use tokio::time::{Duration as TokioDuration, timeout}; + + #[test] + fn should_yield_sender_only_on_budget_with_backlog() { + assert!(!should_yield_c2me_sender(0, true)); + assert!(!should_yield_c2me_sender(C2ME_SENDER_FAIRNESS_BUDGET - 1, true)); + assert!(!should_yield_c2me_sender(C2ME_SENDER_FAIRNESS_BUDGET, false)); + assert!(should_yield_c2me_sender(C2ME_SENDER_FAIRNESS_BUDGET, true)); + } + + #[tokio::test] + async fn enqueue_c2me_command_uses_try_send_fast_path() { + let (tx, mut rx) = mpsc::channel::(2); + enqueue_c2me_command( + &tx, + C2MeCommand::Data { + payload: vec![1, 2, 3], + flags: 0, + }, + ) + .await + .unwrap(); + + let recv = timeout(TokioDuration::from_millis(50), rx.recv()) + .await + .unwrap() + .unwrap(); + match recv { + C2MeCommand::Data { payload, flags } => { + assert_eq!(payload, vec![1, 2, 3]); + assert_eq!(flags, 0); + } + C2MeCommand::Close => panic!("unexpected close command"), + } + } + + #[tokio::test] + async fn enqueue_c2me_command_falls_back_to_send_when_queue_is_full() { + let (tx, mut rx) = mpsc::channel::(1); + tx.send(C2MeCommand::Data { + payload: vec![9], + flags: 9, + }) + .await + .unwrap(); + + let tx2 = tx.clone(); + let producer = tokio::spawn(async move { + enqueue_c2me_command( + &tx2, + C2MeCommand::Data { + payload: vec![7, 7], + flags: 7, + }, + ) + .await + .unwrap(); + }); + + let _ = timeout(TokioDuration::from_millis(100), rx.recv()) + .await + .unwrap(); + producer.await.unwrap(); + + let recv = timeout(TokioDuration::from_millis(100), rx.recv()) + .await + .unwrap() + .unwrap(); + match recv { + C2MeCommand::Data { payload, flags } => { + assert_eq!(payload, vec![7, 7]); + assert_eq!(flags, 7); + } + C2MeCommand::Close => panic!("unexpected close command"), + } + } +} diff --git a/src/stream/crypto_stream.rs b/src/stream/crypto_stream.rs index 5303fe5..744b186 100644 --- a/src/stream/crypto_stream.rs +++ 
b/src/stream/crypto_stream.rs @@ -336,22 +336,35 @@ impl PendingCiphertext { } fn remaining_capacity(&self) -> usize { - self.max_len.saturating_sub(self.buf.len()) + self.max_len.saturating_sub(self.pending_len()) + } + + fn compact_consumed_prefix(&mut self) { + if self.pos == 0 { + return; + } + + if self.pos >= self.buf.len() { + self.buf.clear(); + self.pos = 0; + return; + } + + let _ = self.buf.split_to(self.pos); + self.pos = 0; } fn advance(&mut self, n: usize) { self.pos = (self.pos + n).min(self.buf.len()); if self.pos == self.buf.len() { - self.buf.clear(); - self.pos = 0; + self.compact_consumed_prefix(); return; } // Compact when a large prefix was consumed. if self.pos >= 16 * 1024 { - let _ = self.buf.split_to(self.pos); - self.pos = 0; + self.compact_consumed_prefix(); } } @@ -379,6 +392,11 @@ impl PendingCiphertext { )); } + // Reclaim consumed prefix when physical storage is the only limiter. + if self.pos > 0 && self.buf.len() + plaintext.len() > self.max_len { + self.compact_consumed_prefix(); + } + let start = self.buf.len(); self.buf.reserve(plaintext.len()); self.buf.extend_from_slice(plaintext); @@ -777,3 +795,70 @@ impl AsyncWrite for PassthroughStream { Pin::new(&mut self.inner).poll_shutdown(cx) } } + +#[cfg(test)] +mod tests { + use super::*; + + fn test_ctr() -> AesCtr { + AesCtr::new(&[0x11; 32], 0x0102_0304_0506_0708_1112_1314_1516_1718) + } + + #[test] + fn pending_capacity_reclaims_after_partial_advance_without_compaction_threshold() { + let mut pending = PendingCiphertext::new(1024); + let mut ctr = test_ctr(); + let payload = vec![0x41; 900]; + pending.push_encrypted(&mut ctr, &payload).unwrap(); + + // Keep position below compaction threshold to validate logical-capacity accounting. 
+ pending.advance(800); + assert_eq!(pending.pending_len(), 100); + assert_eq!(pending.remaining_capacity(), 924); + } + + #[test] + fn push_encrypted_respects_pending_limit() { + let mut pending = PendingCiphertext::new(64); + let mut ctr = test_ctr(); + + pending.push_encrypted(&mut ctr, &[0x10; 64]).unwrap(); + let err = pending.push_encrypted(&mut ctr, &[0x20]).unwrap_err(); + assert_eq!(err.kind(), ErrorKind::WouldBlock); + } + + #[test] + fn push_encrypted_compacts_prefix_when_physical_buffer_would_overflow() { + let mut pending = PendingCiphertext::new(64); + let mut ctr = test_ctr(); + + pending.push_encrypted(&mut ctr, &[0x22; 60]).unwrap(); + pending.advance(30); + pending.push_encrypted(&mut ctr, &[0x33; 30]).unwrap(); + + assert_eq!(pending.pending_len(), 60); + assert!(pending.buf.len() <= 64); + } + + #[test] + fn pending_ciphertext_preserves_stream_order_across_drain_and_append() { + let mut pending = PendingCiphertext::new(128); + let mut ctr = test_ctr(); + + let first = vec![0xA1; 80]; + let second = vec![0xB2; 40]; + + pending.push_encrypted(&mut ctr, &first).unwrap(); + pending.advance(50); + pending.push_encrypted(&mut ctr, &second).unwrap(); + + let mut baseline_ctr = test_ctr(); + let mut baseline_plain = Vec::with_capacity(first.len() + second.len()); + baseline_plain.extend_from_slice(&first); + baseline_plain.extend_from_slice(&second); + baseline_ctr.apply(&mut baseline_plain); + + let expected = &baseline_plain[50..]; + assert_eq!(pending.pending_slice(), expected); + } +} diff --git a/src/transport/middle_proxy/send.rs b/src/transport/middle_proxy/send.rs index 8867212..f68b1b9 100644 --- a/src/transport/middle_proxy/send.rs +++ b/src/transport/middle_proxy/send.rs @@ -1,8 +1,10 @@ +use std::cmp::Reverse; use std::net::SocketAddr; use std::sync::Arc; use std::sync::atomic::Ordering; use std::time::Duration; +use tokio::sync::mpsc::error::TrySendError; use tracing::{debug, warn}; use crate::error::{ProxyError, Result}; @@ -43,15 +45,17 
@@ impl MePool { loop { if let Some(current) = self.registry.get_writer(conn_id).await { - let send_res = { - current - .tx - .send(WriterCommand::Data(payload.clone())) - .await - }; - match send_res { + match current.tx.try_send(WriterCommand::Data(payload.clone())) { Ok(()) => return Ok(()), - Err(_) => { + Err(TrySendError::Full(cmd)) => { + if current.tx.send(cmd).await.is_ok() { + return Ok(()); + } + warn!(writer_id = current.writer_id, "ME writer channel closed"); + self.remove_writer_and_close_clients(current.writer_id).await; + continue; + } + Err(TrySendError::Closed(_)) => { warn!(writer_id = current.writer_id, "ME writer channel closed"); self.remove_writer_and_close_clients(current.writer_id).await; continue; @@ -135,10 +139,11 @@ impl MePool { let w = &writers_snapshot[*idx]; let degraded = w.degraded.load(Ordering::Relaxed); let stale = (w.generation < self.current_generation()) as usize; - (stale, degraded as usize) + (stale, degraded as usize, Reverse(w.tx.capacity())) }); let start = self.rr.fetch_add(1, Ordering::Relaxed) as usize % candidate_indices.len(); + let mut fallback_blocking_idx: Option = None; for offset in 0..candidate_indices.len() { let idx = candidate_indices[(start + offset) % candidate_indices.len()]; @@ -146,29 +151,41 @@ impl MePool { if !self.writer_accepts_new_binding(w) { continue; } - if w.tx.send(WriterCommand::Data(payload.clone())).await.is_ok() { - self.registry - .bind_writer(conn_id, w.id, w.tx.clone(), meta.clone()) - .await; - if w.generation < self.current_generation() { - self.stats.increment_pool_stale_pick_total(); - debug!( - conn_id, - writer_id = w.id, - writer_generation = w.generation, - current_generation = self.current_generation(), - "Selected stale ME writer for fallback bind" - ); + match w.tx.try_send(WriterCommand::Data(payload.clone())) { + Ok(()) => { + self.registry + .bind_writer(conn_id, w.id, w.tx.clone(), meta.clone()) + .await; + if w.generation < self.current_generation() { + 
self.stats.increment_pool_stale_pick_total(); + debug!( + conn_id, + writer_id = w.id, + writer_generation = w.generation, + current_generation = self.current_generation(), + "Selected stale ME writer for fallback bind" + ); + } + return Ok(()); + } + Err(TrySendError::Full(_)) => { + if fallback_blocking_idx.is_none() { + fallback_blocking_idx = Some(idx); + } + } + Err(TrySendError::Closed(_)) => { + warn!(writer_id = w.id, "ME writer channel closed"); + self.remove_writer_and_close_clients(w.id).await; + continue; } - return Ok(()); - } else { - warn!(writer_id = w.id, "ME writer channel closed"); - self.remove_writer_and_close_clients(w.id).await; - continue; } } - let w = writers_snapshot[candidate_indices[start]].clone(); + let Some(blocking_idx) = fallback_blocking_idx else { + continue; + }; + + let w = writers_snapshot[blocking_idx].clone(); if !self.writer_accepts_new_binding(&w) { continue; } From 47b12f94891fcfa9b1235367f1403638d66c600b Mon Sep 17 00:00:00 2001 From: Alexey <247128645+axkurcom@users.noreply.github.com> Date: Sun, 1 Mar 2026 04:02:32 +0300 Subject: [PATCH 98/98] UpstreamManager Health-check for ME Pool over SOCKS Co-Authored-By: brekotis <93345790+brekotis@users.noreply.github.com> --- src/main.rs | 2 + src/transport/upstream.rs | 444 ++++++++++++++++++++++++++++---------- 2 files changed, 328 insertions(+), 118 deletions(-) diff --git a/src/main.rs b/src/main.rs index a87dd99..2675509 100644 --- a/src/main.rs +++ b/src/main.rs @@ -770,12 +770,14 @@ async fn main() -> std::result::Result<(), Box> { // Background tasks let um_clone = upstream_manager.clone(); let decision_clone = decision.clone(); + let dc_overrides_for_health = config.dc_overrides.clone(); tokio::spawn(async move { um_clone .run_health_checks( prefer_ipv6, decision_clone.ipv4_dc, decision_clone.ipv6_dc, + dc_overrides_for_health, ) .await; }); diff --git a/src/transport/upstream.rs b/src/transport/upstream.rs index 8411f5a..fa7b0a6 100644 --- a/src/transport/upstream.rs 
+++ b/src/transport/upstream.rs @@ -4,7 +4,7 @@ #![allow(deprecated)] -use std::collections::HashMap; +use std::collections::{BTreeSet, HashMap}; use std::net::{SocketAddr, IpAddr}; use std::sync::Arc; use std::sync::atomic::{AtomicUsize, Ordering}; @@ -29,6 +29,12 @@ const NUM_DCS: usize = 5; const DC_PING_TIMEOUT_SECS: u64 = 5; /// Timeout for direct TG DC TCP connect readiness. const DIRECT_CONNECT_TIMEOUT_SECS: u64 = 10; +/// Interval between upstream health-check cycles. +const HEALTH_CHECK_INTERVAL_SECS: u64 = 30; +/// Timeout for a single health-check connect attempt. +const HEALTH_CHECK_CONNECT_TIMEOUT_SECS: u64 = 10; +/// Upstream is considered healthy when at least this many DC groups are reachable. +const MIN_HEALTHY_DC_GROUPS: usize = 3; // ============= RTT Tracking ============= @@ -167,6 +173,13 @@ pub struct UpstreamEgressInfo { pub socks_proxy_addr: Option, } +#[derive(Debug, Clone)] +struct HealthCheckGroup { + dc_idx: i16, + primary: Vec, + fallback: Vec, +} + // ============= Upstream Manager ============= #[derive(Clone)] @@ -987,41 +1000,144 @@ impl UpstreamManager { Ok(start.elapsed().as_secs_f64() * 1000.0) } + fn required_healthy_group_count(total_groups: usize) -> usize { + if total_groups == 0 { + 0 + } else { + total_groups.min(MIN_HEALTHY_DC_GROUPS) + } + } + + fn build_health_check_groups( + prefer_ipv6: bool, + ipv4_enabled: bool, + ipv6_enabled: bool, + dc_overrides: &HashMap>, + ) -> Vec { + let mut v4_by_dc: HashMap> = HashMap::new(); + let mut v6_by_dc: HashMap> = HashMap::new(); + + if ipv4_enabled { + for (idx, dc_ip) in TG_DATACENTERS_V4.iter().enumerate() { + let dc_idx = (idx + 1) as i16; + v4_by_dc + .entry(dc_idx) + .or_default() + .push(SocketAddr::new(*dc_ip, TG_DATACENTER_PORT)); + } + } + + if ipv6_enabled { + for (idx, dc_ip) in TG_DATACENTERS_V6.iter().enumerate() { + let dc_idx = (idx + 1) as i16; + v6_by_dc + .entry(dc_idx) + .or_default() + .push(SocketAddr::new(*dc_ip, TG_DATACENTER_PORT)); + } + } + + for 
(dc_key, addrs) in dc_overrides { + let dc_idx = match dc_key.parse::() { + Ok(v) if v > 0 => v, + _ => { + warn!(dc = %dc_key, "Invalid dc_overrides key for health-check, skipping"); + continue; + } + }; + + for addr_str in addrs { + match addr_str.parse::() { + Ok(addr) if addr.is_ipv6() => { + if ipv6_enabled { + v6_by_dc.entry(dc_idx).or_default().push(addr); + } + } + Ok(addr) => { + if ipv4_enabled { + v4_by_dc.entry(dc_idx).or_default().push(addr); + } + } + Err(_) => { + warn!( + dc = %dc_idx, + addr = %addr_str, + "Invalid dc_overrides address for health-check, skipping" + ); + } + } + } + } + + for addrs in v4_by_dc.values_mut() { + addrs.sort_unstable(); + addrs.dedup(); + } + for addrs in v6_by_dc.values_mut() { + addrs.sort_unstable(); + addrs.dedup(); + } + + let mut all_dcs = BTreeSet::new(); + all_dcs.extend(v4_by_dc.keys().copied()); + all_dcs.extend(v6_by_dc.keys().copied()); + + let mut groups = Vec::with_capacity(all_dcs.len()); + for dc_idx in all_dcs { + let v4_endpoints = v4_by_dc.remove(&dc_idx).unwrap_or_default(); + let v6_endpoints = v6_by_dc.remove(&dc_idx).unwrap_or_default(); + let (primary, fallback) = if prefer_ipv6 { + (v6_endpoints, v4_endpoints) + } else { + (v4_endpoints, v6_endpoints) + }; + + if primary.is_empty() && fallback.is_empty() { + continue; + } + + groups.push(HealthCheckGroup { + dc_idx, + primary, + fallback, + }); + } + + groups + } + // ============= Health Checks ============= - /// Background health check: rotates through DCs, 30s interval. - /// Uses preferred IP version based on config. - pub async fn run_health_checks(&self, prefer_ipv6: bool, ipv4_enabled: bool, ipv6_enabled: bool) { - let mut dc_rotation = 0usize; + /// Background health check based on reachable DC groups through each upstream. + /// Upstream stays healthy while at least `MIN_HEALTHY_DC_GROUPS` groups are reachable. 
+ pub async fn run_health_checks( + &self, + prefer_ipv6: bool, + ipv4_enabled: bool, + ipv6_enabled: bool, + dc_overrides: HashMap>, + ) { + let groups = Self::build_health_check_groups( + prefer_ipv6, + ipv4_enabled, + ipv6_enabled, + &dc_overrides, + ); + let required_healthy_groups = Self::required_healthy_group_count(groups.len()); + let mut endpoint_rotation: HashMap<(usize, i16, bool), usize> = HashMap::new(); + + if groups.is_empty() { + warn!("No DC groups available for upstream health-checks"); + } loop { - tokio::time::sleep(Duration::from_secs(30)).await; + tokio::time::sleep(Duration::from_secs(HEALTH_CHECK_INTERVAL_SECS)).await; - let dc_zero_idx = dc_rotation % NUM_DCS; - dc_rotation += 1; - - let primary_v6 = SocketAddr::new(TG_DATACENTERS_V6[dc_zero_idx], TG_DATACENTER_PORT); - let primary_v4 = SocketAddr::new(TG_DATACENTERS_V4[dc_zero_idx], TG_DATACENTER_PORT); - let dc_addr = if prefer_ipv6 && ipv6_enabled { - primary_v6 - } else if ipv4_enabled { - primary_v4 - } else if ipv6_enabled { - primary_v6 - } else { + if groups.is_empty() || required_healthy_groups == 0 { continue; - }; - - let fallback_addr = if dc_addr.is_ipv6() && ipv4_enabled { - Some(primary_v4) - } else if dc_addr.is_ipv4() && ipv6_enabled { - Some(primary_v6) - } else { - None - }; + } let count = self.upstreams.read().await.len(); - for i in 0..count { let (config, bind_rr) = { let guard = self.upstreams.read().await; @@ -1029,104 +1145,123 @@ impl UpstreamManager { (u.config.clone(), u.bind_rr.clone()) }; - let start = Instant::now(); - let result = tokio::time::timeout( - Duration::from_secs(10), - self.connect_via_upstream(&config, dc_addr, Some(bind_rr.clone())) - ).await; + let mut healthy_groups = 0usize; + let mut latency_updates: Vec<(usize, f64)> = Vec::new(); - match result { - Ok(Ok(_stream)) => { - let rtt_ms = start.elapsed().as_secs_f64() * 1000.0; - let mut guard = self.upstreams.write().await; - let u = &mut guard[i]; - u.dc_latency[dc_zero_idx].update(rtt_ms); 
+ for group in &groups { + let mut group_ok = false; + let mut group_rtt_ms = None; - if !u.healthy { - info!( - rtt = format!("{:.0} ms", rtt_ms), - dc = dc_zero_idx + 1, - "Upstream recovered" - ); - } - u.healthy = true; - u.fails = 0; - u.last_check = std::time::Instant::now(); - } - Ok(Err(_)) | Err(_) => { - // Try fallback - debug!(dc = dc_zero_idx + 1, "Health check failed, trying fallback"); - - if let Some(fallback_addr) = fallback_addr { - let start2 = Instant::now(); - let result2 = tokio::time::timeout( - Duration::from_secs(10), - self.connect_via_upstream(&config, fallback_addr, Some(bind_rr.clone())) - ).await; - - let mut guard = self.upstreams.write().await; - let u = &mut guard[i]; - - match result2 { - Ok(Ok(_stream)) => { - let rtt_ms = start2.elapsed().as_secs_f64() * 1000.0; - u.dc_latency[dc_zero_idx].update(rtt_ms); - - if !u.healthy { - info!( - rtt = format!("{:.0} ms", rtt_ms), - dc = dc_zero_idx + 1, - "Upstream recovered (fallback)" - ); - } - u.healthy = true; - u.fails = 0; - } - Ok(Err(e)) => { - u.fails += 1; - debug!(dc = dc_zero_idx + 1, fails = u.fails, - "Health check failed (both): {}", e); - if u.fails >= self.unhealthy_fail_threshold { - u.healthy = false; - warn!( - fails = u.fails, - threshold = self.unhealthy_fail_threshold, - "Upstream unhealthy (fails)" - ); - } - } - Err(_) => { - u.fails += 1; - debug!(dc = dc_zero_idx + 1, fails = u.fails, - "Health check timeout (both)"); - if u.fails >= self.unhealthy_fail_threshold { - u.healthy = false; - warn!( - fails = u.fails, - threshold = self.unhealthy_fail_threshold, - "Upstream unhealthy (timeout)" - ); - } - } - } - u.last_check = std::time::Instant::now(); + for (is_primary, endpoints) in [(true, &group.primary), (false, &group.fallback)] { + if endpoints.is_empty() { continue; } - let mut guard = self.upstreams.write().await; - let u = &mut guard[i]; - u.fails += 1; - if u.fails >= self.unhealthy_fail_threshold { - u.healthy = false; - warn!( - fails = u.fails, - 
threshold = self.unhealthy_fail_threshold, - "Upstream unhealthy (no fallback family)" - ); + let rotation_key = (i, group.dc_idx, is_primary); + let start_idx = *endpoint_rotation.entry(rotation_key).or_insert(0) % endpoints.len(); + let mut next_idx = (start_idx + 1) % endpoints.len(); + + for step in 0..endpoints.len() { + let endpoint_idx = (start_idx + step) % endpoints.len(); + let endpoint = endpoints[endpoint_idx]; + + let start = Instant::now(); + let result = tokio::time::timeout( + Duration::from_secs(HEALTH_CHECK_CONNECT_TIMEOUT_SECS), + self.connect_via_upstream(&config, endpoint, Some(bind_rr.clone())), + ) + .await; + + match result { + Ok(Ok(_stream)) => { + group_ok = true; + group_rtt_ms = Some(start.elapsed().as_secs_f64() * 1000.0); + next_idx = (endpoint_idx + 1) % endpoints.len(); + break; + } + Ok(Err(e)) => { + debug!( + upstream = i, + dc = group.dc_idx, + endpoint = %endpoint, + primary = is_primary, + error = %e, + "Health-check endpoint failed" + ); + } + Err(_) => { + debug!( + upstream = i, + dc = group.dc_idx, + endpoint = %endpoint, + primary = is_primary, + "Health-check endpoint timed out" + ); + } + } + } + + endpoint_rotation.insert(rotation_key, next_idx); + + if group_ok { + break; + } + } + + if group_ok { + healthy_groups += 1; + if let (Some(dc_array_idx), Some(rtt_ms)) = + (UpstreamState::dc_array_idx(group.dc_idx), group_rtt_ms) + { + latency_updates.push((dc_array_idx, rtt_ms)); } - u.last_check = std::time::Instant::now(); } } + + let mut guard = self.upstreams.write().await; + let u = &mut guard[i]; + + for (dc_array_idx, rtt_ms) in latency_updates { + u.dc_latency[dc_array_idx].update(rtt_ms); + } + + if healthy_groups >= required_healthy_groups { + if !u.healthy { + info!( + upstream = i, + healthy_groups, + total_groups = groups.len(), + required_groups = required_healthy_groups, + "Upstream recovered by DC-group health threshold" + ); + } + u.healthy = true; + u.fails = 0; + } else { + u.fails += 1; + debug!( + 
upstream = i, + healthy_groups, + total_groups = groups.len(), + required_groups = required_healthy_groups, + fails = u.fails, + "Upstream health-check below DC-group threshold" + ); + if u.fails >= self.unhealthy_fail_threshold { + u.healthy = false; + warn!( + upstream = i, + healthy_groups, + total_groups = groups.len(), + required_groups = required_healthy_groups, + fails = u.fails, + threshold = self.unhealthy_fail_threshold, + "Upstream unhealthy (insufficient reachable DC groups)" + ); + } + } + + u.last_check = std::time::Instant::now(); } } } @@ -1157,3 +1292,76 @@ impl UpstreamManager { Some(SocketAddr::new(ip, TG_DATACENTER_PORT)) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn required_healthy_group_count_applies_three_group_threshold() { + assert_eq!(UpstreamManager::required_healthy_group_count(0), 0); + assert_eq!(UpstreamManager::required_healthy_group_count(1), 1); + assert_eq!(UpstreamManager::required_healthy_group_count(2), 2); + assert_eq!(UpstreamManager::required_healthy_group_count(3), 3); + assert_eq!(UpstreamManager::required_healthy_group_count(5), 3); + } + + #[test] + fn build_health_check_groups_merges_family_endpoints_with_preference() { + let mut overrides = HashMap::new(); + overrides.insert( + "2".to_string(), + vec![ + "203.0.113.10:443".to_string(), + "203.0.113.11:443".to_string(), + "[2001:db8::10]:443".to_string(), + ], + ); + + let groups = UpstreamManager::build_health_check_groups(true, true, true, &overrides); + let dc2 = groups + .iter() + .find(|g| g.dc_idx == 2) + .expect("dc2 must be present"); + + assert!(dc2.primary.iter().all(|addr| addr.is_ipv6())); + assert!(dc2.fallback.iter().all(|addr| addr.is_ipv4())); + assert!(dc2 + .primary + .contains(&"[2001:db8::10]:443".parse::().unwrap())); + assert!(dc2 + .fallback + .contains(&"203.0.113.10:443".parse::().unwrap())); + assert!(dc2 + .fallback + .contains(&"203.0.113.11:443".parse::().unwrap())); + } + + #[test] + fn 
build_health_check_groups_keeps_multiple_endpoints_per_group() { + let mut overrides = HashMap::new(); + overrides.insert( + "9".to_string(), + vec![ + "198.51.100.1:443".to_string(), + "198.51.100.2:443".to_string(), + "198.51.100.1:443".to_string(), + ], + ); + + let groups = UpstreamManager::build_health_check_groups(false, true, false, &overrides); + let dc9 = groups + .iter() + .find(|g| g.dc_idx == 9) + .expect("override-only dc group must be present"); + + assert_eq!(dc9.primary.len(), 2); + assert!(dc9 + .primary + .contains(&"198.51.100.1:443".parse::().unwrap())); + assert!(dc9 + .primary + .contains(&"198.51.100.2:443".parse::().unwrap())); + assert!(dc9.fallback.is_empty()); + } +}